diff --git a/.gitignore b/.gitignore
index 1ab9abd96b..c82256305c 100755
--- a/.gitignore
+++ b/.gitignore
@@ -74,3 +74,6 @@ tm.out
worker*.log
*-shim.sbt
test-output
+
+# Default sigar library extract location.
+native/
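Note: `native/` is where the new SigarLoader support extracts the sigar native library at runtime, hence the ignore entry. A minimal sketch of pointing the extraction somewhere else instead, assuming the module exposes a `native-library-extract-folder` setting under `akka.cluster.metrics` (the setting name is an assumption; it is not shown in this diff):

```scala
import com.typesafe.config.ConfigFactory

// Hypothetical override of the sigar extract location so that nothing is
// written into the working directory (setting name assumed, see above).
val config = ConfigFactory
  .parseString("""akka.cluster.metrics.native-library-extract-folder = "target/native"""")
  .withFallback(ConfigFactory.load())
```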
diff --git a/akka-cluster-metrics/build.sbt b/akka-cluster-metrics/build.sbt
new file mode 100644
index 0000000000..e895ccf06a
--- /dev/null
+++ b/akka-cluster-metrics/build.sbt
@@ -0,0 +1,29 @@
+import akka.{ AkkaBuild, Dependencies, Formatting, OSGi, MultiNode, Unidoc, SigarLoader }
+import com.typesafe.sbt.SbtMultiJvm.MultiJvmKeys._
+import com.typesafe.tools.mima.plugin.MimaKeys
+
+AkkaBuild.defaultSettings
+
+Formatting.formatSettings
+
+Unidoc.scaladocSettings
+
+Unidoc.javadocSettings
+
+MultiNode.multiJvmSettings
+
+SigarLoader.sigarSettings
+
+OSGi.clusterMetrics
+
+libraryDependencies ++= Dependencies.clusterMetrics
+
+//MimaKeys.previousArtifact := akkaPreviousArtifact("akka-cluster-metrics").value
+
+parallelExecution in Test := false
+
+extraOptions in MultiJvm <<= (sourceDirectory in MultiJvm) { src =>
+ (name: String) => (src ** (name + ".conf")).get.headOption.map("-Dakka.config=" + _.absolutePath).toSeq
+}
+
+scalatestOptions in MultiJvm := MultiNode.defaultMultiJvmScalatestOptions.value
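Note on the `extraOptions in MultiJvm` setting above: it maps each multi-JVM test name to an optional `-Dakka.config=` system property, so a test named `FooSpec` picks up a sibling `FooSpec.conf` from the MultiJvm source tree if one exists. A sketch of the same lookup in the newer `:=` syntax (equivalent behaviour, not part of this PR):

```scala
extraOptions in MultiJvm := {
  val src = (sourceDirectory in MultiJvm).value
  // For a test named FooSpec, find the first FooSpec.conf under the
  // MultiJvm sources and pass it to the forked JVMs as -Dakka.config=<path>.
  (name: String) => (src ** (name + ".conf")).get.headOption.map("-Dakka.config=" + _.absolutePath).toSeq
}
```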
diff --git a/akka-cluster-metrics/src/main/java/akka/cluster/metrics/protobuf/msg/ClusterMetricsMessages.java b/akka-cluster-metrics/src/main/java/akka/cluster/metrics/protobuf/msg/ClusterMetricsMessages.java
new file mode 100644
index 0000000000..b21b7b7081
--- /dev/null
+++ b/akka-cluster-metrics/src/main/java/akka/cluster/metrics/protobuf/msg/ClusterMetricsMessages.java
@@ -0,0 +1,6070 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: ClusterMetricsMessages.proto
+
+package akka.cluster.metrics.protobuf.msg;
+
+public final class ClusterMetricsMessages {
+ private ClusterMetricsMessages() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public interface MetricsGossipEnvelopeOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .Address from = 1;
+ /**
+ * required .Address from = 1;
+ */
+ boolean hasFrom();
+ /**
+ * required .Address from = 1;
+ */
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address getFrom();
+ /**
+ * required .Address from = 1;
+ */
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.AddressOrBuilder getFromOrBuilder();
+
+ // required .MetricsGossip gossip = 2;
+ /**
+ * required .MetricsGossip gossip = 2;
+ */
+ boolean hasGossip();
+ /**
+ * required .MetricsGossip gossip = 2;
+ */
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip getGossip();
+ /**
+ * required .MetricsGossip gossip = 2;
+ */
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossipOrBuilder getGossipOrBuilder();
+
+ // required bool reply = 3;
+ /**
+ * required bool reply = 3;
+ */
+ boolean hasReply();
+ /**
+ * required bool reply = 3;
+ */
+ boolean getReply();
+ }
+  /**
+   * Protobuf type {@code MetricsGossipEnvelope}
+   *
+   * <pre>
+   **
+   * Metrics Gossip Envelope
+   * </pre>
+   */
+ public static final class MetricsGossipEnvelope extends
+ com.google.protobuf.GeneratedMessage
+ implements MetricsGossipEnvelopeOrBuilder {
+ // Use MetricsGossipEnvelope.newBuilder() to construct.
+    private MetricsGossipEnvelope(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private MetricsGossipEnvelope(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final MetricsGossipEnvelope defaultInstance;
+ public static MetricsGossipEnvelope getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public MetricsGossipEnvelope getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private MetricsGossipEnvelope(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = from_.toBuilder();
+ }
+ from_ = input.readMessage(akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(from_);
+ from_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000001;
+ break;
+ }
+ case 18: {
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ subBuilder = gossip_.toBuilder();
+ }
+ gossip_ = input.readMessage(akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(gossip_);
+ gossip_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000002;
+ break;
+ }
+ case 24: {
+ bitField0_ |= 0x00000004;
+ reply_ = input.readBool();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.internal_static_MetricsGossipEnvelope_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.internal_static_MetricsGossipEnvelope_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossipEnvelope.class, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossipEnvelope.Builder.class);
+ }
+
+    public static com.google.protobuf.Parser<MetricsGossipEnvelope> PARSER =
+        new com.google.protobuf.AbstractParser<MetricsGossipEnvelope>() {
+ public MetricsGossipEnvelope parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new MetricsGossipEnvelope(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+    public com.google.protobuf.Parser<MetricsGossipEnvelope> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required .Address from = 1;
+ public static final int FROM_FIELD_NUMBER = 1;
+ private akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address from_;
+ /**
+ * required .Address from = 1;
+ */
+ public boolean hasFrom() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .Address from = 1;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address getFrom() {
+ return from_;
+ }
+ /**
+ * required .Address from = 1;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.AddressOrBuilder getFromOrBuilder() {
+ return from_;
+ }
+
+ // required .MetricsGossip gossip = 2;
+ public static final int GOSSIP_FIELD_NUMBER = 2;
+ private akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip gossip_;
+ /**
+ * required .MetricsGossip gossip = 2;
+ */
+ public boolean hasGossip() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required .MetricsGossip gossip = 2;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip getGossip() {
+ return gossip_;
+ }
+ /**
+ * required .MetricsGossip gossip = 2;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossipOrBuilder getGossipOrBuilder() {
+ return gossip_;
+ }
+
+ // required bool reply = 3;
+ public static final int REPLY_FIELD_NUMBER = 3;
+ private boolean reply_;
+ /**
+ * required bool reply = 3;
+ */
+ public boolean hasReply() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * required bool reply = 3;
+ */
+ public boolean getReply() {
+ return reply_;
+ }
+
+ private void initFields() {
+ from_ = akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address.getDefaultInstance();
+ gossip_ = akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip.getDefaultInstance();
+ reply_ = false;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasFrom()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasGossip()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasReply()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getFrom().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getGossip().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, from_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeMessage(2, gossip_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeBool(3, reply_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, from_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, gossip_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBoolSize(3, reply_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossipEnvelope parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossipEnvelope parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossipEnvelope parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossipEnvelope parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossipEnvelope parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossipEnvelope parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossipEnvelope parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossipEnvelope parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossipEnvelope parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossipEnvelope parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossipEnvelope prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+    /**
+     * Protobuf type {@code MetricsGossipEnvelope}
+     *
+     * <pre>
+     **
+     * Metrics Gossip Envelope
+     * </pre>
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossipEnvelopeOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.internal_static_MetricsGossipEnvelope_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.internal_static_MetricsGossipEnvelope_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossipEnvelope.class, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossipEnvelope.Builder.class);
+ }
+
+ // Construct using akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossipEnvelope.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getFromFieldBuilder();
+ getGossipFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (fromBuilder_ == null) {
+ from_ = akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address.getDefaultInstance();
+ } else {
+ fromBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (gossipBuilder_ == null) {
+ gossip_ = akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip.getDefaultInstance();
+ } else {
+ gossipBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ reply_ = false;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.internal_static_MetricsGossipEnvelope_descriptor;
+ }
+
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossipEnvelope getDefaultInstanceForType() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossipEnvelope.getDefaultInstance();
+ }
+
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossipEnvelope build() {
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossipEnvelope result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossipEnvelope buildPartial() {
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossipEnvelope result = new akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossipEnvelope(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (fromBuilder_ == null) {
+ result.from_ = from_;
+ } else {
+ result.from_ = fromBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ if (gossipBuilder_ == null) {
+ result.gossip_ = gossip_;
+ } else {
+ result.gossip_ = gossipBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.reply_ = reply_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossipEnvelope) {
+ return mergeFrom((akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossipEnvelope)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossipEnvelope other) {
+ if (other == akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossipEnvelope.getDefaultInstance()) return this;
+ if (other.hasFrom()) {
+ mergeFrom(other.getFrom());
+ }
+ if (other.hasGossip()) {
+ mergeGossip(other.getGossip());
+ }
+ if (other.hasReply()) {
+ setReply(other.getReply());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasFrom()) {
+
+ return false;
+ }
+ if (!hasGossip()) {
+
+ return false;
+ }
+ if (!hasReply()) {
+
+ return false;
+ }
+ if (!getFrom().isInitialized()) {
+
+ return false;
+ }
+ if (!getGossip().isInitialized()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossipEnvelope parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossipEnvelope) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required .Address from = 1;
+ private akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address from_ = akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address.Builder, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.AddressOrBuilder> fromBuilder_;
+ /**
+ * required .Address from = 1;
+ */
+ public boolean hasFrom() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .Address from = 1;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address getFrom() {
+ if (fromBuilder_ == null) {
+ return from_;
+ } else {
+ return fromBuilder_.getMessage();
+ }
+ }
+ /**
+ * required .Address from = 1;
+ */
+ public Builder setFrom(akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address value) {
+ if (fromBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ from_ = value;
+ onChanged();
+ } else {
+ fromBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .Address from = 1;
+ */
+ public Builder setFrom(
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address.Builder builderForValue) {
+ if (fromBuilder_ == null) {
+ from_ = builderForValue.build();
+ onChanged();
+ } else {
+ fromBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .Address from = 1;
+ */
+ public Builder mergeFrom(akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address value) {
+ if (fromBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ from_ != akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address.getDefaultInstance()) {
+ from_ =
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address.newBuilder(from_).mergeFrom(value).buildPartial();
+ } else {
+ from_ = value;
+ }
+ onChanged();
+ } else {
+ fromBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .Address from = 1;
+ */
+ public Builder clearFrom() {
+ if (fromBuilder_ == null) {
+ from_ = akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address.getDefaultInstance();
+ onChanged();
+ } else {
+ fromBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ /**
+ * required .Address from = 1;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address.Builder getFromBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getFromFieldBuilder().getBuilder();
+ }
+ /**
+ * required .Address from = 1;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.AddressOrBuilder getFromOrBuilder() {
+ if (fromBuilder_ != null) {
+ return fromBuilder_.getMessageOrBuilder();
+ } else {
+ return from_;
+ }
+ }
+ /**
+ * required .Address from = 1;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address.Builder, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.AddressOrBuilder>
+ getFromFieldBuilder() {
+ if (fromBuilder_ == null) {
+ fromBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address.Builder, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.AddressOrBuilder>(
+ from_,
+ getParentForChildren(),
+ isClean());
+ from_ = null;
+ }
+ return fromBuilder_;
+ }
+
+ // required .MetricsGossip gossip = 2;
+ private akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip gossip_ = akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip.Builder, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossipOrBuilder> gossipBuilder_;
+ /**
+ * required .MetricsGossip gossip = 2;
+ */
+ public boolean hasGossip() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required .MetricsGossip gossip = 2;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip getGossip() {
+ if (gossipBuilder_ == null) {
+ return gossip_;
+ } else {
+ return gossipBuilder_.getMessage();
+ }
+ }
+ /**
+ * required .MetricsGossip gossip = 2;
+ */
+ public Builder setGossip(akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip value) {
+ if (gossipBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ gossip_ = value;
+ onChanged();
+ } else {
+ gossipBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .MetricsGossip gossip = 2;
+ */
+ public Builder setGossip(
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip.Builder builderForValue) {
+ if (gossipBuilder_ == null) {
+ gossip_ = builderForValue.build();
+ onChanged();
+ } else {
+ gossipBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .MetricsGossip gossip = 2;
+ */
+ public Builder mergeGossip(akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip value) {
+ if (gossipBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
+ gossip_ != akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip.getDefaultInstance()) {
+ gossip_ =
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip.newBuilder(gossip_).mergeFrom(value).buildPartial();
+ } else {
+ gossip_ = value;
+ }
+ onChanged();
+ } else {
+ gossipBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .MetricsGossip gossip = 2;
+ */
+ public Builder clearGossip() {
+ if (gossipBuilder_ == null) {
+ gossip_ = akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip.getDefaultInstance();
+ onChanged();
+ } else {
+ gossipBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+ /**
+ * required .MetricsGossip gossip = 2;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip.Builder getGossipBuilder() {
+ bitField0_ |= 0x00000002;
+ onChanged();
+ return getGossipFieldBuilder().getBuilder();
+ }
+ /**
+ * required .MetricsGossip gossip = 2;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossipOrBuilder getGossipOrBuilder() {
+ if (gossipBuilder_ != null) {
+ return gossipBuilder_.getMessageOrBuilder();
+ } else {
+ return gossip_;
+ }
+ }
+ /**
+ * required .MetricsGossip gossip = 2;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip.Builder, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossipOrBuilder>
+ getGossipFieldBuilder() {
+ if (gossipBuilder_ == null) {
+ gossipBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip.Builder, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossipOrBuilder>(
+ gossip_,
+ getParentForChildren(),
+ isClean());
+ gossip_ = null;
+ }
+ return gossipBuilder_;
+ }
+
+ // required bool reply = 3;
+ private boolean reply_ ;
+ /**
+ * required bool reply = 3;
+ */
+ public boolean hasReply() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * required bool reply = 3;
+ */
+ public boolean getReply() {
+ return reply_;
+ }
+ /**
+ * required bool reply = 3;
+ */
+ public Builder setReply(boolean value) {
+ bitField0_ |= 0x00000004;
+ reply_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required bool reply = 3;
+ */
+ public Builder clearReply() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ reply_ = false;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:MetricsGossipEnvelope)
+ }
+
+ static {
+ defaultInstance = new MetricsGossipEnvelope(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:MetricsGossipEnvelope)
+ }
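For reference, a hedged usage sketch of the generated builder API above, from Scala. The `Address` field names (`system`, `hostname`, `port`) are assumptions based on akka-cluster's `Address` proto message, which is not shown in this diff:

```scala
import akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages._

// All three required fields (from, gossip, reply) must be set,
// otherwise build() throws an uninitialized-message exception.
val envelope = MetricsGossipEnvelope.newBuilder()
  .setFrom(Address.newBuilder().setSystem("sys").setHostname("127.0.0.1").setPort(2552))
  .setGossip(MetricsGossip.getDefaultInstance) // empty gossip is valid: all its fields are repeated
  .setReply(false)
  .build()

// Round-trip through the wire format.
val bytes   = envelope.toByteArray
val decoded = MetricsGossipEnvelope.parseFrom(bytes)
assert(!decoded.getReply && decoded.hasGossip)
```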
+
+ public interface MetricsGossipOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // repeated .Address allAddresses = 1;
+ /**
+ * repeated .Address allAddresses = 1;
+ */
+    java.util.List<akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address> 
+ getAllAddressesList();
+ /**
+ * repeated .Address allAddresses = 1;
+ */
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address getAllAddresses(int index);
+ /**
+ * repeated .Address allAddresses = 1;
+ */
+ int getAllAddressesCount();
+ /**
+ * repeated .Address allAddresses = 1;
+ */
+    java.util.List<? extends akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.AddressOrBuilder> 
+ getAllAddressesOrBuilderList();
+ /**
+ * repeated .Address allAddresses = 1;
+ */
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.AddressOrBuilder getAllAddressesOrBuilder(
+ int index);
+
+ // repeated string allMetricNames = 2;
+ /**
+ * repeated string allMetricNames = 2;
+ */
+    java.util.List<java.lang.String>
+ getAllMetricNamesList();
+ /**
+ * repeated string allMetricNames = 2;
+ */
+ int getAllMetricNamesCount();
+ /**
+ * repeated string allMetricNames = 2;
+ */
+ java.lang.String getAllMetricNames(int index);
+ /**
+ * repeated string allMetricNames = 2;
+ */
+ com.google.protobuf.ByteString
+ getAllMetricNamesBytes(int index);
+
+ // repeated .NodeMetrics nodeMetrics = 3;
+ /**
+ * repeated .NodeMetrics nodeMetrics = 3;
+ */
+    java.util.List<akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics> 
+ getNodeMetricsList();
+ /**
+ * repeated .NodeMetrics nodeMetrics = 3;
+ */
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics getNodeMetrics(int index);
+ /**
+ * repeated .NodeMetrics nodeMetrics = 3;
+ */
+ int getNodeMetricsCount();
+ /**
+ * repeated .NodeMetrics nodeMetrics = 3;
+ */
+    java.util.List<? extends akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetricsOrBuilder> 
+ getNodeMetricsOrBuilderList();
+ /**
+ * repeated .NodeMetrics nodeMetrics = 3;
+ */
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetricsOrBuilder getNodeMetricsOrBuilder(
+ int index);
+ }
+  /**
+   * Protobuf type {@code MetricsGossip}
+   *
+   * <pre>
+   **
+   * Metrics Gossip
+   * </pre>
+   */
+ public static final class MetricsGossip extends
+ com.google.protobuf.GeneratedMessage
+ implements MetricsGossipOrBuilder {
+ // Use MetricsGossip.newBuilder() to construct.
+    private MetricsGossip(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private MetricsGossip(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final MetricsGossip defaultInstance;
+ public static MetricsGossip getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public MetricsGossip getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private MetricsGossip(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+                allAddresses_ = new java.util.ArrayList<akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address>();
+ mutable_bitField0_ |= 0x00000001;
+ }
+ allAddresses_.add(input.readMessage(akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address.PARSER, extensionRegistry));
+ break;
+ }
+ case 18: {
+ if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
+ allMetricNames_ = new com.google.protobuf.LazyStringArrayList();
+ mutable_bitField0_ |= 0x00000002;
+ }
+ allMetricNames_.add(input.readBytes());
+ break;
+ }
+ case 26: {
+ if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+                nodeMetrics_ = new java.util.ArrayList<akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics>();
+ mutable_bitField0_ |= 0x00000004;
+ }
+ nodeMetrics_.add(input.readMessage(akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.PARSER, extensionRegistry));
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+ allAddresses_ = java.util.Collections.unmodifiableList(allAddresses_);
+ }
+ if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
+ allMetricNames_ = new com.google.protobuf.UnmodifiableLazyStringList(allMetricNames_);
+ }
+ if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+ nodeMetrics_ = java.util.Collections.unmodifiableList(nodeMetrics_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.internal_static_MetricsGossip_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.internal_static_MetricsGossip_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip.class, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip.Builder.class);
+ }
+
+    public static com.google.protobuf.Parser<MetricsGossip> PARSER =
+        new com.google.protobuf.AbstractParser<MetricsGossip>() {
+ public MetricsGossip parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new MetricsGossip(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+    public com.google.protobuf.Parser<MetricsGossip> getParserForType() {
+ return PARSER;
+ }
+
+ // repeated .Address allAddresses = 1;
+ public static final int ALLADDRESSES_FIELD_NUMBER = 1;
+    private java.util.List<akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address> allAddresses_;
+ /**
+ * repeated .Address allAddresses = 1;
+ */
+    public java.util.List<akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address> getAllAddressesList() {
+ return allAddresses_;
+ }
+ /**
+ * repeated .Address allAddresses = 1;
+ */
+    public java.util.List<? extends akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.AddressOrBuilder> 
+ getAllAddressesOrBuilderList() {
+ return allAddresses_;
+ }
+ /**
+ * repeated .Address allAddresses = 1;
+ */
+ public int getAllAddressesCount() {
+ return allAddresses_.size();
+ }
+ /**
+ * repeated .Address allAddresses = 1;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address getAllAddresses(int index) {
+ return allAddresses_.get(index);
+ }
+ /**
+ * repeated .Address allAddresses = 1;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.AddressOrBuilder getAllAddressesOrBuilder(
+ int index) {
+ return allAddresses_.get(index);
+ }
+
+ // repeated string allMetricNames = 2;
+ public static final int ALLMETRICNAMES_FIELD_NUMBER = 2;
+ private com.google.protobuf.LazyStringList allMetricNames_;
+ /**
+ * repeated string allMetricNames = 2;
+ */
+    public java.util.List<java.lang.String>
+ getAllMetricNamesList() {
+ return allMetricNames_;
+ }
+ /**
+ * repeated string allMetricNames = 2;
+ */
+ public int getAllMetricNamesCount() {
+ return allMetricNames_.size();
+ }
+ /**
+ * repeated string allMetricNames = 2;
+ */
+ public java.lang.String getAllMetricNames(int index) {
+ return allMetricNames_.get(index);
+ }
+ /**
+ * repeated string allMetricNames = 2;
+ */
+ public com.google.protobuf.ByteString
+ getAllMetricNamesBytes(int index) {
+ return allMetricNames_.getByteString(index);
+ }
+
+ // repeated .NodeMetrics nodeMetrics = 3;
+ public static final int NODEMETRICS_FIELD_NUMBER = 3;
+    private java.util.List<akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics> nodeMetrics_;
+ /**
+ * repeated .NodeMetrics nodeMetrics = 3;
+ */
+    public java.util.List<akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics> getNodeMetricsList() {
+ return nodeMetrics_;
+ }
+ /**
+ * repeated .NodeMetrics nodeMetrics = 3;
+ */
+    public java.util.List<? extends akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetricsOrBuilder> 
+ getNodeMetricsOrBuilderList() {
+ return nodeMetrics_;
+ }
+ /**
+ * repeated .NodeMetrics nodeMetrics = 3;
+ */
+ public int getNodeMetricsCount() {
+ return nodeMetrics_.size();
+ }
+ /**
+ * repeated .NodeMetrics nodeMetrics = 3;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics getNodeMetrics(int index) {
+ return nodeMetrics_.get(index);
+ }
+ /**
+ * repeated .NodeMetrics nodeMetrics = 3;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetricsOrBuilder getNodeMetricsOrBuilder(
+ int index) {
+ return nodeMetrics_.get(index);
+ }
+
+ private void initFields() {
+ allAddresses_ = java.util.Collections.emptyList();
+ allMetricNames_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+ nodeMetrics_ = java.util.Collections.emptyList();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ for (int i = 0; i < getAllAddressesCount(); i++) {
+ if (!getAllAddresses(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ for (int i = 0; i < getNodeMetricsCount(); i++) {
+ if (!getNodeMetrics(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ for (int i = 0; i < allAddresses_.size(); i++) {
+ output.writeMessage(1, allAddresses_.get(i));
+ }
+ for (int i = 0; i < allMetricNames_.size(); i++) {
+ output.writeBytes(2, allMetricNames_.getByteString(i));
+ }
+ for (int i = 0; i < nodeMetrics_.size(); i++) {
+ output.writeMessage(3, nodeMetrics_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ for (int i = 0; i < allAddresses_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, allAddresses_.get(i));
+ }
+ {
+ int dataSize = 0;
+ for (int i = 0; i < allMetricNames_.size(); i++) {
+ dataSize += com.google.protobuf.CodedOutputStream
+ .computeBytesSizeNoTag(allMetricNames_.getByteString(i));
+ }
+ size += dataSize;
+ size += 1 * getAllMetricNamesList().size();
+ }
+ for (int i = 0; i < nodeMetrics_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(3, nodeMetrics_.get(i));
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+    /**
+     * Protobuf type {@code MetricsGossip}
+     *
+     * <pre>
+     **
+     * Metrics Gossip
+     * </pre>
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossipOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.internal_static_MetricsGossip_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.internal_static_MetricsGossip_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip.class, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip.Builder.class);
+ }
+
+ // Construct using akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getAllAddressesFieldBuilder();
+ getNodeMetricsFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (allAddressesBuilder_ == null) {
+ allAddresses_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000001);
+ } else {
+ allAddressesBuilder_.clear();
+ }
+ allMetricNames_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ if (nodeMetricsBuilder_ == null) {
+ nodeMetrics_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000004);
+ } else {
+ nodeMetricsBuilder_.clear();
+ }
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.internal_static_MetricsGossip_descriptor;
+ }
+
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip getDefaultInstanceForType() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip.getDefaultInstance();
+ }
+
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip build() {
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip buildPartial() {
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip result = new akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip(this);
+ int from_bitField0_ = bitField0_;
+ if (allAddressesBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ allAddresses_ = java.util.Collections.unmodifiableList(allAddresses_);
+ bitField0_ = (bitField0_ & ~0x00000001);
+ }
+ result.allAddresses_ = allAddresses_;
+ } else {
+ result.allAddresses_ = allAddressesBuilder_.build();
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ allMetricNames_ = new com.google.protobuf.UnmodifiableLazyStringList(
+ allMetricNames_);
+ bitField0_ = (bitField0_ & ~0x00000002);
+ }
+ result.allMetricNames_ = allMetricNames_;
+ if (nodeMetricsBuilder_ == null) {
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ nodeMetrics_ = java.util.Collections.unmodifiableList(nodeMetrics_);
+ bitField0_ = (bitField0_ & ~0x00000004);
+ }
+ result.nodeMetrics_ = nodeMetrics_;
+ } else {
+ result.nodeMetrics_ = nodeMetricsBuilder_.build();
+ }
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip) {
+ return mergeFrom((akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip other) {
+ if (other == akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip.getDefaultInstance()) return this;
+ if (allAddressesBuilder_ == null) {
+ if (!other.allAddresses_.isEmpty()) {
+ if (allAddresses_.isEmpty()) {
+ allAddresses_ = other.allAddresses_;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ } else {
+ ensureAllAddressesIsMutable();
+ allAddresses_.addAll(other.allAddresses_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.allAddresses_.isEmpty()) {
+ if (allAddressesBuilder_.isEmpty()) {
+ allAddressesBuilder_.dispose();
+ allAddressesBuilder_ = null;
+ allAddresses_ = other.allAddresses_;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ allAddressesBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getAllAddressesFieldBuilder() : null;
+ } else {
+ allAddressesBuilder_.addAllMessages(other.allAddresses_);
+ }
+ }
+ }
+ if (!other.allMetricNames_.isEmpty()) {
+ if (allMetricNames_.isEmpty()) {
+ allMetricNames_ = other.allMetricNames_;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ } else {
+ ensureAllMetricNamesIsMutable();
+ allMetricNames_.addAll(other.allMetricNames_);
+ }
+ onChanged();
+ }
+ if (nodeMetricsBuilder_ == null) {
+ if (!other.nodeMetrics_.isEmpty()) {
+ if (nodeMetrics_.isEmpty()) {
+ nodeMetrics_ = other.nodeMetrics_;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ } else {
+ ensureNodeMetricsIsMutable();
+ nodeMetrics_.addAll(other.nodeMetrics_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.nodeMetrics_.isEmpty()) {
+ if (nodeMetricsBuilder_.isEmpty()) {
+ nodeMetricsBuilder_.dispose();
+ nodeMetricsBuilder_ = null;
+ nodeMetrics_ = other.nodeMetrics_;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ nodeMetricsBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getNodeMetricsFieldBuilder() : null;
+ } else {
+ nodeMetricsBuilder_.addAllMessages(other.nodeMetrics_);
+ }
+ }
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ for (int i = 0; i < getAllAddressesCount(); i++) {
+ if (!getAllAddresses(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ for (int i = 0; i < getNodeMetricsCount(); i++) {
+ if (!getNodeMetrics(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.MetricsGossip) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // repeated .Address allAddresses = 1;
+      private java.util.List<akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address> allAddresses_ =
+ java.util.Collections.emptyList();
+ private void ensureAllAddressesIsMutable() {
+ if (!((bitField0_ & 0x00000001) == 0x00000001)) {
+          allAddresses_ = new java.util.ArrayList<akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address>(allAddresses_);
+ bitField0_ |= 0x00000001;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address.Builder, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.AddressOrBuilder> allAddressesBuilder_;
+
+ /**
+ * repeated .Address allAddresses = 1;
+ */
+      public java.util.List<akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address> getAllAddressesList() {
+ if (allAddressesBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(allAddresses_);
+ } else {
+ return allAddressesBuilder_.getMessageList();
+ }
+ }
+ /**
+ * repeated .Address allAddresses = 1;
+ */
+ public int getAllAddressesCount() {
+ if (allAddressesBuilder_ == null) {
+ return allAddresses_.size();
+ } else {
+ return allAddressesBuilder_.getCount();
+ }
+ }
+ /**
+ * repeated .Address allAddresses = 1;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address getAllAddresses(int index) {
+ if (allAddressesBuilder_ == null) {
+ return allAddresses_.get(index);
+ } else {
+ return allAddressesBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * repeated .Address allAddresses = 1;
+ */
+ public Builder setAllAddresses(
+ int index, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address value) {
+ if (allAddressesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureAllAddressesIsMutable();
+ allAddresses_.set(index, value);
+ onChanged();
+ } else {
+ allAddressesBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .Address allAddresses = 1;
+ */
+ public Builder setAllAddresses(
+ int index, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address.Builder builderForValue) {
+ if (allAddressesBuilder_ == null) {
+ ensureAllAddressesIsMutable();
+ allAddresses_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ allAddressesBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .Address allAddresses = 1;
+ */
+ public Builder addAllAddresses(akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address value) {
+ if (allAddressesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureAllAddressesIsMutable();
+ allAddresses_.add(value);
+ onChanged();
+ } else {
+ allAddressesBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * repeated .Address allAddresses = 1;
+ */
+ public Builder addAllAddresses(
+ int index, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address value) {
+ if (allAddressesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureAllAddressesIsMutable();
+ allAddresses_.add(index, value);
+ onChanged();
+ } else {
+ allAddressesBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .Address allAddresses = 1;
+ */
+ public Builder addAllAddresses(
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address.Builder builderForValue) {
+ if (allAddressesBuilder_ == null) {
+ ensureAllAddressesIsMutable();
+ allAddresses_.add(builderForValue.build());
+ onChanged();
+ } else {
+ allAddressesBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .Address allAddresses = 1;
+ */
+ public Builder addAllAddresses(
+ int index, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address.Builder builderForValue) {
+ if (allAddressesBuilder_ == null) {
+ ensureAllAddressesIsMutable();
+ allAddresses_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ allAddressesBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .Address allAddresses = 1;
+ */
+ public Builder addAllAllAddresses(
+          java.lang.Iterable<? extends akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address> values) {
+ if (allAddressesBuilder_ == null) {
+ ensureAllAddressesIsMutable();
+ super.addAll(values, allAddresses_);
+ onChanged();
+ } else {
+ allAddressesBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * repeated .Address allAddresses = 1;
+ */
+ public Builder clearAllAddresses() {
+ if (allAddressesBuilder_ == null) {
+ allAddresses_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000001);
+ onChanged();
+ } else {
+ allAddressesBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * repeated .Address allAddresses = 1;
+ */
+ public Builder removeAllAddresses(int index) {
+ if (allAddressesBuilder_ == null) {
+ ensureAllAddressesIsMutable();
+ allAddresses_.remove(index);
+ onChanged();
+ } else {
+ allAddressesBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * repeated .Address allAddresses = 1;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address.Builder getAllAddressesBuilder(
+ int index) {
+ return getAllAddressesFieldBuilder().getBuilder(index);
+ }
+ /**
+ * repeated .Address allAddresses = 1;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.AddressOrBuilder getAllAddressesOrBuilder(
+ int index) {
+ if (allAddressesBuilder_ == null) {
+ return allAddresses_.get(index); } else {
+ return allAddressesBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * repeated .Address allAddresses = 1;
+ */
+      public java.util.List<? extends akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.AddressOrBuilder> 
+ getAllAddressesOrBuilderList() {
+ if (allAddressesBuilder_ != null) {
+ return allAddressesBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(allAddresses_);
+ }
+ }
+ /**
+ * repeated .Address allAddresses = 1;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address.Builder addAllAddressesBuilder() {
+ return getAllAddressesFieldBuilder().addBuilder(
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address.getDefaultInstance());
+ }
+ /**
+ * repeated .Address allAddresses = 1;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address.Builder addAllAddressesBuilder(
+ int index) {
+ return getAllAddressesFieldBuilder().addBuilder(
+ index, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address.getDefaultInstance());
+ }
+ /**
+ * repeated .Address allAddresses = 1;
+ */
+      public java.util.List<akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address.Builder> 
+ getAllAddressesBuilderList() {
+ return getAllAddressesFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address.Builder, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.AddressOrBuilder>
+ getAllAddressesFieldBuilder() {
+ if (allAddressesBuilder_ == null) {
+ allAddressesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address.Builder, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.AddressOrBuilder>(
+ allAddresses_,
+ ((bitField0_ & 0x00000001) == 0x00000001),
+ getParentForChildren(),
+ isClean());
+ allAddresses_ = null;
+ }
+ return allAddressesBuilder_;
+ }
+
+ // repeated string allMetricNames = 2;
+ private com.google.protobuf.LazyStringList allMetricNames_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+ private void ensureAllMetricNamesIsMutable() {
+ if (!((bitField0_ & 0x00000002) == 0x00000002)) {
+ allMetricNames_ = new com.google.protobuf.LazyStringArrayList(allMetricNames_);
+ bitField0_ |= 0x00000002;
+ }
+ }
+ /**
+ * repeated string allMetricNames = 2;
+ */
+ public java.util.List<java.lang.String>
+ getAllMetricNamesList() {
+ return java.util.Collections.unmodifiableList(allMetricNames_);
+ }
+ /**
+ * repeated string allMetricNames = 2;
+ */
+ public int getAllMetricNamesCount() {
+ return allMetricNames_.size();
+ }
+ /**
+ * repeated string allMetricNames = 2;
+ */
+ public java.lang.String getAllMetricNames(int index) {
+ return allMetricNames_.get(index);
+ }
+ /**
+ * repeated string allMetricNames = 2;
+ */
+ public com.google.protobuf.ByteString
+ getAllMetricNamesBytes(int index) {
+ return allMetricNames_.getByteString(index);
+ }
+ /**
+ * repeated string allMetricNames = 2;
+ */
+ public Builder setAllMetricNames(
+ int index, java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureAllMetricNamesIsMutable();
+ allMetricNames_.set(index, value);
+ onChanged();
+ return this;
+ }
+ /**
+ * repeated string allMetricNames = 2;
+ */
+ public Builder addAllMetricNames(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureAllMetricNamesIsMutable();
+ allMetricNames_.add(value);
+ onChanged();
+ return this;
+ }
+ /**
+ * repeated string allMetricNames = 2;
+ */
+ public Builder addAllAllMetricNames(
+ java.lang.Iterable<java.lang.String> values) {
+ ensureAllMetricNamesIsMutable();
+ super.addAll(values, allMetricNames_);
+ onChanged();
+ return this;
+ }
+ /**
+ * repeated string allMetricNames = 2;
+ */
+ public Builder clearAllMetricNames() {
+ allMetricNames_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ onChanged();
+ return this;
+ }
+ /**
+ * repeated string allMetricNames = 2;
+ */
+ public Builder addAllMetricNamesBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureAllMetricNamesIsMutable();
+ allMetricNames_.add(value);
+ onChanged();
+ return this;
+ }
+
+ // repeated .NodeMetrics nodeMetrics = 3;
+ private java.util.List<akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics> nodeMetrics_ =
+ java.util.Collections.emptyList();
+ private void ensureNodeMetricsIsMutable() {
+ if (!((bitField0_ & 0x00000004) == 0x00000004)) {
+ nodeMetrics_ = new java.util.ArrayList<akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics>(nodeMetrics_);
+ bitField0_ |= 0x00000004;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Builder, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetricsOrBuilder> nodeMetricsBuilder_;
+
+ /**
+ * repeated .NodeMetrics nodeMetrics = 3;
+ */
+ public java.util.List<akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics> getNodeMetricsList() {
+ if (nodeMetricsBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(nodeMetrics_);
+ } else {
+ return nodeMetricsBuilder_.getMessageList();
+ }
+ }
+ /**
+ * repeated .NodeMetrics nodeMetrics = 3;
+ */
+ public int getNodeMetricsCount() {
+ if (nodeMetricsBuilder_ == null) {
+ return nodeMetrics_.size();
+ } else {
+ return nodeMetricsBuilder_.getCount();
+ }
+ }
+ /**
+ * repeated .NodeMetrics nodeMetrics = 3;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics getNodeMetrics(int index) {
+ if (nodeMetricsBuilder_ == null) {
+ return nodeMetrics_.get(index);
+ } else {
+ return nodeMetricsBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * repeated .NodeMetrics nodeMetrics = 3;
+ */
+ public Builder setNodeMetrics(
+ int index, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics value) {
+ if (nodeMetricsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureNodeMetricsIsMutable();
+ nodeMetrics_.set(index, value);
+ onChanged();
+ } else {
+ nodeMetricsBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .NodeMetrics nodeMetrics = 3;
+ */
+ public Builder setNodeMetrics(
+ int index, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Builder builderForValue) {
+ if (nodeMetricsBuilder_ == null) {
+ ensureNodeMetricsIsMutable();
+ nodeMetrics_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ nodeMetricsBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .NodeMetrics nodeMetrics = 3;
+ */
+ public Builder addNodeMetrics(akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics value) {
+ if (nodeMetricsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureNodeMetricsIsMutable();
+ nodeMetrics_.add(value);
+ onChanged();
+ } else {
+ nodeMetricsBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * repeated .NodeMetrics nodeMetrics = 3;
+ */
+ public Builder addNodeMetrics(
+ int index, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics value) {
+ if (nodeMetricsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureNodeMetricsIsMutable();
+ nodeMetrics_.add(index, value);
+ onChanged();
+ } else {
+ nodeMetricsBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .NodeMetrics nodeMetrics = 3;
+ */
+ public Builder addNodeMetrics(
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Builder builderForValue) {
+ if (nodeMetricsBuilder_ == null) {
+ ensureNodeMetricsIsMutable();
+ nodeMetrics_.add(builderForValue.build());
+ onChanged();
+ } else {
+ nodeMetricsBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .NodeMetrics nodeMetrics = 3;
+ */
+ public Builder addNodeMetrics(
+ int index, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Builder builderForValue) {
+ if (nodeMetricsBuilder_ == null) {
+ ensureNodeMetricsIsMutable();
+ nodeMetrics_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ nodeMetricsBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .NodeMetrics nodeMetrics = 3;
+ */
+ public Builder addAllNodeMetrics(
+ java.lang.Iterable<? extends akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics> values) {
+ if (nodeMetricsBuilder_ == null) {
+ ensureNodeMetricsIsMutable();
+ super.addAll(values, nodeMetrics_);
+ onChanged();
+ } else {
+ nodeMetricsBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * repeated .NodeMetrics nodeMetrics = 3;
+ */
+ public Builder clearNodeMetrics() {
+ if (nodeMetricsBuilder_ == null) {
+ nodeMetrics_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000004);
+ onChanged();
+ } else {
+ nodeMetricsBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * repeated .NodeMetrics nodeMetrics = 3;
+ */
+ public Builder removeNodeMetrics(int index) {
+ if (nodeMetricsBuilder_ == null) {
+ ensureNodeMetricsIsMutable();
+ nodeMetrics_.remove(index);
+ onChanged();
+ } else {
+ nodeMetricsBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * repeated .NodeMetrics nodeMetrics = 3;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Builder getNodeMetricsBuilder(
+ int index) {
+ return getNodeMetricsFieldBuilder().getBuilder(index);
+ }
+ /**
+ * repeated .NodeMetrics nodeMetrics = 3;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetricsOrBuilder getNodeMetricsOrBuilder(
+ int index) {
+ if (nodeMetricsBuilder_ == null) {
+ return nodeMetrics_.get(index); } else {
+ return nodeMetricsBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * repeated .NodeMetrics nodeMetrics = 3;
+ */
+ public java.util.List extends akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetricsOrBuilder>
+ getNodeMetricsOrBuilderList() {
+ if (nodeMetricsBuilder_ != null) {
+ return nodeMetricsBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(nodeMetrics_);
+ }
+ }
+ /**
+ * repeated .NodeMetrics nodeMetrics = 3;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Builder addNodeMetricsBuilder() {
+ return getNodeMetricsFieldBuilder().addBuilder(
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.getDefaultInstance());
+ }
+ /**
+ * repeated .NodeMetrics nodeMetrics = 3;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Builder addNodeMetricsBuilder(
+ int index) {
+ return getNodeMetricsFieldBuilder().addBuilder(
+ index, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.getDefaultInstance());
+ }
+ /**
+ * repeated .NodeMetrics nodeMetrics = 3;
+ */
+ public java.util.List<akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Builder>
+ getNodeMetricsBuilderList() {
+ return getNodeMetricsFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Builder, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetricsOrBuilder>
+ getNodeMetricsFieldBuilder() {
+ if (nodeMetricsBuilder_ == null) {
+ nodeMetricsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Builder, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetricsOrBuilder>(
+ nodeMetrics_,
+ ((bitField0_ & 0x00000004) == 0x00000004),
+ getParentForChildren(),
+ isClean());
+ nodeMetrics_ = null;
+ }
+ return nodeMetricsBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:MetricsGossip)
+ }
+
+ static {
+ defaultInstance = new MetricsGossip(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:MetricsGossip)
+ }
+
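+ // Note: the nested messages below compress the gossip payload by index:
+ // NodeMetrics.addressIndex points into the gossip's allAddresses table,
+ // and Metric.nameIndex points into allMetricNames, so each address and
+ // metric name is presumably serialized once per envelope rather than
+ // once per sample.
+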
+ public interface NodeMetricsOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required int32 addressIndex = 1;
+ /**
+ * required int32 addressIndex = 1;
+ */
+ boolean hasAddressIndex();
+ /**
+ * required int32 addressIndex = 1;
+ */
+ int getAddressIndex();
+
+ // required int64 timestamp = 2;
+ /**
+ * required int64 timestamp = 2;
+ */
+ boolean hasTimestamp();
+ /**
+ * required int64 timestamp = 2;
+ */
+ long getTimestamp();
+
+ // repeated .NodeMetrics.Metric metrics = 3;
+ /**
+ * repeated .NodeMetrics.Metric metrics = 3;
+ */
+ java.util.List<akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric>
+ getMetricsList();
+ /**
+ * repeated .NodeMetrics.Metric metrics = 3;
+ */
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric getMetrics(int index);
+ /**
+ * repeated .NodeMetrics.Metric metrics = 3;
+ */
+ int getMetricsCount();
+ /**
+ * repeated .NodeMetrics.Metric metrics = 3;
+ */
+ java.util.List<? extends akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.MetricOrBuilder>
+ getMetricsOrBuilderList();
+ /**
+ * repeated .NodeMetrics.Metric metrics = 3;
+ */
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.MetricOrBuilder getMetricsOrBuilder(
+ int index);
+ }
+ /**
+ * Protobuf type {@code NodeMetrics}
+ *
+ * <pre>
+ **
+ * Node Metrics
+ * </pre>
+ */
+ public static final class NodeMetrics extends
+ com.google.protobuf.GeneratedMessage
+ implements NodeMetricsOrBuilder {
+ // Use NodeMetrics.newBuilder() to construct.
+ private NodeMetrics(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private NodeMetrics(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final NodeMetrics defaultInstance;
+ public static NodeMetrics getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public NodeMetrics getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private NodeMetrics(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ addressIndex_ = input.readInt32();
+ break;
+ }
+ case 16: {
+ bitField0_ |= 0x00000002;
+ timestamp_ = input.readInt64();
+ break;
+ }
+ case 26: {
+ if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+ metrics_ = new java.util.ArrayList<akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric>();
+ mutable_bitField0_ |= 0x00000004;
+ }
+ metrics_.add(input.readMessage(akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric.PARSER, extensionRegistry));
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+ metrics_ = java.util.Collections.unmodifiableList(metrics_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.internal_static_NodeMetrics_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.internal_static_NodeMetrics_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.class, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<NodeMetrics> PARSER =
+ new com.google.protobuf.AbstractParser<NodeMetrics>() {
+ public NodeMetrics parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new NodeMetrics(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<NodeMetrics> getParserForType() {
+ return PARSER;
+ }
+
+ /**
+ * Protobuf enum {@code NodeMetrics.NumberType}
+ */
+ public enum NumberType
+ implements com.google.protobuf.ProtocolMessageEnum {
+ /**
+ * Serialized = 0;
+ */
+ Serialized(0, 0),
+ /**
+ * Double = 1;
+ */
+ Double(1, 1),
+ /**
+ * Float = 2;
+ */
+ Float(2, 2),
+ /**
+ * Integer = 3;
+ */
+ Integer(3, 3),
+ /**
+ * Long = 4;
+ */
+ Long(4, 4),
+ ;
+
+ /**
+ * Serialized = 0;
+ */
+ public static final int Serialized_VALUE = 0;
+ /**
+ * Double = 1;
+ */
+ public static final int Double_VALUE = 1;
+ /**
+ * Float = 2;
+ */
+ public static final int Float_VALUE = 2;
+ /**
+ * Integer = 3;
+ */
+ public static final int Integer_VALUE = 3;
+ /**
+ * Long = 4;
+ */
+ public static final int Long_VALUE = 4;
+
+
+ public final int getNumber() { return value; }
+
+ public static NumberType valueOf(int value) {
+ switch (value) {
+ case 0: return Serialized;
+ case 1: return Double;
+ case 2: return Float;
+ case 3: return Integer;
+ case 4: return Long;
+ default: return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap<NumberType>
+ internalGetValueMap() {
+ return internalValueMap;
+ }
+ private static com.google.protobuf.Internal.EnumLiteMap<NumberType>
+ internalValueMap =
+ new com.google.protobuf.Internal.EnumLiteMap<NumberType>() {
+ public NumberType findValueByNumber(int number) {
+ return NumberType.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.getDescriptor().getEnumTypes().get(0);
+ }
+
+ private static final NumberType[] VALUES = values();
+
+ public static NumberType valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int index;
+ private final int value;
+
+ private NumberType(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:NodeMetrics.NumberType)
+ }
+
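+ // Note: NumberType above discriminates how a Number payload is stored.
+ // From the field layout below, Integer and Float values presumably travel
+ // in value32, Long and Double in value64 (floating point as raw bits),
+ // and Serialized carries opaque serializer bytes; this mapping is
+ // inferred from the schema, not stated by the generated code.
+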
+ public interface NumberOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .NodeMetrics.NumberType type = 1;
+ /**
+ * required .NodeMetrics.NumberType type = 1;
+ */
+ boolean hasType();
+ /**
+ * required .NodeMetrics.NumberType type = 1;
+ */
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.NumberType getType();
+
+ // optional uint32 value32 = 2;
+ /**
+ * optional uint32 value32 = 2;
+ */
+ boolean hasValue32();
+ /**
+ * optional uint32 value32 = 2;
+ */
+ int getValue32();
+
+ // optional uint64 value64 = 3;
+ /**
+ * optional uint64 value64 = 3;
+ */
+ boolean hasValue64();
+ /**
+ * optional uint64 value64 = 3;
+ */
+ long getValue64();
+
+ // optional bytes serialized = 4;
+ /**
+ * optional bytes serialized = 4;
+ */
+ boolean hasSerialized();
+ /**
+ * optional bytes serialized = 4;
+ */
+ com.google.protobuf.ByteString getSerialized();
+ }
+ /**
+ * Protobuf type {@code NodeMetrics.Number}
+ */
+ public static final class Number extends
+ com.google.protobuf.GeneratedMessage
+ implements NumberOrBuilder {
+ // Use Number.newBuilder() to construct.
+ private Number(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private Number(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final Number defaultInstance;
+ public static Number getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Number getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private Number(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 8: {
+ int rawValue = input.readEnum();
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.NumberType value = akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.NumberType.valueOf(rawValue);
+ if (value == null) {
+ unknownFields.mergeVarintField(1, rawValue);
+ } else {
+ bitField0_ |= 0x00000001;
+ type_ = value;
+ }
+ break;
+ }
+ case 16: {
+ bitField0_ |= 0x00000002;
+ value32_ = input.readUInt32();
+ break;
+ }
+ case 24: {
+ bitField0_ |= 0x00000004;
+ value64_ = input.readUInt64();
+ break;
+ }
+ case 34: {
+ bitField0_ |= 0x00000008;
+ serialized_ = input.readBytes();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.internal_static_NodeMetrics_Number_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.internal_static_NodeMetrics_Number_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number.class, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<Number> PARSER =
+ new com.google.protobuf.AbstractParser<Number>() {
+ public Number parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new Number(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<Number> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required .NodeMetrics.NumberType type = 1;
+ public static final int TYPE_FIELD_NUMBER = 1;
+ private akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.NumberType type_;
+ /**
+ * required .NodeMetrics.NumberType type = 1;
+ */
+ public boolean hasType() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .NodeMetrics.NumberType type = 1;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.NumberType getType() {
+ return type_;
+ }
+
+ // optional uint32 value32 = 2;
+ public static final int VALUE32_FIELD_NUMBER = 2;
+ private int value32_;
+ /**
+ * optional uint32 value32 = 2;
+ */
+ public boolean hasValue32() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional uint32 value32 = 2;
+ */
+ public int getValue32() {
+ return value32_;
+ }
+
+ // optional uint64 value64 = 3;
+ public static final int VALUE64_FIELD_NUMBER = 3;
+ private long value64_;
+ /**
+ * optional uint64 value64 = 3;
+ */
+ public boolean hasValue64() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional uint64 value64 = 3;
+ */
+ public long getValue64() {
+ return value64_;
+ }
+
+ // optional bytes serialized = 4;
+ public static final int SERIALIZED_FIELD_NUMBER = 4;
+ private com.google.protobuf.ByteString serialized_;
+ /**
+ * optional bytes serialized = 4;
+ */
+ public boolean hasSerialized() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * optional bytes serialized = 4;
+ */
+ public com.google.protobuf.ByteString getSerialized() {
+ return serialized_;
+ }
+
+ private void initFields() {
+ type_ = akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.NumberType.Serialized;
+ value32_ = 0;
+ value64_ = 0L;
+ serialized_ = com.google.protobuf.ByteString.EMPTY;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasType()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeEnum(1, type_.getNumber());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeUInt32(2, value32_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeUInt64(3, value64_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeBytes(4, serialized_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeEnumSize(1, type_.getNumber());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt32Size(2, value32_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(3, value64_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(4, serialized_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code NodeMetrics.Number}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.NumberOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.internal_static_NodeMetrics_Number_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.internal_static_NodeMetrics_Number_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number.class, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number.Builder.class);
+ }
+
+ // Construct using akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ type_ = akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.NumberType.Serialized;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ value32_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ value64_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ serialized_ = com.google.protobuf.ByteString.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000008);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.internal_static_NodeMetrics_Number_descriptor;
+ }
+
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number getDefaultInstanceForType() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number.getDefaultInstance();
+ }
+
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number build() {
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number buildPartial() {
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number result = new akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.type_ = type_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.value32_ = value32_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.value64_ = value64_;
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ result.serialized_ = serialized_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number) {
+ return mergeFrom((akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number other) {
+ if (other == akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number.getDefaultInstance()) return this;
+ if (other.hasType()) {
+ setType(other.getType());
+ }
+ if (other.hasValue32()) {
+ setValue32(other.getValue32());
+ }
+ if (other.hasValue64()) {
+ setValue64(other.getValue64());
+ }
+ if (other.hasSerialized()) {
+ setSerialized(other.getSerialized());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasType()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required .NodeMetrics.NumberType type = 1;
+ private akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.NumberType type_ = akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.NumberType.Serialized;
+ /**
+ * required .NodeMetrics.NumberType type = 1;
+ */
+ public boolean hasType() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .NodeMetrics.NumberType type = 1;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.NumberType getType() {
+ return type_;
+ }
+ /**
+ * required .NodeMetrics.NumberType type = 1;
+ */
+ public Builder setType(akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.NumberType value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ type_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required .NodeMetrics.NumberType type = 1;
+ */
+ public Builder clearType() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ type_ = akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.NumberType.Serialized;
+ onChanged();
+ return this;
+ }
+
+ // optional uint32 value32 = 2;
+ private int value32_ ;
+ /**
+ * optional uint32 value32 = 2;
+ */
+ public boolean hasValue32() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional uint32 value32 = 2;
+ */
+ public int getValue32() {
+ return value32_;
+ }
+ /**
+ * optional uint32 value32 = 2;
+ */
+ public Builder setValue32(int value) {
+ bitField0_ |= 0x00000002;
+ value32_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional uint32 value32 = 2;
+ */
+ public Builder clearValue32() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ value32_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // optional uint64 value64 = 3;
+ private long value64_ ;
+ /**
+ * optional uint64 value64 = 3;
+ */
+ public boolean hasValue64() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional uint64 value64 = 3;
+ */
+ public long getValue64() {
+ return value64_;
+ }
+ /**
+ * optional uint64 value64 = 3;
+ */
+ public Builder setValue64(long value) {
+ bitField0_ |= 0x00000004;
+ value64_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional uint64 value64 = 3;
+ */
+ public Builder clearValue64() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ value64_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // optional bytes serialized = 4;
+ private com.google.protobuf.ByteString serialized_ = com.google.protobuf.ByteString.EMPTY;
+ /**
+ * optional bytes serialized = 4;
+ */
+ public boolean hasSerialized() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * optional bytes serialized = 4;
+ */
+ public com.google.protobuf.ByteString getSerialized() {
+ return serialized_;
+ }
+ /**
+ * optional bytes serialized = 4;
+ */
+ public Builder setSerialized(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000008;
+ serialized_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional bytes serialized = 4;
+ */
+ public Builder clearSerialized() {
+ bitField0_ = (bitField0_ & ~0x00000008);
+ serialized_ = getDefaultInstance().getSerialized();
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:NodeMetrics.Number)
+ }
+
+ static {
+ defaultInstance = new Number(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:NodeMetrics.Number)
+ }
+
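+ // Note: EWMA below carries an exponentially weighted moving average as a
+ // (value, alpha) pair. Under the standard EWMA definition, a new sample x
+ // would update it as value' = alpha * x + (1 - alpha) * value, with alpha
+ // in (0, 1]; the update rule itself lives outside this generated file.
+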
+ public interface EWMAOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required double value = 1;
+ /**
+ * required double value = 1;
+ */
+ boolean hasValue();
+ /**
+ * required double value = 1;
+ */
+ double getValue();
+
+ // required double alpha = 2;
+ /**
+ * required double alpha = 2;
+ */
+ boolean hasAlpha();
+ /**
+ * required double alpha = 2;
+ */
+ double getAlpha();
+ }
+ /**
+ * Protobuf type {@code NodeMetrics.EWMA}
+ */
+ public static final class EWMA extends
+ com.google.protobuf.GeneratedMessage
+ implements EWMAOrBuilder {
+ // Use EWMA.newBuilder() to construct.
+ private EWMA(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private EWMA(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final EWMA defaultInstance;
+ public static EWMA getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public EWMA getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private EWMA(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 9: {
+ bitField0_ |= 0x00000001;
+ value_ = input.readDouble();
+ break;
+ }
+ case 17: {
+ bitField0_ |= 0x00000002;
+ alpha_ = input.readDouble();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.internal_static_NodeMetrics_EWMA_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.internal_static_NodeMetrics_EWMA_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA.class, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<EWMA> PARSER =
+ new com.google.protobuf.AbstractParser<EWMA>() {
+ public EWMA parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new EWMA(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<EWMA> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required double value = 1;
+ public static final int VALUE_FIELD_NUMBER = 1;
+ private double value_;
+ /**
+ * required double value = 1;
+ */
+ public boolean hasValue() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required double value = 1;
+ */
+ public double getValue() {
+ return value_;
+ }
+
+ // required double alpha = 2;
+ public static final int ALPHA_FIELD_NUMBER = 2;
+ private double alpha_;
+ /**
+ * required double alpha = 2;
+ */
+ public boolean hasAlpha() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required double alpha = 2;
+ */
+ public double getAlpha() {
+ return alpha_;
+ }
+
+ private void initFields() {
+ value_ = 0D;
+ alpha_ = 0D;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasValue()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasAlpha()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeDouble(1, value_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeDouble(2, alpha_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeDoubleSize(1, value_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeDoubleSize(2, alpha_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code NodeMetrics.EWMA}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMAOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.internal_static_NodeMetrics_EWMA_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.internal_static_NodeMetrics_EWMA_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA.class, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA.Builder.class);
+ }
+
+ // Construct using akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ value_ = 0D;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ alpha_ = 0D;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.internal_static_NodeMetrics_EWMA_descriptor;
+ }
+
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA getDefaultInstanceForType() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA.getDefaultInstance();
+ }
+
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA build() {
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA buildPartial() {
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA result = new akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.value_ = value_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.alpha_ = alpha_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA) {
+ return mergeFrom((akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA other) {
+ if (other == akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA.getDefaultInstance()) return this;
+ if (other.hasValue()) {
+ setValue(other.getValue());
+ }
+ if (other.hasAlpha()) {
+ setAlpha(other.getAlpha());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasValue()) {
+
+ return false;
+ }
+ if (!hasAlpha()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required double value = 1;
+ private double value_ ;
+ /**
+ * required double value = 1;
+ */
+ public boolean hasValue() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required double value = 1;
+ */
+ public double getValue() {
+ return value_;
+ }
+ /**
+ * required double value = 1;
+ */
+ public Builder setValue(double value) {
+ bitField0_ |= 0x00000001;
+ value_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required double value = 1;
+ */
+ public Builder clearValue() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ value_ = 0D;
+ onChanged();
+ return this;
+ }
+
+ // required double alpha = 2;
+ private double alpha_ ;
+ /**
+ * required double alpha = 2;
+ */
+ public boolean hasAlpha() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required double alpha = 2;
+ */
+ public double getAlpha() {
+ return alpha_;
+ }
+ /**
+ * required double alpha = 2;
+ */
+ public Builder setAlpha(double value) {
+ bitField0_ |= 0x00000002;
+ alpha_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required double alpha = 2;
+ */
+ public Builder clearAlpha() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ alpha_ = 0D;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:NodeMetrics.EWMA)
+ }
+
+ static {
+ defaultInstance = new EWMA(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:NodeMetrics.EWMA)
+ }
+
+ public interface MetricOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required int32 nameIndex = 1;
+ /**
+ * required int32 nameIndex = 1;
+ */
+ boolean hasNameIndex();
+ /**
+ * required int32 nameIndex = 1;
+ */
+ int getNameIndex();
+
+ // required .NodeMetrics.Number number = 2;
+ /**
+ * required .NodeMetrics.Number number = 2;
+ */
+ boolean hasNumber();
+ /**
+ * required .NodeMetrics.Number number = 2;
+ */
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number getNumber();
+ /**
+ * required .NodeMetrics.Number number = 2;
+ */
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.NumberOrBuilder getNumberOrBuilder();
+
+ // optional .NodeMetrics.EWMA ewma = 3;
+ /**
+ * optional .NodeMetrics.EWMA ewma = 3;
+ */
+ boolean hasEwma();
+ /**
+ * optional .NodeMetrics.EWMA ewma = 3;
+ */
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA getEwma();
+ /**
+ * optional .NodeMetrics.EWMA ewma = 3;
+ */
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMAOrBuilder getEwmaOrBuilder();
+ }
+ /**
+ * Protobuf type {@code NodeMetrics.Metric}
+ */
+ public static final class Metric extends
+ com.google.protobuf.GeneratedMessage
+ implements MetricOrBuilder {
+ // Use Metric.newBuilder() to construct.
+ private Metric(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private Metric(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final Metric defaultInstance;
+ public static Metric getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Metric getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private Metric(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ nameIndex_ = input.readInt32();
+ break;
+ }
+ case 18: {
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ subBuilder = number_.toBuilder();
+ }
+ number_ = input.readMessage(akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(number_);
+ number_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000002;
+ break;
+ }
+ case 26: {
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ subBuilder = ewma_.toBuilder();
+ }
+ ewma_ = input.readMessage(akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(ewma_);
+ ewma_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000004;
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.internal_static_NodeMetrics_Metric_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.internal_static_NodeMetrics_Metric_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric.class, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<Metric> PARSER =
+ new com.google.protobuf.AbstractParser<Metric>() {
+ public Metric parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new Metric(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<Metric> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required int32 nameIndex = 1;
+ public static final int NAMEINDEX_FIELD_NUMBER = 1;
+ private int nameIndex_;
+ /**
+ * required int32 nameIndex = 1;
+ */
+ public boolean hasNameIndex() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required int32 nameIndex = 1;
+ */
+ public int getNameIndex() {
+ return nameIndex_;
+ }
+
+ // required .NodeMetrics.Number number = 2;
+ public static final int NUMBER_FIELD_NUMBER = 2;
+ private akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number number_;
+ /**
+ * required .NodeMetrics.Number number = 2;
+ */
+ public boolean hasNumber() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required .NodeMetrics.Number number = 2;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number getNumber() {
+ return number_;
+ }
+ /**
+ * required .NodeMetrics.Number number = 2;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.NumberOrBuilder getNumberOrBuilder() {
+ return number_;
+ }
+
+ // optional .NodeMetrics.EWMA ewma = 3;
+ public static final int EWMA_FIELD_NUMBER = 3;
+ private akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA ewma_;
+ /**
+ * optional .NodeMetrics.EWMA ewma = 3;
+ */
+ public boolean hasEwma() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional .NodeMetrics.EWMA ewma = 3;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA getEwma() {
+ return ewma_;
+ }
+ /**
+ * optional .NodeMetrics.EWMA ewma = 3;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMAOrBuilder getEwmaOrBuilder() {
+ return ewma_;
+ }
+
+ private void initFields() {
+ nameIndex_ = 0;
+ number_ = akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number.getDefaultInstance();
+ ewma_ = akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA.getDefaultInstance();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasNameIndex()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasNumber()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getNumber().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (hasEwma()) {
+ if (!getEwma().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeInt32(1, nameIndex_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeMessage(2, number_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeMessage(3, ewma_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(1, nameIndex_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, number_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(3, ewma_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
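+
+ // Editor's note (illustrative comment, not produced by protoc): the static
+ // parseFrom overloads above all delegate to PARSER, so deserializing a
+ // Metric from raw bytes is a one-liner, e.g.
+ //   Metric metric = Metric.parseFrom(bytes); // throws InvalidProtocolBufferException
+ // where `bytes` is a byte[] previously produced by writeTo/toByteArray.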
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code NodeMetrics.Metric}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.MetricOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.internal_static_NodeMetrics_Metric_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.internal_static_NodeMetrics_Metric_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric.class, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric.Builder.class);
+ }
+
+ // Construct using akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getNumberFieldBuilder();
+ getEwmaFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ nameIndex_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (numberBuilder_ == null) {
+ number_ = akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number.getDefaultInstance();
+ } else {
+ numberBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ if (ewmaBuilder_ == null) {
+ ewma_ = akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA.getDefaultInstance();
+ } else {
+ ewmaBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000004);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.internal_static_NodeMetrics_Metric_descriptor;
+ }
+
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric getDefaultInstanceForType() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric.getDefaultInstance();
+ }
+
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric build() {
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric buildPartial() {
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric result = new akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.nameIndex_ = nameIndex_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ if (numberBuilder_ == null) {
+ result.number_ = number_;
+ } else {
+ result.number_ = numberBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ if (ewmaBuilder_ == null) {
+ result.ewma_ = ewma_;
+ } else {
+ result.ewma_ = ewmaBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric) {
+ return mergeFrom((akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric other) {
+ if (other == akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric.getDefaultInstance()) return this;
+ if (other.hasNameIndex()) {
+ setNameIndex(other.getNameIndex());
+ }
+ if (other.hasNumber()) {
+ mergeNumber(other.getNumber());
+ }
+ if (other.hasEwma()) {
+ mergeEwma(other.getEwma());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasNameIndex()) {
+
+ return false;
+ }
+ if (!hasNumber()) {
+
+ return false;
+ }
+ if (!getNumber().isInitialized()) {
+
+ return false;
+ }
+ if (hasEwma()) {
+ if (!getEwma().isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required int32 nameIndex = 1;
+ private int nameIndex_ ;
+ /**
+ * required int32 nameIndex = 1;
+ */
+ public boolean hasNameIndex() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required int32 nameIndex = 1;
+ */
+ public int getNameIndex() {
+ return nameIndex_;
+ }
+ /**
+ * required int32 nameIndex = 1;
+ */
+ public Builder setNameIndex(int value) {
+ bitField0_ |= 0x00000001;
+ nameIndex_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required int32 nameIndex = 1;
+ */
+ public Builder clearNameIndex() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ nameIndex_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // required .NodeMetrics.Number number = 2;
+ private akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number number_ = akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number.Builder, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.NumberOrBuilder> numberBuilder_;
+ /**
+ * required .NodeMetrics.Number number = 2;
+ */
+ public boolean hasNumber() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required .NodeMetrics.Number number = 2;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number getNumber() {
+ if (numberBuilder_ == null) {
+ return number_;
+ } else {
+ return numberBuilder_.getMessage();
+ }
+ }
+ /**
+ * required .NodeMetrics.Number number = 2;
+ */
+ public Builder setNumber(akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number value) {
+ if (numberBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ number_ = value;
+ onChanged();
+ } else {
+ numberBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .NodeMetrics.Number number = 2;
+ */
+ public Builder setNumber(
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number.Builder builderForValue) {
+ if (numberBuilder_ == null) {
+ number_ = builderForValue.build();
+ onChanged();
+ } else {
+ numberBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .NodeMetrics.Number number = 2;
+ */
+ public Builder mergeNumber(akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number value) {
+ if (numberBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
+ number_ != akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number.getDefaultInstance()) {
+ number_ =
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number.newBuilder(number_).mergeFrom(value).buildPartial();
+ } else {
+ number_ = value;
+ }
+ onChanged();
+ } else {
+ numberBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .NodeMetrics.Number number = 2;
+ */
+ public Builder clearNumber() {
+ if (numberBuilder_ == null) {
+ number_ = akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number.getDefaultInstance();
+ onChanged();
+ } else {
+ numberBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+ /**
+ * required .NodeMetrics.Number number = 2;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number.Builder getNumberBuilder() {
+ bitField0_ |= 0x00000002;
+ onChanged();
+ return getNumberFieldBuilder().getBuilder();
+ }
+ /**
+ * required .NodeMetrics.Number number = 2;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.NumberOrBuilder getNumberOrBuilder() {
+ if (numberBuilder_ != null) {
+ return numberBuilder_.getMessageOrBuilder();
+ } else {
+ return number_;
+ }
+ }
+ /**
+ * required .NodeMetrics.Number number = 2;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number.Builder, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.NumberOrBuilder>
+ getNumberFieldBuilder() {
+ if (numberBuilder_ == null) {
+ numberBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Number.Builder, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.NumberOrBuilder>(
+ number_,
+ getParentForChildren(),
+ isClean());
+ number_ = null;
+ }
+ return numberBuilder_;
+ }
+
+ // optional .NodeMetrics.EWMA ewma = 3;
+ private akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA ewma_ = akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA.Builder, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMAOrBuilder> ewmaBuilder_;
+ /**
+ * optional .NodeMetrics.EWMA ewma = 3;
+ */
+ public boolean hasEwma() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional .NodeMetrics.EWMA ewma = 3;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA getEwma() {
+ if (ewmaBuilder_ == null) {
+ return ewma_;
+ } else {
+ return ewmaBuilder_.getMessage();
+ }
+ }
+ /**
+ * optional .NodeMetrics.EWMA ewma = 3;
+ */
+ public Builder setEwma(akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA value) {
+ if (ewmaBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ewma_ = value;
+ onChanged();
+ } else {
+ ewmaBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000004;
+ return this;
+ }
+ /**
+ * optional .NodeMetrics.EWMA ewma = 3;
+ */
+ public Builder setEwma(
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA.Builder builderForValue) {
+ if (ewmaBuilder_ == null) {
+ ewma_ = builderForValue.build();
+ onChanged();
+ } else {
+ ewmaBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000004;
+ return this;
+ }
+ /**
+ * optional .NodeMetrics.EWMA ewma = 3;
+ */
+ public Builder mergeEwma(akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA value) {
+ if (ewmaBuilder_ == null) {
+ if (((bitField0_ & 0x00000004) == 0x00000004) &&
+ ewma_ != akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA.getDefaultInstance()) {
+ ewma_ =
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA.newBuilder(ewma_).mergeFrom(value).buildPartial();
+ } else {
+ ewma_ = value;
+ }
+ onChanged();
+ } else {
+ ewmaBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000004;
+ return this;
+ }
+ /**
+ * optional .NodeMetrics.EWMA ewma = 3;
+ */
+ public Builder clearEwma() {
+ if (ewmaBuilder_ == null) {
+ ewma_ = akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA.getDefaultInstance();
+ onChanged();
+ } else {
+ ewmaBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000004);
+ return this;
+ }
+ /**
+ * optional .NodeMetrics.EWMA ewma = 3;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA.Builder getEwmaBuilder() {
+ bitField0_ |= 0x00000004;
+ onChanged();
+ return getEwmaFieldBuilder().getBuilder();
+ }
+ /**
+ * optional .NodeMetrics.EWMA ewma = 3;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMAOrBuilder getEwmaOrBuilder() {
+ if (ewmaBuilder_ != null) {
+ return ewmaBuilder_.getMessageOrBuilder();
+ } else {
+ return ewma_;
+ }
+ }
+ /**
+ * optional .NodeMetrics.EWMA ewma = 3;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA.Builder, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMAOrBuilder>
+ getEwmaFieldBuilder() {
+ if (ewmaBuilder_ == null) {
+ ewmaBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMA.Builder, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.EWMAOrBuilder>(
+ ewma_,
+ getParentForChildren(),
+ isClean());
+ ewma_ = null;
+ }
+ return ewmaBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:NodeMetrics.Metric)
+ }
+
+ static {
+ defaultInstance = new Metric(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:NodeMetrics.Metric)
+ }
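+
+ // Editor's note (illustrative comment, not part of the generated file): a
+ // Metric is normally assembled via its Builder; build() throws an
+ // UninitializedMessageException if a required field is unset, e.g.
+ //   NodeMetrics.Metric metric = NodeMetrics.Metric.newBuilder()
+ //       .setNameIndex(0)     // presumably an index into the gossip's metric-name table
+ //       .setNumber(number)   // a fully initialized NodeMetrics.Number
+ //       .build();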
+
+ private int bitField0_;
+ // required int32 addressIndex = 1;
+ public static final int ADDRESSINDEX_FIELD_NUMBER = 1;
+ private int addressIndex_;
+ /**
+ * required int32 addressIndex = 1;
+ */
+ public boolean hasAddressIndex() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required int32 addressIndex = 1;
+ */
+ public int getAddressIndex() {
+ return addressIndex_;
+ }
+
+ // required int64 timestamp = 2;
+ public static final int TIMESTAMP_FIELD_NUMBER = 2;
+ private long timestamp_;
+ /**
+ * required int64 timestamp = 2;
+ */
+ public boolean hasTimestamp() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required int64 timestamp = 2;
+ */
+ public long getTimestamp() {
+ return timestamp_;
+ }
+
+ // repeated .NodeMetrics.Metric metrics = 3;
+ public static final int METRICS_FIELD_NUMBER = 3;
+ private java.util.List<akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric> metrics_;
+ /**
+ * repeated .NodeMetrics.Metric metrics = 3;
+ */
+ public java.util.List<akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric> getMetricsList() {
+ return metrics_;
+ }
+ /**
+ * repeated .NodeMetrics.Metric metrics = 3;
+ */
+ public java.util.List<? extends akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.MetricOrBuilder>
+ getMetricsOrBuilderList() {
+ return metrics_;
+ }
+ /**
+ * repeated .NodeMetrics.Metric metrics = 3;
+ */
+ public int getMetricsCount() {
+ return metrics_.size();
+ }
+ /**
+ * repeated .NodeMetrics.Metric metrics = 3;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric getMetrics(int index) {
+ return metrics_.get(index);
+ }
+ /**
+ * repeated .NodeMetrics.Metric metrics = 3;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.MetricOrBuilder getMetricsOrBuilder(
+ int index) {
+ return metrics_.get(index);
+ }
+
+ private void initFields() {
+ addressIndex_ = 0;
+ timestamp_ = 0L;
+ metrics_ = java.util.Collections.emptyList();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasAddressIndex()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasTimestamp()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ for (int i = 0; i < getMetricsCount(); i++) {
+ if (!getMetrics(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeInt32(1, addressIndex_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeInt64(2, timestamp_);
+ }
+ for (int i = 0; i < metrics_.size(); i++) {
+ output.writeMessage(3, metrics_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(1, addressIndex_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(2, timestamp_);
+ }
+ for (int i = 0; i < metrics_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(3, metrics_.get(i));
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code NodeMetrics}
+ *
+ * <pre>
+ **
+ * Node Metrics
+ * </pre>
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetricsOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.internal_static_NodeMetrics_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.internal_static_NodeMetrics_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.class, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Builder.class);
+ }
+
+ // Construct using akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getMetricsFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ addressIndex_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ timestamp_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ if (metricsBuilder_ == null) {
+ metrics_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000004);
+ } else {
+ metricsBuilder_.clear();
+ }
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.internal_static_NodeMetrics_descriptor;
+ }
+
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics getDefaultInstanceForType() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.getDefaultInstance();
+ }
+
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics build() {
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics buildPartial() {
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics result = new akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.addressIndex_ = addressIndex_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.timestamp_ = timestamp_;
+ if (metricsBuilder_ == null) {
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ metrics_ = java.util.Collections.unmodifiableList(metrics_);
+ bitField0_ = (bitField0_ & ~0x00000004);
+ }
+ result.metrics_ = metrics_;
+ } else {
+ result.metrics_ = metricsBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics) {
+ return mergeFrom((akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics other) {
+ if (other == akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.getDefaultInstance()) return this;
+ if (other.hasAddressIndex()) {
+ setAddressIndex(other.getAddressIndex());
+ }
+ if (other.hasTimestamp()) {
+ setTimestamp(other.getTimestamp());
+ }
+ if (metricsBuilder_ == null) {
+ if (!other.metrics_.isEmpty()) {
+ if (metrics_.isEmpty()) {
+ metrics_ = other.metrics_;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ } else {
+ ensureMetricsIsMutable();
+ metrics_.addAll(other.metrics_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.metrics_.isEmpty()) {
+ if (metricsBuilder_.isEmpty()) {
+ metricsBuilder_.dispose();
+ metricsBuilder_ = null;
+ metrics_ = other.metrics_;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ metricsBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getMetricsFieldBuilder() : null;
+ } else {
+ metricsBuilder_.addAllMessages(other.metrics_);
+ }
+ }
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasAddressIndex()) {
+
+ return false;
+ }
+ if (!hasTimestamp()) {
+
+ return false;
+ }
+ for (int i = 0; i < getMetricsCount(); i++) {
+ if (!getMetrics(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required int32 addressIndex = 1;
+ private int addressIndex_ ;
+ /**
+ * required int32 addressIndex = 1;
+ */
+ public boolean hasAddressIndex() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required int32 addressIndex = 1;
+ */
+ public int getAddressIndex() {
+ return addressIndex_;
+ }
+ /**
+ * required int32 addressIndex = 1;
+ */
+ public Builder setAddressIndex(int value) {
+ bitField0_ |= 0x00000001;
+ addressIndex_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required int32 addressIndex = 1;
+ */
+ public Builder clearAddressIndex() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ addressIndex_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // required int64 timestamp = 2;
+ private long timestamp_ ;
+ /**
+ * required int64 timestamp = 2;
+ */
+ public boolean hasTimestamp() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required int64 timestamp = 2;
+ */
+ public long getTimestamp() {
+ return timestamp_;
+ }
+ /**
+ * required int64 timestamp = 2;
+ */
+ public Builder setTimestamp(long value) {
+ bitField0_ |= 0x00000002;
+ timestamp_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required int64 timestamp = 2;
+ */
+ public Builder clearTimestamp() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ timestamp_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // repeated .NodeMetrics.Metric metrics = 3;
+ private java.util.List<akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric> metrics_ =
+ java.util.Collections.emptyList();
+ private void ensureMetricsIsMutable() {
+ if (!((bitField0_ & 0x00000004) == 0x00000004)) {
+ metrics_ = new java.util.ArrayList<akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric>(metrics_);
+ bitField0_ |= 0x00000004;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric.Builder, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.MetricOrBuilder> metricsBuilder_;
+
+ /**
+ * repeated .NodeMetrics.Metric metrics = 3;
+ */
+ public java.util.List<akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric> getMetricsList() {
+ if (metricsBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(metrics_);
+ } else {
+ return metricsBuilder_.getMessageList();
+ }
+ }
+ /**
+ * repeated .NodeMetrics.Metric metrics = 3;
+ */
+ public int getMetricsCount() {
+ if (metricsBuilder_ == null) {
+ return metrics_.size();
+ } else {
+ return metricsBuilder_.getCount();
+ }
+ }
+ /**
+ * repeated .NodeMetrics.Metric metrics = 3;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric getMetrics(int index) {
+ if (metricsBuilder_ == null) {
+ return metrics_.get(index);
+ } else {
+ return metricsBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * repeated .NodeMetrics.Metric metrics = 3;
+ */
+ public Builder setMetrics(
+ int index, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric value) {
+ if (metricsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureMetricsIsMutable();
+ metrics_.set(index, value);
+ onChanged();
+ } else {
+ metricsBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .NodeMetrics.Metric metrics = 3;
+ */
+ public Builder setMetrics(
+ int index, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric.Builder builderForValue) {
+ if (metricsBuilder_ == null) {
+ ensureMetricsIsMutable();
+ metrics_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ metricsBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .NodeMetrics.Metric metrics = 3;
+ */
+ public Builder addMetrics(akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric value) {
+ if (metricsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureMetricsIsMutable();
+ metrics_.add(value);
+ onChanged();
+ } else {
+ metricsBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * repeated .NodeMetrics.Metric metrics = 3;
+ */
+ public Builder addMetrics(
+ int index, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric value) {
+ if (metricsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureMetricsIsMutable();
+ metrics_.add(index, value);
+ onChanged();
+ } else {
+ metricsBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .NodeMetrics.Metric metrics = 3;
+ */
+ public Builder addMetrics(
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric.Builder builderForValue) {
+ if (metricsBuilder_ == null) {
+ ensureMetricsIsMutable();
+ metrics_.add(builderForValue.build());
+ onChanged();
+ } else {
+ metricsBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .NodeMetrics.Metric metrics = 3;
+ */
+ public Builder addMetrics(
+ int index, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric.Builder builderForValue) {
+ if (metricsBuilder_ == null) {
+ ensureMetricsIsMutable();
+ metrics_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ metricsBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .NodeMetrics.Metric metrics = 3;
+ */
+ public Builder addAllMetrics(
+ java.lang.Iterable<? extends akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric> values) {
+ if (metricsBuilder_ == null) {
+ ensureMetricsIsMutable();
+ super.addAll(values, metrics_);
+ onChanged();
+ } else {
+ metricsBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * repeated .NodeMetrics.Metric metrics = 3;
+ */
+ public Builder clearMetrics() {
+ if (metricsBuilder_ == null) {
+ metrics_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000004);
+ onChanged();
+ } else {
+ metricsBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * repeated .NodeMetrics.Metric metrics = 3;
+ */
+ public Builder removeMetrics(int index) {
+ if (metricsBuilder_ == null) {
+ ensureMetricsIsMutable();
+ metrics_.remove(index);
+ onChanged();
+ } else {
+ metricsBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * repeated .NodeMetrics.Metric metrics = 3;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric.Builder getMetricsBuilder(
+ int index) {
+ return getMetricsFieldBuilder().getBuilder(index);
+ }
+ /**
+ * repeated .NodeMetrics.Metric metrics = 3;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.MetricOrBuilder getMetricsOrBuilder(
+ int index) {
+ if (metricsBuilder_ == null) {
+ return metrics_.get(index);
+ } else {
+ return metricsBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * repeated .NodeMetrics.Metric metrics = 3;
+ */
+ public java.util.List<? extends akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.MetricOrBuilder>
+ getMetricsOrBuilderList() {
+ if (metricsBuilder_ != null) {
+ return metricsBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(metrics_);
+ }
+ }
+ /**
+ * repeated .NodeMetrics.Metric metrics = 3;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric.Builder addMetricsBuilder() {
+ return getMetricsFieldBuilder().addBuilder(
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric.getDefaultInstance());
+ }
+ /**
+ * repeated .NodeMetrics.Metric metrics = 3;
+ */
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric.Builder addMetricsBuilder(
+ int index) {
+ return getMetricsFieldBuilder().addBuilder(
+ index, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric.getDefaultInstance());
+ }
+ /**
+ * repeated .NodeMetrics.Metric metrics = 3;
+ */
+ public java.util.List<akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric.Builder>
+ getMetricsBuilderList() {
+ return getMetricsFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric.Builder, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.MetricOrBuilder>
+ getMetricsFieldBuilder() {
+ if (metricsBuilder_ == null) {
+ metricsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.Metric.Builder, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.NodeMetrics.MetricOrBuilder>(
+ metrics_,
+ ((bitField0_ & 0x00000004) == 0x00000004),
+ getParentForChildren(),
+ isClean());
+ metrics_ = null;
+ }
+ return metricsBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:NodeMetrics)
+ }
+
+ static {
+ defaultInstance = new NodeMetrics(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:NodeMetrics)
+ }
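+
+ // Editor's note (illustrative comment, not part of the generated file): the
+ // repeated `metrics` field is populated through addMetrics/addAllMetrics on
+ // the Builder, e.g.
+ //   NodeMetrics node = NodeMetrics.newBuilder()
+ //       .setAddressIndex(0)
+ //       .setTimestamp(System.currentTimeMillis())
+ //       .addMetrics(metric)   // one NodeMetrics.Metric per sampled value
+ //       .build();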
+
+ public interface AddressOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required string system = 1;
+ /**
+ * required string system = 1;
+ */
+ boolean hasSystem();
+ /**
+ * required string system = 1;
+ */
+ java.lang.String getSystem();
+ /**
+ * required string system = 1;
+ */
+ com.google.protobuf.ByteString
+ getSystemBytes();
+
+ // required string hostname = 2;
+ /**
+ * required string hostname = 2;
+ */
+ boolean hasHostname();
+ /**
+ * required string hostname = 2;
+ */
+ java.lang.String getHostname();
+ /**
+ * required string hostname = 2;
+ */
+ com.google.protobuf.ByteString
+ getHostnameBytes();
+
+ // required uint32 port = 3;
+ /**
+ * required uint32 port = 3;
+ */
+ boolean hasPort();
+ /**
+ * required uint32 port = 3;
+ */
+ int getPort();
+
+ // optional string protocol = 4;
+ /**
+ * optional string protocol = 4;
+ */
+ boolean hasProtocol();
+ /**
+ * optional string protocol = 4;
+ */
+ java.lang.String getProtocol();
+ /**
+ * optional string protocol = 4;
+ */
+ com.google.protobuf.ByteString
+ getProtocolBytes();
+ }
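+
+ // Editor's note (illustrative comment, not part of the generated file): this
+ // Address message presumably mirrors akka.actor.Address (protocol, system,
+ // hostname, port). Only `protocol` is optional, so a minimal valid instance
+ // must carry the other three fields, e.g. system "sys", hostname "127.0.0.1",
+ // port 2552.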
+ /**
+ * Protobuf type {@code Address}
+ *
+ * <pre>
+ **
+ * Defines a remote address.
+ * </pre>
+ */
+ public static final class Address extends
+ com.google.protobuf.GeneratedMessage
+ implements AddressOrBuilder {
+ // Use Address.newBuilder() to construct.
+ private Address(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private Address(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final Address defaultInstance;
+ public static Address getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Address getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private Address(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ system_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ bitField0_ |= 0x00000002;
+ hostname_ = input.readBytes();
+ break;
+ }
+ case 24: {
+ bitField0_ |= 0x00000004;
+ port_ = input.readUInt32();
+ break;
+ }
+ case 34: {
+ bitField0_ |= 0x00000008;
+ protocol_ = input.readBytes();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.internal_static_Address_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.internal_static_Address_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address.class, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<Address> PARSER =
+ new com.google.protobuf.AbstractParser<Address>() {
+ public Address parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new Address(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<Address> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required string system = 1;
+ public static final int SYSTEM_FIELD_NUMBER = 1;
+ private java.lang.Object system_;
+ /**
+ * required string system = 1;
+ */
+ public boolean hasSystem() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string system = 1;
+ */
+ public java.lang.String getSystem() {
+ java.lang.Object ref = system_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ system_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string system = 1;
+ */
+ public com.google.protobuf.ByteString
+ getSystemBytes() {
+ java.lang.Object ref = system_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ system_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // required string hostname = 2;
+ public static final int HOSTNAME_FIELD_NUMBER = 2;
+ private java.lang.Object hostname_;
+ /**
+ * required string hostname = 2;
+ */
+ public boolean hasHostname() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required string hostname = 2;
+ */
+ public java.lang.String getHostname() {
+ java.lang.Object ref = hostname_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ hostname_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string hostname = 2;
+ */
+ public com.google.protobuf.ByteString
+ getHostnameBytes() {
+ java.lang.Object ref = hostname_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ hostname_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // required uint32 port = 3;
+ public static final int PORT_FIELD_NUMBER = 3;
+ private int port_;
+ /**
+ * required uint32 port = 3;
+ */
+ public boolean hasPort() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * required uint32 port = 3;
+ */
+ public int getPort() {
+ return port_;
+ }
+
+ // optional string protocol = 4;
+ public static final int PROTOCOL_FIELD_NUMBER = 4;
+ private java.lang.Object protocol_;
+ /**
+ * optional string protocol = 4;
+ */
+ public boolean hasProtocol() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * optional string protocol = 4;
+ */
+ public java.lang.String getProtocol() {
+ java.lang.Object ref = protocol_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ protocol_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string protocol = 4;
+ */
+ public com.google.protobuf.ByteString
+ getProtocolBytes() {
+ java.lang.Object ref = protocol_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ protocol_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ private void initFields() {
+ system_ = "";
+ hostname_ = "";
+ port_ = 0;
+ protocol_ = "";
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasSystem()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasHostname()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasPort()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getSystemBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBytes(2, getHostnameBytes());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeUInt32(3, port_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeBytes(4, getProtocolBytes());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getSystemBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, getHostnameBytes());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt32Size(3, port_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(4, getProtocolBytes());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code Address}
+ *
+ *
+ **
+ * Defines a remote address.
+ *
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder
+ implements akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.AddressOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.internal_static_Address_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.internal_static_Address_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address.class, akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address.Builder.class);
+ }
+
+ // Construct using akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ system_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ hostname_ = "";
+ bitField0_ = (bitField0_ & ~0x00000002);
+ port_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ protocol_ = "";
+ bitField0_ = (bitField0_ & ~0x00000008);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.internal_static_Address_descriptor;
+ }
+
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address getDefaultInstanceForType() {
+ return akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address.getDefaultInstance();
+ }
+
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address build() {
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address buildPartial() {
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address result = new akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.system_ = system_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.hostname_ = hostname_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.port_ = port_;
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ result.protocol_ = protocol_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address) {
+ return mergeFrom((akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address other) {
+ if (other == akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address.getDefaultInstance()) return this;
+ if (other.hasSystem()) {
+ bitField0_ |= 0x00000001;
+ system_ = other.system_;
+ onChanged();
+ }
+ if (other.hasHostname()) {
+ bitField0_ |= 0x00000002;
+ hostname_ = other.hostname_;
+ onChanged();
+ }
+ if (other.hasPort()) {
+ setPort(other.getPort());
+ }
+ if (other.hasProtocol()) {
+ bitField0_ |= 0x00000008;
+ protocol_ = other.protocol_;
+ onChanged();
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasSystem()) {
+
+ return false;
+ }
+ if (!hasHostname()) {
+
+ return false;
+ }
+ if (!hasPort()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (akka.cluster.metrics.protobuf.msg.ClusterMetricsMessages.Address) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required string system = 1;
+ private java.lang.Object system_ = "";
+ /**
+ * required string system = 1;
+ */
+ public boolean hasSystem() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string system = 1;
+ */
+ public java.lang.String getSystem() {
+ java.lang.Object ref = system_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ system_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string system = 1;
+ */
+ public com.google.protobuf.ByteString
+ getSystemBytes() {
+ java.lang.Object ref = system_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ system_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string system = 1;
+ */
+ public Builder setSystem(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ system_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required string system = 1;
+ */
+ public Builder clearSystem() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ system_ = getDefaultInstance().getSystem();
+ onChanged();
+ return this;
+ }
+ /**
+ * required string system = 1;
+ */
+ public Builder setSystemBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ system_ = value;
+ onChanged();
+ return this;
+ }
+
+ // required string hostname = 2;
+ private java.lang.Object hostname_ = "";
+ /**
+ * required string hostname = 2;
+ */
+ public boolean hasHostname() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required string hostname = 2;
+ */
+ public java.lang.String getHostname() {
+ java.lang.Object ref = hostname_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ hostname_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string hostname = 2;
+ */
+ public com.google.protobuf.ByteString
+ getHostnameBytes() {
+ java.lang.Object ref = hostname_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ hostname_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string hostname = 2;
+ */
+ public Builder setHostname(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ hostname_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required string hostname = 2;
+ */
+ public Builder clearHostname() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ hostname_ = getDefaultInstance().getHostname();
+ onChanged();
+ return this;
+ }
+ /**
+ * required string hostname = 2;
+ */
+ public Builder setHostnameBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ hostname_ = value;
+ onChanged();
+ return this;
+ }
+
+ // required uint32 port = 3;
+ private int port_ ;
+ /**
+ * required uint32 port = 3;
+ */
+ public boolean hasPort() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * required uint32 port = 3;
+ */
+ public int getPort() {
+ return port_;
+ }
+ /**
+ * required uint32 port = 3;
+ */
+ public Builder setPort(int value) {
+ bitField0_ |= 0x00000004;
+ port_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required uint32 port = 3;
+ */
+ public Builder clearPort() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ port_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // optional string protocol = 4;
+ private java.lang.Object protocol_ = "";
+ /**
+ * optional string protocol = 4;
+ */
+ public boolean hasProtocol() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * optional string protocol = 4;
+ */
+ public java.lang.String getProtocol() {
+ java.lang.Object ref = protocol_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ protocol_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string protocol = 4;
+ */
+ public com.google.protobuf.ByteString
+ getProtocolBytes() {
+ java.lang.Object ref = protocol_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ protocol_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string protocol = 4;
+ */
+ public Builder setProtocol(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000008;
+ protocol_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string protocol = 4;
+ */
+ public Builder clearProtocol() {
+ bitField0_ = (bitField0_ & ~0x00000008);
+ protocol_ = getDefaultInstance().getProtocol();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string protocol = 4;
+ */
+ public Builder setProtocolBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000008;
+ protocol_ = value;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:Address)
+ }
+
+ static {
+ defaultInstance = new Address(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:Address)
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_MetricsGossipEnvelope_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_MetricsGossipEnvelope_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_MetricsGossip_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_MetricsGossip_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_NodeMetrics_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_NodeMetrics_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_NodeMetrics_Number_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_NodeMetrics_Number_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_NodeMetrics_EWMA_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_NodeMetrics_EWMA_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_NodeMetrics_Metric_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_NodeMetrics_Metric_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_Address_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_Address_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\034ClusterMetricsMessages.proto\"^\n\025Metric" +
+ "sGossipEnvelope\022\026\n\004from\030\001 \002(\0132\010.Address\022" +
+ "\036\n\006gossip\030\002 \002(\0132\016.MetricsGossip\022\r\n\005reply" +
+ "\030\003 \002(\010\"j\n\rMetricsGossip\022\036\n\014allAddresses\030" +
+ "\001 \003(\0132\010.Address\022\026\n\016allMetricNames\030\002 \003(\t\022" +
+ "!\n\013nodeMetrics\030\003 \003(\0132\014.NodeMetrics\"\230\003\n\013N" +
+ "odeMetrics\022\024\n\014addressIndex\030\001 \002(\005\022\021\n\ttime" +
+ "stamp\030\002 \002(\003\022$\n\007metrics\030\003 \003(\0132\023.NodeMetri" +
+ "cs.Metric\032e\n\006Number\022%\n\004type\030\001 \002(\0162\027.Node" +
+ "Metrics.NumberType\022\017\n\007value32\030\002 \001(\r\022\017\n\007v",
+ "alue64\030\003 \001(\004\022\022\n\nserialized\030\004 \001(\014\032$\n\004EWMA" +
+ "\022\r\n\005value\030\001 \002(\001\022\r\n\005alpha\030\002 \002(\001\032a\n\006Metric" +
+ "\022\021\n\tnameIndex\030\001 \002(\005\022#\n\006number\030\002 \002(\0132\023.No" +
+ "deMetrics.Number\022\037\n\004ewma\030\003 \001(\0132\021.NodeMet" +
+ "rics.EWMA\"J\n\nNumberType\022\016\n\nSerialized\020\000\022" +
+ "\n\n\006Double\020\001\022\t\n\005Float\020\002\022\013\n\007Integer\020\003\022\010\n\004L" +
+ "ong\020\004\"K\n\007Address\022\016\n\006system\030\001 \002(\t\022\020\n\010host" +
+ "name\030\002 \002(\t\022\014\n\004port\030\003 \002(\r\022\020\n\010protocol\030\004 \001" +
+ "(\tB%\n!akka.cluster.metrics.protobuf.msgH" +
+ "\001"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_MetricsGossipEnvelope_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_MetricsGossipEnvelope_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_MetricsGossipEnvelope_descriptor,
+ new java.lang.String[] { "From", "Gossip", "Reply", });
+ internal_static_MetricsGossip_descriptor =
+ getDescriptor().getMessageTypes().get(1);
+ internal_static_MetricsGossip_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_MetricsGossip_descriptor,
+ new java.lang.String[] { "AllAddresses", "AllMetricNames", "NodeMetrics", });
+ internal_static_NodeMetrics_descriptor =
+ getDescriptor().getMessageTypes().get(2);
+ internal_static_NodeMetrics_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_NodeMetrics_descriptor,
+ new java.lang.String[] { "AddressIndex", "Timestamp", "Metrics", });
+ internal_static_NodeMetrics_Number_descriptor =
+ internal_static_NodeMetrics_descriptor.getNestedTypes().get(0);
+ internal_static_NodeMetrics_Number_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_NodeMetrics_Number_descriptor,
+ new java.lang.String[] { "Type", "Value32", "Value64", "Serialized", });
+ internal_static_NodeMetrics_EWMA_descriptor =
+ internal_static_NodeMetrics_descriptor.getNestedTypes().get(1);
+ internal_static_NodeMetrics_EWMA_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_NodeMetrics_EWMA_descriptor,
+ new java.lang.String[] { "Value", "Alpha", });
+ internal_static_NodeMetrics_Metric_descriptor =
+ internal_static_NodeMetrics_descriptor.getNestedTypes().get(2);
+ internal_static_NodeMetrics_Metric_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_NodeMetrics_Metric_descriptor,
+ new java.lang.String[] { "NameIndex", "Number", "Ewma", });
+ internal_static_Address_descriptor =
+ getDescriptor().getMessageTypes().get(3);
+ internal_static_Address_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_Address_descriptor,
+ new java.lang.String[] { "System", "Hostname", "Port", "Protocol", });
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ }, assigner);
+ }
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
diff --git a/akka-cluster-metrics/src/main/protobuf/ClusterMetricsMessages.proto b/akka-cluster-metrics/src/main/protobuf/ClusterMetricsMessages.proto
new file mode 100644
index 0000000000..948f0a4a25
--- /dev/null
+++ b/akka-cluster-metrics/src/main/protobuf/ClusterMetricsMessages.proto
@@ -0,0 +1,73 @@
+/**
+ * Copyright (C) 2009-2014 Typesafe Inc.
+ */
+
+option java_package = "akka.cluster.metrics.protobuf.msg";
+option optimize_for = SPEED;
+
+/****************************************
+ * Metrics Gossip Messages
+ ****************************************/
+
+/**
+ * Metrics Gossip Envelope
+ */
+message MetricsGossipEnvelope {
+ required Address from = 1;
+ required MetricsGossip gossip = 2;
+ required bool reply = 3;
+}
+
+/**
+ * Metrics Gossip
+ */
+message MetricsGossip {
+ repeated Address allAddresses = 1;
+ repeated string allMetricNames = 2;
+ repeated NodeMetrics nodeMetrics = 3;
+}
+
+/**
+ * Node Metrics
+ */
+message NodeMetrics {
+ enum NumberType {
+ Serialized = 0;
+ Double = 1;
+ Float = 2;
+ Integer = 3;
+ Long = 4;
+ }
+ message Number {
+ required NumberType type = 1;
+ optional uint32 value32 = 2;
+ optional uint64 value64 = 3;
+ optional bytes serialized = 4;
+ }
+ message EWMA {
+ required double value = 1;
+ required double alpha = 2;
+ }
+ message Metric {
+ required int32 nameIndex = 1;
+ required Number number = 2;
+ optional EWMA ewma = 3;
+ }
+ required int32 addressIndex = 1;
+ required int64 timestamp = 2;
+ repeated Metric metrics = 3;
+}
+
+/****************************************
+ * Common Datatypes and Messages
+ ****************************************/
+
+/**
+ * Defines a remote address.
+ */
+message Address {
+ required string system = 1;
+ required string hostname = 2;
+ required uint32 port = 3;
+ optional string protocol = 4;
+}
diff --git a/akka-cluster-metrics/src/main/resources/reference.conf b/akka-cluster-metrics/src/main/resources/reference.conf
new file mode 100644
index 0000000000..b00a354bb6
--- /dev/null
+++ b/akka-cluster-metrics/src/main/resources/reference.conf
@@ -0,0 +1,105 @@
+##############################################
+# Akka Cluster Metrics Reference Config File #
+##############################################
+
+# This is the reference config file that contains all the default settings.
+# Make your edits in your application.conf in order to override these settings.
+
+# Sigar provisioning:
+#
+# Users can provision the sigar classes and native library in one of the following ways:
+#
+# 1) Use Kamon sigar-loader (https://github.com/kamon-io/sigar-loader) as a project dependency.
+#    The metrics extension will extract and load the sigar library on demand with
+#    the help of the Kamon sigar provisioner.
+#
+# 2) Use Kamon sigar-loader as a java agent: `java -javaagent:/path/to/sigar-loader.jar`.
+#    The Kamon sigar-loader agent will extract and load the sigar library during JVM start.
+#
+# 3) Place `sigar.jar` on the `classpath` and the sigar native library for the o/s
+#    on the `java.library.path`. The user is required to manage both the project
+#    dependency and the library deployment manually.
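+#
+# For option 1, a minimal sbt sketch (the coordinates are those of the Kamon
+# sigar-loader project; the version shown is illustrative, check the project
+# for the current release):
+#
+#   libraryDependencies += "io.kamon" % "sigar-loader" % "1.6.6-rev002"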
+
+# Cluster metrics extension.
+# Provides periodic statistics collection and publication throughout the cluster.
+akka.cluster.metrics {
+ # Full path of dispatcher configuration key.
+ # Use "" for default key `akka.actor.default-dispatcher`.
+ dispatcher = ""
+ # How long any actor should wait before starting the periodic tasks.
+ periodic-tasks-initial-delay = 1s
+ # Sigar native library extract location.
+ # Use per-application-instance scoped location, such as program working directory.
+ native-library-extract-folder = ${user.dir}"/native"
+ # Unique serialization identifier. Must not conflict with any other serializer identifier in the actor system.
+ serializer-identifier = 10
+ # Metrics supervisor actor.
+ supervisor {
+ # Actor name. Resulting actor path: /system/cluster-metrics
+ name = "cluster-metrics"
+ # Supervision strategy.
+ strategy {
+ #
+ # FQCN of class providing `akka.actor.SupervisorStrategy`.
+ # Must have a constructor with signature `(com.typesafe.config.Config)`.
+ # Default metrics strategy provider is a configurable extension of `OneForOneStrategy`.
+ provider = "akka.cluster.metrics.ClusterMetricsStrategy"
+ #
+ # Configuration of the default strategy provider.
+ # Replace with custom settings when overriding the provider.
+ configuration = {
+ # Log restart attempts.
+ loggingEnabled = true
+ # Child actor restart-on-failure window.
+ withinTimeRange = 3s
+ # Maximum number of restart attempts before child actor is stopped.
+ maxNrOfRetries = 3
+ }
+ }
+ }
+ # Metrics collector actor.
+ collector {
+ # Enable or disable metrics collector for load-balancing nodes.
+ # Metrics collection can also be controlled at runtime by sending control messages
+ # to the /system/cluster-metrics actor: `akka.cluster.metrics.{CollectionStartMessage,CollectionStopMessage}`
+ enabled = on
+ # FQCN of the metrics collector implementation.
+ # It must implement `akka.cluster.metrics.MetricsCollector` and
+ # have a public constructor with an `akka.actor.ActorSystem` parameter.
+ # Will try to load in the following order of priority:
+ # 1) configured custom collector 2) internal `SigarMetricsCollector` 3) internal `JmxMetricsCollector`
+ provider = ""
+ # When true, fall back through the 3 collector providers in the priority order
+ # above; when false, fail if the configured custom collector provider cannot be loaded.
+ fallback = true
+ # How often metrics are sampled on a node.
+ # Shorter interval will collect the metrics more often.
+ # Also controls frequency of the metrics publication to the node system event bus.
+ sample-interval = 3s
+ # How often a node publishes metrics information to the other nodes in the cluster.
+ # Shorter interval will publish the metrics gossip more often.
+ gossip-interval = 3s
+ # How quickly the exponential weighting of past data is decayed compared to
+ # new data. Set lower to increase the bias toward newer values.
+ # The relevance of each data sample is halved for every passing half-life
+ # duration, i.e. after 4 times the half-life, a data sample’s relevance is
+ # reduced to 6% of its original relevance. The initial relevance of a data
+ # sample is given by 1 – 0.5 ^ (sample-interval / half-life).
+ # See http://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
+ moving-average-half-life = 12s
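+ # Example: with sample-interval = 3s and half-life = 12s the smoothing
+ # constant becomes alpha = 1 - 0.5 ^ (3 / 12) ≈ 0.159, i.e. each new
+ # sample contributes roughly 16% to the updated moving average.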
+ }
+}
+
+# Cluster metrics extension serializers and routers.
+akka.actor {
+ # Protobuf serializer for remote cluster metrics messages.
+ serializers {
+ akka-cluster-metrics = "akka.cluster.metrics.protobuf.MessageSerializer"
+ }
+ # Interface binding for remote cluster metrics messages.
+ serialization-bindings {
+ "akka.cluster.metrics.ClusterMetricsMessage" = akka-cluster-metrics
+ }
+ # Provide routing of messages based on cluster metrics.
+ router.type-mapping {
+ cluster-metrics-adaptive-pool = "akka.cluster.metrics.AdaptiveLoadBalancingPool"
+ cluster-metrics-adaptive-group = "akka.cluster.metrics.AdaptiveLoadBalancingGroup"
+ }
+}
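+
+# Example application.conf deployment snippet (illustrative sketch; the actor
+# path, role and routee path are hypothetical):
+#
+# akka.actor.deployment {
+#   /frontend/backendRouter {
+#     router = cluster-metrics-adaptive-group
+#     metrics-selector = mix
+#     routees.paths = ["/user/backend"]
+#     cluster {
+#       enabled = on
+#       use-role = backend
+#       allow-local-routees = off
+#     }
+#   }
+# }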
diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsCollector.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsCollector.scala
new file mode 100644
index 0000000000..e0ae7bff66
--- /dev/null
+++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsCollector.scala
@@ -0,0 +1,257 @@
+/**
+ * Copyright (C) 2009-2014 Typesafe Inc.
+ */
+package akka.cluster.metrics
+
+import akka.actor.Actor
+import akka.actor.ActorLogging
+import akka.actor.Props
+import akka.actor.Address
+import akka.cluster.InternalClusterAction
+import akka.cluster.ClusterEvent
+import akka.cluster.Member
+import akka.cluster.Cluster
+import scala.collection.immutable
+import akka.cluster.MemberStatus
+import scala.concurrent.forkjoin.ThreadLocalRandom
+import akka.actor.Terminated
+
+/**
+ * Runtime collection management commands.
+ */
+sealed abstract class CollectionControlMessage extends Serializable
+
+/**
+ * Command for [[ClusterMetricsSupervisor]] to start metrics collection.
+ */
+@SerialVersionUID(1L)
+case object CollectionStartMessage extends CollectionControlMessage {
+ /** Java API */
+ def getInstance = CollectionStartMessage
+}
+
+/**
+ * Command for [[ClusterMetricsSupervisor]] to stop metrics collection.
+ */
+@SerialVersionUID(1L)
+case object CollectionStopMessage extends CollectionControlMessage {
+ /** Java API */
+ def getInstance = CollectionStopMessage
+}
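+
+// A minimal usage sketch (illustrative, assuming a running `ActorSystem`
+// named `system`): collection can be toggled at runtime by sending the
+// control messages above to the metrics supervisor actor.
+//
+//   val metrics = ClusterMetricsExtension(system)
+//   metrics.supervisor ! CollectionStopMessage
+//   metrics.supervisor ! CollectionStartMessage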
+
+/**
+ * INTERNAL API.
+ *
+ * Actor providing customizable metrics collection supervision.
+ */
+private[metrics] class ClusterMetricsSupervisor extends Actor with ActorLogging {
+ import ClusterMetricsExtension._
+ val metrics = ClusterMetricsExtension(context.system)
+ import metrics.settings._
+ import context._
+
+ override val supervisorStrategy = metrics.strategy
+
+ var collectorInstance = 0
+
+ def collectorName = s"collector-${collectorInstance}"
+
+ override def preStart() = {
+ if (CollectorEnabled) {
+ self ! CollectionStartMessage
+ } else {
+ log.warning(s"Metrics collection is disabled in configuration. Use subtypes of ${classOf[CollectionControlMessage].getName} to manage collection at runtime.")
+ }
+ }
+
+ override def receive = {
+ case CollectionStartMessage ⇒
+ children.foreach(stop)
+ collectorInstance += 1
+ actorOf(Props(classOf[ClusterMetricsCollector]), collectorName)
+ log.debug(s"Collection started.")
+ case CollectionStopMessage ⇒
+ children.foreach(stop)
+ log.debug(s"Collection stopped.")
+ }
+
+}
+
+/**
+ * Local cluster metrics extension events.
+ *
+ * Published to local event bus subscribers by [[ClusterMetricsCollector]].
+ */
+trait ClusterMetricsEvent
+
+/**
+ * Current snapshot of cluster node metrics.
+ */
+final case class ClusterMetricsChanged(nodeMetrics: Set[NodeMetrics]) extends ClusterMetricsEvent {
+ /** Java API */
+ def getNodeMetrics: java.lang.Iterable[NodeMetrics] =
+ scala.collection.JavaConverters.asJavaIterableConverter(nodeMetrics).asJava
+}
+
+/**
+ * INTERNAL API.
+ *
+ * Remote cluster metrics extension messages.
+ *
+ * Published to cluster members that run the metrics extension.
+ */
+private[metrics] trait ClusterMetricsMessage extends Serializable
+
+/**
+ * INTERNAL API.
+ *
+ * Envelope adding a sender address to the cluster metrics gossip.
+ */
+@SerialVersionUID(1L)
+private[metrics] final case class MetricsGossipEnvelope(from: Address, gossip: MetricsGossip, reply: Boolean) extends ClusterMetricsMessage
+
+/**
+ * INTERNAL API.
+ *
+ * Actor responsible for periodic data sampling in the node and publication to the cluster.
+ */
+private[metrics] class ClusterMetricsCollector extends Actor with ActorLogging {
+ import InternalClusterAction._
+ // TODO collapse to ClusterEvent._ after akka-cluster metrics is gone
+ import ClusterEvent.MemberEvent
+ import ClusterEvent.MemberUp
+ import ClusterEvent.MemberRemoved
+ import ClusterEvent.MemberExited
+ import ClusterEvent.ReachabilityEvent
+ import ClusterEvent.ReachableMember
+ import ClusterEvent.UnreachableMember
+ import ClusterEvent.CurrentClusterState
+ import Member.addressOrdering
+ import context.dispatcher
+ val cluster = Cluster(context.system)
+ import cluster.{ selfAddress, scheduler, settings }
+ import cluster.InfoLogger._
+ val metrics = ClusterMetricsExtension(context.system)
+ import metrics.settings._
+
+ /**
+ * The node ring that is gossiped to; it contains only members that are Up.
+ */
+ var nodes: immutable.SortedSet[Address] = immutable.SortedSet.empty
+
+ /**
+ * The latest metric values with their statistical data.
+ */
+ var latestGossip: MetricsGossip = MetricsGossip.empty
+
+ /**
+ * The metrics collector that samples data on the node.
+ */
+ val collector: MetricsCollector = MetricsCollector(context.system)
+
+ /**
+ * Start periodic gossip to random nodes in cluster
+ */
+ val gossipTask = scheduler.schedule(PeriodicTasksInitialDelay max CollectorGossipInterval,
+ CollectorGossipInterval, self, GossipTick)
+
+ /**
+ * Start periodic metrics collection
+ */
+ val sampleTask = scheduler.schedule(PeriodicTasksInitialDelay max CollectorSampleInterval,
+ CollectorSampleInterval, self, MetricsTick)
+
+ override def preStart(): Unit = {
+ cluster.subscribe(self, classOf[MemberEvent], classOf[ReachabilityEvent])
+ logInfo("Metrics collection has started successfully")
+ }
+
+ def receive = {
+ case GossipTick ⇒ gossip()
+ case MetricsTick ⇒ sample()
+ case msg: MetricsGossipEnvelope ⇒ receiveGossip(msg)
+ case state: CurrentClusterState ⇒ receiveState(state)
+ case MemberUp(m) ⇒ addMember(m)
+ case MemberRemoved(m, _) ⇒ removeMember(m)
+ case MemberExited(m) ⇒ removeMember(m)
+ case UnreachableMember(m) ⇒ removeMember(m)
+ case ReachableMember(m) ⇒ if (m.status == MemberStatus.Up) addMember(m)
+ case _: MemberEvent ⇒ // not interested in other types of MemberEvent
+
+ }
+
+ override def postStop(): Unit = {
+ cluster unsubscribe self
+ gossipTask.cancel()
+ sampleTask.cancel()
+ collector.close()
+ }
+
+ /**
+ * Adds a member to the node ring.
+ */
+ def addMember(member: Member): Unit = nodes += member.address
+
+ /**
+ * Removes a member from the member node ring.
+ */
+ def removeMember(member: Member): Unit = {
+ nodes -= member.address
+ latestGossip = latestGossip remove member.address
+ publish()
+ }
+
+ /**
+ * Updates the initial node ring for those nodes that are [[akka.cluster.MemberStatus.Up]].
+ */
+ def receiveState(state: CurrentClusterState): Unit =
+ nodes = (state.members -- state.unreachable) collect { case m if m.status == MemberStatus.Up ⇒ m.address }
+
+ /**
+ * Samples the latest metrics for the node, updates metrics statistics in
+ * [[MetricsGossip]], and publishes the change to the event bus.
+ *
+ * @see [[MetricsCollector]]
+ */
+ def sample(): Unit = {
+ latestGossip :+= collector.sample()
+ publish()
+ }
+
+ /**
+ * Receives changes from peer nodes, merges remote with local gossip nodes, then publishes
+ * changes to the event stream for load balancing router consumption, and gossip back.
+ */
+ def receiveGossip(envelope: MetricsGossipEnvelope): Unit = {
+ // the remote node might not have the same view of member nodes; this side should
+ // only care about nodes that are known here, otherwise removed nodes can come back
+ val otherGossip = envelope.gossip.filter(nodes)
+ latestGossip = latestGossip merge otherGossip
+ // changes will be published in the periodic collect task
+ if (!envelope.reply)
+ replyGossipTo(envelope.from)
+ }
+
+ /**
+ * Gossip to peer nodes.
+ */
+ def gossip(): Unit = selectRandomNode((nodes - selfAddress).toVector) foreach gossipTo
+
+ def gossipTo(address: Address): Unit =
+ sendGossip(address, MetricsGossipEnvelope(selfAddress, latestGossip, reply = false))
+
+ def replyGossipTo(address: Address): Unit =
+ sendGossip(address, MetricsGossipEnvelope(selfAddress, latestGossip, reply = true))
+
+ def sendGossip(address: Address, envelope: MetricsGossipEnvelope): Unit =
+ context.actorSelection(self.path.toStringWithAddress(address)) ! envelope
+
+ def selectRandomNode(addresses: immutable.IndexedSeq[Address]): Option[Address] =
+ if (addresses.isEmpty) None else Some(addresses(ThreadLocalRandom.current nextInt addresses.size))
+
+ /**
+ * Publishes to the event stream.
+ */
+ def publish(): Unit = context.system.eventStream publish ClusterMetricsChanged(latestGossip.nodes)
+
+}
diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsExtension.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsExtension.scala
new file mode 100644
index 0000000000..80be044ec7
--- /dev/null
+++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsExtension.scala
@@ -0,0 +1,87 @@
+/**
+ * Copyright (C) 2009-2014 Typesafe Inc.
+ */
+package akka.cluster.metrics
+
+import akka.actor.ExtendedActorSystem
+import akka.actor.Extension
+import akka.actor.SupervisorStrategy
+import akka.event.LoggingAdapter
+import akka.event.Logging
+import com.typesafe.config.Config
+import scala.collection.immutable
+import akka.actor.Props
+import akka.actor.Deploy
+import akka.actor.ExtensionId
+import akka.actor.ExtensionIdProvider
+import akka.actor.ActorSystem
+import akka.actor.ActorRef
+
+/**
+ * Cluster metrics extension.
+ *
+ * Cluster metrics is primarily intended for load-balancing of nodes. It controls metrics sampling
+ * at a regular frequency, prepares highly variable data for further analysis by other entities,
+ * and publishes the latest cluster metrics data around the node ring and on the local eventStream
+ * to assist in determining the need to redirect traffic to the least-loaded nodes.
+ *
+ * Metrics sampling is delegated to the [[MetricsCollector]].
+ *
+ * Smoothing of the data for each monitored process is delegated to the
+ * [[EWMA]] for exponential weighted moving average.
+ */
+class ClusterMetricsExtension(system: ExtendedActorSystem) extends Extension {
+
+ /**
+ * Metrics extension configuration.
+ */
+ val settings = ClusterMetricsSettings(system.settings.config)
+ import settings._
+
+ /**
+ * INTERNAL API
+ *
+ * Supervision strategy.
+ */
+ private[metrics] val strategy = system.dynamicAccess.createInstanceFor[SupervisorStrategy](
+ SupervisorStrategyProvider, immutable.Seq(classOf[Config] -> SupervisorStrategyConfiguration))
+ .getOrElse {
+ val log: LoggingAdapter = Logging(system, getClass.getName)
+ log.error(s"Configured strategy provider ${SupervisorStrategyProvider} failed to load, using default ${classOf[ClusterMetricsStrategy].getName}.")
+ new ClusterMetricsStrategy(SupervisorStrategyConfiguration)
+ }
+
+ /**
+ * Supervisor actor.
+ * Accepts subtypes of [[CollectionControlMessage]]s to manage metrics collection at runtime.
+ */
+ val supervisor = system.systemActorOf(
+ Props(classOf[ClusterMetricsSupervisor]).withDispatcher(MetricsDispatcher).withDeploy(Deploy.local),
+ SupervisorName)
+
+ /**
+ * Subscribe a user metrics listener actor to the [[ClusterMetricsEvent]]
+ * events published by the extension on the system event bus.
+ */
+ def subscribe(metricsListener: ActorRef): Unit = {
+ system.eventStream.subscribe(metricsListener, classOf[ClusterMetricsEvent])
+ }
+
+ /**
+ * Unsubscribe a user metrics listener actor from the [[ClusterMetricsEvent]]
+ * events published by the extension on the system event bus.
+ */
+ def unsubscribe(metricsListener: ActorRef): Unit = {
+ system.eventStream.unsubscribe(metricsListener, classOf[ClusterMetricsEvent])
+ }
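+
+ // Sketch of a typical listener (illustrative; the actor class name is
+ // hypothetical):
+ //
+ //   class MetricsListener extends Actor {
+ //     val extension = ClusterMetricsExtension(context.system)
+ //     override def preStart(): Unit = extension.subscribe(self)
+ //     override def postStop(): Unit = extension.unsubscribe(self)
+ //     def receive = {
+ //       case ClusterMetricsChanged(nodeMetrics) => // use the latest snapshot
+ //     }
+ //   }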
+
+}
+
+/**
+ * Cluster metrics extension provider.
+ */
+object ClusterMetricsExtension extends ExtensionId[ClusterMetricsExtension] with ExtensionIdProvider {
+ override def lookup = ClusterMetricsExtension
+ override def get(system: ActorSystem): ClusterMetricsExtension = super.get(system)
+ override def createExtension(system: ExtendedActorSystem): ClusterMetricsExtension = new ClusterMetricsExtension(system)
+}
diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsRouting.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsRouting.scala
new file mode 100644
index 0000000000..6b4dbfa3f0
--- /dev/null
+++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsRouting.scala
@@ -0,0 +1,525 @@
+/**
+ * Copyright (C) 2009-2014 Typesafe Inc.
+ */
+package akka.cluster.metrics
+
+import java.util.Arrays
+import java.util.concurrent.atomic.AtomicReference
+import scala.annotation.tailrec
+import scala.collection.immutable
+import scala.concurrent.forkjoin.ThreadLocalRandom
+import com.typesafe.config.Config
+import akka.actor.Actor
+import akka.actor.ActorSystem
+import akka.actor.Address
+import akka.actor.DynamicAccess
+import akka.actor.NoSerializationVerificationNeeded
+import akka.actor.Props
+import akka.actor.SupervisorStrategy
+import akka.cluster.Cluster
+import akka.cluster.ClusterEvent.CurrentClusterState
+import akka.dispatch.Dispatchers
+import akka.japi.Util.immutableSeq
+import akka.routing._
+
+/**
+ * Load balancing of messages to cluster nodes based on cluster metric data.
+ *
+ * It uses random selection of routees based on probabilities derived from
+ * the remaining capacity of the corresponding node.
+ *
+ * @param system the actor system hosting this router
+ *
+ * @param metricsSelector decides what probability to use for selecting a routee, based
+ * on remaining capacity as indicated by the node metrics
+ */
+final case class AdaptiveLoadBalancingRoutingLogic(system: ActorSystem, metricsSelector: MetricsSelector = MixMetricsSelector)
+ extends RoutingLogic with NoSerializationVerificationNeeded {
+
+ private val cluster = Cluster(system)
+
+ // The current weighted routees, if any. Weights are produced by the metricsSelector
+ // via the metricsListener Actor. It's only updated by the actor, but accessed from
+ // the threads of the sender()s.
+ private val weightedRouteesRef =
+ new AtomicReference[(immutable.IndexedSeq[Routee], Set[NodeMetrics], Option[WeightedRoutees])](
+ (Vector.empty, Set.empty, None))
+
+ @tailrec final def metricsChanged(event: ClusterMetricsChanged): Unit = {
+ val oldValue = weightedRouteesRef.get
+ val (routees, _, _) = oldValue
+ val weightedRoutees = Some(new WeightedRoutees(routees, cluster.selfAddress,
+ metricsSelector.weights(event.nodeMetrics)))
+ // retry when CAS failure
+ if (!weightedRouteesRef.compareAndSet(oldValue, (routees, event.nodeMetrics, weightedRoutees)))
+ metricsChanged(event)
+ }
+
+ override def select(message: Any, routees: immutable.IndexedSeq[Routee]): Routee =
+ if (routees.isEmpty) NoRoutee
+ else {
+
+ def updateWeightedRoutees(): Option[WeightedRoutees] = {
+ val oldValue = weightedRouteesRef.get
+ val (oldRoutees, oldMetrics, oldWeightedRoutees) = oldValue
+
+ if (routees ne oldRoutees) {
+ val weightedRoutees = Some(new WeightedRoutees(routees, cluster.selfAddress,
+ metricsSelector.weights(oldMetrics)))
+ // ignore, don't update, in case of CAS failure
+ weightedRouteesRef.compareAndSet(oldValue, (routees, oldMetrics, weightedRoutees))
+ weightedRoutees
+ } else oldWeightedRoutees
+ }
+
+ updateWeightedRoutees() match {
+ case Some(weighted) ⇒
+ if (weighted.isEmpty) NoRoutee
+ else weighted(ThreadLocalRandom.current.nextInt(weighted.total) + 1)
+ case None ⇒
+ routees(ThreadLocalRandom.current.nextInt(routees.size))
+ }
+
+ }
+}
+
+/**
+ * A router pool that performs load balancing of messages to cluster nodes based on
+ * cluster metric data.
+ *
+ * It uses random selection of routees based on probabilities derived from
+ * the remaining capacity of the corresponding node.
+ *
+ * The configuration parameter trumps the constructor arguments. This means that
+ * if you provide `nrOfInstances` during instantiation they will be ignored if
+ * the router is defined in the configuration file for the actor being used.
+ *
+ * Supervision Setup
+ *
+ * Any routees that are created by a router will be created as the router's children.
+ * The router is therefore also the children's supervisor.
+ *
+ * The supervision strategy of the router actor can be configured with
+ * [[#withSupervisorStrategy]]. If no strategy is provided, routers default to
+ * a strategy of “always escalate”. This means that errors are passed up to the
+ * router's supervisor for handling.
+ *
+ * The router's supervisor will treat the error as an error with the router itself.
+ * Therefore a directive to stop or restart will cause the router itself to stop or
+ * restart. The router, in turn, will cause its children to stop and restart.
+ *
+ * @param metricsSelector decides what probability to use for selecting a routee, based
+ * on remaining capacity as indicated by the node metrics
+ *
+ * @param nrOfInstances initial number of routees in the pool
+ *
+ * @param supervisorStrategy strategy for supervising the routees, see 'Supervision Setup'
+ *
+ * @param routerDispatcher dispatcher to use for the router head actor, which handles
+ * supervision, death watch and router management messages
+ */
+@SerialVersionUID(1L)
+final case class AdaptiveLoadBalancingPool(
+ metricsSelector: MetricsSelector = MixMetricsSelector,
+ val nrOfInstances: Int = 0,
+ override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
+ override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
+ override val usePoolDispatcher: Boolean = false)
+ extends Pool {
+
+ def this(config: Config, dynamicAccess: DynamicAccess) =
+ this(nrOfInstances = config.getInt("nr-of-instances"),
+ metricsSelector = MetricsSelector.fromConfig(config, dynamicAccess),
+ usePoolDispatcher = config.hasPath("pool-dispatcher"))
+
+ /**
+ * Java API
+ * @param metricsSelector decides what probability to use for selecting a routee, based
+ * on remaining capacity as indicated by the node metrics
+ * @param nr initial number of routees in the pool
+ */
+ def this(metricsSelector: MetricsSelector, nr: Int) = this(metricsSelector = metricsSelector, nrOfInstances = nr)
+
+ override def resizer: Option[Resizer] = None
+
+ override def nrOfInstances(sys: ActorSystem) = this.nrOfInstances
+
+ override def createRouter(system: ActorSystem): Router =
+ new Router(AdaptiveLoadBalancingRoutingLogic(system, metricsSelector))
+
+ override def routingLogicController(routingLogic: RoutingLogic): Option[Props] =
+ Some(Props(classOf[AdaptiveLoadBalancingMetricsListener],
+ routingLogic.asInstanceOf[AdaptiveLoadBalancingRoutingLogic]))
+
+ /**
+ * Setting the supervisor strategy to be used for the “head” Router actor.
+ */
+ def withSupervisorStrategy(strategy: SupervisorStrategy): AdaptiveLoadBalancingPool = copy(supervisorStrategy = strategy)
+
+ /**
+ * Setting the dispatcher to be used for the router head actor, which handles
+ * supervision, death watch and router management messages.
+ */
+ def withDispatcher(dispatcherId: String): AdaptiveLoadBalancingPool = copy(routerDispatcher = dispatcherId)
+
+ /**
+ * Uses the supervisor strategy of the given RouterConfig
+ * if this RouterConfig doesn't have one.
+ */
+ override def withFallback(other: RouterConfig): RouterConfig =
+ if (this.supervisorStrategy ne Pool.defaultSupervisorStrategy) this
+ else other match {
+ case _: FromConfig | _: NoRouter ⇒ this // NoRouter is the default, hence “neutral”
+ case otherRouter: AdaptiveLoadBalancingPool ⇒
+ if (otherRouter.supervisorStrategy eq Pool.defaultSupervisorStrategy) this
+ else this.withSupervisorStrategy(otherRouter.supervisorStrategy)
+ case _ ⇒ throw new IllegalArgumentException("Expected AdaptiveLoadBalancingPool, got [%s]".format(other))
+ }
+
+}
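+
+// Usage sketch (illustrative; `Worker` is a hypothetical routee actor):
+//
+//   val backend = system.actorOf(
+//     AdaptiveLoadBalancingPool(HeapMetricsSelector, 10).props(Props[Worker]),
+//     name = "backend")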
+
+/**
+ * A router group that performs load balancing of messages to cluster nodes based on
+ * cluster metric data.
+ *
+ * It uses random selection of routees based on probabilities derived from
+ * the remaining capacity of the corresponding node.
+ *
+ * The configuration parameter trumps the constructor arguments. This means that
+ * if you provide `paths` during instantiation they will be ignored if
+ * the router is defined in the configuration file for the actor being used.
+ *
+ * @param metricsSelector decides what probability to use for selecting a routee, based
+ * on remaining capacity as indicated by the node metrics
+ *
+ * @param paths string representation of the actor paths of the routees, messages are
+ * sent with [[akka.actor.ActorSelection]] to these paths
+ *
+ * @param routerDispatcher dispatcher to use for the router head actor, which handles
+ * router management messages
+ */
+@SerialVersionUID(1L)
+final case class AdaptiveLoadBalancingGroup(
+ metricsSelector: MetricsSelector = MixMetricsSelector,
+ paths: immutable.Iterable[String] = Nil,
+ override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
+ extends Group {
+
+ def this(config: Config, dynamicAccess: DynamicAccess) =
+ this(metricsSelector = MetricsSelector.fromConfig(config, dynamicAccess),
+ paths = immutableSeq(config.getStringList("routees.paths")))
+
+ /**
+ * Java API
+ * @param metricsSelector decides what probability to use for selecting a routee, based
+ * on remaining capacity as indicated by the node metrics
+ * @param routeesPaths string representation of the actor paths of the routees, messages are
+ * sent with [[akka.actor.ActorSelection]] to these paths
+ */
+ def this(metricsSelector: MetricsSelector,
+ routeesPaths: java.lang.Iterable[String]) = this(metricsSelector = metricsSelector, paths = immutableSeq(routeesPaths))
+
+ override def createRouter(system: ActorSystem): Router =
+ new Router(AdaptiveLoadBalancingRoutingLogic(system, metricsSelector))
+
+ override def routingLogicController(routingLogic: RoutingLogic): Option[Props] =
+ Some(Props(classOf[AdaptiveLoadBalancingMetricsListener],
+ routingLogic.asInstanceOf[AdaptiveLoadBalancingRoutingLogic]))
+
+ /**
+ * Setting the dispatcher to be used for the router head actor, which handles
+ * router management messages
+ */
+ def withDispatcher(dispatcherId: String): AdaptiveLoadBalancingGroup = copy(routerDispatcher = dispatcherId)
+
+}
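+
+// Usage sketch (illustrative; the routee paths are hypothetical):
+//
+//   val backend = system.actorOf(
+//     AdaptiveLoadBalancingGroup(CpuMetricsSelector,
+//       List("/user/worker-1", "/user/worker-2")).props(),
+//     name = "backendGroup")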
+
+/**
+ * MetricsSelector that uses the heap metrics.
+ * Low heap capacity => small weight.
+ */
+@SerialVersionUID(1L)
+case object HeapMetricsSelector extends CapacityMetricsSelector {
+ import akka.cluster.metrics.StandardMetrics.HeapMemory
+ /**
+ * Java API: get the singleton instance
+ */
+ def getInstance = this
+
+ override def capacity(nodeMetrics: Set[NodeMetrics]): Map[Address, Double] = {
+ nodeMetrics.collect {
+ case HeapMemory(address, _, used, committed, max) ⇒
+ val capacity = max match {
+ case None ⇒ (committed - used).toDouble / committed
+ case Some(m) ⇒ (m - used).toDouble / m
+ }
+ (address, capacity)
+ }.toMap
+ }
+}
+
+/**
+ * MetricsSelector that uses the combined CPU time metrics and stolen CPU time metrics.
+ * In modern Linux kernels: CpuCombined + CpuStolen + CpuIdle = 1.0 or 100%.
+ * Combined CPU is the sum of User + Sys + Nice + Wait times, as a percentage.
+ * Stolen CPU is the amount of CPU taken away from this virtual machine by the hypervisor, as a percentage.
+ *
+ * Low CPU capacity => small node weight.
+ */
+@SerialVersionUID(1L)
+case object CpuMetricsSelector extends CapacityMetricsSelector {
+ import akka.cluster.metrics.StandardMetrics.Cpu
+ /**
+ * Java API: get the singleton instance
+ */
+ def getInstance = this
+
+ // Notes from reading around:
+ // In modern Linux kernels: CpuCombined + CpuStolen + CpuIdle = 1.0 or 100%. More convoluted for other o/s.
+ // We could use CpuIdle as the only capacity measure: http://axibase.com/news/ec2-monitoring-the-case-of-stolen-cpu/
+ // But not all "idle time"s are created equal: https://docs.newrelic.com/docs/servers/new-relic-servers-linux/maintenance/servers-linux-faq
+ // Example: assume that combined+stolen=70%, idle=30%. Then a 50/20/30 system will be more responsive than a 20/50/30 system (combined/stolen/idle ratio).
+ // Current approach: "The more stolen resources there are, the less active the virtual machine needs to be to generate a high load rating."
+
+ // TODO read factor from reference.conf
+ /** How much extra weight to give to the stolen time. */
+ val factor = 0.3
+ require(0.0 <= factor, s"factor must be non negative: ${factor}")
+
+ override def capacity(nodeMetrics: Set[NodeMetrics]): Map[Address, Double] = {
+ nodeMetrics.collect {
+ case Cpu(address, _, _, Some(cpuCombined), Some(cpuStolen), _) ⇒
+ // Arbitrary load rating function which skews in favor of stolen time.
+ val load = cpuCombined + cpuStolen * (1.0 + factor)
+ val capacity = if (load >= 1.0) 0.0 else 1.0 - load
+ (address, capacity)
+ }.toMap
+ }
+}
+
+/**
+ * MetricsSelector that uses the system load average metrics.
+ * System load average is OS-specific average load on the CPUs in the system,
+ * for the past 1 minute. The system is possibly nearing a bottleneck if the
+ * system load average is nearing number of cpus/cores.
+ * Low load average capacity => small weight.
+ */
+@SerialVersionUID(1L)
+case object SystemLoadAverageMetricsSelector extends CapacityMetricsSelector {
+ import akka.cluster.metrics.StandardMetrics.Cpu
+ /**
+ * Java API: get the singleton instance
+ */
+ def getInstance = this
+
+ override def capacity(nodeMetrics: Set[NodeMetrics]): Map[Address, Double] = {
+ nodeMetrics.collect {
+ case Cpu(address, _, Some(systemLoadAverage), _, _, processors) ⇒
+ val capacity = 1.0 - math.min(1.0, systemLoadAverage / processors)
+ (address, capacity)
+ }.toMap
+ }
+}
+
+/**
+ * Singleton instance of the default MixMetricsSelector, which uses [[HeapMetricsSelector]],
+ * [[CpuMetricsSelector]], and [[SystemLoadAverageMetricsSelector]].
+ */
+@SerialVersionUID(1L)
+object MixMetricsSelector extends MixMetricsSelectorBase(
+ Vector(HeapMetricsSelector, CpuMetricsSelector, SystemLoadAverageMetricsSelector)) {
+
+ /**
+ * Java API: get the default singleton instance
+ */
+ def getInstance = this
+}
+
+/**
+ * MetricsSelector that combines other selectors and aggregates their capacity
+ * values. By default it uses [[HeapMetricsSelector]],
+ * [[CpuMetricsSelector]], and [[SystemLoadAverageMetricsSelector]].
+ */
+@SerialVersionUID(1L)
+final case class MixMetricsSelector(
+ selectors: immutable.IndexedSeq[CapacityMetricsSelector])
+ extends MixMetricsSelectorBase(selectors)
+
+/**
+ * Base class for MetricsSelector that combines other selectors and aggregates their capacity.
+ */
+@SerialVersionUID(1L)
+abstract class MixMetricsSelectorBase(selectors: immutable.IndexedSeq[CapacityMetricsSelector])
+ extends CapacityMetricsSelector {
+
+ /**
+ * Java API: construct a mix-selector from a sequence of selectors
+ */
+ def this(selectors: java.lang.Iterable[CapacityMetricsSelector]) = this(immutableSeq(selectors).toVector)
+
+ override def capacity(nodeMetrics: Set[NodeMetrics]): Map[Address, Double] = {
+ val combined: immutable.IndexedSeq[(Address, Double)] = selectors.flatMap(_.capacity(nodeMetrics).toSeq)
+ // aggregated average of the capacities by address
+ combined.foldLeft(Map.empty[Address, (Double, Int)].withDefaultValue((0.0, 0))) {
+ case (acc, (address, capacity)) ⇒
+ val (sum, count) = acc(address)
+ acc + (address -> ((sum + capacity, count + 1)))
+ }.map {
+ case (addr, (sum, count)) ⇒ (addr -> sum / count)
+ }
+ }
+
+}
+
+object MetricsSelector {
+ def fromConfig(config: Config, dynamicAccess: DynamicAccess) =
+ config.getString("metrics-selector") match {
+ case "mix" ⇒ MixMetricsSelector
+ case "heap" ⇒ HeapMetricsSelector
+ case "cpu" ⇒ CpuMetricsSelector
+ case "load" ⇒ SystemLoadAverageMetricsSelector
+ case fqn ⇒
+ val args = List(classOf[Config] -> config)
+ dynamicAccess.createInstanceFor[MetricsSelector](fqn, args).recover({
+ case exception ⇒ throw new IllegalArgumentException(
+ (s"Cannot instantiate metrics-selector [$fqn], " +
+ "make sure it extends [akka.cluster.routing.MetricsSelector] and " +
+ "has constructor with [com.typesafe.config.Config] parameter"), exception)
+ }).get
+ }
+}
+
+/**
+ * A MetricsSelector is responsible for producing weights from the node metrics.
+ */
+@SerialVersionUID(1L)
+trait MetricsSelector extends Serializable {
+ /**
+ * The weights per address, based on the nodeMetrics.
+ */
+ def weights(nodeMetrics: Set[NodeMetrics]): Map[Address, Int]
+}
+
+/**
+ * A MetricsSelector producing weights from remaining capacity.
+ * The weights are typically proportional to the remaining capacity.
+ */
+@SerialVersionUID(1L)
+abstract class CapacityMetricsSelector extends MetricsSelector {
+
+ /**
+ * Remaining capacity for each node. The value is between
+ * 0.0 and 1.0, where 0.0 means no remaining capacity (full
+ * utilization) and 1.0 means full remaining capacity (zero
+ * utilization).
+ */
+ def capacity(nodeMetrics: Set[NodeMetrics]): Map[Address, Double]
+
+ /**
+ * Converts the capacity values to weights. The node with the lowest
+ * capacity gets weight 1 (lowest usable capacity is 1%) and the other
+ * nodes get weights proportional to their capacity compared to
+ * the node with the lowest capacity.
+ */
+ def weights(capacity: Map[Address, Double]): Map[Address, Int] = {
+ if (capacity.isEmpty) Map.empty[Address, Int]
+ else {
+ val (_, min) = capacity.minBy { case (_, c) ⇒ c }
+ // lowest usable capacity is 1% (>= 0.5% will be rounded to weight 1), also avoids div by zero
+ val divisor = math.max(0.01, min)
+ capacity map { case (addr, c) ⇒ (addr -> math.round((c) / divisor).toInt) }
+ }
+ }
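+
+ // Worked example (illustrative): capacities Map(a -> 0.1, b -> 0.3, c -> 0.6)
+ // give divisor 0.1 and therefore weights Map(a -> 1, b -> 3, c -> 6).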
+
+ /**
+ * The weights per address, based on the capacity produced by
+ * the nodeMetrics.
+ */
+ override def weights(nodeMetrics: Set[NodeMetrics]): Map[Address, Int] =
+ weights(capacity(nodeMetrics))
+
+}
+
+/**
+ * INTERNAL API
+ *
+ * Pick routee based on its weight. Higher weight, higher probability.
+ */
+private[metrics] class WeightedRoutees(routees: immutable.IndexedSeq[Routee], selfAddress: Address, weights: Map[Address, Int]) {
+
+ // fill an array of same size as the refs with accumulated weights,
+ // binarySearch is used to pick the right bucket from a requested value
+ // from 1 to the total sum of the used weights.
+ private val buckets: Array[Int] = {
+ def fullAddress(routee: Routee): Address = {
+ val a = routee match {
+ case ActorRefRoutee(ref) ⇒ ref.path.address
+ case ActorSelectionRoutee(sel) ⇒ sel.anchor.path.address
+ }
+ a match {
+ case Address(_, _, None, None) ⇒ selfAddress
+ case a ⇒ a
+ }
+ }
+ val buckets = Array.ofDim[Int](routees.size)
+ val meanWeight = if (weights.isEmpty) 1 else weights.values.sum / weights.size
+ val w = weights.withDefaultValue(meanWeight) // we don’t necessarily have metrics for all addresses
+ var i = 0
+ var sum = 0
+ routees foreach { r ⇒
+ sum += w(fullAddress(r))
+ buckets(i) = sum
+ i += 1
+ }
+ buckets
+ }
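+
+ // Worked example (illustrative): weights a=1, b=3, c=6 produce buckets
+ // [1, 4, 10]; a value drawn uniformly from 1 to 10 then selects a, b or c
+ // with probability 1/10, 3/10 and 6/10 respectively.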
+
+ def isEmpty: Boolean = buckets.length == 0 || buckets(buckets.length - 1) == 0
+
+ def total: Int = {
+ require(!isEmpty, "WeightedRoutees must not be used when empty")
+ buckets(buckets.length - 1)
+ }
+
+ /**
+ * Pick the routee matching a value, from 1 to total.
+ */
+ def apply(value: Int): Routee = {
+ require(1 <= value && value <= total, "value must be between [1 - %s]" format total)
+ routees(idx(Arrays.binarySearch(buckets, value)))
+ }
+
+ /**
+ * Converts the result of Arrays.binarySearch into an index in the buckets array;
+ * see the documentation of Arrays.binarySearch for what it returns.
+ */
+ private def idx(i: Int): Int = {
+ if (i >= 0) i // exact match
+ else {
+ val j = math.abs(i + 1)
+ if (j >= buckets.length) throw new IndexOutOfBoundsException(
+ "Requested index [%s] is > max index [%s]".format(i, buckets.length))
+ else j
+ }
+ }
+}
+
+/**
+ * INTERNAL API
+ * Subscribe to [[ClusterMetricsEvent]]s and update routing logic depending on the events.
+ */
+private[metrics] class AdaptiveLoadBalancingMetricsListener(routingLogic: AdaptiveLoadBalancingRoutingLogic)
+ extends Actor {
+
+ def extension = ClusterMetricsExtension(context.system)
+
+ override def preStart(): Unit = extension.subscribe(self)
+
+ override def postStop(): Unit = extension.unsubscribe(self)
+
+ def receive = {
+ case event: ClusterMetricsChanged ⇒ routingLogic.metricsChanged(event)
+ case _ ⇒ // ignore
+ }
+
+}
diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsSettings.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsSettings.scala
new file mode 100644
index 0000000000..93d06660cc
--- /dev/null
+++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsSettings.scala
@@ -0,0 +1,49 @@
+/**
+ * Copyright (C) 2009-2014 Typesafe Inc.
+ */
+
+package akka.cluster.metrics
+
+import com.typesafe.config.Config
+import akka.dispatch.Dispatchers
+import scala.concurrent.duration.FiniteDuration
+import akka.util.Helpers.Requiring
+import akka.util.Helpers.ConfigOps
+import scala.concurrent.duration.Duration
+
+/**
+ * Metrics extension settings. Documented in: `src/main/resources/reference.conf`.
+ */
+case class ClusterMetricsSettings(config: Config) {
+
+ private val cc = config.getConfig("akka.cluster.metrics")
+
+ // Extension.
+ val MetricsDispatcher: String = cc.getString("dispatcher") match {
+ case "" ⇒ Dispatchers.DefaultDispatcherId
+ case id ⇒ id
+ }
+ val PeriodicTasksInitialDelay: FiniteDuration = cc.getMillisDuration("periodic-tasks-initial-delay")
+ val NativeLibraryExtractFolder: String = cc.getString("native-library-extract-folder")
+ val SerializerIdentifier: Int = cc.getInt("serializer-identifier")
+
+ // Supervisor.
+ val SupervisorName: String = cc.getString("supervisor.name")
+ val SupervisorStrategyProvider: String = cc.getString("supervisor.strategy.provider")
+ val SupervisorStrategyConfiguration: Config = cc.getConfig("supervisor.strategy.configuration")
+
+ // Collector.
+ val CollectorEnabled: Boolean = cc.getBoolean("collector.enabled")
+ val CollectorProvider: String = cc.getString("collector.provider")
+ val CollectorFallback: Boolean = cc.getBoolean("collector.fallback")
+ val CollectorSampleInterval: FiniteDuration = {
+ cc.getMillisDuration("collector.sample-interval")
+ } requiring (_ > Duration.Zero, "collector.sample-interval must be > 0")
+ val CollectorGossipInterval: FiniteDuration = {
+ cc.getMillisDuration("collector.gossip-interval")
+ } requiring (_ > Duration.Zero, "collector.gossip-interval must be > 0")
+ val CollectorMovingAverageHalfLife: FiniteDuration = {
+ cc.getMillisDuration("collector.moving-average-half-life")
+ } requiring (_ > Duration.Zero, "collector.moving-average-half-life must be > 0")
+
+}
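+
+// A hedged sample of the configuration block read above (key names taken from the
+// accessors; the values shown are illustrative, not the shipped defaults):
+//
+//   akka.cluster.metrics {
+//     dispatcher = ""                      # "" selects the default dispatcher
+//     periodic-tasks-initial-delay = 1s
+//     native-library-extract-folder = "native"
+//     serializer-identifier = 10           # hypothetical identifier
+//     supervisor.name = "supervisor"
+//     supervisor.strategy.provider = "akka.cluster.metrics.ClusterMetricsStrategy"
+//     supervisor.strategy.configuration = {}
+//     collector {
+//       enabled = on
+//       provider = ""
+//       fallback = on
+//       sample-interval = 3s
+//       gossip-interval = 3s
+//       moving-average-half-life = 12s
+//     }
+//   }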
diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsStrategy.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsStrategy.scala
new file mode 100644
index 0000000000..d0e03695a9
--- /dev/null
+++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsStrategy.scala
@@ -0,0 +1,37 @@
+/**
+ * Copyright (C) 2009-2014 Typesafe Inc.
+ */
+
+package akka.cluster.metrics
+
+import com.typesafe.config.Config
+import akka.actor.OneForOneStrategy
+import akka.util.Helpers.ConfigOps
+
+/**
+ * Default [[ClusterMetricsSupervisor]] strategy:
+ * A configurable [[OneForOneStrategy]] with restart-on-throwable decider.
+ */
+class ClusterMetricsStrategy(config: Config) extends OneForOneStrategy(
+ maxNrOfRetries = config.getInt("maxNrOfRetries"),
+ withinTimeRange = config.getMillisDuration("withinTimeRange"),
+ loggingEnabled = config.getBoolean("loggingEnabled"))(ClusterMetricsStrategy.metricsDecider)
+
+/**
+ * Provide custom metrics strategy resources.
+ */
+object ClusterMetricsStrategy {
+ import akka.actor._
+ import akka.actor.SupervisorStrategy._
+
+ /**
+ * [[SupervisorStrategy.Decider]] which allows surviving intermittent Sigar native method call failures.
+ */
+ val metricsDecider: SupervisorStrategy.Decider = {
+ case _: ActorInitializationException ⇒ Stop
+ case _: ActorKilledException ⇒ Stop
+ case _: DeathPactException ⇒ Stop
+ case _: Throwable ⇒ Restart
+ }
+
+}
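+
+// The decider above restarts the collector on any other Throwable (e.g. an
+// intermittent SigarException) while the usual fatal actor failures still stop it.
+// Swapping in a custom strategy is a configuration change; a sketch with a
+// hypothetical FQCN (key names from ClusterMetricsSettings):
+//
+//   akka.cluster.metrics.supervisor.strategy {
+//     provider = "com.example.MyMetricsStrategy"
+//     configuration = { maxNrOfRetries = 3, withinTimeRange = 3s, loggingEnabled = on }
+//   }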
diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/EWMA.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/EWMA.scala
new file mode 100644
index 0000000000..5670b223ef
--- /dev/null
+++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/EWMA.scala
@@ -0,0 +1,68 @@
+/**
+ * Copyright (C) 2009-2014 Typesafe Inc.
+ */
+package akka.cluster.metrics
+
+import scala.concurrent.duration.FiniteDuration
+
+/**
+ * The exponentially weighted moving average (EWMA) approach captures short-term
+ * movements in volatility for a conditional volatility forecasting model. By virtue
+ * of its alpha, or decay factor, this provides a statistical streaming data model
+ * that is exponentially biased towards newer entries.
+ *
+ * http://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
+ *
+ * An EWMA only needs the most recent forecast value to be kept, as opposed to a standard
+ * moving average model.
+ *
+ * @param alpha decay factor, sets how quickly the exponential weighting decays for past data compared to new data,
+ * see http://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
+ *
+ * @param value the current exponentially weighted moving average, i.e. Y(n - 1),
+ * the sampled value resulting from the previous smoothing iteration.
+ * This value is always used as the previous EWMA to calculate the new EWMA.
+ *
+ */
+@SerialVersionUID(1L)
+final case class EWMA(value: Double, alpha: Double) {
+
+ require(0.0 <= alpha && alpha <= 1.0, "alpha must be between 0.0 and 1.0")
+
+ /**
+ * Calculates the exponentially weighted moving average for a given monitored data set.
+ *
+ * @param xn the new data point
+ * @return a new [[akka.cluster.metrics.EWMA]] with the updated value
+ */
+ def :+(xn: Double): EWMA = {
+ val newValue = (alpha * xn) + (1 - alpha) * value
+ if (newValue == value) this // no change
+ else copy(value = newValue)
+ }
+
+}
+
+object EWMA {
+
+ /**
+ * math.log(2)
+ */
+ private val LogOf2 = 0.69315
+
+ /**
+ * Calculate the alpha (decay factor) used in [[akka.cluster.metrics.EWMA]]
+ * from specified half-life and interval between observations.
+ * Half-life is the interval over which the weights decrease by a factor of two.
+ * The relevance of each data sample is halved for every passing half-life duration,
+ * i.e. after 4 times the half-life, a data sample’s relevance is reduced to about 6% of
+ * its original relevance. The initial relevance of a data sample is given by
+ * 1 - 0.5 ^ (collect-interval / half-life).
+ */
+ def alpha(halfLife: FiniteDuration, collectInterval: FiniteDuration): Double = {
+ val halfLifeMillis = halfLife.toMillis
+ require(halfLifeMillis > 0, "halfLife must be > 0 s")
+ val decayRate = LogOf2 / halfLifeMillis
+ 1 - math.exp(-decayRate * collectInterval.toMillis)
+ }
+}
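+
+// Usage sketch (illustrative numbers): smooth a stream of samples with a 12 s
+// half-life and a 3 s collect interval.
+//
+//   import scala.concurrent.duration._
+//   val a = EWMA.alpha(halfLife = 12.seconds, collectInterval = 3.seconds)
+//   val smoothed = Seq(0.7, 0.9, 0.4).foldLeft(EWMA(value = 0.5, alpha = a))(_ :+ _)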
diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Metric.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Metric.scala
new file mode 100644
index 0000000000..bbbf1756c1
--- /dev/null
+++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Metric.scala
@@ -0,0 +1,377 @@
+/**
+ * Copyright (C) 2009-2014 Typesafe Inc.
+ */
+package akka.cluster.metrics
+
+import akka.actor.Address
+import scala.util.Success
+import scala.util.Failure
+import scala.util.Try
+
+/**
+ * Metrics key/value.
+ *
+ * Equality of Metric is based on its name.
+ *
+ * @param name the metric name
+ * @param value the metric value, which must be a valid numerical value,
+ * a valid value is neither negative nor NaN/Infinite.
+ * @param average the data stream of the metric value, for trending over time. Metrics that are already
+ * averages (e.g. system load average) or finite (e.g. the number of processors) are not trended.
+ */
+@SerialVersionUID(1L)
+final case class Metric private[metrics] (name: String, value: Number, average: Option[EWMA])
+ extends MetricNumericConverter {
+
+ require(defined(value), s"Invalid Metric [$name] value [$value]")
+
+ /**
+ * Updates the data point, and if defined, updates the data stream (average).
+ * Returns the updated metric.
+ */
+ def :+(latest: Metric): Metric =
+ if (this sameAs latest) average match {
+ case Some(avg) ⇒ copy(value = latest.value, average = Some(avg :+ latest.value.doubleValue))
+ case None if latest.average.isDefined ⇒ copy(value = latest.value, average = latest.average)
+ case _ ⇒ copy(value = latest.value)
+ }
+ else this
+
+ /**
+ * The numerical value of the average, if defined, otherwise the latest value
+ */
+ def smoothValue: Double = average match {
+ case Some(avg) ⇒ avg.value
+ case None ⇒ value.doubleValue
+ }
+
+ /**
+ * @return true if this value is smoothed
+ */
+ def isSmooth: Boolean = average.isDefined
+
+ /**
+ * Returns true if that is tracking the same metric as this.
+ */
+ def sameAs(that: Metric): Boolean = name == that.name
+
+ override def hashCode = name.##
+ override def equals(obj: Any) = obj match {
+ case other: Metric ⇒ sameAs(other)
+ case _ ⇒ false
+ }
+
+}
+
+/**
+ * Factory for creating valid Metric instances.
+ */
+object Metric extends MetricNumericConverter {
+
+ /**
+ * Creates a new Metric instance if the value is valid, otherwise None
+ * is returned. Invalid numeric values are negative or NaN/Infinite.
+ */
+ def create(name: String, value: Number, decayFactor: Option[Double]): Option[Metric] =
+ if (defined(value)) Some(new Metric(name, value, createEWMA(value.doubleValue, decayFactor)))
+ else None
+
+ /**
+ * Creates a new Metric instance if the Try is successful and the value is valid,
+ * otherwise None is returned. Invalid numeric values are negative or NaN/Infinite.
+ */
+ def create(name: String, value: Try[Number], decayFactor: Option[Double]): Option[Metric] = value match {
+ case Success(v) ⇒ create(name, v, decayFactor)
+ case Failure(_) ⇒ None
+ }
+
+ def createEWMA(value: Double, decayFactor: Option[Double]): Option[EWMA] = decayFactor match {
+ case Some(alpha) ⇒ Some(EWMA(value, alpha))
+ case None ⇒ None
+ }
+
+}
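+
+// Factory contract sketch (metric names from StandardMetrics; values made up):
+//
+//   Metric.create("heap-memory-used", 256L, decayFactor = Some(0.1))   // Some(Metric(...))
+//   Metric.create("system-load-average", -1.0, decayFactor = None)     // None: negative
+//   Metric.create("cpu-combined", Double.NaN, decayFactor = Some(0.1)) // None: NaN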
+
+/**
+ * Definitions of the built-in standard metrics.
+ *
+ * The following extractors and data structures make it easy to consume the
+ * [[NodeMetrics]] in, for example, load balancers.
+ */
+object StandardMetrics {
+
+ // Constants for the heap related Metric names
+ final val HeapMemoryUsed = "heap-memory-used"
+ final val HeapMemoryCommitted = "heap-memory-committed"
+ final val HeapMemoryMax = "heap-memory-max"
+
+ // Constants for the cpu related Metric names
+ final val SystemLoadAverage = "system-load-average"
+ final val Processors = "processors"
+ // In latest Linux kernels: CpuCombined + CpuStolen + CpuIdle = 1.0 or 100%.
+ /** Sum of User + Sys + Nice + Wait. See [[org.hyperic.sigar.CpuPerc]] */
+ final val CpuCombined = "cpu-combined"
+ /** The amount of CPU 'stolen' from this virtual machine by the hypervisor for other tasks (such as running another virtual machine). */
+ final val CpuStolen = "cpu-stolen"
+ /** Amount of CPU time left after combined and stolen are removed. */
+ final val CpuIdle = "cpu-idle"
+
+ object HeapMemory {
+
+ /**
+ * Given a NodeMetrics it returns the HeapMemory data if the nodeMetrics contains
+ * necessary heap metrics.
+ * @return if possible a tuple matching the HeapMemory constructor parameters
+ */
+ def unapply(nodeMetrics: NodeMetrics): Option[(Address, Long, Long, Long, Option[Long])] = {
+ for {
+ used ← nodeMetrics.metric(HeapMemoryUsed)
+ committed ← nodeMetrics.metric(HeapMemoryCommitted)
+ } yield (nodeMetrics.address, nodeMetrics.timestamp,
+ used.smoothValue.longValue, committed.smoothValue.longValue,
+ nodeMetrics.metric(HeapMemoryMax).map(_.smoothValue.longValue))
+ }
+
+ }
+
+ /**
+ * Java API to extract HeapMemory data from nodeMetrics, if the nodeMetrics
+ * contains necessary heap metrics, otherwise it returns null.
+ */
+ def extractHeapMemory(nodeMetrics: NodeMetrics): HeapMemory = nodeMetrics match {
+ case HeapMemory(address, timestamp, used, committed, max) ⇒
+ // note that above extractor returns tuple
+ HeapMemory(address, timestamp, used, committed, max)
+ case _ ⇒ null
+ }
+
+ /**
+ * The amount of used and committed memory will always be <= max if max is defined.
+ * A memory allocation may fail if it attempts to increase the used memory such that used > committed
+ * even if used <= max is true (e.g. when the system virtual memory is low).
+ *
+ * @param address [[akka.actor.Address]] of the node the metrics are gathered at
+ * @param timestamp the time of sampling, in milliseconds since midnight, January 1, 1970 UTC
+ * @param used the current sum of heap memory used from all heap memory pools (in bytes)
+ * @param committed the current sum of heap memory guaranteed to be available to the JVM
+ * from all heap memory pools (in bytes). Committed will always be greater than or equal to used.
+ * @param max the maximum amount of memory (in bytes) that can be used for JVM memory management.
+ * Can be undefined on some OS.
+ */
+ @SerialVersionUID(1L)
+ final case class HeapMemory(address: Address, timestamp: Long, used: Long, committed: Long, max: Option[Long]) {
+ require(committed > 0L, "committed heap expected to be > 0 bytes")
+ require(max.isEmpty || max.get > 0L, "max heap expected to be > 0 bytes")
+ }
+
+ object Cpu {
+
+ /**
+ * Given a NodeMetrics it returns the Cpu data if the nodeMetrics contains
+ * necessary cpu metrics.
+ * @return if possible a tuple matching the Cpu constructor parameters
+ */
+ def unapply(nodeMetrics: NodeMetrics): Option[(Address, Long, Option[Double], Option[Double], Option[Double], Int)] = {
+ for {
+ processors ← nodeMetrics.metric(Processors)
+ } yield (nodeMetrics.address, nodeMetrics.timestamp,
+ nodeMetrics.metric(SystemLoadAverage).map(_.smoothValue),
+ nodeMetrics.metric(CpuCombined).map(_.smoothValue),
+ nodeMetrics.metric(CpuStolen).map(_.smoothValue),
+ processors.value.intValue)
+ }
+
+ }
+
+ /**
+ * Java API to extract Cpu data from nodeMetrics, if the nodeMetrics
+ * contains necessary cpu metrics, otherwise it returns null.
+ */
+ def extractCpu(nodeMetrics: NodeMetrics): Cpu = nodeMetrics match {
+ case Cpu(address, timestamp, systemLoadAverage, cpuCombined, cpuStolen, processors) ⇒
+ // note that above extractor returns tuple
+ Cpu(address, timestamp, systemLoadAverage, cpuCombined, cpuStolen, processors)
+ case _ ⇒ null
+ }
+
+ /**
+ * @param address [[akka.actor.Address]] of the node the metrics are gathered at
+ * @param timestamp the time of sampling, in milliseconds since midnight, January 1, 1970 UTC
+ * @param systemLoadAverage OS-specific average load on the CPUs in the system, for the past 1 minute,
+ * The system is possibly nearing a bottleneck if the system load average is nearing number of cpus/cores.
+ * @param cpuCombined combined CPU sum of User + Sys + Nice + Wait, in percentage [0.0 - 1.0]. This
+ * metric can describe the amount of time the CPU spent executing code during the sample interval
+ * and how much capacity theoretically remains.
+ * @param cpuStolen stolen CPU time, in percentage [0.0 - 1.0].
+ * @param processors the number of available processors
+ */
+ @SerialVersionUID(1L)
+ final case class Cpu(
+ address: Address,
+ timestamp: Long,
+ systemLoadAverage: Option[Double],
+ cpuCombined: Option[Double],
+ cpuStolen: Option[Double],
+ processors: Int) {
+
+ cpuCombined match {
+ case Some(x) ⇒ require(0.0 <= x && x <= 1.0, s"cpuCombined must be between [0.0 - 1.0], was [$x]")
+ case None ⇒
+ }
+
+ cpuStolen match {
+ case Some(x) ⇒ require(0.0 <= x && x <= 1.0, s"cpuStolen must be between [0.0 - 1.0], was [$x]")
+ case None ⇒
+ }
+
+ }
+
+}
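+
+// Consumption sketch using the extractors above (nodeMetrics hypothetical):
+//
+//   nodeMetrics match {
+//     case HeapMemory(address, _, used, committed, _) ⇒ // heap metrics available
+//     case Cpu(address, _, Some(loadAverage), _, _, processors) ⇒ // cpu metrics available
+//     case _ ⇒ // insufficient metrics
+//   }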
+
+/**
+ * INTERNAL API
+ *
+ * Encapsulates evaluation of validity of metric values and conversion of an actual metric value to
+ * a [[akka.cluster.metrics.Metric]] for consumption by subscribed cluster entities.
+ */
+private[metrics] trait MetricNumericConverter {
+
+ /**
+ * A defined value is neither negative nor NaN/Infinite:
+ * - JMX system load average and max heap can be 'undefined' for certain OS, in which case a -1 is returned
+ * - SIGAR combined CPU can occasionally return a NaN or Infinite (known bug)
+ */
+ def defined(value: Number): Boolean = convertNumber(value) match {
+ case Left(a) ⇒ a >= 0
+ case Right(b) ⇒ !(b < 0.0 || b.isNaN || b.isInfinite)
+ }
+
+ /**
+ * May involve rounding or truncation.
+ */
+ def convertNumber(from: Any): Either[Long, Double] = from match {
+ case n: Int ⇒ Left(n)
+ case n: Long ⇒ Left(n)
+ case n: Double ⇒ Right(n)
+ case n: Float ⇒ Right(n)
+ case n: BigInt ⇒ Left(n.longValue)
+ case n: BigDecimal ⇒ Right(n.doubleValue)
+ case x ⇒ throw new IllegalArgumentException(s"Not a number [$x]")
+ }
+
+}
+
+/**
+ * The snapshot of current sampled health metrics for any monitored process.
+ * Collected and gossiped at regular intervals for dynamic cluster management strategies.
+ *
+ * Equality of NodeMetrics is based on its address.
+ *
+ * @param address [[akka.actor.Address]] of the node the metrics are gathered at
+ * @param timestamp the time of sampling, in milliseconds since midnight, January 1, 1970 UTC
+ * @param metrics the set of sampled [[akka.cluster.metrics.Metric]]
+ */
+@SerialVersionUID(1L)
+final case class NodeMetrics(address: Address, timestamp: Long, metrics: Set[Metric] = Set.empty[Metric]) {
+
+ /**
+ * Returns the most recent data.
+ */
+ def merge(that: NodeMetrics): NodeMetrics = {
+ require(address == that.address, s"merge only allowed for same address, [$address] != [$that.address]")
+ if (timestamp >= that.timestamp) this // that is older
+ else {
+ // equality is based on the name of the Metric and Set doesn't replace existing element
+ copy(metrics = that.metrics ++ metrics, timestamp = that.timestamp)
+ }
+ }
+
+ /**
+ * Returns the most recent data with [[EWMA]] averaging.
+ */
+ def update(that: NodeMetrics): NodeMetrics = {
+ require(address == that.address, s"update only allowed for same address, [$address] != [$that.address]")
+ // Apply sample ordering.
+ val (latestNode, currentNode) = if (this.timestamp >= that.timestamp) (this, that) else (that, this)
+ // Average metrics present in both latest and current.
+ val updated = for {
+ latest ← latestNode.metrics
+ current ← currentNode.metrics
+ if (latest sameAs current)
+ } yield {
+ current :+ latest
+ }
+ // Append metrics missing from either latest or current.
+ // Equality is based on the [[Metric.name]] and [[Set]] doesn't replace existing elements.
+ val merged = updated ++ latestNode.metrics ++ currentNode.metrics
+ copy(metrics = merged, timestamp = latestNode.timestamp)
+ }
+
+ def metric(key: String): Option[Metric] = metrics.collectFirst { case m if m.name == key ⇒ m }
+
+ /**
+ * Java API
+ */
+ def getMetrics: java.lang.Iterable[Metric] =
+ scala.collection.JavaConverters.asJavaIterableConverter(metrics).asJava
+
+ /**
+ * Returns true if that address is the same as this
+ */
+ def sameAs(that: NodeMetrics): Boolean = address == that.address
+
+ override def hashCode = address.##
+ override def equals(obj: Any) = obj match {
+ case other: NodeMetrics ⇒ sameAs(other)
+ case _ ⇒ false
+ }
+
+}
+
+/**
+ * INTERNAL API
+ */
+private[metrics] object MetricsGossip {
+ val empty = MetricsGossip(Set.empty[NodeMetrics])
+}
+
+/**
+ * INTERNAL API
+ *
+ * @param nodes metrics per node
+ */
+@SerialVersionUID(1L)
+private[metrics] final case class MetricsGossip(nodes: Set[NodeMetrics]) {
+
+ /**
+ * Removes the metrics for the given node, e.g. when its correlating node ring member is no longer [[akka.cluster.MemberStatus.Up]].
+ */
+ def remove(node: Address): MetricsGossip = copy(nodes = nodes filterNot (_.address == node))
+
+ /**
+ * Only the nodes that are in the `includeNodes` Set.
+ */
+ def filter(includeNodes: Set[Address]): MetricsGossip =
+ copy(nodes = nodes filter { includeNodes contains _.address })
+
+ /**
+ * Adds new remote [[NodeMetrics]] and merges existing from a remote gossip.
+ */
+ def merge(otherGossip: MetricsGossip): MetricsGossip =
+ otherGossip.nodes.foldLeft(this) { (gossip, nodeMetrics) ⇒ gossip :+ nodeMetrics }
+
+ /**
+ * Adds new local [[NodeMetrics]], or merges an existing.
+ */
+ def :+(newNodeMetrics: NodeMetrics): MetricsGossip = nodeMetricsFor(newNodeMetrics.address) match {
+ case Some(existingNodeMetrics) ⇒
+ copy(nodes = nodes - existingNodeMetrics + (existingNodeMetrics update newNodeMetrics))
+ case None ⇒ copy(nodes = nodes + newNodeMetrics)
+ }
+
+ /**
+ * Returns [[NodeMetrics]] for a node if exists.
+ */
+ def nodeMetricsFor(address: Address): Option[NodeMetrics] = nodes find { n ⇒ n.address == address }
+
+}
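+
+// Merge sketch: for hypothetical gossips g1 and g2, (g1 merge g2) folds every
+// remote NodeMetrics into g1 via `:+`, so nodes known to both sides keep the
+// freshest timestamp with EWMA-updated metrics, while nodes known to only one
+// side are carried over unchanged.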
diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/MetricsCollector.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/MetricsCollector.scala
new file mode 100644
index 0000000000..91856148cc
--- /dev/null
+++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/MetricsCollector.scala
@@ -0,0 +1,277 @@
+/**
+ * Copyright (C) 2009-2014 Typesafe Inc.
+ */
+
+package akka.cluster.metrics
+
+import akka.actor.ActorSystem
+import akka.actor.ExtendedActorSystem
+import akka.event.Logging
+import akka.event.LoggingAdapter
+import akka.ConfigurationException
+import akka.actor.Address
+import java.lang.management.MemoryMXBean
+import java.lang.management.ManagementFactory
+import java.lang.management.OperatingSystemMXBean
+import java.lang.management.MemoryUsage
+import java.lang.System.{ currentTimeMillis ⇒ newTimestamp }
+import akka.cluster.Cluster
+import java.io.Closeable
+import org.hyperic.sigar.SigarProxy
+
+/**
+ * Metrics sampler.
+ *
+ * Implementations of cluster system metrics collectors extend this trait.
+ */
+trait MetricsCollector extends Closeable {
+ /**
+ * Samples and collects new data points.
+ * This method is invoked periodically and should return
+ * current metrics for this node.
+ */
+ def sample(): NodeMetrics
+}
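+
+// A minimal custom collector sketch (hypothetical class): the factory below
+// instantiates the configured FQCN reflectively through a constructor taking
+// ActorSystem, so a custom implementation needs exactly that shape.
+//
+//   class MyCollector(system: ActorSystem) extends MetricsCollector {
+//     def sample(): NodeMetrics =
+//       NodeMetrics(Cluster(system).selfAddress, System.currentTimeMillis)
+//     def close(): Unit = ()
+//   }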
+
+/**
+ * INTERNAL API
+ *
+ * Factory to create configured [[MetricsCollector]].
+ *
+ * Metrics collector instantiation priority order:
+ * 1) Provided custom collector
+ * 2) Internal [[SigarMetricsCollector]]
+ * 3) Internal [[JmxMetricsCollector]]
+ */
+private[metrics] object MetricsCollector {
+
+ /** Try to create collector instance in the order of priority. */
+ def apply(system: ActorSystem): MetricsCollector = {
+ val log = Logging(system, getClass.getName)
+ val settings = ClusterMetricsSettings(system.settings.config)
+ import settings._
+
+ val collectorCustom = CollectorProvider
+ val collectorSigar = classOf[SigarMetricsCollector].getName
+ val collectorJMX = classOf[JmxMetricsCollector].getName
+
+ val useCustom = !CollectorFallback
+ val useInternal = CollectorFallback && CollectorProvider == ""
+
+ def create(provider: String) = TryNative {
+ log.debug(s"Trying ${provider}.")
+ system.asInstanceOf[ExtendedActorSystem].dynamicAccess
+ .createInstanceFor[MetricsCollector](provider, List(classOf[ActorSystem] -> system)).get
+ }
+
+ val collector = if (useCustom)
+ create(collectorCustom)
+ else if (useInternal)
+ create(collectorSigar) orElse create(collectorJMX)
+ else // Use complete fall back chain.
+ create(collectorCustom) orElse create(collectorSigar) orElse create(collectorJMX)
+
+ collector.recover {
+ case e ⇒ throw new ConfigurationException(s"Could not create metrics collector: ${e}")
+ }.get
+ }
+}
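+
+// Configuration sketch for the priority order above (keys from
+// ClusterMetricsSettings; the FQCN is hypothetical):
+//
+//   akka.cluster.metrics.collector {
+//     provider = "com.example.MyCollector"  # custom collector, tried first
+//     fallback = on                         # on: fall back to Sigar, then JMX
+//   }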
+
+/**
+ * Loads JVM and system metrics through JMX monitoring beans.
+ *
+ * @param address The [[akka.actor.Address]] of the node being sampled
+ * @param decayFactor how quickly the exponential weighting of past data is decayed
+ */
+class JmxMetricsCollector(address: Address, decayFactor: Double) extends MetricsCollector {
+ import StandardMetrics._
+
+ private def this(address: Address, settings: ClusterMetricsSettings) =
+ this(address,
+ EWMA.alpha(settings.CollectorMovingAverageHalfLife, settings.CollectorSampleInterval))
+
+ /**
+ * This constructor is used when creating an instance from configured FQCN
+ */
+ def this(system: ActorSystem) = this(Cluster(system).selfAddress, ClusterMetricsExtension(system).settings)
+
+ private val decayFactorOption = Some(decayFactor)
+
+ private val memoryMBean: MemoryMXBean = ManagementFactory.getMemoryMXBean
+
+ private val osMBean: OperatingSystemMXBean = ManagementFactory.getOperatingSystemMXBean
+
+ /**
+ * Samples and collects new data points.
+ * Creates a new instance each time.
+ */
+ def sample(): NodeMetrics = NodeMetrics(address, newTimestamp, metrics)
+
+ /**
+ * Generate metrics set.
+ * Creates a new instance each time.
+ */
+ def metrics(): Set[Metric] = {
+ val heap = heapMemoryUsage
+ Set(systemLoadAverage, heapUsed(heap), heapCommitted(heap), heapMax(heap), processors).flatten
+ }
+
+ /**
+ * (JMX) Returns the OS-specific average load on the CPUs in the system, for the past 1 minute.
+ * On some systems the JMX OS system load average may not be available, in which case a -1 is
+ * returned from JMX, and None is returned from this method.
+ * Creates a new instance each time.
+ */
+ def systemLoadAverage: Option[Metric] = Metric.create(
+ name = SystemLoadAverage,
+ value = osMBean.getSystemLoadAverage,
+ decayFactor = None)
+
+ /**
+ * (JMX) Returns the number of available processors
+ * Creates a new instance each time.
+ */
+ def processors: Option[Metric] = Metric.create(
+ name = Processors,
+ value = osMBean.getAvailableProcessors,
+ decayFactor = None)
+
+ /**
+ * Current heap to be passed in to heapUsed, heapCommitted and heapMax
+ */
+ def heapMemoryUsage: MemoryUsage = memoryMBean.getHeapMemoryUsage
+
+ /**
+ * (JMX) Returns the current sum of heap memory used from all heap memory pools (in bytes).
+ * Creates a new instance each time.
+ */
+ def heapUsed(heap: MemoryUsage): Option[Metric] = Metric.create(
+ name = HeapMemoryUsed,
+ value = heap.getUsed,
+ decayFactor = decayFactorOption)
+
+ /**
+ * (JMX) Returns the current sum of heap memory guaranteed to be available to the JVM
+ * from all heap memory pools (in bytes).
+ * Creates a new instance each time.
+ */
+ def heapCommitted(heap: MemoryUsage): Option[Metric] = Metric.create(
+ name = HeapMemoryCommitted,
+ value = heap.getCommitted,
+ decayFactor = decayFactorOption)
+
+ /**
+ * (JMX) Returns the maximum amount of memory (in bytes) that can be used
+ * for JVM memory management. If not defined the metrics value is None, i.e.
+ * never negative.
+ * Creates a new instance each time.
+ */
+ def heapMax(heap: MemoryUsage): Option[Metric] = Metric.create(
+ name = HeapMemoryMax,
+ value = heap.getMax,
+ decayFactor = None)
+
+ override def close(): Unit = ()
+
+}
+
+/**
+ * Loads metrics through Hyperic SIGAR and JMX monitoring beans. This
+ * loads a wider and more accurate range of metrics than JmxMetricsCollector
+ * by using SIGAR's native OS library.
+ *
+ * The constructor will by design throw an exception if org.hyperic.sigar.Sigar can't be loaded, due
+ * to missing classes or native libraries.
+ *
+ * @param address The [[akka.actor.Address]] of the node being sampled
+ * @param decayFactor how quickly the exponential weighting of past data is decayed
+ * @param sigar the org.hyperic.sigar.Sigar instance
+ */
+class SigarMetricsCollector(address: Address, decayFactor: Double, sigar: SigarProxy)
+ extends JmxMetricsCollector(address, decayFactor) {
+
+ import StandardMetrics._
+ import org.hyperic.sigar.CpuPerc
+
+ def this(address: Address, settings: ClusterMetricsSettings, sigar: SigarProxy) =
+ this(address,
+ EWMA.alpha(settings.CollectorMovingAverageHalfLife, settings.CollectorSampleInterval),
+ sigar)
+
+ def this(address: Address, settings: ClusterMetricsSettings) =
+ this(address, settings, DefaultSigarProvider(settings).createSigarInstance)
+
+ /**
+ * This constructor is used when creating an instance from configured FQCN
+ */
+ def this(system: ActorSystem) = this(Cluster(system).selfAddress, ClusterMetricsExtension(system).settings)
+
+ private val decayFactorOption = Some(decayFactor)
+
+ /**
+ * Verify at the end of construction that Sigar is operational.
+ */
+ metrics()
+
+ // Construction complete.
+
+ override def metrics(): Set[Metric] = {
+ // Must obtain cpuPerc in one shot. See https://github.com/akka/akka/issues/16121
+ val cpuPerc = sigar.getCpuPerc
+ super.metrics ++ Set(cpuCombined(cpuPerc), cpuStolen(cpuPerc)).flatten
+ }
+
+ /**
+ * (SIGAR) Returns the OS-specific average load on the CPUs in the system, for the past 1 minute.
+ *
+ * Creates a new instance each time.
+ */
+ override def systemLoadAverage: Option[Metric] = Metric.create(
+ name = SystemLoadAverage,
+ value = sigar.getLoadAverage()(0).asInstanceOf[Number],
+ decayFactor = None)
+
+ /**
+ * (SIGAR) Returns the combined CPU sum of User + Sys + Nice + Wait, in percentage. This metric can
+ * describe the amount of time the CPU spent executing code during the sample interval and how much
+ * capacity theoretically remains. Note that 99% CPU utilization can be optimal or indicative of failure.
+ *
+ * In the data stream, this will sometimes return with a valid metric value, and sometimes as a NaN or Infinite.
+ * Documented bug https://bugzilla.redhat.com/show_bug.cgi?id=749121 and several others.
+ *
+ * Creates a new instance each time.
+ */
+ def cpuCombined(cpuPerc: CpuPerc): Option[Metric] = Metric.create(
+ name = CpuCombined,
+ value = cpuPerc.getCombined.asInstanceOf[Number],
+ decayFactor = decayFactorOption)
+
+ /**
+ * (SIGAR) Returns the stolen CPU time. Relevant to virtual hosting environments.
+ * For details please see: [[http://en.wikipedia.org/wiki/CPU_time#Subdivision Wikipedia - CPU time subdivision]] and
+ * [[https://www.datadoghq.com/2013/08/understanding-aws-stolen-cpu-and-how-it-affects-your-apps/ Understanding AWS stolen CPU and how it affects your apps]]
+ *
+ * Creates a new instance each time.
+ */
+ def cpuStolen(cpuPerc: CpuPerc): Option[Metric] = Metric.create(
+ name = CpuStolen,
+ value = cpuPerc.getStolen.asInstanceOf[Number],
+ decayFactor = decayFactorOption)
+
+ /**
+ * (SIGAR) Returns the idle CPU time.
+ * Amount of CPU time left after combined and stolen are removed.
+ *
+ * Creates a new instance each time.
+ */
+ def cpuIdle(cpuPerc: CpuPerc): Option[Metric] = Metric.create(
+ name = CpuIdle,
+ value = cpuPerc.getIdle.asInstanceOf[Number],
+ decayFactor = decayFactorOption)
+
+ /**
+ * Releases any native resources associated with this instance.
+ */
+ override def close(): Unit = SigarProvider.close(sigar)
+
+}
diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Provision.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Provision.scala
new file mode 100644
index 0000000000..08edb8c345
--- /dev/null
+++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Provision.scala
@@ -0,0 +1,105 @@
+/**
+ * Copyright (C) 2009-2014 Typesafe Inc.
+ */
+
+package akka.cluster.metrics
+
+import java.io.File
+import kamon.sigar.SigarProvisioner
+import org.hyperic.sigar.Sigar
+import org.hyperic.sigar.SigarProxy
+import org.hyperic.sigar.SigarException
+import scala.language.postfixOps
+import scala.util.Success
+import scala.util.Failure
+import scala.util.Try
+
+/**
+ * Provide sigar instance as [[SigarProxy]].
+ *
+ * Users can provision the sigar classes and native library in one of the following ways:
+ *
+ * 1) Use [[https://github.com/kamon-io/sigar-loader Kamon sigar-loader]] as a project dependency for the user project.
+ * The metrics extension will extract and load the sigar library on demand with the help of the Kamon sigar provisioner.
+ *
+ * 2) Use [[https://github.com/kamon-io/sigar-loader Kamon sigar-loader]] as a java agent: `java -javaagent:/path/to/sigar-loader.jar`.
+ * The Kamon sigar-loader agent will extract and load the sigar library during JVM start.
+ *
+ * 3) Place `sigar.jar` on the `classpath` and the sigar native library for the OS on the `java.library.path`.
+ * The user is required to manage both the project dependency and the library deployment manually.
+ */
+trait SigarProvider {
+
+ /** Library extract location. */
+ def extractFolder: String
+
+ /** Verify if sigar native library is loaded and operational. */
+ def isNativeLoaded: Boolean =
+ try {
+ val sigar = verifiedSigarInstance
+ SigarProvider.close(sigar)
+ true
+ } catch {
+ case e: Throwable ⇒ false
+ }
+
+ /** Create sigar and verify it works. */
+ def verifiedSigarInstance: SigarProxy = {
+ val sigar = new Sigar()
+ sigar.getPid
+ sigar.getLoadAverage
+ sigar.getCpuPerc
+ sigar
+ }
+
+ /** Extract and load sigar native library. */
+ def provisionSigarLibrary(): Unit = {
+ SigarProvisioner.provision(new File(extractFolder))
+ }
+
+ /**
+ * Create sigar instance with 2-phase sigar library loading.
+ * 1) Assume that library is already provisioned.
+ * 2) Attempt to provision library via sigar-loader.
+ */
+ def createSigarInstance: SigarProxy = {
+ TryNative {
+ verifiedSigarInstance
+ } orElse TryNative {
+ provisionSigarLibrary()
+ verifiedSigarInstance
+ } recover {
+ case e: Throwable ⇒ throw new RuntimeException("Failed to load sigar:", e)
+ } get
+ }
+
+}
+
+object SigarProvider {
+ /**
+ * Release underlying sigar proxy resources.
+ *
+ * Note: [[SigarProxy]] is not [[Sigar]] during tests.
+ */
+ def close(sigar: SigarProxy) = {
+ if (sigar.isInstanceOf[Sigar]) sigar.asInstanceOf[Sigar].close()
+ }
+}
+
+/**
+ * Provide sigar instance as [[SigarProxy]] with configured location via [[ClusterMetricsSettings]].
+ */
+case class DefaultSigarProvider(settings: ClusterMetricsSettings) extends SigarProvider {
+ def extractFolder = settings.NativeLibraryExtractFolder
+}
+
+/**
+ * INTERNAL API
+ */
+private[metrics] object TryNative {
+ def apply[T](r: ⇒ T): Try[T] =
+ try Success(r) catch {
+ // catching all, for example java.lang.LinkageError that are not caught by `NonFatal` in `Try`
+ case e: Throwable ⇒ Failure(e)
+ }
+}
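+
+// TryNative exists because a missing native library surfaces as a
+// java.lang.LinkageError, which scala.util.Try (NonFatal) would let propagate
+// rather than capture; usage sketch:
+//
+//   TryNative(new Sigar().getPid) match {
+//     case Success(pid) ⇒ // native library operational
+//     case Failure(e)   ⇒ // fall back; e may be a LinkageError
+//   }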
diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/protobuf/MessageSerializer.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/protobuf/MessageSerializer.scala
new file mode 100644
index 0000000000..ebd1440b9d
--- /dev/null
+++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/protobuf/MessageSerializer.scala
@@ -0,0 +1,219 @@
+/**
+ * Copyright (C) 2009-2014 Typesafe Inc.
+ */
+
+package akka.cluster.metrics.protobuf
+
+import java.io.ByteArrayInputStream
+import java.io.ByteArrayOutputStream
+import java.io.ObjectOutputStream
+import java.{ lang ⇒ jl }
+import java.util.zip.GZIPInputStream
+import java.util.zip.GZIPOutputStream
+import scala.annotation.tailrec
+import scala.collection.JavaConverters.asJavaIterableConverter
+import scala.collection.JavaConverters.asScalaBufferConverter
+import scala.collection.JavaConverters.setAsJavaSetConverter
+import scala.collection.breakOut
+import com.google.protobuf.ByteString
+import com.google.protobuf.MessageLite
+import akka.actor.Address
+import akka.actor.ExtendedActorSystem
+import akka.cluster.metrics.EWMA
+import akka.cluster.metrics.Metric
+import akka.cluster.metrics.MetricsGossip
+import akka.cluster.metrics.NodeMetrics
+import akka.cluster.metrics.protobuf.msg.{ ClusterMetricsMessages ⇒ cm }
+import akka.serialization.Serializer
+import akka.util.ClassLoaderObjectInputStream
+import akka.cluster.metrics.ClusterMetricsMessage
+import akka.cluster.metrics.MetricsGossipEnvelope
+import akka.cluster.metrics.ClusterMetricsSettings
+
+/**
+ * Protobuf serializer for [[ClusterMetricsMessage]] types.
+ */
+class MessageSerializer(val system: ExtendedActorSystem) extends Serializer {
+
+ private final val BufferSize = 4 * 1024
+
+ private val fromBinaryMap = collection.immutable.HashMap[Class[_ <: ClusterMetricsMessage], Array[Byte] ⇒ AnyRef](
+ classOf[MetricsGossipEnvelope] -> metricsGossipEnvelopeFromBinary)
+
+ override val includeManifest: Boolean = true
+
+ override val identifier = ClusterMetricsSettings(system.settings.config).SerializerIdentifier
+
+ override def toBinary(obj: AnyRef): Array[Byte] = obj match {
+ case m: MetricsGossipEnvelope ⇒
+ compress(metricsGossipEnvelopeToProto(m))
+ case _ ⇒
+ throw new IllegalArgumentException(s"Can't serialize object of type ${obj.getClass}")
+ }
+
+ def compress(msg: MessageLite): Array[Byte] = {
+ val bos = new ByteArrayOutputStream(BufferSize)
+ val zip = new GZIPOutputStream(bos)
+ msg.writeTo(zip)
+ zip.close()
+ bos.toByteArray
+ }
+
+ def decompress(bytes: Array[Byte]): Array[Byte] = {
+ val in = new GZIPInputStream(new ByteArrayInputStream(bytes))
+ val out = new ByteArrayOutputStream()
+ val buffer = new Array[Byte](BufferSize)
+
+ @tailrec def readChunk(): Unit = in.read(buffer) match {
+ case -1 ⇒ ()
+ case n ⇒
+ out.write(buffer, 0, n)
+ readChunk()
+ }
+
+ readChunk()
+ out.toByteArray
+ }
+
+ def fromBinary(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef = clazz match {
+ case Some(c) ⇒ fromBinaryMap.get(c.asInstanceOf[Class[ClusterMetricsMessage]]) match {
+ case Some(f) ⇒ f(bytes)
+ case None ⇒ throw new IllegalArgumentException(s"Unimplemented deserialization of message class $c in metrics")
+ }
+ case _ ⇒ throw new IllegalArgumentException("Need a metrics message class to be able to deserialize bytes in metrics")
+ }
+
+ private def addressFromBinary(bytes: Array[Byte]): Address =
+ addressFromProto(cm.Address.parseFrom(bytes))
+
+ private def addressToProto(address: Address): cm.Address.Builder = address match {
+ case Address(protocol, system, Some(host), Some(port)) ⇒
+ cm.Address.newBuilder().setSystem(system).setHostname(host).setPort(port).setProtocol(protocol)
+ case _ ⇒ throw new IllegalArgumentException(s"Address [${address}] could not be serialized: host or port missing.")
+ }
+
+ private def addressToProtoByteArray(address: Address): Array[Byte] = addressToProto(address).build.toByteArray
+
+ @volatile
+ private var protocolCache: String = null
+ @volatile
+ private var systemCache: String = null
+
+ private def getProtocol(address: cm.Address): String = {
+ val p = address.getProtocol
+ val pc = protocolCache
+ if (pc == p) pc
+ else {
+ protocolCache = p
+ p
+ }
+ }
+
+ private def getSystem(address: cm.Address): String = {
+ val s = address.getSystem
+ val sc = systemCache
+ if (sc == s) sc
+ else {
+ systemCache = s
+ s
+ }
+ }
+
+ private def addressFromProto(address: cm.Address): Address =
+ Address(getProtocol(address), getSystem(address), address.getHostname, address.getPort)
+
+ private def mapWithErrorMessage[T](map: Map[T, Int], value: T, unknown: String): Int = map.get(value) match {
+ case Some(x) ⇒ x
+ case _ ⇒ throw new IllegalArgumentException(s"Unknown ${unknown} [${value}] in cluster message")
+ }
+
+ private def metricsGossipEnvelopeToProto(envelope: MetricsGossipEnvelope): cm.MetricsGossipEnvelope = {
+ val mgossip = envelope.gossip
+ val allAddresses = mgossip.nodes.foldLeft(Set.empty[Address])((s, n) ⇒ s + n.address)
+ val addressMapping = allAddresses.zipWithIndex.toMap
+ val allMetricNames = mgossip.nodes.foldLeft(Set.empty[String])((s, n) ⇒ s ++ n.metrics.iterator.map(_.name))
+ val metricNamesMapping = allMetricNames.zipWithIndex.toMap
+
+ def mapAddress(address: Address) = mapWithErrorMessage(addressMapping, address, "address")
+ def mapName(name: String) = mapWithErrorMessage(metricNamesMapping, name, "metric name")
+
+ def ewmaToProto(ewma: Option[EWMA]): Option[cm.NodeMetrics.EWMA.Builder] = ewma.map {
+ x ⇒ cm.NodeMetrics.EWMA.newBuilder().setValue(x.value).setAlpha(x.alpha)
+ }
+
+ def numberToProto(number: Number): cm.NodeMetrics.Number.Builder = {
+ import cm.NodeMetrics.Number
+ import cm.NodeMetrics.NumberType
+ number match {
+ case n: jl.Double ⇒ Number.newBuilder().setType(NumberType.Double).setValue64(jl.Double.doubleToLongBits(n))
+ case n: jl.Long ⇒ Number.newBuilder().setType(NumberType.Long).setValue64(n)
+ case n: jl.Float ⇒ Number.newBuilder().setType(NumberType.Float).setValue32(jl.Float.floatToIntBits(n))
+ case n: jl.Integer ⇒ Number.newBuilder().setType(NumberType.Integer).setValue32(n)
+ case _ ⇒
+ val bos = new ByteArrayOutputStream
+ val out = new ObjectOutputStream(bos)
+ out.writeObject(number)
+ out.close()
+ Number.newBuilder().setType(NumberType.Serialized).setSerialized(ByteString.copyFrom(bos.toByteArray))
+ }
+ }
+
+ def metricToProto(metric: Metric): cm.NodeMetrics.Metric.Builder = {
+ val builder = cm.NodeMetrics.Metric.newBuilder().setNameIndex(mapName(metric.name)).setNumber(numberToProto(metric.value))
+ ewmaToProto(metric.average).map(builder.setEwma(_)).getOrElse(builder)
+ }
+
+ def nodeMetricsToProto(nodeMetrics: NodeMetrics): cm.NodeMetrics.Builder =
+ cm.NodeMetrics.newBuilder().setAddressIndex(mapAddress(nodeMetrics.address)).setTimestamp(nodeMetrics.timestamp).
+ addAllMetrics(nodeMetrics.metrics.map(metricToProto(_).build).asJava)
+
+ val nodeMetrics: Iterable[cm.NodeMetrics] = mgossip.nodes.map(nodeMetricsToProto(_).build)
+
+ cm.MetricsGossipEnvelope.newBuilder().setFrom(addressToProto(envelope.from)).setGossip(
+ cm.MetricsGossip.newBuilder().addAllAllAddresses(allAddresses.map(addressToProto(_).build()).asJava).
+ addAllAllMetricNames(allMetricNames.asJava).addAllNodeMetrics(nodeMetrics.asJava)).
+ setReply(envelope.reply).build
+ }
+
+ private def metricsGossipEnvelopeFromBinary(bytes: Array[Byte]): MetricsGossipEnvelope =
+ metricsGossipEnvelopeFromProto(cm.MetricsGossipEnvelope.parseFrom(decompress(bytes)))
+
+ private def metricsGossipEnvelopeFromProto(envelope: cm.MetricsGossipEnvelope): MetricsGossipEnvelope = {
+ val mgossip = envelope.getGossip
+ val addressMapping: Vector[Address] = mgossip.getAllAddressesList.asScala.map(addressFromProto)(breakOut)
+ val metricNameMapping: Vector[String] = mgossip.getAllMetricNamesList.asScala.toVector
+
+ def ewmaFromProto(ewma: cm.NodeMetrics.EWMA): Option[EWMA] =
+ Some(EWMA(ewma.getValue, ewma.getAlpha))
+
+ def numberFromProto(number: cm.NodeMetrics.Number): Number = {
+ import cm.NodeMetrics.Number
+ import cm.NodeMetrics.NumberType
+ number.getType.getNumber match {
+ case NumberType.Double_VALUE ⇒ jl.Double.longBitsToDouble(number.getValue64)
+ case NumberType.Long_VALUE ⇒ number.getValue64
+ case NumberType.Float_VALUE ⇒ jl.Float.intBitsToFloat(number.getValue32)
+ case NumberType.Integer_VALUE ⇒ number.getValue32
+ case NumberType.Serialized_VALUE ⇒
+ val in = new ClassLoaderObjectInputStream(system.dynamicAccess.classLoader,
+ new ByteArrayInputStream(number.getSerialized.toByteArray))
+ val obj = in.readObject
+ in.close()
+ obj.asInstanceOf[jl.Number]
+ }
+ }
+
+ def metricFromProto(metric: cm.NodeMetrics.Metric): Metric =
+ Metric(metricNameMapping(metric.getNameIndex), numberFromProto(metric.getNumber),
+ if (metric.hasEwma) ewmaFromProto(metric.getEwma) else None)
+
+ def nodeMetricsFromProto(nodeMetrics: cm.NodeMetrics): NodeMetrics =
+ NodeMetrics(addressMapping(nodeMetrics.getAddressIndex), nodeMetrics.getTimestamp,
+ nodeMetrics.getMetricsList.asScala.map(metricFromProto)(breakOut))
+
+ val nodeMetrics: Set[NodeMetrics] = mgossip.getNodeMetricsList.asScala.map(nodeMetricsFromProto)(breakOut)
+
+ MetricsGossipEnvelope(addressFromProto(envelope.getFrom), MetricsGossip(nodeMetrics), envelope.getReply)
+ }
+
+}
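+
+// Wiring sketch (assumed, not verbatim from this patch): a serializer like this
+// is typically registered and bound in configuration along the lines of
+//
+//   akka.actor {
+//     serializers {
+//       akka-cluster-metrics = "akka.cluster.metrics.protobuf.MessageSerializer"
+//     }
+//     serialization-bindings {
+//       "akka.cluster.metrics.ClusterMetricsMessage" = akka-cluster-metrics
+//     }
+//   }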
diff --git a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala
new file mode 100644
index 0000000000..d677c057b9
--- /dev/null
+++ b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2009-2014 Typesafe Inc.
+ */
+
+package akka.cluster.metrics
+
+import scala.language.postfixOps
+import scala.concurrent.duration._
+import com.typesafe.config.ConfigFactory
+import akka.remote.testkit.MultiNodeConfig
+import akka.remote.testkit.MultiNodeSpec
+import akka.actor.ExtendedActorSystem
+import akka.cluster.MultiNodeClusterSpec
+import akka.testkit.LongRunningTest
+import akka.cluster.MemberStatus
+
+trait ClusterMetricsCommonConfig extends MultiNodeConfig {
+ import ConfigFactory._
+
+ val node1 = role("node-1")
+ val node2 = role("node-2")
+ val node3 = role("node-3")
+ val node4 = role("node-4")
+ val node5 = role("node-5")
+
+ def nodeList = Seq(node1, node2, node3, node4, node5)
+
+ // Extract individual sigar library for every node.
+ nodeList foreach { role ⇒
+ nodeConfig(role) {
+ parseString("akka.cluster.metrics.native-library-extract-folder=${user.dir}/target/native/" + role.name)
+ }
+ }
+
+ // Disable legacy metrics in akka-cluster.
+ def disableMetricsLegacy = parseString("""akka.cluster.metrics.enabled=off""")
+
+ // Enable metrics extension in akka-cluster-metrics.
+ def enableMetricsExtension = parseString("""
+ akka.extensions=["akka.cluster.metrics.ClusterMetricsExtension"]
+ akka.cluster.metrics.collector.enabled = on
+ """)
+
+ // Disable metrics extension in akka-cluster-metrics.
+ def disableMetricsExtension = parseString("""
+ akka.extensions=["akka.cluster.metrics.ClusterMetricsExtension"]
+ akka.cluster.metrics.collector.enabled = off
+ """)
+
+ // Activate slf4j logging along with test listener.
+ def customLogging = parseString("""akka.loggers=["akka.testkit.TestEventListener","akka.event.slf4j.Slf4jLogger"]""")
+}
+
+object ClusterMetricsDisabledConfig extends ClusterMetricsCommonConfig {
+
+ commonConfig {
+ Seq(
+ customLogging,
+ disableMetricsLegacy,
+ disableMetricsExtension,
+ debugConfig(on = false),
+ MultiNodeClusterSpec.clusterConfigWithFailureDetectorPuppet)
+ .reduceLeft(_ withFallback _)
+ }
+}
+
+object ClusterMetricsEnabledConfig extends ClusterMetricsCommonConfig {
+ import ConfigFactory._
+
+ commonConfig {
+ Seq(
+ customLogging,
+ disableMetricsLegacy,
+ enableMetricsExtension,
+ debugConfig(on = false),
+ MultiNodeClusterSpec.clusterConfigWithFailureDetectorPuppet)
+ .reduceLeft(_ withFallback _)
+ }
+
+}
+
+class ClusterMetricsEnabledMultiJvmNode1 extends ClusterMetricsEnabledSpec
+class ClusterMetricsEnabledMultiJvmNode2 extends ClusterMetricsEnabledSpec
+class ClusterMetricsEnabledMultiJvmNode3 extends ClusterMetricsEnabledSpec
+class ClusterMetricsEnabledMultiJvmNode4 extends ClusterMetricsEnabledSpec
+class ClusterMetricsEnabledMultiJvmNode5 extends ClusterMetricsEnabledSpec
+
+abstract class ClusterMetricsEnabledSpec extends MultiNodeSpec(ClusterMetricsEnabledConfig)
+ with MultiNodeClusterSpec with RedirectLogging {
+ import ClusterMetricsEnabledConfig._
+
+ def isSigar(collector: MetricsCollector): Boolean = collector.isInstanceOf[SigarMetricsCollector]
+
+ def saveApplicationConf(): Unit = {
+ import java.io.File
+ import java.io.PrintWriter
+ val conf = cluster.system.settings.config
+ val text = conf.root.render
+ val file = new File(s"target/${myself.name}_application.conf")
+ Some(new PrintWriter(file)) map { p ⇒ p.write(text); p.close }
+ }
+
+ saveApplicationConf()
+
+ val metricsView = new ClusterMetricsView(cluster.system)
+
+ "Cluster metrics" must {
+ "periodically collect metrics on each node, publish to the event stream, " +
+ "and gossip metrics around the node ring" taggedAs LongRunningTest in within(60 seconds) {
+ awaitClusterUp(roles: _*)
+ enterBarrier("cluster-started")
+ awaitAssert(clusterView.members.count(_.status == MemberStatus.Up) should be(roles.size))
+ // TODO ensure same contract
+ //awaitAssert(clusterView.clusterMetrics.size should be(roles.size))
+ awaitAssert(metricsView.clusterMetrics.size should be(roles.size))
+ val collector = MetricsCollector(cluster.system)
+ collector.sample.metrics.size should be > (3)
+ enterBarrier("after")
+ }
+ "reflect the correct number of node metrics in cluster view" taggedAs LongRunningTest in within(30 seconds) {
+ runOn(node2) {
+ cluster.leave(node1)
+ }
+ enterBarrier("first-left")
+ runOn(node2, node3, node4, node5) {
+ markNodeAsUnavailable(node1)
+ // TODO ensure same contract
+ //awaitAssert(clusterView.clusterMetrics.size should be(roles.size - 1))
+ awaitAssert(metricsView.clusterMetrics.size should be(roles.size - 1))
+ }
+ enterBarrier("finished")
+ }
+ }
+}
+
+class ClusterMetricsDisabledMultiJvmNode1 extends ClusterMetricsDisabledSpec
+class ClusterMetricsDisabledMultiJvmNode2 extends ClusterMetricsDisabledSpec
+class ClusterMetricsDisabledMultiJvmNode3 extends ClusterMetricsDisabledSpec
+class ClusterMetricsDisabledMultiJvmNode4 extends ClusterMetricsDisabledSpec
+class ClusterMetricsDisabledMultiJvmNode5 extends ClusterMetricsDisabledSpec
+
+abstract class ClusterMetricsDisabledSpec extends MultiNodeSpec(ClusterMetricsDisabledConfig)
+ with MultiNodeClusterSpec with RedirectLogging {
+ import akka.cluster.ClusterEvent.CurrentClusterState
+
+ val metricsView = new ClusterMetricsView(cluster.system)
+
+ "Cluster metrics" must {
+ "not collect metrics, not publish metrics events, and not gossip metrics" taggedAs LongRunningTest in {
+ awaitClusterUp(roles: _*)
+ // TODO ensure same contract
+ //clusterView.clusterMetrics.size should be(0)
+ metricsView.clusterMetrics.size should be(0)
+ cluster.subscribe(testActor, classOf[ClusterMetricsChanged])
+ expectMsgType[CurrentClusterState]
+ expectNoMsg
+ // TODO ensure same contract
+ //clusterView.clusterMetrics.size should be(0)
+ metricsView.clusterMetrics.size should be(0)
+ enterBarrier("after")
+ }
+ }
+}
diff --git a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala
new file mode 100644
index 0000000000..b2ee12ed10
--- /dev/null
+++ b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala
@@ -0,0 +1,244 @@
+/**
+ * Copyright (C) 2009-2014 Typesafe Inc.
+ */
+
+package akka.cluster.metrics
+
+import language.postfixOps
+import java.lang.management.ManagementFactory
+import scala.concurrent.Await
+import scala.concurrent.duration._
+import com.typesafe.config.Config
+import com.typesafe.config.ConfigFactory
+import akka.actor._
+import akka.cluster.Cluster
+import akka.cluster.MultiNodeClusterSpec
+import akka.pattern.ask
+import akka.remote.testkit.{ MultiNodeSpec, MultiNodeConfig }
+import akka.routing.GetRoutees
+import akka.routing.FromConfig
+import akka.testkit.{ LongRunningTest, DefaultTimeout, ImplicitSender }
+import akka.routing.ActorRefRoutee
+import akka.routing.Routees
+import akka.cluster.routing.ClusterRouterPool
+import akka.cluster.routing.ClusterRouterPoolSettings
+
+object AdaptiveLoadBalancingRouterConfig extends MultiNodeConfig {
+
+ class Echo extends Actor {
+ def receive = {
+ case _ ⇒ sender() ! Reply(Cluster(context.system).selfAddress)
+ }
+ }
+
+ class Memory extends Actor with ActorLogging {
+ var usedMemory: Array[Array[Int]] = _
+ def receive = {
+ case AllocateMemory ⇒
+ val heap = ManagementFactory.getMemoryMXBean.getHeapMemoryUsage
+ // getMax can be undefined (-1)
+ val max = math.max(heap.getMax, heap.getCommitted)
+ val used = heap.getUsed
+ log.debug("used heap before: [{}] bytes, of max [{}]", used, heap.getMax)
+ // allocate 70% of free space
+ val allocateBytes = (0.7 * (max - used)).toInt
+ val numberOfArrays = allocateBytes / 1024
+ usedMemory = Array.ofDim(numberOfArrays, 248) // each 248 element Int array will use ~ 1 kB
+ log.debug("used heap after: [{}] bytes", ManagementFactory.getMemoryMXBean.getHeapMemoryUsage.getUsed)
+ sender() ! "done"
+ }
+ }
+
+ case object AllocateMemory
+ final case class Reply(address: Address)
+
+ val node1 = role("node-1")
+ val node2 = role("node-2")
+ val node3 = role("node-3")
+
+ def nodeList = Seq(node1, node2, node3)
+
+ // Extract individual sigar library for every node.
+ nodeList foreach { role ⇒
+ nodeConfig(role) {
+ ConfigFactory.parseString("akka.cluster.metrics.native-library-extract-folder=${user.dir}/target/native/" + role.name)
+ }
+ }
+
+ commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString("""
+
+ # Disable legacy metrics.
+ akka.cluster.metrics.enabled=off
+
+ # Enable metrics extension.
+ akka.extensions=["akka.cluster.metrics.ClusterMetricsExtension"]
+
+ # Use rapid metrics collection.
+ akka.cluster.metrics {
+ collector {
+ sample-interval = 1s
+ gossip-interval = 1s
+ moving-average-half-life = 2s
+ }
+ }
+
+ # Use metrics extension routing.
+ akka.actor.deployment {
+ /router3 = {
+ router = cluster-metrics-adaptive-pool
+ metrics-selector = cpu
+ nr-of-instances = 9
+ }
+ /router4 = {
+ router = cluster-metrics-adaptive-pool
+ metrics-selector = "akka.cluster.metrics.TestCustomMetricsSelector"
+ nr-of-instances = 10
+ cluster {
+ enabled = on
+ max-nr-of-instances-per-node = 2
+ }
+ }
+ }
+ """)).withFallback(MultiNodeClusterSpec.clusterConfig))
+
+}
+
+class TestCustomMetricsSelector(config: Config) extends MetricsSelector {
+ override def weights(nodeMetrics: Set[NodeMetrics]): Map[Address, Int] = Map.empty
+}
+
+class AdaptiveLoadBalancingRouterMultiJvmNode1 extends AdaptiveLoadBalancingRouterSpec
+class AdaptiveLoadBalancingRouterMultiJvmNode2 extends AdaptiveLoadBalancingRouterSpec
+class AdaptiveLoadBalancingRouterMultiJvmNode3 extends AdaptiveLoadBalancingRouterSpec
+
+abstract class AdaptiveLoadBalancingRouterSpec extends MultiNodeSpec(AdaptiveLoadBalancingRouterConfig)
+ with MultiNodeClusterSpec with RedirectLogging
+ with ImplicitSender with DefaultTimeout {
+ import AdaptiveLoadBalancingRouterConfig._
+
+ def currentRoutees(router: ActorRef) =
+ Await.result(router ? GetRoutees, timeout.duration).asInstanceOf[Routees].routees
+
+ def receiveReplies(expectedReplies: Int): Map[Address, Int] = {
+ val zero = Map.empty[Address, Int] ++ roles.map(address(_) -> 0)
+ (receiveWhile(5 seconds, messages = expectedReplies) {
+ case Reply(address) ⇒ address
+ }).foldLeft(zero) {
+ case (replyMap, address) ⇒ replyMap + (address -> (replyMap(address) + 1))
+ }
+ }
+
+ /**
+ * Fills in self address for local ActorRef
+ */
+ def fullAddress(actorRef: ActorRef): Address = actorRef.path.address match {
+ case Address(_, _, None, None) ⇒ cluster.selfAddress
+ case a ⇒ a
+ }
+
+ def startRouter(name: String): ActorRef = {
+ val router = system.actorOf(ClusterRouterPool(
+ local = AdaptiveLoadBalancingPool(HeapMetricsSelector),
+ settings = ClusterRouterPoolSettings(totalInstances = 10, maxInstancesPerNode = 1, allowLocalRoutees = true, useRole = None)).
+ props(Props[Echo]),
+ name)
+ // it may take some time until router receives cluster member events
+ awaitAssert { currentRoutees(router).size should be(roles.size) }
+ val routees = currentRoutees(router)
+ routees.map { case ActorRefRoutee(ref) ⇒ fullAddress(ref) }.toSet should be(roles.map(address).toSet)
+ router
+ }
+
+ val metricsSettings = ClusterMetricsSettings(cluster.system.settings.config)
+
+ def metricsAwait(factor: Int = 10): Unit = Thread.sleep(metricsSettings.CollectorSampleInterval.toMillis * factor)
+
+ "A cluster with a AdaptiveLoadBalancingRouter" must {
+ "start cluster nodes" taggedAs LongRunningTest in {
+ awaitClusterUp(roles: _*)
+ enterBarrier("after-1")
+ }
+
+ "use all nodes in the cluster when not overloaded" taggedAs LongRunningTest in {
+ runOn(node1) {
+ val router1 = startRouter("router1")
+
+ // collect some metrics before we start
+ metricsAwait()
+
+ val iterationCount = 100
+ 1 to iterationCount foreach { _ ⇒
+ router1 ! "hit"
+ // wait a while between each message, since metrics is collected periodically
+ Thread.sleep(10)
+ }
+
+ val replies = receiveReplies(iterationCount)
+
+ replies(node1) should be > (0)
+ replies(node2) should be > (0)
+ replies(node3) should be > (0)
+ replies.values.sum should be(iterationCount)
+
+ }
+
+ enterBarrier("after-2")
+ }
+
+ "prefer node with more free heap capacity" taggedAs LongRunningTest in {
+ System.gc()
+ enterBarrier("gc")
+
+ runOn(node2) {
+ within(20.seconds) {
+ system.actorOf(Props[Memory], "memory") ! AllocateMemory
+ expectMsg("done")
+ }
+ }
+ enterBarrier("heap-allocated")
+
+ runOn(node1) {
+ val router2 = startRouter("router2")
+
+ // collect some metrics before we start
+ metricsAwait()
+
+ val iterationCount = 3000
+ 1 to iterationCount foreach { _ ⇒
+ router2 ! "hit"
+ }
+
+ val replies = receiveReplies(iterationCount)
+
+ replies(node3) should be > (replies(node2))
+ replies.values.sum should be(iterationCount)
+
+ }
+
+ enterBarrier("after-3")
+ }
+
+ "create routees from configuration" taggedAs LongRunningTest in {
+ runOn(node1) {
+ val router3 = system.actorOf(FromConfig.props(Props[Memory]), "router3")
+ // it may take some time until router receives cluster member events
+ awaitAssert { currentRoutees(router3).size should be(9) }
+ val routees = currentRoutees(router3)
+ routees.map { case ActorRefRoutee(ref) ⇒ fullAddress(ref) }.toSet should be(Set(address(node1)))
+ }
+ enterBarrier("after-4")
+ }
+
+ "create routees from cluster.enabled configuration" taggedAs LongRunningTest in {
+ runOn(node1) {
+ val router4 = system.actorOf(FromConfig.props(Props[Memory]), "router4")
+ // it may take some time until router receives cluster member events
+ awaitAssert { currentRoutees(router4).size should be(6) }
+ val routees = currentRoutees(router4)
+ routees.map { case ActorRefRoutee(ref) ⇒ fullAddress(ref) }.toSet should be(Set(
+ address(node1), address(node2), address(node3)))
+ }
+ enterBarrier("after-5")
+ }
+ }
+}
diff --git a/akka-cluster-metrics/src/test/resources/logback-test.xml b/akka-cluster-metrics/src/test/resources/logback-test.xml
new file mode 100644
index 0000000000..a4b6fc744d
--- /dev/null
+++ b/akka-cluster-metrics/src/test/resources/logback-test.xml
@@ -0,0 +1,39 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration>
+
+    <property name="folder" value="target"/>
+    <property name="pattern" value="%date{MM/dd HH:mm:ss.SSS} %-5level [%thread] %logger{1} - %msg%n"/>
+
+    <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
+        <encoder>
+            <pattern>${pattern}</pattern>
+        </encoder>
+    </appender>
+
+    <appender name="FILE" class="ch.qos.logback.core.FileAppender">
+        <file>${folder}/test.log</file>
+        <append>false</append>
+        <encoder>
+            <pattern>${pattern}</pattern>
+        </encoder>
+    </appender>
+
+    <root level="INFO">
+        <appender-ref ref="CONSOLE"/>
+        <appender-ref ref="FILE"/>
+    </root>
+
+</configuration>
diff --git a/akka-cluster-metrics/src/test/resources/reference.conf b/akka-cluster-metrics/src/test/resources/reference.conf
new file mode 100644
index 0000000000..4948247054
--- /dev/null
+++ b/akka-cluster-metrics/src/test/resources/reference.conf
@@ -0,0 +1,6 @@
+akka {
+ actor {
+ serialize-creators = on
+ serialize-messages = on
+ }
+}
diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala
new file mode 100644
index 0000000000..a127981a0e
--- /dev/null
+++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala
@@ -0,0 +1,124 @@
+/**
+ * Copyright (C) 2009-2014 Typesafe Inc.
+ */
+
+package akka.cluster.metrics
+
+import scala.language.postfixOps
+import scala.collection.immutable
+import scala.concurrent.duration._
+import scala.concurrent.Await
+import scala.util.{ Success, Try, Failure }
+import akka.actor._
+import akka.testkit._
+import akka.cluster.metrics.StandardMetrics._
+import org.scalatest.WordSpec
+import org.scalatest.Matchers
+import akka.cluster.Cluster
+
+@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
+class MetricsExtensionSpec extends AkkaSpec(MetricsConfig.clusterSigarMock)
+ with ImplicitSender with RedirectLogging {
+ import system.dispatcher
+
+ val cluster = Cluster(system)
+
+ val extension = ClusterMetricsExtension(system)
+
+ val metricsView = new ClusterMetricsView(cluster.system)
+
+ val sampleInterval = extension.settings.CollectorSampleInterval
+
+ def metricsNodeCount = metricsView.clusterMetrics.size
+
+ def metricsHistorySize = metricsView.metricsHistory.size
+
+ // This is a single node test.
+ val nodeCount = 1
+
+ // Limit collector sample count.
+ val sampleCount = 10
+
+ // Metrics verification precision.
+ val epsilon = 0.001
+
+  // Sleep longer than a single sample.
+ def awaitSample(time: Long = 3 * sampleInterval.toMillis) = Thread.sleep(time)
+
+ "Metrics Extension" must {
+
+ "collect metrics after start command" in {
+ extension.supervisor ! CollectionStartMessage
+ awaitAssert(metricsNodeCount should be(nodeCount), 15 seconds)
+ }
+
+ "collect mock sample during a time window" in {
+ awaitAssert(metricsHistorySize should be(sampleCount), 15 seconds)
+ extension.supervisor ! CollectionStopMessage
+ awaitSample()
+ metricsNodeCount should be(nodeCount)
+ metricsHistorySize should be >= (sampleCount)
+ }
+
+ "verify sigar mock data matches expected ewma data" in {
+
+ val history = metricsView.metricsHistory.reverse.map { _.head }
+
+ val expected = List(
+ (0.700, 0.000, 0.000),
+ (0.700, 0.018, 0.007),
+ (0.700, 0.051, 0.020),
+ (0.700, 0.096, 0.038),
+ (0.700, 0.151, 0.060),
+ (0.700, 0.214, 0.085),
+ (0.700, 0.266, 0.106),
+ (0.700, 0.309, 0.123),
+ (0.700, 0.343, 0.137),
+ (0.700, 0.372, 0.148))
+
+ expected.size should be(sampleCount)
+
+ history.zip(expected) foreach {
+ case (mockMetrics, expectedData) ⇒
+ (mockMetrics, expectedData) match {
+ case (Cpu(_, _, loadAverageMock, cpuCombinedMock, cpuStolenMock, _),
+ (loadAverageEwma, cpuCombinedEwma, cpuStolenEwma)) ⇒
+ loadAverageMock.get should be(loadAverageEwma +- epsilon)
+ cpuCombinedMock.get should be(cpuCombinedEwma +- epsilon)
+ cpuStolenMock.get should be(cpuStolenEwma +- epsilon)
+ }
+ }
+ }
+
+ "control collector on/off state" in {
+
+ def cycle() = {
+
+ val size1 = metricsHistorySize
+ awaitSample()
+ val size2 = metricsHistorySize
+ size1 should be(size2)
+
+ extension.supervisor ! CollectionStartMessage
+ awaitSample()
+ val size3 = metricsHistorySize
+ size3 should be > (size2)
+
+ extension.supervisor ! CollectionStopMessage
+ awaitSample()
+ val size4 = metricsHistorySize
+ size4 should be >= (size3)
+
+ awaitSample()
+ val size5 = metricsHistorySize
+ size5 should be(size4)
+
+ }
+
+ (1 to 3) foreach { step ⇒ cycle() }
+
+ }
+
+ }
+
+}
diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala
new file mode 100644
index 0000000000..9435566ee1
--- /dev/null
+++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala
@@ -0,0 +1,119 @@
+/**
+ * Copyright (C) 2009-2014 Typesafe Inc.
+ */
+package akka.cluster.metrics
+
+import org.scalatest.WordSpec
+import org.scalatest.Matchers
+
+import akka.actor.Address
+import akka.cluster.metrics.StandardMetrics._
+
+@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
+class MetricsSelectorSpec extends WordSpec with Matchers {
+
+ val factor = 0.3 // TODO read from reference.conf
+
+ val abstractSelector = new CapacityMetricsSelector {
+ override def capacity(nodeMetrics: Set[NodeMetrics]): Map[Address, Double] = Map.empty
+ }
+
+ val a1 = Address("akka.tcp", "sys", "a1", 2551)
+ val b1 = Address("akka.tcp", "sys", "b1", 2551)
+ val c1 = Address("akka.tcp", "sys", "c1", 2551)
+ val d1 = Address("akka.tcp", "sys", "d1", 2551)
+
+ val decayFactor = Some(0.18)
+
+ val nodeMetricsA = NodeMetrics(a1, System.currentTimeMillis, Set(
+ Metric.create(HeapMemoryUsed, 128, decayFactor),
+ Metric.create(HeapMemoryCommitted, 256, decayFactor),
+ Metric.create(HeapMemoryMax, 512, None),
+ Metric.create(CpuCombined, 0.2, decayFactor),
+ Metric.create(CpuStolen, 0.1, decayFactor),
+ Metric.create(SystemLoadAverage, 0.5, None),
+ Metric.create(Processors, 8, None)).flatten)
+
+ val nodeMetricsB = NodeMetrics(b1, System.currentTimeMillis, Set(
+ Metric.create(HeapMemoryUsed, 256, decayFactor),
+ Metric.create(HeapMemoryCommitted, 512, decayFactor),
+ Metric.create(HeapMemoryMax, 1024, None),
+ Metric.create(CpuCombined, 0.4, decayFactor),
+ Metric.create(CpuStolen, 0.2, decayFactor),
+ Metric.create(SystemLoadAverage, 1.0, None),
+ Metric.create(Processors, 16, None)).flatten)
+
+ val nodeMetricsC = NodeMetrics(c1, System.currentTimeMillis, Set(
+ Metric.create(HeapMemoryUsed, 1024, decayFactor),
+ Metric.create(HeapMemoryCommitted, 1024, decayFactor),
+ Metric.create(HeapMemoryMax, 1024, None),
+ Metric.create(CpuCombined, 0.6, decayFactor),
+ Metric.create(CpuStolen, 0.3, decayFactor),
+ Metric.create(SystemLoadAverage, 16.0, None),
+ Metric.create(Processors, 16, None)).flatten)
+
+ val nodeMetricsD = NodeMetrics(d1, System.currentTimeMillis, Set(
+ Metric.create(HeapMemoryUsed, 511, decayFactor),
+ Metric.create(HeapMemoryCommitted, 512, decayFactor),
+ Metric.create(HeapMemoryMax, 512, None),
+ Metric.create(Processors, 2, decayFactor)).flatten)
+
+ val nodeMetrics = Set(nodeMetricsA, nodeMetricsB, nodeMetricsC, nodeMetricsD)
+
+ "CapacityMetricsSelector" must {
+
+ "calculate weights from capacity" in {
+ val capacity = Map(a1 -> 0.6, b1 -> 0.3, c1 -> 0.1)
+ val weights = abstractSelector.weights(capacity)
+ weights should be(Map(c1 -> 1, b1 -> 3, a1 -> 6))
+ }
+
+ "handle low and zero capacity" in {
+ val capacity = Map(a1 -> 0.0, b1 -> 1.0, c1 -> 0.005, d1 -> 0.004)
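+      // assuming weight(c) = round(c / max(0.01, minCapacity)): the divisor here is 0.01,
+      // so 0.005 rounds up to weight 1 and 0.004 rounds down to weight 0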
+ val weights = abstractSelector.weights(capacity)
+ weights should be(Map(a1 -> 0, b1 -> 100, c1 -> 1, d1 -> 0))
+ }
+
+ }
+
+ "HeapMetricsSelector" must {
+ "calculate capacity of heap metrics" in {
+ val capacity = HeapMetricsSelector.capacity(nodeMetrics)
+ capacity(a1) should be(0.75 +- 0.0001)
+ capacity(b1) should be(0.75 +- 0.0001)
+ capacity(c1) should be(0.0 +- 0.0001)
+ capacity(d1) should be(0.001953125 +- 0.0001)
+ }
+ }
+
+ "CpuMetricsSelector" must {
+ "calculate capacity of cpuCombined metrics" in {
+ val capacity = CpuMetricsSelector.capacity(nodeMetrics)
+ capacity(a1) should be(1.0 - 0.2 - 0.1 * (1.0 + factor) +- 0.0001)
+ capacity(b1) should be(1.0 - 0.4 - 0.2 * (1.0 + factor) +- 0.0001)
+ capacity(c1) should be(1.0 - 0.6 - 0.3 * (1.0 + factor) +- 0.0001)
+ capacity.contains(d1) should be(false)
+ }
+ }
+
+ "SystemLoadAverageMetricsSelector" must {
+ "calculate capacity of systemLoadAverage metrics" in {
+ val capacity = SystemLoadAverageMetricsSelector.capacity(nodeMetrics)
+ capacity(a1) should be(0.9375 +- 0.0001)
+ capacity(b1) should be(0.9375 +- 0.0001)
+ capacity(c1) should be(0.0 +- 0.0001)
+ capacity.contains(d1) should be(false)
+ }
+ }
+
+ "MixMetricsSelector" must {
+ "aggregate capacity of all metrics" in {
+ val capacity = MixMetricsSelector.capacity(nodeMetrics)
+ capacity(a1) should be((0.75 + 0.67 + 0.9375) / 3 +- 0.0001)
+ capacity(b1) should be((0.75 + 0.34 + 0.9375) / 3 +- 0.0001)
+ capacity(c1) should be((0.0 + 0.01 + 0.0) / 3 +- 0.0001)
+ capacity(d1) should be((0.001953125) / 1 +- 0.0001)
+ }
+ }
+
+}
diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsSettingsSpec.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsSettingsSpec.scala
new file mode 100644
index 0000000000..939fb1628d
--- /dev/null
+++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsSettingsSpec.scala
@@ -0,0 +1,44 @@
+/**
+ * Copyright (C) 2009-2014 Typesafe Inc.
+ */
+
+package akka.cluster.metrics
+
+import language.postfixOps
+import akka.testkit.AkkaSpec
+import akka.dispatch.Dispatchers
+import scala.concurrent.duration._
+import akka.remote.PhiAccrualFailureDetector
+import akka.util.Helpers.ConfigOps
+import com.typesafe.config.ConfigFactory
+
+@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
+class ClusterMetricsSettingsSpec extends AkkaSpec {
+
+ "ClusterMetricsSettings" must {
+
+ "be able to parse generic metrics config elements" in {
+ val settings = new ClusterMetricsSettings(system.settings.config)
+ import settings._
+
+ // Extension.
+ MetricsDispatcher should be(Dispatchers.DefaultDispatcherId)
+ PeriodicTasksInitialDelay should be(1 second)
+ NativeLibraryExtractFolder should be(System.getProperty("user.dir") + "/native")
+ SerializerIdentifier should be(10)
+
+ // Supervisor.
+ SupervisorName should be("cluster-metrics")
+ SupervisorStrategyProvider should be(classOf[ClusterMetricsStrategy].getName)
+ SupervisorStrategyConfiguration should be(
+ ConfigFactory.parseString("loggingEnabled=true,maxNrOfRetries=3,withinTimeRange=3s"))
+
+ // Collector.
+ CollectorEnabled should be(true)
+ CollectorProvider should be("")
+ CollectorSampleInterval should be(3 seconds)
+ CollectorGossipInterval should be(3 seconds)
+ CollectorMovingAverageHalfLife should be(12 seconds)
+ }
+ }
+}
diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/EWMASpec.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/EWMASpec.scala
new file mode 100644
index 0000000000..ab56dc3253
--- /dev/null
+++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/EWMASpec.scala
@@ -0,0 +1,101 @@
+/**
+ * Copyright (C) 2009-2014 Typesafe Inc.
+ */
+
+package akka.cluster.metrics
+
+import language.postfixOps
+import scala.concurrent.duration._
+import akka.testkit.{ LongRunningTest, AkkaSpec }
+import scala.concurrent.forkjoin.ThreadLocalRandom
+
+@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
+class EWMASpec extends AkkaSpec(MetricsConfig.defaultEnabled) with MetricsCollectorFactory {
+ import system.dispatcher
+
+ val collector = createMetricsCollector
+
+ "DataStream" must {
+
+ "calcualate same ewma for constant values" in {
+ val ds = EWMA(value = 100.0, alpha = 0.18) :+
+ 100.0 :+ 100.0 :+ 100.0
+ ds.value should be(100.0 +- 0.001)
+ }
+
+ "calcualate correct ewma for normal decay" in {
+ val d0 = EWMA(value = 1000.0, alpha = 2.0 / (1 + 10))
+ d0.value should be(1000.0 +- 0.01)
+ val d1 = d0 :+ 10.0
+ d1.value should be(820.0 +- 0.01)
+ val d2 = d1 :+ 10.0
+ d2.value should be(672.73 +- 0.01)
+ val d3 = d2 :+ 10.0
+ d3.value should be(552.23 +- 0.01)
+ val d4 = d3 :+ 10.0
+ d4.value should be(453.64 +- 0.01)
+
+ val dn = (1 to 100).foldLeft(d0)((d, _) ⇒ d :+ 10.0)
+ dn.value should be(10.0 +- 0.1)
+ }
+
+ "calculate ewma for alpha 1.0, max bias towards latest value" in {
+ val d0 = EWMA(value = 100.0, alpha = 1.0)
+ d0.value should be(100.0 +- 0.01)
+ val d1 = d0 :+ 1.0
+ d1.value should be(1.0 +- 0.01)
+ val d2 = d1 :+ 57.0
+ d2.value should be(57.0 +- 0.01)
+ val d3 = d2 :+ 10.0
+ d3.value should be(10.0 +- 0.01)
+ }
+
+ "calculate alpha from half-life and collect interval" in {
+ // according to http://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
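+      // assuming alpha(halfLife, interval) = 1 - exp(-(ln 2 / halfLife) * interval),
+      // halfLife = 19 / 2.8854 ≈ 6.585 s gives alpha = 1 - exp(-ln 2 / 6.585) ≈ 0.1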
+ val expectedAlpha = 0.1
+ // alpha = 2.0 / (1 + N)
+ val n = 19
+ val halfLife = n.toDouble / 2.8854
+ val collectInterval = 1.second
+ val halfLifeDuration = (halfLife * 1000).millis
+ EWMA.alpha(halfLifeDuration, collectInterval) should be(expectedAlpha +- 0.001)
+ }
+
+ "calculate sane alpha from short half-life" in {
+ val alpha = EWMA.alpha(1.millis, 3.seconds)
+ alpha should be <= (1.0)
+ alpha should be >= (0.0)
+ alpha should be(1.0 +- 0.001)
+ }
+
+ "calculate sane alpha from long half-life" in {
+ val alpha = EWMA.alpha(1.day, 3.seconds)
+ alpha should be <= (1.0)
+ alpha should be >= (0.0)
+ alpha should be(0.0 +- 0.001)
+ }
+
+ "calculate the ewma for multiple, variable, data streams" taggedAs LongRunningTest in {
+ var streamingDataSet = Map.empty[String, Metric]
+ var usedMemory = Array.empty[Byte]
+ (1 to 50) foreach { _ ⇒
+ // wait a while between each message to give the metrics a chance to change
+ Thread.sleep(100)
+ usedMemory = usedMemory ++ Array.fill(1024)(ThreadLocalRandom.current.nextInt(127).toByte)
+ val changes = collector.sample.metrics.flatMap { latest ⇒
+ streamingDataSet.get(latest.name) match {
+ case None ⇒ Some(latest)
+ case Some(previous) ⇒
+ if (latest.isSmooth && latest.value != previous.value) {
+ val updated = previous :+ latest
+ updated.isSmooth should be(true)
+ updated.smoothValue should not be (previous.smoothValue)
+ Some(updated)
+ } else None
+ }
+ }
+ streamingDataSet ++= changes.map(m ⇒ m.name -> m)
+ }
+ }
+ }
+}
diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/MetricSpec.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/MetricSpec.scala
new file mode 100644
index 0000000000..3c5c5ef074
--- /dev/null
+++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/MetricSpec.scala
@@ -0,0 +1,286 @@
+/**
+ * Copyright (C) 2009-2014 Typesafe Inc.
+ */
+
+package akka.cluster.metrics
+
+import org.scalatest.WordSpec
+import org.scalatest.Matchers
+import akka.cluster.metrics.StandardMetrics._
+import scala.util.Failure
+import akka.actor.Address
+import akka.testkit.AkkaSpec
+import akka.testkit.ImplicitSender
+import java.lang.System.{ currentTimeMillis ⇒ newTimestamp }
+
+@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
+class MetricNumericConverterSpec extends WordSpec with Matchers with MetricNumericConverter {
+
+ "MetricNumericConverter" must {
+
+ "convert" in {
+ convertNumber(0).isLeft should be(true)
+ convertNumber(1).left.get should be(1)
+ convertNumber(1L).isLeft should be(true)
+ convertNumber(0.0).isRight should be(true)
+ }
+
+ "define a new metric" in {
+ val Some(metric) = Metric.create(HeapMemoryUsed, 256L, decayFactor = Some(0.18))
+ metric.name should be(HeapMemoryUsed)
+ metric.value should be(256L)
+ metric.isSmooth should be(true)
+ metric.smoothValue should be(256.0 +- 0.0001)
+ }
+
+ "define an undefined value with a None " in {
+ Metric.create("x", -1, None).isDefined should be(false)
+ Metric.create("x", java.lang.Double.NaN, None).isDefined should be(false)
+ Metric.create("x", Failure(new RuntimeException), None).isDefined should be(false)
+ }
+
+ "recognize whether a metric value is defined" in {
+ defined(0) should be(true)
+ defined(0.0) should be(true)
+ }
+
+ "recognize whether a metric value is not defined" in {
+ defined(-1) should be(false)
+ defined(-1.0) should be(false)
+ defined(Double.NaN) should be(false)
+ }
+ }
+}
+
+@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
+class NodeMetricsSpec extends WordSpec with Matchers {
+
+ val node1 = Address("akka.tcp", "sys", "a", 2554)
+ val node2 = Address("akka.tcp", "sys", "a", 2555)
+
+ "NodeMetrics must" must {
+
+ "return correct result for 2 'same' nodes" in {
+ (NodeMetrics(node1, 0) sameAs NodeMetrics(node1, 0)) should be(true)
+ }
+
+ "return correct result for 2 not 'same' nodes" in {
+ (NodeMetrics(node1, 0) sameAs NodeMetrics(node2, 0)) should be(false)
+ }
+
+ "merge 2 NodeMetrics by most recent" in {
+ val sample1 = NodeMetrics(node1, 1, Set(Metric.create("a", 10, None), Metric.create("b", 20, None)).flatten)
+ val sample2 = NodeMetrics(node1, 2, Set(Metric.create("a", 11, None), Metric.create("c", 30, None)).flatten)
+
+ val merged = sample1 merge sample2
+ merged.timestamp should be(sample2.timestamp)
+ merged.metric("a").map(_.value) should be(Some(11))
+ merged.metric("b").map(_.value) should be(Some(20))
+ merged.metric("c").map(_.value) should be(Some(30))
+ }
+
+ "not merge 2 NodeMetrics if master is more recent" in {
+ val sample1 = NodeMetrics(node1, 1, Set(Metric.create("a", 10, None), Metric.create("b", 20, None)).flatten)
+ val sample2 = NodeMetrics(node1, 0, Set(Metric.create("a", 11, None), Metric.create("c", 30, None)).flatten)
+
+ val merged = sample1 merge sample2 // older and not same
+ merged.timestamp should be(sample1.timestamp)
+ merged.metrics should be(sample1.metrics)
+ }
+
+ "update 2 NodeMetrics by most recent" in {
+ val sample1 = NodeMetrics(node1, 1, Set(Metric.create("a", 10, None), Metric.create("b", 20, None)).flatten)
+ val sample2 = NodeMetrics(node1, 2, Set(Metric.create("a", 11, None), Metric.create("c", 30, None)).flatten)
+
+ val updated = sample1 update sample2
+
+ updated.metrics.size should be(3)
+ updated.timestamp should be(sample2.timestamp)
+ updated.metric("a").map(_.value) should be(Some(11))
+ updated.metric("b").map(_.value) should be(Some(20))
+ updated.metric("c").map(_.value) should be(Some(30))
+ }
+
+ "update 3 NodeMetrics with ewma applied" in {
+ import MetricsConfig._
+
+ val decay = Some(defaultDecayFactor)
+ val epsilon = 0.001
+
+ val sample1 = NodeMetrics(node1, 1, Set(Metric.create("a", 1, decay), Metric.create("b", 4, decay)).flatten)
+ val sample2 = NodeMetrics(node1, 2, Set(Metric.create("a", 2, decay), Metric.create("c", 5, decay)).flatten)
+ val sample3 = NodeMetrics(node1, 3, Set(Metric.create("a", 3, decay), Metric.create("d", 6, decay)).flatten)
+
+ val updated = sample1 update sample2 update sample3
+
+ updated.metrics.size should be(4)
+ updated.timestamp should be(sample3.timestamp)
+
+ updated.metric("a").map(_.value).get should be(3)
+ updated.metric("b").map(_.value).get should be(4)
+ updated.metric("c").map(_.value).get should be(5)
+ updated.metric("d").map(_.value).get should be(6)
+
+ updated.metric("a").map(_.smoothValue).get should be(1.512 +- epsilon)
+ updated.metric("b").map(_.smoothValue).get should be(4.000 +- epsilon)
+ updated.metric("c").map(_.smoothValue).get should be(5.000 +- epsilon)
+ updated.metric("d").map(_.smoothValue).get should be(6.000 +- epsilon)
+ }
+
+ }
+}
+
+@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
+class MetricsGossipSpec extends AkkaSpec(MetricsConfig.defaultEnabled) with ImplicitSender with MetricsCollectorFactory {
+
+ val collector = createMetricsCollector
+
+ "A MetricsGossip" must {
+ "add new NodeMetrics" in {
+ val m1 = NodeMetrics(Address("akka.tcp", "sys", "a", 2554), newTimestamp, collector.sample.metrics)
+ val m2 = NodeMetrics(Address("akka.tcp", "sys", "a", 2555), newTimestamp, collector.sample.metrics)
+
+ m1.metrics.size should be > (3)
+ m2.metrics.size should be > (3)
+
+ val g1 = MetricsGossip.empty :+ m1
+ g1.nodes.size should be(1)
+ g1.nodeMetricsFor(m1.address).map(_.metrics) should be(Some(m1.metrics))
+
+ val g2 = g1 :+ m2
+ g2.nodes.size should be(2)
+ g2.nodeMetricsFor(m1.address).map(_.metrics) should be(Some(m1.metrics))
+ g2.nodeMetricsFor(m2.address).map(_.metrics) should be(Some(m2.metrics))
+ }
+
+ "merge peer metrics" in {
+ val m1 = NodeMetrics(Address("akka.tcp", "sys", "a", 2554), newTimestamp, collector.sample.metrics)
+ val m2 = NodeMetrics(Address("akka.tcp", "sys", "a", 2555), newTimestamp, collector.sample.metrics)
+
+ val g1 = MetricsGossip.empty :+ m1 :+ m2
+ g1.nodes.size should be(2)
+ val beforeMergeNodes = g1.nodes
+
+ val m2Updated = m2 copy (metrics = collector.sample.metrics, timestamp = m2.timestamp + 1000)
+ val g2 = g1 :+ m2Updated // merge peers
+ g2.nodes.size should be(2)
+ g2.nodeMetricsFor(m1.address).map(_.metrics) should be(Some(m1.metrics))
+ g2.nodeMetricsFor(m2.address).map(_.metrics) should be(Some(m2Updated.metrics))
+ g2.nodes collect { case peer if peer.address == m2.address ⇒ peer.timestamp should be(m2Updated.timestamp) }
+ }
+
+ "merge an existing metric set for a node and update node ring" in {
+ val m1 = NodeMetrics(Address("akka.tcp", "sys", "a", 2554), newTimestamp, collector.sample.metrics)
+ val m2 = NodeMetrics(Address("akka.tcp", "sys", "a", 2555), newTimestamp, collector.sample.metrics)
+ val m3 = NodeMetrics(Address("akka.tcp", "sys", "a", 2556), newTimestamp, collector.sample.metrics)
+ val m2Updated = m2 copy (metrics = collector.sample.metrics, timestamp = m2.timestamp + 1000)
+
+ val g1 = MetricsGossip.empty :+ m1 :+ m2
+ val g2 = MetricsGossip.empty :+ m3 :+ m2Updated
+
+ g1.nodes.map(_.address) should be(Set(m1.address, m2.address))
+
+ // should contain nodes 1,3, and the most recent version of 2
+ val mergedGossip = g1 merge g2
+ mergedGossip.nodes.map(_.address) should be(Set(m1.address, m2.address, m3.address))
+ mergedGossip.nodeMetricsFor(m1.address).map(_.metrics) should be(Some(m1.metrics))
+ mergedGossip.nodeMetricsFor(m2.address).map(_.metrics) should be(Some(m2Updated.metrics))
+ mergedGossip.nodeMetricsFor(m3.address).map(_.metrics) should be(Some(m3.metrics))
+ mergedGossip.nodes.foreach(_.metrics.size should be > (3))
+ mergedGossip.nodeMetricsFor(m2.address).map(_.timestamp) should be(Some(m2Updated.timestamp))
+ }
+
+ "get the current NodeMetrics if it exists in the local nodes" in {
+ val m1 = NodeMetrics(Address("akka.tcp", "sys", "a", 2554), newTimestamp, collector.sample.metrics)
+ val g1 = MetricsGossip.empty :+ m1
+ g1.nodeMetricsFor(m1.address).map(_.metrics) should be(Some(m1.metrics))
+ }
+
+ "remove a node if it is no longer Up" in {
+ val m1 = NodeMetrics(Address("akka.tcp", "sys", "a", 2554), newTimestamp, collector.sample.metrics)
+ val m2 = NodeMetrics(Address("akka.tcp", "sys", "a", 2555), newTimestamp, collector.sample.metrics)
+
+ val g1 = MetricsGossip.empty :+ m1 :+ m2
+ g1.nodes.size should be(2)
+ val g2 = g1 remove m1.address
+ g2.nodes.size should be(1)
+ g2.nodes.exists(_.address == m1.address) should be(false)
+ g2.nodeMetricsFor(m1.address) should be(None)
+ g2.nodeMetricsFor(m2.address).map(_.metrics) should be(Some(m2.metrics))
+ }
+
+ "filter nodes" in {
+ val m1 = NodeMetrics(Address("akka.tcp", "sys", "a", 2554), newTimestamp, collector.sample.metrics)
+ val m2 = NodeMetrics(Address("akka.tcp", "sys", "a", 2555), newTimestamp, collector.sample.metrics)
+
+ val g1 = MetricsGossip.empty :+ m1 :+ m2
+ g1.nodes.size should be(2)
+ val g2 = g1 filter Set(m2.address)
+ g2.nodes.size should be(1)
+ g2.nodes.exists(_.address == m1.address) should be(false)
+ g2.nodeMetricsFor(m1.address) should be(None)
+ g2.nodeMetricsFor(m2.address).map(_.metrics) should be(Some(m2.metrics))
+ }
+ }
+}
+
+@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
+class MetricValuesSpec extends AkkaSpec(MetricsConfig.defaultEnabled) with MetricsCollectorFactory {
+ import akka.cluster.metrics.StandardMetrics._
+
+ val collector = createMetricsCollector
+
+ val node1 = NodeMetrics(Address("akka.tcp", "sys", "a", 2554), 1, collector.sample.metrics)
+ val node2 = NodeMetrics(Address("akka.tcp", "sys", "a", 2555), 1, collector.sample.metrics)
+
+ val nodes: Seq[NodeMetrics] = {
+ (1 to 100).foldLeft(List(node1, node2)) { (nodes, _) ⇒
+ nodes map { n ⇒
+ n.copy(metrics = collector.sample.metrics.flatMap(latest ⇒ n.metrics.collect {
+ case streaming if latest sameAs streaming ⇒ streaming :+ latest
+ }))
+ }
+ }
+ }
+
+ "NodeMetrics.MetricValues" must {
+ "extract expected metrics for load balancing" in {
+ val stream1 = node2.metric(HeapMemoryCommitted).get.value.longValue
+ val stream2 = node1.metric(HeapMemoryUsed).get.value.longValue
+ stream1 should be >= (stream2)
+ }
+
+ "extract expected MetricValue types for load balancing" in {
+ nodes foreach { node ⇒
+ node match {
+ case HeapMemory(address, _, used, committed, _) ⇒
+ used should be > (0L)
+ committed should be >= (used)
+ // Documentation java.lang.management.MemoryUsage says that committed <= max,
+ // but in practice that is not always true (we have seen it happen). Therefore
+ // we don't check the heap max value in this test.
+ // extract is the java api
+ StandardMetrics.extractHeapMemory(node) should not be (null)
+ }
+
+ node match {
+ case Cpu(address, _, systemLoadAverageOption, cpuCombinedOption, cpuStolenOption, processors) ⇒
+ processors should be > (0)
+ if (systemLoadAverageOption.isDefined)
+ systemLoadAverageOption.get should be >= (0.0)
+ if (cpuCombinedOption.isDefined) {
+ cpuCombinedOption.get should be <= (1.0)
+ cpuCombinedOption.get should be >= (0.0)
+ }
+ if (cpuStolenOption.isDefined) {
+ cpuStolenOption.get should be <= (1.0)
+ cpuStolenOption.get should be >= (0.0)
+ }
+ // extract is the java api
+ StandardMetrics.extractCpu(node) should not be (null)
+ }
+ }
+ }
+ }
+
+}
diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/MetricsCollectorSpec.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/MetricsCollectorSpec.scala
new file mode 100644
index 0000000000..10242064f5
--- /dev/null
+++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/MetricsCollectorSpec.scala
@@ -0,0 +1,102 @@
+/**
+ * Copyright (C) 2009-2014 Typesafe Inc.
+ */
+
+package akka.cluster.metrics
+
+import scala.language.postfixOps
+
+import scala.collection.immutable
+import scala.concurrent.duration._
+import scala.concurrent.Await
+import scala.util.{ Success, Try, Failure }
+
+import akka.actor._
+import akka.testkit._
+import akka.cluster.metrics.StandardMetrics._
+import org.scalatest.WordSpec
+import org.scalatest.Matchers
+
+@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
+class MetricsCollectorSpec extends AkkaSpec(MetricsConfig.defaultEnabled) with ImplicitSender with MetricsCollectorFactory {
+ import system.dispatcher
+
+ val collector = createMetricsCollector
+
+ "Metric must" must {
+
+ "merge 2 metrics that are tracking the same metric" in {
+ for (i ← 1 to 20) {
+ val sample1 = collector.sample.metrics
+ val sample2 = collector.sample.metrics
+ val merged12 = sample2 flatMap (latest ⇒ sample1 collect {
+ case peer if latest sameAs peer ⇒
+ val m = peer :+ latest
+ m.value should be(latest.value)
+ m.isSmooth should be(peer.isSmooth || latest.isSmooth)
+ m
+ })
+
+ val sample3 = collector.sample.metrics
+ val sample4 = collector.sample.metrics
+ val merged34 = sample4 flatMap (latest ⇒ sample3 collect {
+ case peer if latest sameAs peer ⇒
+ val m = peer :+ latest
+ m.value should be(latest.value)
+ m.isSmooth should be(peer.isSmooth || latest.isSmooth)
+ m
+ })
+ }
+ }
+ }
+
+ "MetricsCollector" must {
+
+ "not raise errors when attempting reflective code in apply" in {
+ Try(createMetricsCollector).get should not be null
+ }
+
+ "collect accurate metrics for a node" in {
+ val sample = collector.sample
+ val metrics = sample.metrics.collect { case m ⇒ (m.name, m.value) }
+ val used = metrics collectFirst { case (HeapMemoryUsed, b) ⇒ b }
+ val committed = metrics collectFirst { case (HeapMemoryCommitted, b) ⇒ b }
+ metrics foreach {
+ case (SystemLoadAverage, b) ⇒ b.doubleValue should be >= (0.0)
+ case (Processors, b) ⇒ b.intValue should be >= (0)
+ case (HeapMemoryUsed, b) ⇒ b.longValue should be >= (0L)
+ case (HeapMemoryCommitted, b) ⇒ b.longValue should be > (0L)
+ case (HeapMemoryMax, b) ⇒
+ b.longValue should be > (0L)
+ used.get.longValue should be <= (b.longValue)
+ committed.get.longValue should be <= (b.longValue)
+ case (CpuCombined, b) ⇒
+ b.doubleValue should be <= (1.0)
+ b.doubleValue should be >= (0.0)
+ case (CpuStolen, b) ⇒
+ b.doubleValue should be <= (1.0)
+ b.doubleValue should be >= (0.0)
+
+ }
+ }
+
+ "collect JMX metrics" in {
+ // heap max may be undefined depending on the OS
+ // systemLoadAverage is JMX when SIGAR not present, but
+ // it's not present on all platforms
+ val c = collector.asInstanceOf[JmxMetricsCollector]
+ val heap = c.heapMemoryUsage
+ c.heapUsed(heap).isDefined should be(true)
+ c.heapCommitted(heap).isDefined should be(true)
+ c.processors.isDefined should be(true)
+ }
+
+ "collect 50 node metrics samples in an acceptable duration" taggedAs LongRunningTest in within(10 seconds) {
+ (1 to 50) foreach { _ ⇒
+ val sample = collector.sample
+ sample.metrics.size should be >= (3)
+ Thread.sleep(100)
+ }
+ }
+ }
+}
diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/TestUtil.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/TestUtil.scala
new file mode 100644
index 0000000000..59c58f90ef
--- /dev/null
+++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/TestUtil.scala
@@ -0,0 +1,228 @@
+/**
+ * Copyright (C) 2009-2014 Typesafe Inc.
+ */
+
+package akka.cluster.metrics
+
+import scala.language.postfixOps
+import java.util.logging.LogManager
+import org.slf4j.bridge.SLF4JBridgeHandler
+import akka.testkit.AkkaSpec
+import akka.actor.ExtendedActorSystem
+import akka.actor.Address
+import akka.cluster.MemberStatus
+import akka.cluster.Member
+import akka.cluster.UniqueAddress
+import akka.cluster.Cluster
+import java.io.Closeable
+import akka.actor.ActorRef
+import akka.actor.Props
+import akka.actor.Actor
+import akka.dispatch.RequiresMessageQueue
+import akka.actor.Deploy
+import akka.dispatch.UnboundedMessageQueueSemantics
+import akka.actor.PoisonPill
+import akka.actor.ActorLogging
+import org.scalatest.mock.MockitoSugar
+import akka.actor.ActorSystem
+import akka.dispatch.Dispatchers
+
+/**
+ * Redirect different logging sources to SLF4J.
+ */
+trait RedirectLogging {
+
+ def redirectLogging(): Unit = {
+ // Redirect JUL to SLF4J.
+ LogManager.getLogManager().reset()
+ SLF4JBridgeHandler.install()
+ }
+
+ redirectLogging()
+
+}
+
+/**
+ * Provide sigar library from `project/target` location.
+ */
+case class SimpleSigarProvider(location: String = "native") extends SigarProvider {
+ def extractFolder = s"${System.getProperty("user.dir")}/target/${location}"
+}
+
+/**
+ * Provide sigar library as static mock.
+ */
+case class MockitoSigarProvider(
+ pid: Long = 123,
+ loadAverage: Array[Double] = Array(0.7, 0.3, 0.1),
+ cpuCombined: Double = 0.5,
+ cpuStolen: Double = 0.2,
+ steps: Int = 5) extends SigarProvider with MockitoSugar {
+
+ import org.hyperic.sigar._
+ import org.mockito.Mockito._
+
+ /** Not used. */
+ override def extractFolder = ???
+
+ /** Generate monotonic array from 0 to value. */
+ def increase(value: Double): Array[Double] = {
+ val delta = value / steps
+ (0 to steps) map { _ * delta } toArray
+ }
+
+ /** Sigar mock instance. */
+ override def verifiedSigarInstance = {
+
+ // Note "thenReturn(0)" invocation is consumed in collector construction.
+
+ val cpuPerc = mock[CpuPerc]
+ when(cpuPerc.getCombined) thenReturn (0, increase(cpuCombined): _*)
+ when(cpuPerc.getStolen) thenReturn (0, increase(cpuStolen): _*)
+
+ val sigar = mock[SigarProxy]
+ when(sigar.getPid) thenReturn pid
+ when(sigar.getLoadAverage) thenReturn loadAverage // Constant.
+ when(sigar.getCpuPerc) thenReturn cpuPerc // Increasing.
+
+ sigar
+ }
+}
+
+/**
+ * Used when testing metrics without full cluster
+ *
+ * TODO change factory after https://github.com/akka/akka/issues/16369
+ */
+trait MetricsCollectorFactory { this: AkkaSpec ⇒
+ import MetricsConfig._
+ import org.hyperic.sigar.Sigar
+
+ private def extendedActorSystem = system.asInstanceOf[ExtendedActorSystem]
+
+ def selfAddress = extendedActorSystem.provider.rootPath.address
+
+ def createMetricsCollector: MetricsCollector =
+ try {
+ new SigarMetricsCollector(selfAddress, defaultDecayFactor, new Sigar())
+ //new SigarMetricsCollector(selfAddress, defaultDecayFactor, SimpleSigarProvider().createSigarInstance)
+ } catch {
+ case e: Throwable ⇒
+ log.warning("Sigar failed to load. Using JMX. Reason: " + e.toString)
+ new JmxMetricsCollector(selfAddress, defaultDecayFactor)
+ }
+
+ /** Create JMX collector. */
+ def collectorJMX: MetricsCollector =
+ new JmxMetricsCollector(selfAddress, defaultDecayFactor)
+
+ /** Create Sigar collector. Rely on java agent injection. */
+ def collectorSigarDefault: MetricsCollector =
+ new SigarMetricsCollector(selfAddress, defaultDecayFactor, new Sigar())
+
+ /** Create Sigar collector. Rely on sigar-loader provisioner. */
+ def collectorSigarProvision: MetricsCollector =
+ new SigarMetricsCollector(selfAddress, defaultDecayFactor, SimpleSigarProvider().createSigarInstance)
+
+ /** Create Sigar collector. Rely on static sigar library mock. */
+ def collectorSigarMockito: MetricsCollector =
+ new SigarMetricsCollector(selfAddress, defaultDecayFactor, MockitoSigarProvider().createSigarInstance)
+
+ def isSigar(collector: MetricsCollector): Boolean = collector.isInstanceOf[SigarMetricsCollector]
+}
+
+/**
+ * Metrics collector relying on a mocked Sigar instance, for tests without the native library.
+ */
+class MockitoSigarMetricsCollector(system: ActorSystem)
+ extends SigarMetricsCollector(
+ Address("akka.tcp", system.name),
+ MetricsConfig.defaultDecayFactor,
+ MockitoSigarProvider().createSigarInstance) {
+}
+
+/**
+ * Metrics test configurations.
+ */
+object MetricsConfig {
+
+ val defaultDecayFactor = 2.0 / (1 + 10)
+
+ /** Test w/o cluster, with collection enabled. */
+ val defaultEnabled = """
+ akka.cluster.metrics {
+ collector {
+ enabled = on
+ sample-interval = 1s
+ gossip-interval = 1s
+ }
+ }
+ akka.actor.provider = "akka.remote.RemoteActorRefProvider"
+ """
+
+ /** Test w/o cluster, with collection disabled. */
+ val defaultDisabled = """
+ akka.cluster.metrics {
+ collector {
+ enabled = off
+ }
+ }
+ akka.actor.provider = "akka.remote.RemoteActorRefProvider"
+ """
+
+ /** Test in cluster, with manual collection activation, collector mock, fast. */
+ val clusterSigarMock = """
+ akka.cluster.metrics {
+ periodic-tasks-initial-delay = 100ms
+ collector {
+ enabled = off
+ sample-interval = 200ms
+ gossip-interval = 200ms
+ provider = "akka.cluster.metrics.MockitoSigarMetricsCollector"
+ fallback = false
+ }
+ }
+ akka.actor.provider = "akka.cluster.ClusterActorRefProvider"
+ """
+}
+
+/**
+ * Current cluster metrics, updated periodically via event bus.
+ */
+class ClusterMetricsView(system: ExtendedActorSystem) extends Closeable {
+
+ val extension = ClusterMetricsExtension(system)
+
+ /** Current cluster metrics, updated periodically via event bus. */
+ @volatile
+ private var currentMetricsSet: Set[NodeMetrics] = Set.empty
+
+ /** Collected cluster metrics history. */
+ @volatile
+ private var collectedMetricsList: List[Set[NodeMetrics]] = List.empty
+
+ /** Create actor that subscribes to the cluster eventBus to update current read view state. */
+ private val eventBusListener: ActorRef = {
+ system.systemActorOf(Props(new Actor with ActorLogging with RequiresMessageQueue[UnboundedMessageQueueSemantics] {
+ override def preStart(): Unit = extension.subscribe(self)
+ override def postStop(): Unit = extension.unsubscribe(self)
+ def receive = {
+ case ClusterMetricsChanged(nodes) ⇒
+ currentMetricsSet = nodes
+ collectedMetricsList = nodes :: collectedMetricsList
+ case _ ⇒
+ // Ignore.
+ }
+ }).withDispatcher(Dispatchers.DefaultDispatcherId).withDeploy(Deploy.local), name = "metrics-event-bus-listener")
+ }
+
+ /** Current cluster metrics. */
+ def clusterMetrics: Set[NodeMetrics] = currentMetricsSet
+
+ /** Collected cluster metrics history. */
+ def metricsHistory: List[Set[NodeMetrics]] = collectedMetricsList
+
+ /** Unsubscribe from cluster events. */
+ def close(): Unit = eventBusListener ! PoisonPill
+
+}
diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/protobuf/MessageSerializerSpec.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/protobuf/MessageSerializerSpec.scala
new file mode 100644
index 0000000000..256413912c
--- /dev/null
+++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/protobuf/MessageSerializerSpec.scala
@@ -0,0 +1,60 @@
+/**
+ * Copyright (C) 2009-2014 Typesafe Inc.
+ */
+
+package akka.cluster.metrics.protobuf
+
+import akka.actor.{ ExtendedActorSystem, Address }
+import collection.immutable.SortedSet
+import akka.testkit.AkkaSpec
+import java.math.BigInteger
+import akka.cluster.MemberStatus
+import akka.cluster.metrics.MetricsGossip
+import akka.cluster.metrics.NodeMetrics
+import akka.cluster.metrics.Metric
+import akka.cluster.metrics.EWMA
+import akka.cluster.TestMember
+import akka.cluster.metrics.MetricsGossipEnvelope
+
+@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
+class MessageSerializerSpec extends AkkaSpec(
+ "akka.actor.provider = akka.cluster.ClusterActorRefProvider") {
+
+ val serializer = new MessageSerializer(system.asInstanceOf[ExtendedActorSystem])
+
+ def checkSerialization(obj: AnyRef): Unit = {
+ val blob = serializer.toBinary(obj)
+ val ref = serializer.fromBinary(blob, obj.getClass)
+    ref should be(obj)
+ }
+
+ import MemberStatus._
+
+ val a1 = TestMember(Address("akka.tcp", "sys", "a", 2552), Joining, Set.empty)
+ val b1 = TestMember(Address("akka.tcp", "sys", "b", 2552), Up, Set("r1"))
+ val c1 = TestMember(Address("akka.tcp", "sys", "c", 2552), Leaving, Set("r2"))
+ val d1 = TestMember(Address("akka.tcp", "sys", "d", 2552), Exiting, Set("r1", "r2"))
+ val e1 = TestMember(Address("akka.tcp", "sys", "e", 2552), Down, Set("r3"))
+ val f1 = TestMember(Address("akka.tcp", "sys", "f", 2552), Removed, Set("r2", "r3"))
+
+ "ClusterMessages" must {
+
+ "be serializable" in {
+
+ val metricsGossip = MetricsGossip(Set(NodeMetrics(a1.address, 4711, Set(Metric("foo", 1.2, None))),
+ NodeMetrics(b1.address, 4712, Set(Metric("foo", 2.1, Some(EWMA(value = 100.0, alpha = 0.18))),
+ Metric("bar1", Double.MinPositiveValue, None),
+ Metric("bar2", Float.MaxValue, None),
+ Metric("bar3", Int.MaxValue, None),
+ Metric("bar4", Long.MaxValue, None),
+ Metric("bar5", BigInt(Long.MaxValue), None)))))
+
+ checkSerialization(MetricsGossipEnvelope(a1.address, metricsGossip, true))
+
+ }
+ }
+}
diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala
index 0076821323..78d3218939 100644
--- a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala
@@ -3,6 +3,8 @@
*/
package akka.cluster
+// TODO remove metrics
+
import language.postfixOps
import scala.collection.immutable
import scala.collection.immutable.VectorBuilder
@@ -190,6 +192,7 @@ object ClusterEvent {
/**
* Current snapshot of cluster node metrics. Published to subscribers.
*/
+ @deprecated("Superseded by akka.cluster.metrics (in akka-cluster-metrics jar)", "2.4")
final case class ClusterMetricsChanged(nodeMetrics: Set[NodeMetrics]) extends ClusterDomainEvent {
/**
* Java API
diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterMetricsCollector.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterMetricsCollector.scala
index 73385c3a60..4f7b7d57f5 100644
--- a/akka-cluster/src/main/scala/akka/cluster/ClusterMetricsCollector.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/ClusterMetricsCollector.scala
@@ -4,6 +4,8 @@
package akka.cluster
+// TODO remove metrics
+
import java.io.Closeable
import java.lang.System.{ currentTimeMillis ⇒ newTimestamp }
import java.lang.management.{ OperatingSystemMXBean, MemoryMXBean, ManagementFactory }
@@ -302,6 +304,7 @@ private[cluster] final case class EWMA(value: Double, alpha: Double) {
* averages (e.g. system load average) or finite (e.g. as number of processors), are not trended.
*/
@SerialVersionUID(1L)
+@deprecated("Superseded by akka.cluster.metrics (in akka-cluster-metrics jar)", "2.4")
final case class Metric private[cluster] (name: String, value: Number, private[cluster] val average: Option[EWMA])
extends MetricNumericConverter {
@@ -348,6 +351,7 @@ final case class Metric private[cluster] (name: String, value: Number, private[c
/**
* Factory for creating valid Metric instances.
*/
+@deprecated("Superseded by akka.cluster.metrics (in akka-cluster-metrics jar)", "2.4")
object Metric extends MetricNumericConverter {
/**
@@ -385,6 +389,7 @@ object Metric extends MetricNumericConverter {
* @param metrics the set of sampled [[akka.actor.Metric]]
*/
@SerialVersionUID(1L)
+@deprecated("Superseded by akka.cluster.metrics (in akka-cluster-metrics jar)", "2.4")
final case class NodeMetrics(address: Address, timestamp: Long, metrics: Set[Metric] = Set.empty[Metric]) {
/**
@@ -426,6 +431,7 @@ final case class NodeMetrics(address: Address, timestamp: Long, metrics: Set[Met
* The following extractors and data structures makes it easy to consume the
* [[akka.cluster.NodeMetrics]] in for example load balancers.
*/
+@deprecated("Superseded by akka.cluster.metrics (in akka-cluster-metrics jar)", "2.4")
object StandardMetrics {
// Constants for the heap related Metric names
@@ -577,6 +583,7 @@ private[cluster] trait MetricNumericConverter {
/**
* Implementations of cluster system metrics extends this trait.
*/
+@deprecated("Superseded by akka.cluster.metrics (in akka-cluster-metrics jar)", "2.4")
trait MetricsCollector extends Closeable {
/**
* Samples and collects new data points.
@@ -592,6 +599,7 @@ trait MetricsCollector extends Closeable {
* @param address The [[akka.actor.Address]] of the node being sampled
* @param decay how quickly the exponential weighting of past data is decayed
*/
+@deprecated("Superseded by akka.cluster.metrics (in akka-cluster-metrics jar)", "2.4")
class JmxMetricsCollector(address: Address, decayFactor: Double) extends MetricsCollector {
import StandardMetrics._
@@ -692,6 +700,7 @@ class JmxMetricsCollector(address: Address, decayFactor: Double) extends Metrics
* @param decay how quickly the exponential weighting of past data is decayed
* @param sigar the org.hyperic.Sigar instance
*/
+@deprecated("Superseded by akka.cluster.metrics (in akka-cluster-metrics jar)", "2.4")
class SigarMetricsCollector(address: Address, decayFactor: Double, sigar: AnyRef)
extends JmxMetricsCollector(address, decayFactor) {
diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala
index 63220422ff..140e698cc6 100644
--- a/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala
@@ -4,6 +4,8 @@
package akka.cluster
+// TODO remove metrics
+
import java.io.Closeable
import scala.collection.immutable
import akka.actor.{ Actor, ActorRef, ActorSystemImpl, Address, Props }
diff --git a/akka-cluster/src/main/scala/akka/cluster/routing/AdaptiveLoadBalancing.scala b/akka-cluster/src/main/scala/akka/cluster/routing/AdaptiveLoadBalancing.scala
index 38ea934e26..b665598979 100644
--- a/akka-cluster/src/main/scala/akka/cluster/routing/AdaptiveLoadBalancing.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/routing/AdaptiveLoadBalancing.scala
@@ -3,6 +3,8 @@
*/
package akka.cluster.routing
+// TODO remove metrics
+
import java.util.Arrays
import java.util.concurrent.atomic.AtomicReference
@@ -40,6 +42,7 @@ import akka.routing._
* @param metricsSelector decides what probability to use for selecting a routee, based
* on remaining capacity as indicated by the node metrics
*/
+@deprecated("Superseded by akka.cluster.metrics (in akka-cluster-metrics jar)", "2.4")
final case class AdaptiveLoadBalancingRoutingLogic(system: ActorSystem, metricsSelector: MetricsSelector = MixMetricsSelector)
extends RoutingLogic with NoSerializationVerificationNeeded {
@@ -126,6 +129,7 @@ final case class AdaptiveLoadBalancingRoutingLogic(system: ActorSystem, metricsS
* supervision, death watch and router management messages
*/
@SerialVersionUID(1L)
+@deprecated("Superseded by akka.cluster.metrics (in akka-cluster-metrics jar)", "2.4")
final case class AdaptiveLoadBalancingPool(
metricsSelector: MetricsSelector = MixMetricsSelector,
val nrOfInstances: Int = 0,
@@ -206,6 +210,7 @@ final case class AdaptiveLoadBalancingPool(
* router management messages
*/
@SerialVersionUID(1L)
+@deprecated("Superseded by akka.cluster.metrics (in akka-cluster-metrics jar)", "2.4")
final case class AdaptiveLoadBalancingGroup(
metricsSelector: MetricsSelector = MixMetricsSelector,
paths: immutable.Iterable[String] = Nil,
@@ -246,6 +251,7 @@ final case class AdaptiveLoadBalancingGroup(
* Low heap capacity => small weight.
*/
@SerialVersionUID(1L)
+@deprecated("Superseded by akka.cluster.metrics (in akka-cluster-metrics jar)", "2.4")
case object HeapMetricsSelector extends CapacityMetricsSelector {
/**
* Java API: get the singleton instance
@@ -270,6 +276,7 @@ case object HeapMetricsSelector extends CapacityMetricsSelector {
* Low cpu capacity => small weight.
*/
@SerialVersionUID(1L)
+@deprecated("Superseded by akka.cluster.metrics (in akka-cluster-metrics jar)", "2.4")
case object CpuMetricsSelector extends CapacityMetricsSelector {
/**
* Java API: get the singleton instance
@@ -293,6 +300,7 @@ case object CpuMetricsSelector extends CapacityMetricsSelector {
* Low load average capacity => small weight.
*/
@SerialVersionUID(1L)
+@deprecated("Superseded by akka.cluster.metrics (in akka-cluster-metrics jar)", "2.4")
case object SystemLoadAverageMetricsSelector extends CapacityMetricsSelector {
/**
* Java API: get the singleton instance
@@ -313,6 +321,7 @@ case object SystemLoadAverageMetricsSelector extends CapacityMetricsSelector {
* [akka.cluster.routing.CpuMetricsSelector], and [akka.cluster.routing.SystemLoadAverageMetricsSelector]
*/
@SerialVersionUID(1L)
+@deprecated("Superseded by akka.cluster.metrics (in akka-cluster-metrics jar)", "2.4")
object MixMetricsSelector extends MixMetricsSelectorBase(
Vector(HeapMetricsSelector, CpuMetricsSelector, SystemLoadAverageMetricsSelector)) {
@@ -328,6 +337,7 @@ object MixMetricsSelector extends MixMetricsSelectorBase(
* [akka.cluster.routing.CpuMetricsSelector], and [akka.cluster.routing.SystemLoadAverageMetricsSelector]
*/
@SerialVersionUID(1L)
+@deprecated("Superseded by akka.cluster.metrics (in akka-cluster-metrics jar)", "2.4")
final case class MixMetricsSelector(
selectors: immutable.IndexedSeq[CapacityMetricsSelector])
extends MixMetricsSelectorBase(selectors)
@@ -336,6 +346,7 @@ final case class MixMetricsSelector(
* Base class for MetricsSelector that combines other selectors and aggregates their capacity.
*/
@SerialVersionUID(1L)
+@deprecated("Superseded by akka.cluster.metrics (in akka-cluster-metrics jar)", "2.4")
abstract class MixMetricsSelectorBase(selectors: immutable.IndexedSeq[CapacityMetricsSelector])
extends CapacityMetricsSelector {
@@ -358,6 +369,7 @@ abstract class MixMetricsSelectorBase(selectors: immutable.IndexedSeq[CapacityMe
}
+@deprecated("Superseded by akka.cluster.metrics (in akka-cluster-metrics jar)", "2.4")
object MetricsSelector {
def fromConfig(config: Config, dynamicAccess: DynamicAccess) =
config.getString("metrics-selector") match {
@@ -380,6 +392,7 @@ object MetricsSelector {
* A MetricsSelector is responsible for producing weights from the node metrics.
*/
@SerialVersionUID(1L)
+@deprecated("Superseded by akka.cluster.metrics (in akka-cluster-metrics jar)", "2.4")
trait MetricsSelector extends Serializable {
/**
* The weights per address, based on the the nodeMetrics.
@@ -392,6 +405,7 @@ trait MetricsSelector extends Serializable {
* The weights are typically proportional to the remaining capacity.
*/
@SerialVersionUID(1L)
+@deprecated("Superseded by akka.cluster.metrics (in akka-cluster-metrics jar)", "2.4")
abstract class CapacityMetricsSelector extends MetricsSelector {
/**
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterMetricsDisabledSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterMetricsDisabledSpec.scala
index 242265bb36..efa4335e56 100644
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterMetricsDisabledSpec.scala
+++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterMetricsDisabledSpec.scala
@@ -4,6 +4,8 @@
package akka.cluster
+// TODO remove metrics
+
import akka.remote.testkit.{ MultiNodeSpec, MultiNodeConfig }
import com.typesafe.config.ConfigFactory
import akka.testkit.LongRunningTest
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterMetricsSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterMetricsSpec.scala
index 250f9ac550..294d7ca0bc 100644
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterMetricsSpec.scala
+++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterMetricsSpec.scala
@@ -4,6 +4,8 @@
package akka.cluster
+// TODO remove metrics
+
import scala.language.postfixOps
import scala.concurrent.duration._
import com.typesafe.config.ConfigFactory
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala
index 8a908dab91..8c4ffd4ecd 100644
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala
+++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala
@@ -3,6 +3,8 @@
*/
package akka.cluster
+// TODO remove metrics
+
import language.implicitConversions
import org.scalatest.{ Suite, Outcome, Canceled }
import org.scalatest.exceptions.TestCanceledException
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/StressSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/StressSpec.scala
index 114beb6a6b..cc8d60c8bd 100644
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/StressSpec.scala
+++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/StressSpec.scala
@@ -3,6 +3,9 @@
*/
package akka.cluster
+// TODO remove metrics
+// FIXME this test is not migrated to metrics extension
+
import language.postfixOps
import scala.annotation.tailrec
import scala.collection.immutable
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/AdaptiveLoadBalancingRouterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/AdaptiveLoadBalancingRouterSpec.scala
index 332840d30f..3d1036c0ad 100644
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/AdaptiveLoadBalancingRouterSpec.scala
+++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/AdaptiveLoadBalancingRouterSpec.scala
@@ -4,6 +4,8 @@
package akka.cluster.routing
+// TODO remove metrics
+
import language.postfixOps
import java.lang.management.ManagementFactory
import scala.concurrent.Await
diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala
index 104eeb195c..453654fb6f 100644
--- a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala
@@ -47,6 +47,7 @@ class ClusterConfigSpec extends AkkaSpec {
ReduceGossipDifferentViewProbability should be(400)
SchedulerTickDuration should be(33 millis)
SchedulerTicksPerWheel should be(512)
+ // TODO remove metrics
MetricsEnabled should be(true)
MetricsCollectorClass should be(classOf[SigarMetricsCollector].getName)
MetricsInterval should be(3 seconds)
diff --git a/akka-cluster/src/test/scala/akka/cluster/EWMASpec.scala b/akka-cluster/src/test/scala/akka/cluster/EWMASpec.scala
index d6e2361b9a..b5b151dadd 100644
--- a/akka-cluster/src/test/scala/akka/cluster/EWMASpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/EWMASpec.scala
@@ -4,6 +4,8 @@
package akka.cluster
+// TODO remove metrics
+
import language.postfixOps
import scala.concurrent.duration._
import akka.testkit.{ LongRunningTest, AkkaSpec }
diff --git a/akka-cluster/src/test/scala/akka/cluster/MetricNumericConverterSpec.scala b/akka-cluster/src/test/scala/akka/cluster/MetricNumericConverterSpec.scala
index 8b8d6039d6..318ac0efd3 100644
--- a/akka-cluster/src/test/scala/akka/cluster/MetricNumericConverterSpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/MetricNumericConverterSpec.scala
@@ -4,6 +4,8 @@
package akka.cluster
+// TODO remove metrics
+
import org.scalatest.WordSpec
import org.scalatest.Matchers
import akka.cluster.StandardMetrics._
diff --git a/akka-cluster/src/test/scala/akka/cluster/MetricValuesSpec.scala b/akka-cluster/src/test/scala/akka/cluster/MetricValuesSpec.scala
index 48b5c117b4..740fde91c8 100644
--- a/akka-cluster/src/test/scala/akka/cluster/MetricValuesSpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/MetricValuesSpec.scala
@@ -4,6 +4,8 @@
package akka.cluster
+// TODO remove metrics
+
import scala.util.Try
import akka.actor.Address
import akka.testkit.AkkaSpec
diff --git a/akka-cluster/src/test/scala/akka/cluster/MetricsCollectorSpec.scala b/akka-cluster/src/test/scala/akka/cluster/MetricsCollectorSpec.scala
index d5925540a2..12940f5be8 100644
--- a/akka-cluster/src/test/scala/akka/cluster/MetricsCollectorSpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/MetricsCollectorSpec.scala
@@ -5,6 +5,8 @@
package akka.cluster
+// TODO remove metrics
+
import scala.language.postfixOps
import scala.collection.immutable
diff --git a/akka-cluster/src/test/scala/akka/cluster/MetricsGossipSpec.scala b/akka-cluster/src/test/scala/akka/cluster/MetricsGossipSpec.scala
index b1069019c0..fce30c1f41 100644
--- a/akka-cluster/src/test/scala/akka/cluster/MetricsGossipSpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/MetricsGossipSpec.scala
@@ -4,6 +4,8 @@
package akka.cluster
+// TODO remove metrics
+
import scala.concurrent.duration._
import akka.testkit.{ ImplicitSender, AkkaSpec }
diff --git a/akka-cluster/src/test/scala/akka/cluster/NodeMetricsSpec.scala b/akka-cluster/src/test/scala/akka/cluster/NodeMetricsSpec.scala
index 7d2fa4d87f..9f4b5efe55 100644
--- a/akka-cluster/src/test/scala/akka/cluster/NodeMetricsSpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/NodeMetricsSpec.scala
@@ -4,6 +4,8 @@
package akka.cluster
+// TODO remove metrics
+
import org.scalatest.WordSpec
import org.scalatest.Matchers
import akka.actor.Address
diff --git a/akka-cluster/src/test/scala/akka/cluster/protobuf/ClusterMessageSerializerSpec.scala b/akka-cluster/src/test/scala/akka/cluster/protobuf/ClusterMessageSerializerSpec.scala
index fa64afaeb5..ffb37f9322 100644
--- a/akka-cluster/src/test/scala/akka/cluster/protobuf/ClusterMessageSerializerSpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/protobuf/ClusterMessageSerializerSpec.scala
@@ -3,6 +3,8 @@
*/
package akka.cluster.protobuf
+// TODO remove metrics
+
import akka.cluster._
import akka.actor.{ ExtendedActorSystem, Address }
import collection.immutable.SortedSet
diff --git a/akka-cluster/src/test/scala/akka/cluster/routing/MetricsSelectorSpec.scala b/akka-cluster/src/test/scala/akka/cluster/routing/MetricsSelectorSpec.scala
index a3b952bbaf..4a8329804b 100644
--- a/akka-cluster/src/test/scala/akka/cluster/routing/MetricsSelectorSpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/routing/MetricsSelectorSpec.scala
@@ -4,6 +4,8 @@
package akka.cluster.routing
+// TODO remove metrics
+
import org.scalatest.WordSpec
import org.scalatest.Matchers
diff --git a/akka-docs/rst/java/cluster-metrics.rst b/akka-docs/rst/java/cluster-metrics.rst
new file mode 100644
index 0000000000..7e4b29356c
--- /dev/null
+++ b/akka-docs/rst/java/cluster-metrics.rst
@@ -0,0 +1,162 @@
+
+.. _cluster_metrics_java:
+
+Cluster Metrics Extension
+=========================
+
+Introduction
+------------
+
+With the Cluster Metrics Extension, the member nodes of the cluster can collect system health metrics
+and publish them to other cluster nodes and to the registered subscribers on the system event bus.
+
+Cluster metrics information is primarily used for load-balancing routers,
+and can also be used to implement advanced metrics-based node life cycles,
+such as "Node Let-it-crash" when CPU steal time becomes excessive.
+
+The Cluster Metrics Extension is a separate Akka module, delivered in the ``akka-cluster-metrics`` jar.
+
+To enable usage of the extension you need to add the following dependency to your project:
+::
+
+
+  <dependency>
+    <groupId>com.typesafe.akka</groupId>
+    <artifactId>akka-cluster-metrics_@binVersion@</artifactId>
+    <version>@version@</version>
+  </dependency>
+
+and add the following configuration stanza to your ``application.conf``
+::
+
+ akka.extensions = [ "akka.cluster.metrics.ClusterMetricsExtension" ]
+
+Make sure to disable legacy metrics in akka-cluster: ``akka.cluster.metrics.enabled=off``,
+since it is still enabled in akka-cluster by default (for compatibility with past releases).
+
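+For example, a minimal ``application.conf`` with both settings combined could look like
+::
+
+  akka {
+    extensions = [ "akka.cluster.metrics.ClusterMetricsExtension" ]
+    cluster.metrics.enabled = off
+  }
+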
+Metrics Collector
+-----------------
+
+Metrics collection is delegated to an implementation of ``akka.cluster.metrics.MetricsCollector``.
+
+Different collector implementations provide different subsets of metrics published to the cluster.
+Certain message routing and let-it-crash functions may not work when Sigar is not provisioned.
+
+Cluster metrics extension comes with two built-in collector implementations:
+
+#. ``akka.cluster.metrics.SigarMetricsCollector``, which requires Sigar provisioning, and is more rich/precise
+#. ``akka.cluster.metrics.JmxMetricsCollector``, which is used as fall back, and is less rich/precise
+
+You can also plug-in your own metrics collector implementation.
+
+By default, metrics extension will use collector provider fall back and will try to load them in this order:
+
+#. configured user-provided collector
+#. built-in ``akka.cluster.metrics.SigarMetricsCollector``
+#. and finally ``akka.cluster.metrics.JmxMetricsCollector``
+
+Metrics Events
+--------------
+
+Metrics extension periodically publishes current snapshot of the cluster metrics to the node system event bus.
+
+The publication period is controlled by the ``akka.cluster.metrics.collector.sample-period`` setting.
+
+The payload of the ``akka.cluster.metris.ClusterMetricsChanged`` event will contain
+latest metrics of the node as well as other cluster member nodes metrics gossip
+which was received during the collector sample period.
+
+You can subscribe your metrics listener actors to these events in order to implement custom node lifecycle
+::
+
+ ClusterMetricsExtension.get(system).subscribe(metricsListenerActor);
+
+Hyperic Sigar Provisioning
+--------------------------
+
+Both user-provided and built-in metrics collectors can optionally use `Hyperic Sigar `_
+for a wider and more accurate range of metrics compared to what can be retrieved from ordinary JMX MBeans.
+
+Sigar is using a native o/s library, and requires library provisioning, i.e.
+deployment, extraction and loading of the o/s native library into JVM at runtime.
+
+User can provision Sigar classes and native library in one of the following ways:
+
+#. Use `Kamon sigar-loader `_ as a project dependency for the user project.
+ Metrics extension will extract and load sigar library on demand with help of Kamon sigar provisioner.
+#. Use `Kamon sigar-loader `_ as java agent: ``java -javaagent:/path/to/sigar-loader.jar``.
+ Kamon sigar loader agent will extract and load sigar library during JVM start.
+#. Place ``sigar.jar`` on the ``classpath`` and Sigar native library for the o/s on the ``java.library.path``.
+ User is required to manage both project dependency and library deployment manually.
+
+To enable usage of Sigar you can add the following dependency to the user project
+::
+
+
+ io.kamon
+ sigar-loader
+ @sigarLoaderVersion@
+
+
+You can download Kamon sigar-loader from `Maven Central `_
+
+Adaptive Load Balancing
+-----------------------
+
+The ``AdaptiveLoadBalancingPool`` / ``AdaptiveLoadBalancingGroup`` performs load balancing of messages to cluster nodes based on the cluster metrics data.
+It uses random selection of routees with probabilities derived from the remaining capacity of the corresponding node.
+It can be configured to use a specific MetricsSelector to produce the probabilities, a.k.a. weights:
+
+* ``heap`` / ``HeapMetricsSelector`` - Used and max JVM heap memory. Weights based on remaining heap capacity; (max - used) / max
+* ``load`` / ``SystemLoadAverageMetricsSelector`` - System load average for the past 1 minute, corresponding value can be found in ``top`` of Linux systems. The system is possibly nearing a bottleneck if the system load average is nearing number of cpus/cores. Weights based on remaining load capacity; 1 - (load / processors)
+* ``cpu`` / ``CpuMetricsSelector`` - CPU utilization in percentage, sum of User + Sys + Nice + Wait. Weights based on remaining cpu capacity; 1 - utilization
+* ``mix`` / ``MixMetricsSelector`` - Combines heap, cpu and load. Weights based on mean of remaining capacity of the combined selectors.
+* Any custom implementation of ``akka.cluster.metrics.MetricsSelector``
+
+The collected metrics values are smoothed with `exponential weighted moving average `_. In the :ref:`cluster_configuration_java` you can adjust how quickly past data is decayed compared to new data.
+
+Let's take a look at this router in action. What can be more demanding than calculating factorials?
+
+The backend worker that performs the factorial calculation:
+
+.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialBackend.java#backend
+
+The frontend that receives user jobs and delegates to the backends via the router:
+
+.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialFrontend.java#frontend
+
+
+As you can see, the router is defined in the same way as other routers, and in this case it is configured as follows:
+
+.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/resources/factorial.conf#adaptive-router
+
+It is only ``router`` type and the ``metrics-selector`` parameter that is specific to this router,
+other things work in the same way as other routers.
+
+The same type of router could also have been defined in code:
+
+.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/Extra.java#router-lookup-in-code
+
+.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/Extra.java#router-deploy-in-code
+
+The `Typesafe Activator `_ tutorial named
+`Akka Cluster Samples with Java `_.
+contains the full source code and instructions of how to run the **Adaptive Load Balancing** sample.
+
+Subscribe to Metrics Events
+---------------------------
+
+It is possible to subscribe to the metrics events directly to implement other functionality.
+
+.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/MetricsListener.java#metrics-listener
+
+Custom Metrics Collector
+------------------------
+
+Metrics collection is delegated to the implementation of ``akka.cluster.metrics.MetricsCollector``
+
+You can plug-in your own metrics collector instead of built-in
+``akka.cluster.metrics.SigarMetricsCollector`` or ``akka.cluster.metrics.JmxMetricsCollector``.
+
+Look at those two implementations for inspiration.
+
+Custom metrics collector implementation class must be specified in the :ref:`cluster_metrics_configuration_java`.
diff --git a/akka-docs/rst/java/cluster-usage.rst b/akka-docs/rst/java/cluster-usage.rst
index 385c1cdf41..148a03bf0b 100644
--- a/akka-docs/rst/java/cluster-usage.rst
+++ b/akka-docs/rst/java/cluster-usage.rst
@@ -536,81 +536,9 @@ contains the full source code and instructions of how to run the **Router Exampl
Cluster Metrics
^^^^^^^^^^^^^^^
-The member nodes of the cluster collects system health metrics and publishes that to other nodes and to
-registered subscribers. This information is primarily used for load-balancing routers.
+The member nodes of the cluster can collect system health metrics and publish that to other cluster nodes
+and to the registered subscribers on the system event bus with the help of :doc:`cluster-metrics`.
-Hyperic Sigar
--------------
-
-The built-in metrics is gathered from JMX MBeans, and optionally you can use `Hyperic Sigar `_
-for a wider and more accurate range of metrics compared to what can be retrieved from ordinary MBeans.
-Sigar is using a native OS library. To enable usage of Sigar you need to add the directory of the native library to
-``-Djava.libarary.path=`` add the following dependency::
-
-
- org.fusesource
- sigar
- @sigarVersion@
-
-
-Download the native Sigar libraries from `Maven Central `_
-
-Adaptive Load Balancing
------------------------
-
-The ``AdaptiveLoadBalancingPool`` / ``AdaptiveLoadBalancingGroup`` performs load balancing of messages to cluster nodes based on the cluster metrics data.
-It uses random selection of routees with probabilities derived from the remaining capacity of the corresponding node.
-It can be configured to use a specific MetricsSelector to produce the probabilities, a.k.a. weights:
-
-* ``heap`` / ``HeapMetricsSelector`` - Used and max JVM heap memory. Weights based on remaining heap capacity; (max - used) / max
-* ``load`` / ``SystemLoadAverageMetricsSelector`` - System load average for the past 1 minute, corresponding value can be found in ``top`` of Linux systems. The system is possibly nearing a bottleneck if the system load average is nearing number of cpus/cores. Weights based on remaining load capacity; 1 - (load / processors)
-* ``cpu`` / ``CpuMetricsSelector`` - CPU utilization in percentage, sum of User + Sys + Nice + Wait. Weights based on remaining cpu capacity; 1 - utilization
-* ``mix`` / ``MixMetricsSelector`` - Combines heap, cpu and load. Weights based on mean of remaining capacity of the combined selectors.
-* Any custom implementation of ``akka.cluster.routing.MetricsSelector``
-
-The collected metrics values are smoothed with `exponential weighted moving average `_. In the :ref:`cluster_configuration_java` you can adjust how quickly past data is decayed compared to new data.
-
-Let's take a look at this router in action. What can be more demanding than calculating factorials?
-
-The backend worker that performs the factorial calculation:
-
-.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialBackend.java#backend
-
-The frontend that receives user jobs and delegates to the backends via the router:
-
-.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialFrontend.java#frontend
-
-
-As you can see, the router is defined in the same way as other routers, and in this case it is configured as follows:
-
-.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/resources/factorial.conf#adaptive-router
-
-It is only router type ``adaptive`` and the ``metrics-selector`` that is specific to this router, other things work
-in the same way as other routers.
-
-The same type of router could also have been defined in code:
-
-.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/Extra.java#router-lookup-in-code
-
-.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/Extra.java#router-deploy-in-code
-
-The `Typesafe Activator `_ tutorial named
-`Akka Cluster Samples with Java `_.
-contains the full source code and instructions of how to run the **Adaptive Load Balancing** sample.
-
-Subscribe to Metrics Events
----------------------------
-
-It is possible to subscribe to the metrics events directly to implement other functionality.
-
-.. includecode:: ../../../akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/MetricsListener.java#metrics-listener
-
-Custom Metrics Collector
-------------------------
-
-You can plug-in your own metrics collector instead of
-``akka.cluster.SigarMetricsCollector`` or ``akka.cluster.JmxMetricsCollector``. Look at those two implementations
-for inspiration. The implementation class can be defined in the :ref:`cluster_configuration_java`.
.. _cluster_jmx_java:
diff --git a/akka-docs/rst/java/index-network.rst b/akka-docs/rst/java/index-network.rst
index 6f26d9d457..1e313da6f9 100644
--- a/akka-docs/rst/java/index-network.rst
+++ b/akka-docs/rst/java/index-network.rst
@@ -6,6 +6,7 @@ Networking
../common/cluster
cluster-usage
+ cluster-metrics
remoting
serialization
io
diff --git a/akka-docs/rst/project/migration-guide-2.3.x-2.4.x.rst b/akka-docs/rst/project/migration-guide-2.3.x-2.4.x.rst
index 2a12828f59..1764dd852a 100644
--- a/akka-docs/rst/project/migration-guide-2.3.x-2.4.x.rst
+++ b/akka-docs/rst/project/migration-guide-2.3.x-2.4.x.rst
@@ -165,3 +165,17 @@ Secure Cookies
`Secure cookies` feature was deprecated.
+New Cluster Metrics Extension
+=============================
+Previously, cluster metrics functionality was located in the ``akka-cluster`` jar.
+Now it is split out and moved into a separate akka module: ``akka-cluster-metrics`` jar.
+The module comes with few enhancements, such as use of Kamon sigar-loader
+for native library provisioning as well as use of statistical averaging of metrics data.
+Note that both old and new metrics configuration entries in the ``reference.conf``
+are still in the same name space ``akka.cluster.metrics`` but are not compatible.
+Make sure to disable legacy metrics in akka-cluster: ``akka.cluster.metrics.enabled=off``,
+since it is still enabled in akka-cluster by default (for compatibility with past releases).
+Router configuration entries have also changed for the module, they use prefix ``cluster-metrics-``:
+``cluster-metrics-adaptive-pool`` and ``cluster-metrics-adaptive-group``
+Metrics extension classes and objects are located in the new package ``akka.cluster.metrics``.
+Please see :ref:`Scala `, :ref:`Java ` for more information.
diff --git a/akka-docs/rst/scala/cluster-metrics.rst b/akka-docs/rst/scala/cluster-metrics.rst
new file mode 100644
index 0000000000..2fbab40989
--- /dev/null
+++ b/akka-docs/rst/scala/cluster-metrics.rst
@@ -0,0 +1,155 @@
+
+.. _cluster_metrics_scala:
+
+Cluster Metrics Extension
+=========================
+
+Introduction
+------------
+
+The member nodes of the cluster can collect system health metrics and publish that to other cluster nodes
+and to the registered subscribers on the system event bus with the help of Cluster Metrics Extension.
+
+Cluster metrics information is primarily used for load-balancing routers,
+and can also be used to implement advanced metrics-based node life cycles,
+such as "Node Let-it-crash" when CPU steal time becomes excessive.
+
+Cluster Metrics Extension is a separate akka module delivered in ``akka-cluster-metrics`` jar.
+
+To enable usage of the extension you need to add the following dependency to your project:
+::
+
+ "com.typesafe.akka" % "akka-cluster-metrics_@binVersion@" % "@version@"
+
+and add the following configuration stanza to your ``application.conf``
+::
+
+ akka.extensions = [ "akka.cluster.metrics.ClusterMetricsExtension" ]
+
+Make sure to disable legacy metrics in akka-cluster: ``akka.cluster.metrics.enabled=off``,
+since it is still enabled in akka-cluster by default (for compatibility with past releases).
+
+Metrics Collector
+-----------------
+
+Metrics collection is delegated to an implementation of ``akka.cluster.metrics.MetricsCollector``.
+
+Different collector implementations provide different subsets of metrics published to the cluster.
+Certain message routing and let-it-crash functions may not work when Sigar is not provisioned.
+
+Cluster metrics extension comes with two built-in collector implementations:
+
+#. ``akka.cluster.metrics.SigarMetricsCollector``, which requires Sigar provisioning, and is more rich/precise
+#. ``akka.cluster.metrics.JmxMetricsCollector``, which is used as fall back, and is less rich/precise
+
+You can also plug-in your own metrics collector implementation.
+
+By default, metrics extension will use collector provider fall back and will try to load them in this order:
+
+#. configured user-provided collector
+#. built-in ``akka.cluster.metrics.SigarMetricsCollector``
+#. and finally ``akka.cluster.metrics.JmxMetricsCollector``
+
+Metrics Events
+--------------
+
+Metrics extension periodically publishes current snapshot of the cluster metrics to the node system event bus.
+
+The publication period is controlled by the ``akka.cluster.metrics.collector.sample-period`` setting.
+
+The payload of the ``akka.cluster.metris.ClusterMetricsChanged`` event will contain
+latest metrics of the node as well as other cluster member nodes metrics gossip
+which was received during the collector sample period.
+
+You can subscribe your metrics listener actors to these events in order to implement custom node lifecycle
+::
+
+ ClusterMetricsExtension(system).subscribe(metricsListenerActor)
+
+Hyperic Sigar Provisioning
+--------------------------
+
+Both user-provided and built-in metrics collectors can optionally use `Hyperic Sigar `_
+for a wider and more accurate range of metrics compared to what can be retrieved from ordinary JMX MBeans.
+
+Sigar is using a native o/s library, and requires library provisioning, i.e.
+deployment, extraction and loading of the o/s native library into JVM at runtime.
+
+User can provision Sigar classes and native library in one of the following ways:
+
+#. Use `Kamon sigar-loader `_ as a project dependency for the user project.
+ Metrics extension will extract and load sigar library on demand with help of Kamon sigar provisioner.
+#. Use `Kamon sigar-loader `_ as java agent: ``java -javaagent:/path/to/sigar-loader.jar``.
+ Kamon sigar loader agent will extract and load sigar library during JVM start.
+#. Place ``sigar.jar`` on the ``classpath`` and Sigar native library for the o/s on the ``java.library.path``.
+ User is required to manage both project dependency and library deployment manually.
+
+To enable usage of Sigar you can add the following dependency to the user project
+::
+
+ "io.kamon" % "sigar-loader" % "@sigarLoaderVersion@"
+
+You can download Kamon sigar-loader from `Maven Central `_
+
+
+Adaptive Load Balancing
+-----------------------
+
+The ``AdaptiveLoadBalancingPool`` / ``AdaptiveLoadBalancingGroup`` performs load balancing of messages to cluster nodes based on the cluster metrics data.
+It uses random selection of routees with probabilities derived from the remaining capacity of the corresponding node.
+It can be configured to use a specific MetricsSelector to produce the probabilities, a.k.a. weights:
+
+* ``heap`` / ``HeapMetricsSelector`` - Used and max JVM heap memory. Weights based on remaining heap capacity; (max - used) / max
+* ``load`` / ``SystemLoadAverageMetricsSelector`` - System load average for the past 1 minute, corresponding value can be found in ``top`` of Linux systems. The system is possibly nearing a bottleneck if the system load average is nearing number of cpus/cores. Weights based on remaining load capacity; 1 - (load / processors)
+* ``cpu`` / ``CpuMetricsSelector`` - CPU utilization in percentage, sum of User + Sys + Nice + Wait. Weights based on remaining cpu capacity; 1 - utilization
+* ``mix`` / ``MixMetricsSelector`` - Combines heap, cpu and load. Weights based on mean of remaining capacity of the combined selectors.
+* Any custom implementation of ``akka.cluster.metrics.MetricsSelector``
+
+The collected metrics values are smoothed with `exponential weighted moving average `_. In the :ref:`cluster_configuration_scala` you can adjust how quickly past data is decayed compared to new data.
+
+Let's take a look at this router in action. What can be more demanding than calculating factorials?
+
+The backend worker that performs the factorial calculation:
+
+.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/FactorialBackend.scala#backend
+
+The frontend that receives user jobs and delegates to the backends via the router:
+
+.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/FactorialFrontend.scala#frontend
+
+
+As you can see, the router is defined in the same way as other routers, and in this case it is configured as follows:
+
+.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/resources/factorial.conf#adaptive-router
+
+It is only ``router`` type and the ``metrics-selector`` parameter that is specific to this router,
+other things work in the same way as other routers.
+
+The same type of router could also have been defined in code:
+
+.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/Extra.scala#router-lookup-in-code
+
+.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/Extra.scala#router-deploy-in-code
+
+The `Typesafe Activator `_ tutorial named
+`Akka Cluster Samples with Scala `_.
+contains the full source code and instructions of how to run the **Adaptive Load Balancing** sample.
+
+Subscribe to Metrics Events
+---------------------------
+
+It is possible to subscribe to the metrics events directly to implement other functionality.
+
+.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/MetricsListener.scala#metrics-listener
+
+Custom Metrics Collector
+------------------------
+
+Metrics collection is delegated to the implementation of ``akka.cluster.metrics.MetricsCollector``
+
+You can plug-in your own metrics collector instead of built-in
+``akka.cluster.metrics.SigarMetricsCollector`` or ``akka.cluster.metrics.JmxMetricsCollector``.
+
+Look at those two implementations for inspiration.
+
+Custom metrics collector implementation class must be specified in the :ref:`cluster_metrics_configuration_scala`.
diff --git a/akka-docs/rst/scala/cluster-usage.rst b/akka-docs/rst/scala/cluster-usage.rst
index 195257c0a7..3e44ea31d6 100644
--- a/akka-docs/rst/scala/cluster-usage.rst
+++ b/akka-docs/rst/scala/cluster-usage.rst
@@ -532,77 +532,9 @@ contains the full source code and instructions of how to run the **Router Exampl
Cluster Metrics
^^^^^^^^^^^^^^^
-The member nodes of the cluster collects system health metrics and publishes that to other nodes and to
-registered subscribers. This information is primarily used for load-balancing routers.
+The member nodes of the cluster can collect system health metrics and publish that to other cluster nodes
+and to the registered subscribers on the system event bus with the help of :doc:`cluster-metrics`.
-Hyperic Sigar
--------------
-
-The built-in metrics is gathered from JMX MBeans, and optionally you can use `Hyperic Sigar `_
-for a wider and more accurate range of metrics compared to what can be retrieved from ordinary MBeans.
-Sigar is using a native OS library. To enable usage of Sigar you need to add the directory of the native library to
-``-Djava.libarary.path=`` add the following dependency::
-
- "org.fusesource" % "sigar" % "@sigarVersion@"
-
-Download the native Sigar libraries from `Maven Central `_
-
-Adaptive Load Balancing
------------------------
-
-The ``AdaptiveLoadBalancingPool`` / ``AdaptiveLoadBalancingGroup`` performs load balancing of messages to cluster nodes based on the cluster metrics data.
-It uses random selection of routees with probabilities derived from the remaining capacity of the corresponding node.
-It can be configured to use a specific MetricsSelector to produce the probabilities, a.k.a. weights:
-
-* ``heap`` / ``HeapMetricsSelector`` - Used and max JVM heap memory. Weights based on remaining heap capacity; (max - used) / max
-* ``load`` / ``SystemLoadAverageMetricsSelector`` - System load average for the past 1 minute, corresponding value can be found in ``top`` of Linux systems. The system is possibly nearing a bottleneck if the system load average is nearing number of cpus/cores. Weights based on remaining load capacity; 1 - (load / processors)
-* ``cpu`` / ``CpuMetricsSelector`` - CPU utilization in percentage, sum of User + Sys + Nice + Wait. Weights based on remaining cpu capacity; 1 - utilization
-* ``mix`` / ``MixMetricsSelector`` - Combines heap, cpu and load. Weights based on mean of remaining capacity of the combined selectors.
-* Any custom implementation of ``akka.cluster.routing.MetricsSelector``
-
-The collected metrics values are smoothed with `exponential weighted moving average `_. In the :ref:`cluster_configuration_scala` you can adjust how quickly past data is decayed compared to new data.
-
-Let's take a look at this router in action. What can be more demanding than calculating factorials?
-
-The backend worker that performs the factorial calculation:
-
-.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/FactorialBackend.scala#backend
-
-The frontend that receives user jobs and delegates to the backends via the router:
-
-.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/FactorialFrontend.scala#frontend
-
-
-As you can see, the router is defined in the same way as other routers, and in this case it is configured as follows:
-
-.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/resources/factorial.conf#adaptive-router
-
-It is only router type ``adaptive`` and the ``metrics-selector`` that is specific to this router, other things work
-in the same way as other routers.
-
-The same type of router could also have been defined in code:
-
-.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/Extra.scala#router-lookup-in-code
-
-.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/Extra.scala#router-deploy-in-code
-
-The `Typesafe Activator `_ tutorial named
-`Akka Cluster Samples with Scala `_.
-contains the full source code and instructions of how to run the **Adaptive Load Balancing** sample.
-
-Subscribe to Metrics Events
----------------------------
-
-It is possible to subscribe to the metrics events directly to implement other functionality.
-
-.. includecode:: ../../../akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/MetricsListener.scala#metrics-listener
-
-Custom Metrics Collector
-------------------------
-
-You can plug-in your own metrics collector instead of
-``akka.cluster.SigarMetricsCollector`` or ``akka.cluster.JmxMetricsCollector``. Look at those two implementations
-for inspiration. The implementation class can be defined in the :ref:`cluster_configuration_scala`.
How to Test
^^^^^^^^^^^
diff --git a/akka-docs/rst/scala/code/docs/persistence/PersistenceDocSpec.scala b/akka-docs/rst/scala/code/docs/persistence/PersistenceDocSpec.scala
index 954ddf2f9c..87b32f9664 100644
--- a/akka-docs/rst/scala/code/docs/persistence/PersistenceDocSpec.scala
+++ b/akka-docs/rst/scala/code/docs/persistence/PersistenceDocSpec.scala
@@ -291,4 +291,4 @@ trait PersistenceDocSpec {
//#view-update
}
-}
+}
\ No newline at end of file
diff --git a/akka-docs/rst/scala/index-network.rst b/akka-docs/rst/scala/index-network.rst
index 6f26d9d457..1e313da6f9 100644
--- a/akka-docs/rst/scala/index-network.rst
+++ b/akka-docs/rst/scala/index-network.rst
@@ -6,6 +6,7 @@ Networking
../common/cluster
cluster-usage
+ cluster-metrics
remoting
serialization
io
diff --git a/akka-samples/akka-sample-cluster-java/.gitignore b/akka-samples/akka-sample-cluster-java/.gitignore
index 660c959e44..b0814a06c4 100644
--- a/akka-samples/akka-sample-cluster-java/.gitignore
+++ b/akka-samples/akka-sample-cluster-java/.gitignore
@@ -14,4 +14,5 @@ target/
.cache
.classpath
.project
-.settings
\ No newline at end of file
+.settings
+native/
diff --git a/akka-samples/akka-sample-cluster-java/build.sbt b/akka-samples/akka-sample-cluster-java/build.sbt
index 68ef0492b8..b7533f3d9d 100644
--- a/akka-samples/akka-sample-cluster-java/build.sbt
+++ b/akka-samples/akka-sample-cluster-java/build.sbt
@@ -9,7 +9,7 @@ val project = Project(
settings = Project.defaultSettings ++ SbtMultiJvm.multiJvmSettings ++ Seq(
name := "akka-sample-cluster-java",
version := "2.4-SNAPSHOT",
- scalaVersion := "2.10.4",
+ scalaVersion := "2.11.5",
scalacOptions in Compile ++= Seq("-encoding", "UTF-8", "-target:jvm-1.6", "-deprecation", "-feature", "-unchecked", "-Xlog-reflective-calls", "-Xlint"),
javacOptions in Compile ++= Seq("-source", "1.6", "-target", "1.6", "-Xlint:unchecked", "-Xlint:deprecation"),
javacOptions in doc in Compile := Seq("-source", "1.6"), // javadoc does not support -target and -Xlint flags
@@ -17,12 +17,12 @@ val project = Project(
"com.typesafe.akka" %% "akka-actor" % akkaVersion,
"com.typesafe.akka" %% "akka-remote" % akkaVersion,
"com.typesafe.akka" %% "akka-cluster" % akkaVersion,
+ "com.typesafe.akka" %% "akka-cluster-metrics" % akkaVersion,
"com.typesafe.akka" %% "akka-contrib" % akkaVersion,
"com.typesafe.akka" %% "akka-multi-node-testkit" % akkaVersion,
"org.scalatest" %% "scalatest" % "2.2.1" % "test",
- "org.fusesource" % "sigar" % "1.6.4"),
+ "io.kamon" % "sigar-loader" % "1.6.5-rev001"),
javaOptions in run ++= Seq(
- "-Djava.library.path=./sigar",
"-Xms128m", "-Xmx1024m"),
Keys.fork in run := true,
mainClass in (Compile, run) := Some("sample.cluster.simple.SimpleClusterApp"),
diff --git a/akka-samples/akka-sample-cluster-java/sigar/libsigar-amd64-freebsd-6.so b/akka-samples/akka-sample-cluster-java/sigar/libsigar-amd64-freebsd-6.so
deleted file mode 100644
index 3e94f0d2bf..0000000000
Binary files a/akka-samples/akka-sample-cluster-java/sigar/libsigar-amd64-freebsd-6.so and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-java/sigar/libsigar-amd64-linux.so b/akka-samples/akka-sample-cluster-java/sigar/libsigar-amd64-linux.so
deleted file mode 100644
index 5a2e4c24fe..0000000000
Binary files a/akka-samples/akka-sample-cluster-java/sigar/libsigar-amd64-linux.so and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-java/sigar/libsigar-amd64-solaris.so b/akka-samples/akka-sample-cluster-java/sigar/libsigar-amd64-solaris.so
deleted file mode 100644
index 6396482a43..0000000000
Binary files a/akka-samples/akka-sample-cluster-java/sigar/libsigar-amd64-solaris.so and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-java/sigar/libsigar-ia64-hpux-11.sl b/akka-samples/akka-sample-cluster-java/sigar/libsigar-ia64-hpux-11.sl
deleted file mode 100644
index d92ea4a96a..0000000000
Binary files a/akka-samples/akka-sample-cluster-java/sigar/libsigar-ia64-hpux-11.sl and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-java/sigar/libsigar-ia64-linux.so b/akka-samples/akka-sample-cluster-java/sigar/libsigar-ia64-linux.so
deleted file mode 100644
index 2bd2fc8e32..0000000000
Binary files a/akka-samples/akka-sample-cluster-java/sigar/libsigar-ia64-linux.so and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-java/sigar/libsigar-pa-hpux-11.sl b/akka-samples/akka-sample-cluster-java/sigar/libsigar-pa-hpux-11.sl
deleted file mode 100644
index 0dfd8a1122..0000000000
Binary files a/akka-samples/akka-sample-cluster-java/sigar/libsigar-pa-hpux-11.sl and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-java/sigar/libsigar-ppc-aix-5.so b/akka-samples/akka-sample-cluster-java/sigar/libsigar-ppc-aix-5.so
deleted file mode 100644
index 7d4b519921..0000000000
Binary files a/akka-samples/akka-sample-cluster-java/sigar/libsigar-ppc-aix-5.so and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-java/sigar/libsigar-ppc-linux.so b/akka-samples/akka-sample-cluster-java/sigar/libsigar-ppc-linux.so
deleted file mode 100644
index 4394b1b00f..0000000000
Binary files a/akka-samples/akka-sample-cluster-java/sigar/libsigar-ppc-linux.so and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-java/sigar/libsigar-ppc64-aix-5.so b/akka-samples/akka-sample-cluster-java/sigar/libsigar-ppc64-aix-5.so
deleted file mode 100644
index 35fd828808..0000000000
Binary files a/akka-samples/akka-sample-cluster-java/sigar/libsigar-ppc64-aix-5.so and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-java/sigar/libsigar-ppc64-linux.so b/akka-samples/akka-sample-cluster-java/sigar/libsigar-ppc64-linux.so
deleted file mode 100644
index a1ba2529c9..0000000000
Binary files a/akka-samples/akka-sample-cluster-java/sigar/libsigar-ppc64-linux.so and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-java/sigar/libsigar-s390x-linux.so b/akka-samples/akka-sample-cluster-java/sigar/libsigar-s390x-linux.so
deleted file mode 100644
index c275f4ac69..0000000000
Binary files a/akka-samples/akka-sample-cluster-java/sigar/libsigar-s390x-linux.so and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-java/sigar/libsigar-sparc-solaris.so b/akka-samples/akka-sample-cluster-java/sigar/libsigar-sparc-solaris.so
deleted file mode 100644
index aa847d2b54..0000000000
Binary files a/akka-samples/akka-sample-cluster-java/sigar/libsigar-sparc-solaris.so and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-java/sigar/libsigar-sparc64-solaris.so b/akka-samples/akka-sample-cluster-java/sigar/libsigar-sparc64-solaris.so
deleted file mode 100644
index 6c4fe809c5..0000000000
Binary files a/akka-samples/akka-sample-cluster-java/sigar/libsigar-sparc64-solaris.so and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-java/sigar/libsigar-universal-macosx.dylib b/akka-samples/akka-sample-cluster-java/sigar/libsigar-universal-macosx.dylib
deleted file mode 100644
index 27ab107111..0000000000
Binary files a/akka-samples/akka-sample-cluster-java/sigar/libsigar-universal-macosx.dylib and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-java/sigar/libsigar-universal64-macosx.dylib b/akka-samples/akka-sample-cluster-java/sigar/libsigar-universal64-macosx.dylib
deleted file mode 100644
index 0c721fecf3..0000000000
Binary files a/akka-samples/akka-sample-cluster-java/sigar/libsigar-universal64-macosx.dylib and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-java/sigar/libsigar-x86-freebsd-5.so b/akka-samples/akka-sample-cluster-java/sigar/libsigar-x86-freebsd-5.so
deleted file mode 100644
index 8c50c6117a..0000000000
Binary files a/akka-samples/akka-sample-cluster-java/sigar/libsigar-x86-freebsd-5.so and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-java/sigar/libsigar-x86-freebsd-6.so b/akka-samples/akka-sample-cluster-java/sigar/libsigar-x86-freebsd-6.so
deleted file mode 100644
index f0800274a6..0000000000
Binary files a/akka-samples/akka-sample-cluster-java/sigar/libsigar-x86-freebsd-6.so and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-java/sigar/libsigar-x86-linux.so b/akka-samples/akka-sample-cluster-java/sigar/libsigar-x86-linux.so
deleted file mode 100644
index a0b64eddb0..0000000000
Binary files a/akka-samples/akka-sample-cluster-java/sigar/libsigar-x86-linux.so and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-java/sigar/libsigar-x86-solaris.so b/akka-samples/akka-sample-cluster-java/sigar/libsigar-x86-solaris.so
deleted file mode 100644
index c6452e5655..0000000000
Binary files a/akka-samples/akka-sample-cluster-java/sigar/libsigar-x86-solaris.so and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-java/sigar/sigar-amd64-winnt.dll b/akka-samples/akka-sample-cluster-java/sigar/sigar-amd64-winnt.dll
deleted file mode 100644
index 1ec8a0353e..0000000000
Binary files a/akka-samples/akka-sample-cluster-java/sigar/sigar-amd64-winnt.dll and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-java/sigar/sigar-x86-winnt.dll b/akka-samples/akka-sample-cluster-java/sigar/sigar-x86-winnt.dll
deleted file mode 100644
index 6afdc0166c..0000000000
Binary files a/akka-samples/akka-sample-cluster-java/sigar/sigar-x86-winnt.dll and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-java/sigar/sigar-x86-winnt.lib b/akka-samples/akka-sample-cluster-java/sigar/sigar-x86-winnt.lib
deleted file mode 100644
index 04924a1fc1..0000000000
Binary files a/akka-samples/akka-sample-cluster-java/sigar/sigar-x86-winnt.lib and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/Extra.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/Extra.java
index 79bd0d9bda..678532d07b 100644
--- a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/Extra.java
+++ b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/Extra.java
@@ -6,14 +6,14 @@ import java.util.Collections;
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.actor.UntypedActor;
-import akka.cluster.routing.AdaptiveLoadBalancingGroup;
-import akka.cluster.routing.AdaptiveLoadBalancingPool;
+import akka.cluster.metrics.AdaptiveLoadBalancingGroup;
+import akka.cluster.metrics.AdaptiveLoadBalancingPool;
import akka.cluster.routing.ClusterRouterGroup;
import akka.cluster.routing.ClusterRouterGroupSettings;
import akka.cluster.routing.ClusterRouterPool;
import akka.cluster.routing.ClusterRouterPoolSettings;
-import akka.cluster.routing.HeapMetricsSelector;
-import akka.cluster.routing.SystemLoadAverageMetricsSelector;
+import akka.cluster.metrics.HeapMetricsSelector;
+import akka.cluster.metrics.SystemLoadAverageMetricsSelector;
//not used, only for documentation
abstract class FactorialFrontend2 extends UntypedActor {
diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialResult.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialResult.java
index 8a80071a2e..7cd49ce988 100644
--- a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialResult.java
+++ b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/FactorialResult.java
@@ -4,11 +4,12 @@ import java.math.BigInteger;
import java.io.Serializable;
public class FactorialResult implements Serializable {
- public final int n;
- public final BigInteger factorial;
+ private static final long serialVersionUID = 1L;
+ public final int n;
+ public final BigInteger factorial;
- FactorialResult(int n, BigInteger factorial) {
- this.n = n;
- this.factorial = factorial;
- }
-}
\ No newline at end of file
+ FactorialResult(int n, BigInteger factorial) {
+ this.n = n;
+ this.factorial = factorial;
+ }
+}
diff --git a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/MetricsListener.java b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/MetricsListener.java
index 08ac1eb120..0cc06b167e 100644
--- a/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/MetricsListener.java
+++ b/akka-samples/akka-sample-cluster-java/src/main/java/sample/cluster/factorial/MetricsListener.java
@@ -3,12 +3,13 @@ package sample.cluster.factorial;
//#metrics-listener
import akka.actor.UntypedActor;
import akka.cluster.Cluster;
-import akka.cluster.ClusterEvent.ClusterMetricsChanged;
import akka.cluster.ClusterEvent.CurrentClusterState;
-import akka.cluster.NodeMetrics;
-import akka.cluster.StandardMetrics;
-import akka.cluster.StandardMetrics.HeapMemory;
-import akka.cluster.StandardMetrics.Cpu;
+import akka.cluster.metrics.ClusterMetricsChanged;
+import akka.cluster.metrics.NodeMetrics;
+import akka.cluster.metrics.StandardMetrics;
+import akka.cluster.metrics.StandardMetrics.HeapMemory;
+import akka.cluster.metrics.StandardMetrics.Cpu;
+import akka.cluster.metrics.ClusterMetricsExtension;
import akka.event.Logging;
import akka.event.LoggingAdapter;
@@ -16,24 +17,27 @@ public class MetricsListener extends UntypedActor {
LoggingAdapter log = Logging.getLogger(getContext().system(), this);
Cluster cluster = Cluster.get(getContext().system());
+
+ ClusterMetricsExtension extension = ClusterMetricsExtension.get(getContext().system());
- //subscribe to ClusterMetricsChanged
+
+ // Subscribe unto ClusterMetricsEvent events.
@Override
public void preStart() {
- cluster.subscribe(getSelf(), ClusterMetricsChanged.class);
+ extension.subscribe(getSelf());
}
- //re-subscribe when restart
+ // Unsubscribe from ClusterMetricsEvent events.
@Override
public void postStop() {
- cluster.unsubscribe(getSelf());
+ extension.unsubscribe(getSelf());
}
@Override
public void onReceive(Object message) {
if (message instanceof ClusterMetricsChanged) {
- ClusterMetricsChanged clusterMetrics = (ClusterMetricsChanged) message;
+ ClusterMetricsChanged clusterMetrics = (ClusterMetricsChanged) message;
for (NodeMetrics nodeMetrics : clusterMetrics.getNodeMetrics()) {
if (nodeMetrics.address().equals(cluster.selfAddress())) {
logHeap(nodeMetrics);
@@ -42,8 +46,7 @@ public class MetricsListener extends UntypedActor {
}
} else if (message instanceof CurrentClusterState) {
- // ignore
-
+ // Ignore.
} else {
unhandled(message);
}
diff --git a/akka-samples/akka-sample-cluster-java/src/main/resources/application.conf b/akka-samples/akka-sample-cluster-java/src/main/resources/application.conf
index 41281e5485..6968532f37 100644
--- a/akka-samples/akka-sample-cluster-java/src/main/resources/application.conf
+++ b/akka-samples/akka-sample-cluster-java/src/main/resources/application.conf
@@ -18,3 +18,13 @@ akka {
auto-down-unreachable-after = 10s
}
}
+
+# Disable legacy metrics in akka-cluster.
+akka.cluster.metrics.enabled=off
+
+# Enable metrics extension in akka-cluster-metrics.
+akka.extensions=["akka.cluster.metrics.ClusterMetricsExtension"]
+
+# Sigar native library extract location during tests.
+# Note: use per-jvm-instance folder when running multiple jvm on one host.
+akka.cluster.metrics.native-library-extract-folder=${user.dir}/target/native
diff --git a/akka-samples/akka-sample-cluster-java/src/main/resources/factorial.conf b/akka-samples/akka-sample-cluster-java/src/main/resources/factorial.conf
index def18c1e51..6dcf1ad1a7 100644
--- a/akka-samples/akka-sample-cluster-java/src/main/resources/factorial.conf
+++ b/akka-samples/akka-sample-cluster-java/src/main/resources/factorial.conf
@@ -14,11 +14,14 @@ akka.cluster.role {
# //#adaptive-router
akka.actor.deployment {
/factorialFrontend/factorialBackendRouter = {
- router = adaptive-group
+ # Router type provided by metrics extension.
+ router = cluster-metrics-adaptive-group
+ # Router parameter specific for metrics extension.
# metrics-selector = heap
# metrics-selector = load
# metrics-selector = cpu
metrics-selector = mix
+ #
nr-of-instances = 100
routees.paths = ["/user/factorialBackend"]
cluster {
diff --git a/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala b/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala
index 36f5a99e0f..8a86a8f720 100644
--- a/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala
+++ b/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala
@@ -27,6 +27,22 @@ object StatsSampleSingleMasterSpecConfig extends MultiNodeConfig {
val second = role("second")
val third = role("thrid")
+ def nodeList = Seq(first, second, third)
+
+ // Extract individual sigar library for every node.
+ nodeList foreach { role ⇒
+ nodeConfig(role) {
+ ConfigFactory.parseString(s"""
+ # Disable legacy metrics in akka-cluster.
+ akka.cluster.metrics.enabled=off
+ # Enable metrics extension in akka-cluster-metrics.
+ akka.extensions=["akka.cluster.metrics.ClusterMetricsExtension"]
+ # Sigar native library extract location during tests.
+ akka.cluster.metrics.native-library-extract-folder=target/native/${role.name}
+ """)
+ }
+ }
+
// this configuration will be used for all nodes
// note that no fixed host names and ports are used
commonConfig(ConfigFactory.parseString("""
@@ -34,8 +50,6 @@ object StatsSampleSingleMasterSpecConfig extends MultiNodeConfig {
akka.actor.provider = "akka.cluster.ClusterActorRefProvider"
akka.remote.log-remote-lifecycle-events = off
akka.cluster.roles = [compute]
- # don't use sigar for tests, native lib not in path
- akka.cluster.metrics.collector-class = akka.cluster.JmxMetricsCollector
#//#router-deploy-config
akka.actor.deployment {
/singleton/statsService/workerRouter {
diff --git a/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala b/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala
index 3209bba838..2e6ce4f6c0 100644
--- a/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala
+++ b/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala
@@ -25,14 +25,28 @@ object StatsSampleSpecConfig extends MultiNodeConfig {
val second = role("second")
val third = role("thrid")
+ def nodeList = Seq(first, second, third)
+
+ // Extract individual sigar library for every node.
+ nodeList foreach { role ⇒
+ nodeConfig(role) {
+ ConfigFactory.parseString(s"""
+ # Disable legacy metrics in akka-cluster.
+ akka.cluster.metrics.enabled=off
+ # Enable metrics extension in akka-cluster-metrics.
+ akka.extensions=["akka.cluster.metrics.ClusterMetricsExtension"]
+ # Sigar native library extract location during tests.
+ akka.cluster.metrics.native-library-extract-folder=target/native/${role.name}
+ """)
+ }
+ }
+
// this configuration will be used for all nodes
// note that no fixed host names and ports are used
commonConfig(ConfigFactory.parseString("""
akka.actor.provider = "akka.cluster.ClusterActorRefProvider"
akka.remote.log-remote-lifecycle-events = off
akka.cluster.roles = [compute]
- # don't use sigar for tests, native lib not in path
- akka.cluster.metrics.collector-class = akka.cluster.JmxMetricsCollector
#//#router-lookup-config
akka.actor.deployment {
/statsService/workerRouter {
diff --git a/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/transformation/TransformationSampleSpec.scala b/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/transformation/TransformationSampleSpec.scala
index fc28771986..48456980fb 100644
--- a/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/transformation/TransformationSampleSpec.scala
+++ b/akka-samples/akka-sample-cluster-java/src/multi-jvm/scala/sample/cluster/transformation/TransformationSampleSpec.scala
@@ -24,13 +24,27 @@ object TransformationSampleSpecConfig extends MultiNodeConfig {
val backend2 = role("backend2")
val backend3 = role("backend3")
+ def nodeList = Seq(frontend1, frontend2, backend1, backend2, backend3)
+
+ // Extract individual sigar library for every node.
+ nodeList foreach { role ⇒
+ nodeConfig(role) {
+ ConfigFactory.parseString(s"""
+ # Disable legacy metrics in akka-cluster.
+ akka.cluster.metrics.enabled=off
+ # Enable metrics extension in akka-cluster-metrics.
+ akka.extensions=["akka.cluster.metrics.ClusterMetricsExtension"]
+ # Sigar native library extract location during tests.
+ akka.cluster.metrics.native-library-extract-folder=target/native/${role.name}
+ """)
+ }
+ }
+
// this configuration will be used for all nodes
// note that no fixed host names and ports are used
commonConfig(ConfigFactory.parseString("""
akka.actor.provider = "akka.cluster.ClusterActorRefProvider"
akka.remote.log-remote-lifecycle-events = off
- # don't use sigar for tests, native lib not in path
- akka.cluster.metrics.collector-class = akka.cluster.JmxMetricsCollector
"""))
nodeConfig(frontend1, frontend2)(
diff --git a/akka-samples/akka-sample-cluster-scala/build.sbt b/akka-samples/akka-sample-cluster-scala/build.sbt
index 7911dffb07..f388649b45 100644
--- a/akka-samples/akka-sample-cluster-scala/build.sbt
+++ b/akka-samples/akka-sample-cluster-scala/build.sbt
@@ -9,19 +9,19 @@ val project = Project(
settings = Project.defaultSettings ++ SbtMultiJvm.multiJvmSettings ++ Seq(
name := "akka-sample-cluster-scala",
version := "2.4-SNAPSHOT",
- scalaVersion := "2.10.4",
+ scalaVersion := "2.11.5",
scalacOptions in Compile ++= Seq("-encoding", "UTF-8", "-target:jvm-1.6", "-deprecation", "-feature", "-unchecked", "-Xlog-reflective-calls", "-Xlint"),
javacOptions in Compile ++= Seq("-source", "1.6", "-target", "1.6", "-Xlint:unchecked", "-Xlint:deprecation"),
libraryDependencies ++= Seq(
"com.typesafe.akka" %% "akka-actor" % akkaVersion,
"com.typesafe.akka" %% "akka-remote" % akkaVersion,
"com.typesafe.akka" %% "akka-cluster" % akkaVersion,
+ "com.typesafe.akka" %% "akka-cluster-metrics" % akkaVersion,
"com.typesafe.akka" %% "akka-contrib" % akkaVersion,
"com.typesafe.akka" %% "akka-multi-node-testkit" % akkaVersion,
"org.scalatest" %% "scalatest" % "2.2.1" % "test",
- "org.fusesource" % "sigar" % "1.6.4"),
+ "io.kamon" % "sigar-loader" % "1.6.5-rev001"),
javaOptions in run ++= Seq(
- "-Djava.library.path=./sigar",
"-Xms128m", "-Xmx1024m"),
Keys.fork in run := true,
mainClass in (Compile, run) := Some("sample.cluster.simple.SimpleClusterApp"),
diff --git a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-amd64-freebsd-6.so b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-amd64-freebsd-6.so
deleted file mode 100644
index 3e94f0d2bf..0000000000
Binary files a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-amd64-freebsd-6.so and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-amd64-linux.so b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-amd64-linux.so
deleted file mode 100644
index 5a2e4c24fe..0000000000
Binary files a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-amd64-linux.so and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-amd64-solaris.so b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-amd64-solaris.so
deleted file mode 100644
index 6396482a43..0000000000
Binary files a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-amd64-solaris.so and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-ia64-hpux-11.sl b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-ia64-hpux-11.sl
deleted file mode 100644
index d92ea4a96a..0000000000
Binary files a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-ia64-hpux-11.sl and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-ia64-linux.so b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-ia64-linux.so
deleted file mode 100644
index 2bd2fc8e32..0000000000
Binary files a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-ia64-linux.so and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-pa-hpux-11.sl b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-pa-hpux-11.sl
deleted file mode 100644
index 0dfd8a1122..0000000000
Binary files a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-pa-hpux-11.sl and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-ppc-aix-5.so b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-ppc-aix-5.so
deleted file mode 100644
index 7d4b519921..0000000000
Binary files a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-ppc-aix-5.so and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-ppc-linux.so b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-ppc-linux.so
deleted file mode 100644
index 4394b1b00f..0000000000
Binary files a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-ppc-linux.so and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-ppc64-aix-5.so b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-ppc64-aix-5.so
deleted file mode 100644
index 35fd828808..0000000000
Binary files a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-ppc64-aix-5.so and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-ppc64-linux.so b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-ppc64-linux.so
deleted file mode 100644
index a1ba2529c9..0000000000
Binary files a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-ppc64-linux.so and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-s390x-linux.so b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-s390x-linux.so
deleted file mode 100644
index c275f4ac69..0000000000
Binary files a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-s390x-linux.so and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-sparc-solaris.so b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-sparc-solaris.so
deleted file mode 100644
index aa847d2b54..0000000000
Binary files a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-sparc-solaris.so and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-sparc64-solaris.so b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-sparc64-solaris.so
deleted file mode 100644
index 6c4fe809c5..0000000000
Binary files a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-sparc64-solaris.so and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-universal-macosx.dylib b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-universal-macosx.dylib
deleted file mode 100644
index 27ab107111..0000000000
Binary files a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-universal-macosx.dylib and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-universal64-macosx.dylib b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-universal64-macosx.dylib
deleted file mode 100644
index 0c721fecf3..0000000000
Binary files a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-universal64-macosx.dylib and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-x86-freebsd-5.so b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-x86-freebsd-5.so
deleted file mode 100644
index 8c50c6117a..0000000000
Binary files a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-x86-freebsd-5.so and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-x86-freebsd-6.so b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-x86-freebsd-6.so
deleted file mode 100644
index f0800274a6..0000000000
Binary files a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-x86-freebsd-6.so and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-x86-linux.so b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-x86-linux.so
deleted file mode 100644
index a0b64eddb0..0000000000
Binary files a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-x86-linux.so and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-x86-solaris.so b/akka-samples/akka-sample-cluster-scala/sigar/libsigar-x86-solaris.so
deleted file mode 100644
index c6452e5655..0000000000
Binary files a/akka-samples/akka-sample-cluster-scala/sigar/libsigar-x86-solaris.so and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-scala/sigar/sigar-amd64-winnt.dll b/akka-samples/akka-sample-cluster-scala/sigar/sigar-amd64-winnt.dll
deleted file mode 100644
index 1ec8a0353e..0000000000
Binary files a/akka-samples/akka-sample-cluster-scala/sigar/sigar-amd64-winnt.dll and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-scala/sigar/sigar-x86-winnt.dll b/akka-samples/akka-sample-cluster-scala/sigar/sigar-x86-winnt.dll
deleted file mode 100644
index 6afdc0166c..0000000000
Binary files a/akka-samples/akka-sample-cluster-scala/sigar/sigar-x86-winnt.dll and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-scala/sigar/sigar-x86-winnt.lib b/akka-samples/akka-sample-cluster-scala/sigar/sigar-x86-winnt.lib
deleted file mode 100644
index 04924a1fc1..0000000000
Binary files a/akka-samples/akka-sample-cluster-scala/sigar/sigar-x86-winnt.lib and /dev/null differ
diff --git a/akka-samples/akka-sample-cluster-scala/src/main/resources/application.conf b/akka-samples/akka-sample-cluster-scala/src/main/resources/application.conf
index 41281e5485..6968532f37 100644
--- a/akka-samples/akka-sample-cluster-scala/src/main/resources/application.conf
+++ b/akka-samples/akka-sample-cluster-scala/src/main/resources/application.conf
@@ -18,3 +18,13 @@ akka {
auto-down-unreachable-after = 10s
}
}
+
+# Disable legacy metrics in akka-cluster.
+akka.cluster.metrics.enabled=off
+
+# Enable metrics extension in akka-cluster-metrics.
+akka.extensions=["akka.cluster.metrics.ClusterMetricsExtension"]
+
+# Sigar native library extract location during tests.
+# Note: use per-jvm-instance folder when running multiple jvm on one host.
+akka.cluster.metrics.native-library-extract-folder=${user.dir}/target/native
diff --git a/akka-samples/akka-sample-cluster-scala/src/main/resources/factorial.conf b/akka-samples/akka-sample-cluster-scala/src/main/resources/factorial.conf
index def18c1e51..6dcf1ad1a7 100644
--- a/akka-samples/akka-sample-cluster-scala/src/main/resources/factorial.conf
+++ b/akka-samples/akka-sample-cluster-scala/src/main/resources/factorial.conf
@@ -14,11 +14,14 @@ akka.cluster.role {
# //#adaptive-router
akka.actor.deployment {
/factorialFrontend/factorialBackendRouter = {
- router = adaptive-group
+ # Router type provided by metrics extension.
+ router = cluster-metrics-adaptive-group
+ # Router parameter specific for metrics extension.
# metrics-selector = heap
# metrics-selector = load
# metrics-selector = cpu
metrics-selector = mix
+ #
nr-of-instances = 100
routees.paths = ["/user/factorialBackend"]
cluster {
diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/Extra.scala b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/Extra.scala
index 24149b0b85..e26b9258d1 100644
--- a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/Extra.scala
+++ b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/Extra.scala
@@ -8,8 +8,8 @@ abstract class FactorialFrontend2 extends Actor {
//#router-lookup-in-code
import akka.cluster.routing.ClusterRouterGroup
import akka.cluster.routing.ClusterRouterGroupSettings
- import akka.cluster.routing.AdaptiveLoadBalancingGroup
- import akka.cluster.routing.HeapMetricsSelector
+ import akka.cluster.metrics.AdaptiveLoadBalancingGroup
+ import akka.cluster.metrics.HeapMetricsSelector
val backend = context.actorOf(
ClusterRouterGroup(AdaptiveLoadBalancingGroup(HeapMetricsSelector),
@@ -25,8 +25,8 @@ abstract class FactorialFrontend3 extends Actor {
//#router-deploy-in-code
import akka.cluster.routing.ClusterRouterPool
import akka.cluster.routing.ClusterRouterPoolSettings
- import akka.cluster.routing.AdaptiveLoadBalancingPool
- import akka.cluster.routing.SystemLoadAverageMetricsSelector
+ import akka.cluster.metrics.AdaptiveLoadBalancingPool
+ import akka.cluster.metrics.SystemLoadAverageMetricsSelector
val backend = context.actorOf(
ClusterRouterPool(AdaptiveLoadBalancingPool(
diff --git a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/MetricsListener.scala b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/MetricsListener.scala
index 3d6a2a890e..2183bdb083 100644
--- a/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/MetricsListener.scala
+++ b/akka-samples/akka-sample-cluster-scala/src/main/scala/sample/cluster/factorial/MetricsListener.scala
@@ -1,25 +1,26 @@
package sample.cluster.factorial
+//#metrics-listener
import akka.actor.ActorLogging
import akka.actor.Actor
-
-//#metrics-listener
import akka.cluster.Cluster
-import akka.cluster.ClusterEvent.ClusterMetricsChanged
+import akka.cluster.metrics.ClusterMetricsEvent
+import akka.cluster.metrics.ClusterMetricsChanged
import akka.cluster.ClusterEvent.CurrentClusterState
-import akka.cluster.NodeMetrics
-import akka.cluster.StandardMetrics.HeapMemory
-import akka.cluster.StandardMetrics.Cpu
+import akka.cluster.metrics.NodeMetrics
+import akka.cluster.metrics.StandardMetrics.HeapMemory
+import akka.cluster.metrics.StandardMetrics.Cpu
+import akka.cluster.metrics.ClusterMetricsExtension
class MetricsListener extends Actor with ActorLogging {
val selfAddress = Cluster(context.system).selfAddress
+ val extension = ClusterMetricsExtension(context.system)
- // subscribe to ClusterMetricsChanged
- // re-subscribe when restart
- override def preStart(): Unit =
- Cluster(context.system).subscribe(self, classOf[ClusterMetricsChanged])
- override def postStop(): Unit =
- Cluster(context.system).unsubscribe(self)
+  // Subscribe to ClusterMetricsEvent events.
+ override def preStart(): Unit = extension.subscribe(self)
+
+ // Unsubscribe from ClusterMetricsEvent events.
+ override def postStop(): Unit = extension.unsubscribe(self)
def receive = {
case ClusterMetricsChanged(clusterMetrics) =>
@@ -27,21 +28,19 @@ class MetricsListener extends Actor with ActorLogging {
logHeap(nodeMetrics)
logCpu(nodeMetrics)
}
- case state: CurrentClusterState => // ignore
+ case state: CurrentClusterState => // Ignore.
}
def logHeap(nodeMetrics: NodeMetrics): Unit = nodeMetrics match {
case HeapMemory(address, timestamp, used, committed, max) =>
log.info("Used heap: {} MB", used.doubleValue / 1024 / 1024)
- case _ => // no heap info
+ case _ => // No heap info.
}
def logCpu(nodeMetrics: NodeMetrics): Unit = nodeMetrics match {
- case Cpu(address, timestamp, Some(systemLoadAverage), cpuCombined, processors) =>
+ case Cpu(address, timestamp, Some(systemLoadAverage), cpuCombined, cpuStolen, processors) =>
log.info("Load: {} ({} processors)", systemLoadAverage, processors)
- case _ => // no cpu info
+ case _ => // No cpu info.
}
}
-
//#metrics-listener
-
diff --git a/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala b/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala
index 01f517cc23..b0ebcf9fe8 100644
--- a/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala
+++ b/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala
@@ -26,6 +26,22 @@ object StatsSampleSingleMasterSpecConfig extends MultiNodeConfig {
val second = role("second")
val third = role("third")
+ def nodeList = Seq(first, second, third)
+
+  // Extract an individual Sigar library for every node.
+ nodeList foreach { role ⇒
+ nodeConfig(role) {
+ ConfigFactory.parseString(s"""
+ # Disable legacy metrics in akka-cluster.
+ akka.cluster.metrics.enabled=off
+ # Enable metrics extension in akka-cluster-metrics.
+ akka.extensions=["akka.cluster.metrics.ClusterMetricsExtension"]
+ # Sigar native library extract location during tests.
+ akka.cluster.metrics.native-library-extract-folder=target/native/${role.name}
+ """)
+ }
+ }
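+  // Each node above runs in its own JVM on the same host, so every node gets
+  // its own extract folder, avoiding clashes over the extracted native library.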
+
// this configuration will be used for all nodes
// note that no fixed host names and ports are used
commonConfig(ConfigFactory.parseString("""
@@ -33,8 +49,6 @@ object StatsSampleSingleMasterSpecConfig extends MultiNodeConfig {
akka.actor.provider = "akka.cluster.ClusterActorRefProvider"
akka.remote.log-remote-lifecycle-events = off
akka.cluster.roles = [compute]
- # don't use sigar for tests, native lib not in path
- akka.cluster.metrics.collector-class = akka.cluster.JmxMetricsCollector
#//#router-deploy-config
akka.actor.deployment {
/singleton/statsService/workerRouter {
diff --git a/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala b/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala
index 2ec1bdf71b..7cb8cefcb7 100644
--- a/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala
+++ b/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala
@@ -21,14 +21,28 @@ object StatsSampleSpecConfig extends MultiNodeConfig {
val second = role("second")
val third = role("thrid")
+ def nodeList = Seq(first, second, third)
+
+  // Extract an individual Sigar library for every node.
+ nodeList foreach { role ⇒
+ nodeConfig(role) {
+ ConfigFactory.parseString(s"""
+ # Disable legacy metrics in akka-cluster.
+ akka.cluster.metrics.enabled=off
+ # Enable metrics extension in akka-cluster-metrics.
+ akka.extensions=["akka.cluster.metrics.ClusterMetricsExtension"]
+ # Sigar native library extract location during tests.
+ akka.cluster.metrics.native-library-extract-folder=target/native/${role.name}
+ """)
+ }
+ }
+
// this configuration will be used for all nodes
// note that no fixed host names and ports are used
commonConfig(ConfigFactory.parseString("""
akka.actor.provider = "akka.cluster.ClusterActorRefProvider"
akka.remote.log-remote-lifecycle-events = off
akka.cluster.roles = [compute]
- # don't use sigar for tests, native lib not in path
- akka.cluster.metrics.collector-class = akka.cluster.JmxMetricsCollector
#//#router-lookup-config
akka.actor.deployment {
/statsService/workerRouter {
diff --git a/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/transformation/TransformationSampleSpec.scala b/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/transformation/TransformationSampleSpec.scala
index 802bac7756..0a8d7eda04 100644
--- a/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/transformation/TransformationSampleSpec.scala
+++ b/akka-samples/akka-sample-cluster-scala/src/multi-jvm/scala/sample/cluster/transformation/TransformationSampleSpec.scala
@@ -23,13 +23,27 @@ object TransformationSampleSpecConfig extends MultiNodeConfig {
val backend2 = role("backend2")
val backend3 = role("backend3")
+ def nodeList = Seq(frontend1, frontend2, backend1, backend2, backend3)
+
+  // Extract an individual Sigar library for every node.
+ nodeList foreach { role ⇒
+ nodeConfig(role) {
+ ConfigFactory.parseString(s"""
+ # Disable legacy metrics in akka-cluster.
+ akka.cluster.metrics.enabled=off
+ # Enable metrics extension in akka-cluster-metrics.
+ akka.extensions=["akka.cluster.metrics.ClusterMetricsExtension"]
+ # Sigar native library extract location during tests.
+ akka.cluster.metrics.native-library-extract-folder=target/native/${role.name}
+ """)
+ }
+ }
+
// this configuration will be used for all nodes
// note that no fixed host names and ports are used
commonConfig(ConfigFactory.parseString("""
akka.actor.provider = "akka.cluster.ClusterActorRefProvider"
akka.remote.log-remote-lifecycle-events = off
- # don't use sigar for tests, native lib not in path
- akka.cluster.metrics.collector-class = akka.cluster.JmxMetricsCollector
"""))
nodeConfig(frontend1, frontend2)(
diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala
index 02ec37cbf5..65ddec0d00 100644
--- a/project/AkkaBuild.scala
+++ b/project/AkkaBuild.scala
@@ -62,7 +62,7 @@ object AkkaBuild extends Build {
validatePullRequest <<= (unidoc in Compile, SphinxSupport.generate in Sphinx in docs) map { (_, _) => }
),
- aggregate = Seq(actor, testkit, actorTests, remote, remoteTests, camel, cluster, slf4j, agent,
+ aggregate = Seq(actor, testkit, actorTests, remote, remoteTests, camel, cluster, clusterMetrics, slf4j, agent,
persistence, persistenceTck, kernel, osgi, docs, contrib, samples, multiNodeTestkit)
)
@@ -122,6 +122,12 @@ object AkkaBuild extends Build {
dependencies = Seq(remote, remoteTests % "test->test" , testkit % "test->test")
) configs (MultiJvm)
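+  // akka-cluster-metrics reuses akka-cluster's test and multi-jvm classes via
+  // the configuration mappings below.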
+ lazy val clusterMetrics = Project(
+ id = "akka-cluster-metrics",
+ base = file("akka-cluster-metrics"),
+ dependencies = Seq(cluster % "compile->compile;test->test;multi-jvm->multi-jvm", slf4j % "test->compile")
+ ) configs (MultiJvm)
+
lazy val slf4j = Project(
id = "akka-slf4j",
base = file("akka-slf4j"),
@@ -168,9 +174,9 @@ object AkkaBuild extends Build {
id = "akka-docs",
base = file("akka-docs"),
dependencies = Seq(actor, testkit % "test->test",
- remote % "compile;test->test", cluster, slf4j, agent, camel, osgi,
+ remote % "compile;test->test", cluster, clusterMetrics, slf4j, agent, camel, osgi,
persistence % "compile;test->test", persistenceTck)
- )
+ )
lazy val contrib = Project(
id = "akka-contrib",
diff --git a/project/Dependencies.scala b/project/Dependencies.scala
index 1eb46afdb5..806c94592b 100644
--- a/project/Dependencies.scala
+++ b/project/Dependencies.scala
@@ -27,7 +27,7 @@ object Dependencies {
val protobuf = "com.google.protobuf" % "protobuf-java" % "2.5.0" // New BSD
val scalaStm = "org.scala-stm" %% "scala-stm" % scalaStmVersion // Modified BSD (Scala)
- val slf4jApi = "org.slf4j" % "slf4j-api" % "1.7.5" // MIT
+ val slf4jApi = "org.slf4j" % "slf4j-api" % "1.7.7" // MIT
// mirrored in OSGi sample
val uncommonsMath = "org.uncommons.maths" % "uncommons-maths" % "1.2.2a" exclude("jfree", "jcommon") exclude("jfree", "jfreechart") // ApacheV2
val osgiCore = "org.osgi" % "org.osgi.core" % "4.3.1" // ApacheV2
@@ -58,7 +58,17 @@ object Dependencies {
val latencyUtils = "org.latencyutils" % "LatencyUtils" % "1.0.3" % "test" // Free BSD
val hdrHistogram = "org.hdrhistogram" % "HdrHistogram" % "1.1.4" % "test" // CC0
val metricsAll = Seq(metrics, metricsJvm, latencyUtils, hdrHistogram)
+
+ // sigar logging
+ val slf4jJul = "org.slf4j" % "jul-to-slf4j" % "1.7.7" % "test" // MIT
+ val slf4jLog4j = "org.slf4j" % "log4j-over-slf4j" % "1.7.7" % "test" // MIT
}
+
+ object Provided {
+ // TODO remove from "test" config
+ val sigarLoader = "io.kamon" % "sigar-loader" % "1.6.5-rev001" % "optional;provided;test" // ApacheV2
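+    // "optional;provided;test" (ivy configurations): on the compile classpath
+    // without becoming a transitive dependency, and also on the test classpath.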
+ }
+
}
import Compile._
@@ -75,6 +85,8 @@ object Dependencies {
val cluster = Seq(Test.junit, Test.scalatest)
+ val clusterMetrics = Seq(Provided.sigarLoader, Test.slf4jJul, Test.slf4jLog4j, Test.logback, Test.mockito)
+
val slf4j = Seq(slf4jApi, Test.logback)
val agent = Seq(scalaStm, Test.scalatest, Test.junit)
diff --git a/project/OSGi.scala b/project/OSGi.scala
index 3583b74ab4..d95d8d618e 100644
--- a/project/OSGi.scala
+++ b/project/OSGi.scala
@@ -28,6 +28,8 @@ object OSGi {
val cluster = exports(Seq("akka.cluster.*"), imports = Seq(protobufImport()))
+ val clusterMetrics = exports(Seq("akka.cluster.metrics.*"), imports = Seq(protobufImport(),kamonImport(),sigarImport()))
+
val osgi = exports(Seq("akka.osgi.*"))
val remote = exports(Seq("akka.remote.*"), imports = Seq(protobufImport()))
@@ -59,6 +61,8 @@ object OSGi {
val ScalaVersion(epoch, major) = version
versionedImport(packageName, s"$epoch.$major", s"$epoch.${major+1}")
}
+ def kamonImport(packageName: String = "kamon.sigar.*") = optionalResolution(versionedImport(packageName, "1.6.5", "1.6.6"))
+ def sigarImport(packageName: String = "org.hyperic.*") = optionalResolution(versionedImport(packageName, "1.6.5", "1.6.6"))
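+  // e.g. kamonImport() expands, via the helpers below, to the OSGi clause:
+  //   kamon.sigar.*;version="[1.6.5,1.6.6)";resolution:=optional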
def optionalResolution(packageName: String) = "%s;resolution:=optional".format(packageName)
def versionedImport(packageName: String, lower: String, upper: String) = s"""$packageName;version="[$lower,$upper)""""
}
diff --git a/project/SigarLoader.scala b/project/SigarLoader.scala
new file mode 100644
index 0000000000..c1cdc523e8
--- /dev/null
+++ b/project/SigarLoader.scala
@@ -0,0 +1,60 @@
+/**
+ * Copyright (C) 2009-2014 Typesafe Inc.
+ */
+
+package akka
+
+import sbt._
+import sbt.Keys._
+
+/**
+ * Sigar java agent injection build settings.
+ */
+object SigarLoader {
+
+ import Dependencies.Compile.Provided.sigarLoader
+
+ /** Enable Sigar java agent injection during tests. */
+ lazy val sigarTestEnabled = sys.props.get("akka.test.sigar").getOrElse("false").toBoolean
+
+ lazy val sigarArtifact = TaskKey[File]("sigar-artifact", "Location of Sigar java agent jar.")
+
+ lazy val sigarOptions = TaskKey[String]("sigar-options", "JVM command line options for Sigar java agent.")
+
+ lazy val sigarFolder = SettingKey[File]("sigar-folder", "Location of native library extracted by Sigar java agent.")
+
+ /** Sigar agent command line option property. */
+ val sigarFolderProperty = "kamon.sigar.folder"
+
+ def provideSigarOptions = (sigarArtifact, sigarFolder) map { (artifact, folder) =>
+ "-javaagent:" + artifact + "=" + sigarFolderProperty + "=" + folder
+ }
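+  // The resulting JVM option looks like (illustrative artifact path):
+  //   -javaagent:/home/user/.ivy2/cache/io.kamon/sigar-loader/jars/sigar-loader-1.6.5-rev001.jar=kamon.sigar.folder=target/native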
+
+ def locateSigarArtifact = update map { report =>
+ val artifactList = report.matching(
+ moduleFilter(organization = sigarLoader.organization, name = sigarLoader.name)
+ )
+    require(artifactList.size == 1, "Expecting a single artifact, but found: " + artifactList)
+ artifactList(0)
+ }
+
+  // TODO remove Sigar from test:test* classpath, it is provided by the Sigar agent.
+ lazy val sigarSettings = {
+ Seq(
+ //
+ // Prepare Sigar agent options.
+ sigarArtifact <<= locateSigarArtifact,
+ sigarFolder := target.value / "native",
+ sigarOptions <<= provideSigarOptions,
+ //
+ fork in Test := true
+ ) ++ (
+    // Invoke the Sigar agent at JVM init time to extract and load the native Sigar library.
+ if (sigarTestEnabled) Seq(
+ javaOptions in Test += sigarOptions.value
+ )
+ else Seq()
+ )
+ }
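+
+  // To run tests with the Sigar agent attached, enable the flag above, e.g.:
+  //   sbt -Dakka.test.sigar=true akka-cluster-metrics/test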
+
+}
diff --git a/project/SphinxDoc.scala b/project/SphinxDoc.scala
index deea7d2479..3c1011c62d 100644
--- a/project/SphinxDoc.scala
+++ b/project/SphinxDoc.scala
@@ -64,6 +64,7 @@ object SphinxDoc {
case _ => s
}),
"sigarVersion" -> Dependencies.Compile.sigar.revision,
+ "sigarLoaderVersion" -> Dependencies.Compile.Provided.sigarLoader.revision,
"github" -> GitHub.url(v)
)
},
diff --git a/scripts/show-serializer.sh b/scripts/show-serializer.sh
new file mode 100755
index 0000000000..003c350fa5
--- /dev/null
+++ b/scripts/show-serializer.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+# Locate akka.serialization.Serializer.identifier()
+find . -name '*.scala' | xargs grep "def identifier =" | sort
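+
+# Example output line (illustrative):
+#   ./akka-remote/src/main/scala/akka/remote/serialization/ProtobufSerializer.scala:  override def identifier = 2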