warningMessageConsumer;
diff --git a/proxy/src/main/java/com/wavefront/agent/channel/NoopHealthCheckManager.java b/proxy/src/main/java/com/wavefront/agent/channel/NoopHealthCheckManager.java
index a9c7a75eb..1f1ff4cd9 100644
--- a/proxy/src/main/java/com/wavefront/agent/channel/NoopHealthCheckManager.java
+++ b/proxy/src/main/java/com/wavefront/agent/channel/NoopHealthCheckManager.java
@@ -5,11 +5,7 @@
import io.netty.handler.codec.http.HttpResponse;
import javax.annotation.Nonnull;
-/**
- * A no-op health check manager.
- *
- * @author vasily@wavefront.com.
- */
+/** A no-op health check manager. */
public class NoopHealthCheckManager implements HealthCheckManager {
@Override
public HttpResponse getHealthCheckResponse(
diff --git a/proxy/src/main/java/com/wavefront/agent/channel/PlainTextOrHttpFrameDecoder.java b/proxy/src/main/java/com/wavefront/agent/channel/PlainTextOrHttpFrameDecoder.java
index 15e7b396d..04a40bfbc 100644
--- a/proxy/src/main/java/com/wavefront/agent/channel/PlainTextOrHttpFrameDecoder.java
+++ b/proxy/src/main/java/com/wavefront/agent/channel/PlainTextOrHttpFrameDecoder.java
@@ -16,8 +16,9 @@
import io.netty.handler.codec.string.StringDecoder;
import io.netty.handler.codec.string.StringEncoder;
import java.util.List;
-import java.util.logging.Logger;
import javax.annotation.Nullable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* This class handles 2 different protocols on a single port. Supported protocols include HTTP and a
@@ -27,13 +28,13 @@
* @see Netty
* Port Unification Example
- * @author Mike McLaughlin (mike@wavefront.com)
*/
public final class PlainTextOrHttpFrameDecoder extends ByteToMessageDecoder {
protected static final Logger logger =
- Logger.getLogger(PlainTextOrHttpFrameDecoder.class.getName());
-
+ LoggerFactory.getLogger(PlainTextOrHttpFrameDecoder.class.getName());
+ private static final StringDecoder STRING_DECODER = new StringDecoder(Charsets.UTF_8);
+ private static final StringEncoder STRING_ENCODER = new StringEncoder(Charsets.UTF_8);
/** The object for handling requests of either protocol */
private final ChannelHandler handler;
@@ -42,9 +43,6 @@ public final class PlainTextOrHttpFrameDecoder extends ByteToMessageDecoder {
private final int maxLengthPlaintext;
private final int maxLengthHttp;
- private static final StringDecoder STRING_DECODER = new StringDecoder(Charsets.UTF_8);
- private static final StringEncoder STRING_ENCODER = new StringEncoder(Charsets.UTF_8);
-
/**
* @param handler the object responsible for handling the incoming messages on either protocol.
* @param corsConfig enables CORS when {@link CorsConfig} is specified
@@ -72,6 +70,46 @@ private PlainTextOrHttpFrameDecoder(
this.detectGzip = detectGzip;
}
+ /**
+ * @param magic1 the first byte of the incoming message
+ * @param magic2 the second byte of the incoming message
+ * @return true if this is an HTTP message; false o/w
+ * @see Netty
+ * Port Unification Example
+ */
+ private static boolean isHttp(int magic1, int magic2) {
+ return ((magic1 == 'G' && magic2 == 'E')
+ || // GET
+ (magic1 == 'P' && magic2 == 'O')
+ || // POST
+ (magic1 == 'P' && magic2 == 'U')
+ || // PUT
+ (magic1 == 'H' && magic2 == 'E')
+ || // HEAD
+ (magic1 == 'O' && magic2 == 'P')
+ || // OPTIONS
+ (magic1 == 'P' && magic2 == 'A')
+ || // PATCH
+ (magic1 == 'D' && magic2 == 'E')
+ || // DELETE
+ (magic1 == 'T' && magic2 == 'R')
+ || // TRACE
+ (magic1 == 'C' && magic2 == 'O')); // CONNECT
+ }
+
+ /**
+ * @param magic1 the first byte of the incoming message
+ * @param magic2 the second byte of the incoming message
+ * @return true if this is a GZIP stream; false o/w
+ * @see Netty
+ * Port Unification Example
+ */
+ private static boolean isGzip(int magic1, int magic2) {
+ return magic1 == 31 && magic2 == 139;
+ }
+
/**
* Dynamically adds the appropriate encoder/decoder(s) to the pipeline based on the detected
* protocol.
@@ -93,7 +131,7 @@ protected void decode(final ChannelHandlerContext ctx, final ByteBuf buffer, Lis
final ChannelPipeline pipeline = ctx.pipeline();
if (detectGzip && isGzip(firstByte, secondByte)) {
- logger.fine("Inbound gzip stream detected");
+ logger.info("Inbound gzip stream detected");
pipeline
.addLast("gzipdeflater", ZlibCodecFactory.newZlibEncoder(ZlibWrapper.GZIP))
.addLast("gzipinflater", ZlibCodecFactory.newZlibDecoder(ZlibWrapper.GZIP))
@@ -102,7 +140,7 @@ protected void decode(final ChannelHandlerContext ctx, final ByteBuf buffer, Lis
new PlainTextOrHttpFrameDecoder(
handler, corsConfig, maxLengthPlaintext, maxLengthHttp, false));
} else if (isHttp(firstByte, secondByte)) {
- logger.fine("Switching to HTTP protocol");
+ logger.info("Switching to HTTP protocol");
pipeline
.addLast("decoder", new HttpRequestDecoder())
.addLast("inflater", new HttpContentDecompressor())
@@ -113,55 +151,15 @@ protected void decode(final ChannelHandlerContext ctx, final ByteBuf buffer, Lis
}
pipeline.addLast("handler", this.handler);
} else {
- logger.fine("Switching to plaintext TCP protocol");
+ logger.info("Switching to plaintext TCP protocol");
pipeline
.addLast(
"line",
- new IncompleteLineDetectingLineBasedFrameDecoder(logger::warning, maxLengthPlaintext))
+ new IncompleteLineDetectingLineBasedFrameDecoder(logger::warn, maxLengthPlaintext))
.addLast("decoder", STRING_DECODER)
.addLast("encoder", STRING_ENCODER)
.addLast("handler", this.handler);
}
pipeline.remove(this);
}
-
- /**
- * @param magic1 the first byte of the incoming message
- * @param magic2 the second byte of the incoming message
- * @return true if this is an HTTP message; false o/w
- * @see Netty
- * Port Unification Example
- */
- private static boolean isHttp(int magic1, int magic2) {
- return ((magic1 == 'G' && magic2 == 'E')
- || // GET
- (magic1 == 'P' && magic2 == 'O')
- || // POST
- (magic1 == 'P' && magic2 == 'U')
- || // PUT
- (magic1 == 'H' && magic2 == 'E')
- || // HEAD
- (magic1 == 'O' && magic2 == 'P')
- || // OPTIONS
- (magic1 == 'P' && magic2 == 'A')
- || // PATCH
- (magic1 == 'D' && magic2 == 'E')
- || // DELETE
- (magic1 == 'T' && magic2 == 'R')
- || // TRACE
- (magic1 == 'C' && magic2 == 'O')); // CONNECT
- }
-
- /**
- * @param magic1 the first byte of the incoming message
- * @param magic2 the second byte of the incoming message
- * @return true if this is a GZIP stream; false o/w
- * @see Netty
- * Port Unification Example
- */
- private static boolean isGzip(int magic1, int magic2) {
- return magic1 == 31 && magic2 == 139;
- }
}
diff --git a/proxy/src/main/java/com/wavefront/agent/channel/SharedGraphiteHostAnnotator.java b/proxy/src/main/java/com/wavefront/agent/channel/SharedGraphiteHostAnnotator.java
index 7ace9dceb..660d62dad 100644
--- a/proxy/src/main/java/com/wavefront/agent/channel/SharedGraphiteHostAnnotator.java
+++ b/proxy/src/main/java/com/wavefront/agent/channel/SharedGraphiteHostAnnotator.java
@@ -18,8 +18,6 @@
*
* Differences from GraphiteHostAnnotator: - sharable - lazy load - does not proactively perform
* rDNS lookups unless needed - can be applied to HTTP payloads
- *
- * @author vasily@wavefront.com
*/
@ChannelHandler.Sharable
public class SharedGraphiteHostAnnotator {
diff --git a/proxy/src/main/java/com/wavefront/agent/channel/StatusTrackingHttpObjectAggregator.java b/proxy/src/main/java/com/wavefront/agent/channel/StatusTrackingHttpObjectAggregator.java
index 00e2d11a8..e57228811 100644
--- a/proxy/src/main/java/com/wavefront/agent/channel/StatusTrackingHttpObjectAggregator.java
+++ b/proxy/src/main/java/com/wavefront/agent/channel/StatusTrackingHttpObjectAggregator.java
@@ -8,8 +8,6 @@
/**
* A {@link HttpObjectAggregator} that correctly tracks HTTP 413 returned for incoming payloads that
* are too large.
- *
- * @author vasily@wavefront.com
*/
public class StatusTrackingHttpObjectAggregator extends HttpObjectAggregator {
diff --git a/proxy/src/main/java/com/wavefront/agent/config/Configuration.java b/proxy/src/main/java/com/wavefront/agent/config/Configuration.java
index f0ad85088..4d7251a96 100644
--- a/proxy/src/main/java/com/wavefront/agent/config/Configuration.java
+++ b/proxy/src/main/java/com/wavefront/agent/config/Configuration.java
@@ -3,8 +3,9 @@
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
-/** @author Mori Bellamy (mori@wavefront.com) */
public abstract class Configuration {
+ private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
+
protected void ensure(boolean b, String message) throws ConfigurationException {
if (!b) {
throw new ConfigurationException(message);
@@ -13,8 +14,6 @@ protected void ensure(boolean b, String message) throws ConfigurationException {
public abstract void verifyAndInit() throws ConfigurationException;
- private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
-
@Override
public String toString() {
try {
diff --git a/proxy/src/main/java/com/wavefront/agent/config/ConfigurationException.java b/proxy/src/main/java/com/wavefront/agent/config/ConfigurationException.java
index 23ffa8422..2b1975e88 100644
--- a/proxy/src/main/java/com/wavefront/agent/config/ConfigurationException.java
+++ b/proxy/src/main/java/com/wavefront/agent/config/ConfigurationException.java
@@ -1,6 +1,5 @@
package com.wavefront.agent.config;
-/** @author Mori Bellamy (mori@wavefront.com) */
public class ConfigurationException extends Exception {
public ConfigurationException(String message) {
super(message);
diff --git a/proxy/src/main/java/com/wavefront/agent/config/LogsIngestionConfig.java b/proxy/src/main/java/com/wavefront/agent/config/LogsIngestionConfig.java
index e8479f175..6649266c0 100644
--- a/proxy/src/main/java/com/wavefront/agent/config/LogsIngestionConfig.java
+++ b/proxy/src/main/java/com/wavefront/agent/config/LogsIngestionConfig.java
@@ -81,8 +81,6 @@
* - "MYPATTERN she sold %{NUMBER:value} sea shells"
*
*
- *
- * @author Mori Bellamy (mori@wavefront.com)
*/
@SuppressWarnings("CanBeFinal")
public class LogsIngestionConfig extends Configuration {
diff --git a/proxy/src/main/java/com/wavefront/agent/config/MetricMatcher.java b/proxy/src/main/java/com/wavefront/agent/config/MetricMatcher.java
index 752c79cc6..2d585834b 100644
--- a/proxy/src/main/java/com/wavefront/agent/config/MetricMatcher.java
+++ b/proxy/src/main/java/com/wavefront/agent/config/MetricMatcher.java
@@ -12,20 +12,18 @@
import java.io.InputStreamReader;
import java.util.List;
import java.util.Map;
-import java.util.logging.Logger;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.lang3.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import wavefront.report.TimeSeries;
-/**
- * Object defining transformation between a log line into structured telemetry data.
- *
- * @author Mori Bellamy (mori@wavefront.com)
- */
+/** Object defining transformation between a log line into structured telemetry data. */
@SuppressWarnings("CanBeFinal")
public class MetricMatcher extends Configuration {
- protected static final Logger logger = Logger.getLogger(MetricMatcher.class.getCanonicalName());
+ protected static final Logger logger =
+ LoggerFactory.getLogger(MetricMatcher.class.getCanonicalName());
private final Object grokLock = new Object();
/**
@@ -81,6 +79,28 @@ public class MetricMatcher extends Configuration {
private Grok grok = null;
private Map additionalPatterns = Maps.newHashMap();
+ private static String expandTemplate(String template, Map replacements) {
+ if (template.contains("%{")) {
+ StringBuffer result = new StringBuffer();
+ Matcher placeholders = Pattern.compile("%\\{(.*?)}").matcher(template);
+ while (placeholders.find()) {
+ if (placeholders.group(1).isEmpty()) {
+ placeholders.appendReplacement(result, placeholders.group(0));
+ } else {
+ if (replacements.get(placeholders.group(1)) != null) {
+ placeholders.appendReplacement(
+ result, (String) replacements.get(placeholders.group(1)));
+ } else {
+ placeholders.appendReplacement(result, placeholders.group(0));
+ }
+ }
+ }
+ placeholders.appendTail(result);
+ return result.toString();
+ }
+ return template;
+ }
+
public String getValueLabel() {
return valueLabel;
}
@@ -110,41 +130,19 @@ private Grok grok() {
try {
grok.addPattern(key, value);
} catch (GrokException e) {
- logger.severe("Invalid grok pattern: " + pattern);
+ logger.error("Invalid grok pattern: " + pattern);
throw new RuntimeException(e);
}
});
grok.compile(pattern);
} catch (GrokException e) {
- logger.severe("Invalid grok pattern: " + pattern);
+ logger.error("Invalid grok pattern: " + pattern);
throw new RuntimeException(e);
}
return grok;
}
}
- private static String expandTemplate(String template, Map replacements) {
- if (template.contains("%{")) {
- StringBuffer result = new StringBuffer();
- Matcher placeholders = Pattern.compile("%\\{(.*?)}").matcher(template);
- while (placeholders.find()) {
- if (placeholders.group(1).isEmpty()) {
- placeholders.appendReplacement(result, placeholders.group(0));
- } else {
- if (replacements.get(placeholders.group(1)) != null) {
- placeholders.appendReplacement(
- result, (String) replacements.get(placeholders.group(1)));
- } else {
- placeholders.appendReplacement(result, placeholders.group(0));
- }
- }
- }
- placeholders.appendTail(result);
- return result.toString();
- }
- return template;
- }
-
/**
* Convert the given message to a timeSeries and a telemetry datum.
*
@@ -185,7 +183,7 @@ public TimeSeries timeSeries(LogsMessage logsMessage, Double[] output)
String tagValueLabel = tagValueLabels.get(i);
if (!matches.containsKey(tagValueLabel)) {
// What happened? We shouldn't have had matchEnd != 0 above...
- logger.severe("Application error: unparsed tag key.");
+ logger.error("Application error: unparsed tag key.");
continue;
}
String value = (String) matches.get(tagValueLabel);
diff --git a/proxy/src/main/java/com/wavefront/agent/config/ReportableConfig.java b/proxy/src/main/java/com/wavefront/agent/config/ReportableConfig.java
index fc6a6cf91..810abdab0 100644
--- a/proxy/src/main/java/com/wavefront/agent/config/ReportableConfig.java
+++ b/proxy/src/main/java/com/wavefront/agent/config/ReportableConfig.java
@@ -3,24 +3,26 @@
import com.yammer.metrics.Metrics;
import com.yammer.metrics.core.Gauge;
import com.yammer.metrics.core.MetricName;
-import java.io.FileInputStream;
import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Paths;
import java.util.Properties;
import java.util.function.Function;
import java.util.function.Supplier;
-import java.util.logging.Level;
-import java.util.logging.Logger;
import javax.annotation.Nullable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Wrapper class to simplify access to .properties file + track values as metrics as they are
* retrieved
*/
public class ReportableConfig extends Properties {
- private static final Logger logger = Logger.getLogger(ReportableConfig.class.getCanonicalName());
+ private static final Logger logger =
+ LoggerFactory.getLogger(ReportableConfig.class.getCanonicalName());
public ReportableConfig(String fileName) throws IOException {
- this.load(new FileInputStream(fileName));
+ this.load(Files.newInputStream(Paths.get(fileName)));
}
public ReportableConfig() {}
@@ -61,8 +63,7 @@ public Number getNumber(
"Config setting \"" + key + "\": invalid number format \"" + property + "\"");
}
if (clampMinValue != null && d < clampMinValue.longValue()) {
- logger.log(
- Level.WARNING,
+ logger.warn(
key
+ " ("
+ d
@@ -74,8 +75,7 @@ public Number getNumber(
return clampMinValue;
}
if (clampMaxValue != null && d > clampMaxValue.longValue()) {
- logger.log(
- Level.WARNING,
+ logger.warn(
key
+ " ("
+ d
diff --git a/proxy/src/main/java/com/wavefront/agent/core/buffers/ActiveMQBuffer.java b/proxy/src/main/java/com/wavefront/agent/core/buffers/ActiveMQBuffer.java
new file mode 100644
index 000000000..8e9244e21
--- /dev/null
+++ b/proxy/src/main/java/com/wavefront/agent/core/buffers/ActiveMQBuffer.java
@@ -0,0 +1,462 @@
+package com.wavefront.agent.core.buffers;
+
+import static org.apache.activemq.artemis.core.settings.impl.AddressFullMessagePolicy.FAIL;
+import static org.apache.activemq.artemis.core.settings.impl.AddressFullMessagePolicy.PAGE;
+
+import com.wavefront.agent.core.queues.QueueInfo;
+import com.wavefront.agent.core.queues.QueueStats;
+import com.yammer.metrics.Metrics;
+import com.yammer.metrics.core.Gauge;
+import com.yammer.metrics.core.Histogram;
+import com.yammer.metrics.core.MetricName;
+import com.yammer.metrics.util.JmxGauge;
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.*;
+import java.util.concurrent.ConcurrentHashMap;
+import javax.management.MalformedObjectNameException;
+import javax.management.ObjectName;
+import org.apache.activemq.artemis.api.core.*;
+import org.apache.activemq.artemis.api.core.client.*;
+import org.apache.activemq.artemis.core.config.Configuration;
+import org.apache.activemq.artemis.core.config.impl.ConfigurationImpl;
+import org.apache.activemq.artemis.core.server.ActiveMQServer;
+import org.apache.activemq.artemis.core.server.JournalType;
+import org.apache.activemq.artemis.core.server.impl.ActiveMQServerImpl;
+import org.apache.activemq.artemis.core.settings.impl.AddressSettings;
+import org.apache.activemq.artemis.spi.core.security.ActiveMQJAASSecurityManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public abstract class ActiveMQBuffer implements Buffer {
+ public static final String MSG_ITEMS = "items";
+ public static final String MSG_BYTES = "bytes";
+ public static final String MSG_GZIPBYTES = "gzipbytes";
+ private static final Logger log =
+ LoggerFactory.getLogger(ActiveMQBuffer.class.getCanonicalName());
+ private static final Logger slowLog = log;
+ // new
+ // MessageDedupingLogger(LoggerFactory.getLogger(ActiveMQBuffer.class.getCanonicalName()),
+ // 1000,
+ // 1);
+ protected final Map countMetrics = new HashMap<>();
+ final ActiveMQServer activeMQServer;
+ final String name;
+ private final Map producers = new ConcurrentHashMap<>();
+ private final Map consumers = new ConcurrentHashMap<>();
+ private final Map> sizeMetrics = new HashMap<>();
+ private final Map timeMetrics = new HashMap<>();
+ private final int serverID;
+ protected Buffer nextBuffer;
+ private ServerLocator serverLocator;
+ private ClientSessionFactory factory;
+ private int maxMsgSize = 102400;
+ protected boolean compress = false;
+
+ private static final Histogram messageSize =
+ Metrics.newHistogram(new MetricName("buffer.message", "", "size"));
+ private static final Histogram messageGzipSize =
+ Metrics.newHistogram(new MetricName("buffer.message", "", "gzipsize"));
+
+ public ActiveMQBuffer(
+ int serverID, String name, boolean persistenceEnabled, File buffer, long maxMemory) {
+ this.serverID = serverID;
+ this.name = name;
+
+ Configuration config = new ConfigurationImpl();
+ config.setName(name);
+ config.setSecurityEnabled(false);
+ config.setPersistenceEnabled(persistenceEnabled);
+ config.setMessageExpiryScanPeriod(persistenceEnabled ? 0 : 1_000);
+ config.setGlobalMaxSize(maxMemory);
+
+ try {
+ Path tmpBuffer = Files.createTempDirectory("wfproxy");
+ config.setPagingDirectory(tmpBuffer.toString());
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+
+ if (persistenceEnabled) {
+ config.setMaxDiskUsage(70); // TODO: config option
+ config.setJournalDirectory(new File(buffer, "journal").getAbsolutePath());
+ config.setBindingsDirectory(new File(buffer, "bindings").getAbsolutePath());
+ config.setLargeMessagesDirectory(new File(buffer, "largemessages").getAbsolutePath());
+ config.setPagingDirectory(new File(buffer, "paging").getAbsolutePath());
+ config.setCreateBindingsDir(true);
+ config.setCreateJournalDir(true);
+ config.setJournalLockAcquisitionTimeout(10);
+ config.setJournalType(JournalType.NIO);
+ }
+
+ ActiveMQJAASSecurityManager securityManager = new ActiveMQJAASSecurityManager();
+ activeMQServer = new ActiveMQServerImpl(config, securityManager);
+ activeMQServer.registerActivationFailureListener(
+ exception ->
+ log.error(
+ "error creating buffer, "
+ + exception.getMessage()
+ + ". Review if there is another Proxy running."));
+
+ try {
+ config.addAcceptorConfiguration("in-vm", "vm://" + serverID);
+ activeMQServer.start();
+ } catch (Exception e) {
+ log.error("error creating buffer", e);
+ System.exit(-1);
+ }
+
+ if (!activeMQServer.isActive()) {
+ System.exit(-1);
+ }
+
+ AddressSettings addressSetting =
+ new AddressSettings()
+ .setMaxSizeMessages(-1)
+ .setMaxExpiryDelay(-1L)
+ .setMaxDeliveryAttempts(-1)
+ .setManagementBrowsePageSize(Integer.MAX_VALUE);
+
+ if (persistenceEnabled) {
+ addressSetting.setMaxSizeBytes(-1);
+ addressSetting.setAddressFullMessagePolicy(PAGE);
+ } else {
+ addressSetting.setMaxSizeBytes(maxMemory);
+ addressSetting.setAddressFullMessagePolicy(FAIL);
+ }
+
+ activeMQServer.getAddressSettingsRepository().setDefault(addressSetting);
+ }
+
+ protected String getUrl() {
+ return "vm://" + serverID;
+ }
+
+ @Override
+ public void registerNewQueueInfo(QueueInfo queue) {
+ for (int i = 0; i < queue.getNumberThreads(); i++) {
+ createQueue(queue.getName(), i);
+ }
+
+ try {
+ registerQueueMetrics(queue);
+ } catch (MalformedObjectNameException e) {
+ log.error("error", e);
+ }
+ }
+
+ void registerQueueMetrics(QueueInfo queue) throws MalformedObjectNameException {
+ ObjectName addressObjectName =
+ new ObjectName(
+ String.format(
+ "org.apache.activemq.artemis:broker=\"%s\",component=addresses,address=\"%s\"",
+ name, queue.getName()));
+
+ sizeMetrics.put(
+ queue.getName(),
+ Metrics.newGauge(
+ new MetricName("buffer." + name + "." + queue.getName(), "", "size"),
+ new JmxGauge(addressObjectName, "AddressSize")));
+
+ Metrics.newGauge(
+ new MetricName("buffer." + name + "." + queue.getName(), "", "usage"),
+ new JmxGauge(addressObjectName, "AddressLimitPercent"));
+
+ countMetrics.put(
+ queue.getName(),
+ (PointsGauge)
+ Metrics.newGauge(
+ new MetricName("buffer." + name + "." + queue.getName(), "", "points"),
+ new PointsGauge(queue, activeMQServer)));
+
+ timeMetrics.put(
+ queue.getName(),
+ Metrics.newHistogram(
+ new MetricName("buffer." + name + "." + queue.getName(), "", "queue-time")));
+ }
+
+ public void shutdown() {
+ try {
+ for (Map.Entry entry : producers.entrySet()) {
+ entry.getValue().close(); // session
+ entry.getValue().close(); // producer
+ }
+ for (Map.Entry entry : consumers.entrySet()) {
+ entry.getValue().close(); // session
+ entry.getValue().close(); // consumer
+ }
+
+ activeMQServer.stop();
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Override
+ public void sendPoints(String queue, List points) throws ActiveMQAddressFullException {
+ try {
+ doSendPoints(queue, points);
+ } catch (ActiveMQAddressFullException e) {
+ slowLog.error(getName() + " Queue full");
+ if (slowLog.isDebugEnabled()) {
+ slowLog.error("", e);
+ }
+ if (nextBuffer != null) {
+ nextBuffer.sendPoints(queue, points);
+ QueueStats.get(queue).queuedFull.inc();
+ } else {
+ throw e;
+ }
+ }
+ }
+
+ public void doSendPoints(String queue, List points) throws ActiveMQAddressFullException {
+ String str = String.join("\n", points);
+ // if the str is too long we split points in two, to avoid "largemessages" which use disk
+ // access.
+ if (str.length() > maxMsgSize) {
+ doSendPoints(queue, points.subList(0, points.size() / 2));
+ doSendPoints(queue, points.subList(points.size() / 2, points.size()));
+ return;
+ }
+
+ String sessionKey = "sendMsg." + queue + "." + Thread.currentThread().getName();
+ Session mqCtx =
+ producers.computeIfAbsent(
+ sessionKey,
+ s -> {
+ try {
+ checkConnection();
+ ClientSession session = factory.createSession();
+ ClientProducer producer = session.createProducer(queue);
+ return new Session(session, producer);
+ } catch (Exception e) {
+ checkException(e);
+ throw new RuntimeException(e);
+ }
+ });
+ try {
+ ClientMessage message = mqCtx.session.createMessage(true);
+ message.putIntProperty(MSG_ITEMS, points.size());
+ message.putIntProperty(MSG_BYTES, str.length());
+ messageSize.update(str.length());
+ if (compress) {
+ byte[] strBuffer = GZIP.compress(str);
+ message.writeBodyBufferBytes(strBuffer);
+ message.putIntProperty(MSG_GZIPBYTES, strBuffer.length);
+ messageGzipSize.update(strBuffer.length);
+ } else {
+ message.writeBodyBufferString(str);
+ }
+ mqCtx.producer.send(message);
+ } catch (ActiveMQAddressFullException e) {
+ log.info("queue full: " + e.getMessage());
+ throw e;
+ } catch (ActiveMQObjectClosedException e) {
+ log.info("connection close: " + e.getMessage());
+ mqCtx.close();
+ producers.remove(sessionKey);
+ QueueStats.get(queue).internalError.inc();
+ if (nextBuffer != null) {
+ nextBuffer.sendPoints(queue, points);
+ } else {
+ sendPoints(queue, points);
+ }
+ } catch (Exception e) {
+ log.error("error", e);
+ throw new RuntimeException(e);
+ }
+ }
+
+ private void checkConnection() throws Exception {
+ if ((serverLocator == null) || (serverLocator.isClosed())) {
+ serverLocator = ActiveMQClient.createServerLocator(getUrl());
+ serverLocator.getMinLargeMessageSize();
+ maxMsgSize = (int) (serverLocator.getMinLargeMessageSize() * 0.8);
+ }
+ if ((factory == null) || (factory.isClosed())) {
+ factory = serverLocator.createSessionFactory();
+ }
+ }
+
+ @Override
+ public void onMsgBatch(QueueInfo queue, int idx, OnMsgDelegate delegate) {
+ String sessionKey = "onMsgBatch." + queue.getName() + "." + Thread.currentThread().getName();
+ Session mqCtx =
+ consumers.computeIfAbsent(
+ sessionKey,
+ s -> {
+ try {
+ checkConnection();
+ ClientSession session = factory.createSession(false, false);
+ ClientConsumer consumer = session.createConsumer(queue.getName() + "." + idx);
+ return new Session(session, consumer);
+ } catch (Exception e) {
+ checkException(e);
+ if (e instanceof ActiveMQConnectionTimedOutException) {
+ createQueue(queue.getName(), idx);
+ }
+ throw new RuntimeException(e);
+ }
+ });
+
+ try {
+ long start = System.currentTimeMillis();
+ mqCtx.session.start();
+ List batch = new ArrayList<>();
+ List toACK = new ArrayList<>();
+ boolean done = false;
+ boolean needRollBack = false;
+ int batchBytes = 0;
+ while (!done && ((System.currentTimeMillis() - start) < 1000)) {
+ ClientMessage msg = mqCtx.consumer.receive(100);
+ if (msg != null) {
+ String str;
+ if (compress) {
+ str = GZIP.decompress(msg);
+ } else {
+ str = msg.getBodyBuffer().readString();
+ }
+ List points = Arrays.asList(str.split("\n"));
+ boolean ok_size =
+ delegate.checkBatchSize(
+ batch.size(), batchBytes, points.size(), msg.getIntProperty(MSG_BYTES));
+ boolean ok_rate = delegate.checkRates(points.size(), batchBytes);
+ if (ok_size && ok_rate) {
+ toACK.add(msg);
+ batch.addAll(points);
+ batchBytes += msg.getIntProperty(MSG_BYTES);
+ } else {
+ if (!ok_rate) {
+ slowLog.info("rate limit reached on queue '" + queue.getName() + "'");
+ } else {
+ slowLog.info("payload limit reached on queue '" + queue.getName() + "'");
+ }
+ done = true;
+ needRollBack = true;
+ }
+ } else {
+ done = true;
+ }
+ }
+
+ try {
+ if (batch.size() > 0) {
+ delegate.processBatch(batch);
+ }
+ // commit all messages ACKed
+ toACK.forEach(
+ msg -> {
+ try {
+ msg.individualAcknowledge();
+ timeMetrics.get(queue.getName()).update(start - msg.getTimestamp());
+ } catch (ActiveMQException e) {
+ throw new RuntimeException(e);
+ }
+ });
+ mqCtx.session.commit();
+ if (needRollBack) {
+ // rollback all messages not ACKed (rate)
+ mqCtx.session.rollback();
+ }
+ } catch (Exception e) {
+ log.error(e.toString());
+ if (log.isDebugEnabled()) {
+ log.error("error", e);
+ }
+ // ACK all messages and then rollback so fail count go up
+ toACK.forEach(
+ msg -> {
+ try {
+ msg.individualAcknowledge();
+ } catch (ActiveMQException ex) {
+ throw new RuntimeException(ex);
+ }
+ });
+ mqCtx.session.rollback();
+ }
+ } catch (Throwable e) {
+ log.error("error", e);
+ mqCtx.close();
+ consumers.remove(sessionKey);
+ } finally {
+ try {
+ if (!mqCtx.session.isClosed()) {
+ mqCtx.session.stop();
+ }
+ } catch (ActiveMQException e) {
+ log.error("error", e);
+ mqCtx.close();
+ consumers.remove(sessionKey);
+ }
+ }
+ }
+
+ private void checkException(Exception e) {
+ if (e instanceof ActiveMQNotConnectedException) {
+ serverLocator = null;
+ factory = null;
+ }
+ }
+
+ private void createQueue(String queueName, int i) {
+ QueueConfiguration queue =
+ new QueueConfiguration(queueName + (i < 0 ? "" : ("." + i)))
+ .setAddress(queueName)
+ .setRoutingType(RoutingType.ANYCAST);
+
+ try (ServerLocator sl = ActiveMQClient.createServerLocator(getUrl());
+ ClientSessionFactory f = sl.createSessionFactory();
+ ClientSession session = f.createSession()) {
+ ClientSession.QueueQuery q = session.queueQuery(queue.getName());
+ if (!q.isExists()) {
+ session.createQueue(queue);
+ }
+ } catch (Exception e) {
+ log.error("error", e);
+ }
+ }
+
+ public void setNextBuffer(Buffer nextBuffer) {
+ this.nextBuffer = nextBuffer;
+ }
+
+ private class Session {
+ ClientSession session;
+ ClientConsumer consumer;
+ ClientProducer producer;
+
+ Session(ClientSession session, ClientConsumer consumer) {
+ this.session = session;
+ this.consumer = consumer;
+ }
+
+ public Session(ClientSession session, ClientProducer producer) {
+ this.session = session;
+ this.producer = producer;
+ }
+
+ void close() {
+ if (session != null) {
+ try {
+ session.close();
+ } catch (Throwable e) {
+ }
+ }
+ if (consumer != null) {
+ try {
+ consumer.close();
+ } catch (Throwable e) {
+ }
+ }
+ if (producer != null) {
+ try {
+ producer.close();
+ } catch (Throwable e) {
+ }
+ }
+ }
+ }
+}
diff --git a/proxy/src/main/java/com/wavefront/agent/core/buffers/Bridge.java b/proxy/src/main/java/com/wavefront/agent/core/buffers/Bridge.java
new file mode 100644
index 000000000..b4cf40a23
--- /dev/null
+++ b/proxy/src/main/java/com/wavefront/agent/core/buffers/Bridge.java
@@ -0,0 +1,77 @@
+package com.wavefront.agent.core.buffers;
+
+import static com.wavefront.agent.core.buffers.ActiveMQBuffer.MSG_ITEMS;
+
+import com.wavefront.agent.core.queues.QueueStats;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Timer;
+import java.util.TimerTask;
+import java.util.concurrent.TimeUnit;
+import org.apache.activemq.artemis.api.core.ActiveMQAddressFullException;
+import org.apache.activemq.artemis.api.core.ActiveMQException;
+import org.apache.activemq.artemis.core.message.impl.CoreMessage;
+import org.apache.activemq.artemis.core.server.MessageReference;
+import org.apache.activemq.artemis.core.server.ServerConsumer;
+import org.apache.activemq.artemis.core.server.impl.AckReason;
+import org.apache.activemq.artemis.core.server.plugin.ActiveMQServerMessagePlugin;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class Bridge implements ActiveMQServerMessagePlugin {
+ private static final Logger log = LoggerFactory.getLogger(Bridge.class.getCanonicalName());
+
+ private final MemoryBuffer memoryBuffer;
+ private final DiskBuffer diskBuffer;
+ private final Timer checkDiskFull;
+
+ public Bridge(MemoryBuffer memoryBuffer, DiskBuffer diskBuffer) {
+ this.memoryBuffer = memoryBuffer;
+ this.diskBuffer = diskBuffer;
+ checkDiskFull = new Timer(); // TODO stop the timer on shutdown ?
+ checkDiskFull.scheduleAtFixedRate(
+ new TimerTask() {
+ @Override
+ public void run() {
+ if (diskBuffer.isFull()) {
+ memoryBuffer.disableBridge();
+ } else {
+ memoryBuffer.enableBridge();
+ }
+ }
+ },
+ TimeUnit.MINUTES.toMillis(1),
+ TimeUnit.MINUTES.toMillis(1));
+ }
+
+ @Override
+ public void messageAcknowledged(MessageReference ref, AckReason reason, ServerConsumer consumer)
+ throws ActiveMQException {
+ if (reason == AckReason.KILLED || reason == AckReason.EXPIRED) {
+ String queue = ref.getQueue().getAddress().toString();
+ CoreMessage msg = (CoreMessage) ref.getMessage().copy();
+ String stringBody = msg.getReadOnlyBodyBuffer().readString();
+ List points = Arrays.asList(stringBody.split("\n"));
+ QueueStats stats = QueueStats.get(queue);
+ try {
+ diskBuffer.sendPoints(queue, points);
+ switch (reason) {
+ case KILLED:
+ stats.queuedFailed.inc(ref.getMessage().getIntProperty(MSG_ITEMS));
+ break;
+ case EXPIRED:
+ stats.queuedExpired.inc(ref.getMessage().getIntProperty(MSG_ITEMS));
+ break;
+ }
+ } catch (ActiveMQAddressFullException e) {
+ // disk buffer full, we put the metrics back to memory
+ // and disable this.
+ memoryBuffer.sendPoints(queue, points);
+ memoryBuffer.disableBridge();
+ } catch (Exception e) {
+ log.error("Error deleting expired messages", e);
+ throw new ActiveMQException("Error deleting expired messages. " + e.getMessage());
+ }
+ }
+ }
+}
diff --git a/proxy/src/main/java/com/wavefront/agent/core/buffers/Buffer.java b/proxy/src/main/java/com/wavefront/agent/core/buffers/Buffer.java
new file mode 100644
index 000000000..8d67a92ff
--- /dev/null
+++ b/proxy/src/main/java/com/wavefront/agent/core/buffers/Buffer.java
@@ -0,0 +1,17 @@
+package com.wavefront.agent.core.buffers;
+
+import com.wavefront.agent.core.queues.QueueInfo;
+import java.util.List;
+import org.apache.activemq.artemis.api.core.ActiveMQAddressFullException;
+
+public interface Buffer {
+ void registerNewQueueInfo(QueueInfo key);
+
+ void onMsgBatch(QueueInfo key, int idx, OnMsgDelegate func);
+
+ void sendPoints(String queue, List strPoint) throws ActiveMQAddressFullException;
+
+ String getName();
+
+ int getPriority();
+}
diff --git a/proxy/src/main/java/com/wavefront/agent/core/buffers/BuffersManager.java b/proxy/src/main/java/com/wavefront/agent/core/buffers/BuffersManager.java
new file mode 100644
index 000000000..814c00cde
--- /dev/null
+++ b/proxy/src/main/java/com/wavefront/agent/core/buffers/BuffersManager.java
@@ -0,0 +1,86 @@
+package com.wavefront.agent.core.buffers;
+
+import com.wavefront.agent.core.queues.QueueInfo;
+import com.wavefront.agent.data.EntityRateLimiter;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class BuffersManager {
+ private static final Map registeredQueues = new HashMap<>();
+ private static MemoryBuffer memoryBuffer;
+ private static DiskBuffer diskBuffer;
+ private static Buffer external;
+
+ public static void init(BuffersManagerConfig cfg) {
+
+ memoryBuffer = new MemoryBuffer(0, "memory", cfg.memoryCfg);
+
+ if (cfg.disk) {
+ diskBuffer = new DiskBuffer(1, "disk", cfg.diskCfg);
+ memoryBuffer.createBridge(diskBuffer);
+ }
+
+ if (cfg.external) {
+ external = new SQSBuffer(cfg.sqsCfg);
+ if (cfg.disk) {
+ diskBuffer.setNextBuffer(external);
+ } else {
+ memoryBuffer.setNextBuffer(external);
+ }
+ }
+ }
+
+ public static void shutdown() {
+ registeredQueues.clear();
+
+ if (memoryBuffer != null) {
+ memoryBuffer.shutdown();
+ memoryBuffer = null;
+ }
+ if (diskBuffer != null) {
+ diskBuffer.shutdown();
+ diskBuffer = null;
+ }
+ }
+
+ public static List registerNewQueueIfNeedIt(QueueInfo queue) {
+ List buffers = new ArrayList<>();
+ Boolean registered = registeredQueues.computeIfAbsent(queue.getName(), s -> false);
+ if (!registered) { // is controlled by queue manager, but we do it also here just in case.
+ memoryBuffer.registerNewQueueInfo(queue);
+ buffers.add(memoryBuffer);
+
+ if (diskBuffer != null) {
+ diskBuffer.registerNewQueueInfo(queue);
+ buffers.add(diskBuffer);
+ }
+
+ if (external != null) {
+ external.registerNewQueueInfo(queue);
+ buffers.add(external);
+ }
+
+ registeredQueues.put(queue.getName(), true);
+ }
+
+ queue.getTenants().values().forEach(BuffersManager::registerNewQueueIfNeedIt);
+ return buffers;
+ }
+
+ public static void sendMsg(QueueInfo queue, String strPoint) {
+ memoryBuffer.sendPoint(queue, strPoint);
+ }
+
+ public static void onMsgBatch(
+ QueueInfo handler, int idx, EntityRateLimiter rateLimiter, OnMsgDelegate func) {
+ memoryBuffer.onMsgBatch(handler, idx, func);
+ }
+
+ public static void truncateBacklog() {
+ if (diskBuffer != null) {
+ diskBuffer.truncate();
+ }
+ }
+}
diff --git a/proxy/src/main/java/com/wavefront/agent/core/buffers/BuffersManagerConfig.java b/proxy/src/main/java/com/wavefront/agent/core/buffers/BuffersManagerConfig.java
new file mode 100644
index 000000000..7a6d2f0c3
--- /dev/null
+++ b/proxy/src/main/java/com/wavefront/agent/core/buffers/BuffersManagerConfig.java
@@ -0,0 +1,10 @@
+package com.wavefront.agent.core.buffers;
+
/** Configuration for {@link BuffersManager}: which buffers to build and their settings. */
public class BuffersManagerConfig {
  // Whether to back the memory buffer with a disk buffer.
  public boolean disk = true;
  // Whether to chain an external (SQS) buffer after the last local buffer.
  public boolean external = false;

  public SQSBufferConfig sqsCfg = new SQSBufferConfig();
  public final MemoryBufferConfig memoryCfg = new MemoryBufferConfig();
  public final DiskBufferConfig diskCfg = new DiskBufferConfig();
}
diff --git a/proxy/src/main/java/com/wavefront/agent/core/buffers/DiskBuffer.java b/proxy/src/main/java/com/wavefront/agent/core/buffers/DiskBuffer.java
new file mode 100644
index 000000000..040d3f290
--- /dev/null
+++ b/proxy/src/main/java/com/wavefront/agent/core/buffers/DiskBuffer.java
@@ -0,0 +1,82 @@
+package com.wavefront.agent.core.buffers;
+
+import com.yammer.metrics.Metrics;
+import com.yammer.metrics.core.MetricName;
+import com.yammer.metrics.util.JmxGauge;
+import java.util.List;
+import javax.management.MalformedObjectNameException;
+import javax.management.ObjectName;
+import org.apache.activemq.artemis.api.core.ActiveMQAddressFullException;
+import org.apache.activemq.artemis.api.core.management.AddressControl;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class DiskBuffer extends ActiveMQBuffer implements Buffer {
+ private static final Logger log = LoggerFactory.getLogger(DiskBuffer.class.getCanonicalName());
+ private static final Logger slowLog = log;
+ // new MessageDedupingLogger(LoggerFactory.getLogger(MemoryBuffer.class.getCanonicalName()),
+ // 1000, 1);
+
+ public DiskBuffer(int level, String name, DiskBufferConfig cfg) {
+ super(level, name, true, cfg.buffer, cfg.maxMemory);
+ this.compress = true;
+
+ try {
+ ObjectName addressObjectName =
+ new ObjectName(String.format("org.apache.activemq.artemis:broker=\"%s\"", name));
+ Metrics.newGauge(
+ new MetricName("buffer." + name, "", "diskUsage"),
+ new JmxGauge(addressObjectName, "DiskStoreUsage"));
+ Metrics.newGauge(
+ new MetricName("buffer." + name, "", "diskUsageMax"),
+ new JmxGauge(addressObjectName, "MaxDiskUsage"));
+
+ } catch (MalformedObjectNameException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ // @Override
+ // protected String getUrl() {
+ // return "tcp://localhost:61616";
+ // }
+
+ @Override
+ public void sendPoints(String queue, List points) throws ActiveMQAddressFullException {
+ if (isFull()) {
+ slowLog.error(getName() + " Queue full");
+ throw new ActiveMQAddressFullException();
+ }
+ super.sendPoints(queue, points);
+ }
+
+ @Override
+ public String getName() {
+ return "Disk";
+ }
+
+ @Override
+ public int getPriority() {
+ return Thread.NORM_PRIORITY;
+ }
+
+ public boolean isFull() {
+ return activeMQServer.getPagingManager().isDiskFull();
+ }
+
+ public void truncate() {
+ Object[] addresses = activeMQServer.getManagementService().getResources(AddressControl.class);
+
+ try {
+ for (Object obj : addresses) {
+ AddressControl address = (AddressControl) obj;
+ if (!address.getAddress().startsWith("active")) {
+ address.purge();
+ log.info(address.getAddress() + " buffer truncated");
+ }
+ }
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+}
diff --git a/proxy/src/main/java/com/wavefront/agent/core/buffers/DiskBufferConfig.java b/proxy/src/main/java/com/wavefront/agent/core/buffers/DiskBufferConfig.java
new file mode 100644
index 000000000..459f588fa
--- /dev/null
+++ b/proxy/src/main/java/com/wavefront/agent/core/buffers/DiskBufferConfig.java
@@ -0,0 +1,16 @@
+package com.wavefront.agent.core.buffers;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+
/** Configuration for {@link DiskBuffer}: storage directory and memory cap. */
public class DiskBufferConfig {
  /** Directory where the disk buffer stores its data; must exist. */
  public File buffer;
  /** Memory cap (bytes) passed to the underlying buffer. */
  public long maxMemory;

  /**
   * Fails fast when the configuration is unusable.
   *
   * @throws IllegalArgumentException if the buffer directory is unset or missing
   */
  public void validate() {
    // Guard against an unset directory so we raise a clear error instead of an NPE.
    if (buffer == null) {
      throw new IllegalArgumentException("Buffer directory not configured");
    }
    if (!buffer.exists() || !buffer.isDirectory()) {
      throw new IllegalArgumentException(
          new FileNotFoundException("Buffer directory '" + buffer + "' Not Found"));
    }
  }
}
diff --git a/proxy/src/main/java/com/wavefront/agent/core/buffers/Exporter.java b/proxy/src/main/java/com/wavefront/agent/core/buffers/Exporter.java
new file mode 100644
index 000000000..71171d768
--- /dev/null
+++ b/proxy/src/main/java/com/wavefront/agent/core/buffers/Exporter.java
@@ -0,0 +1,93 @@
+package com.wavefront.agent.core.buffers;
+
+import static com.wavefront.agent.core.buffers.ActiveMQBuffer.MSG_GZIPBYTES;
+
+import com.wavefront.data.ReportableEntityType;
+import java.io.*;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Collectors;
+import org.apache.activemq.artemis.api.core.SimpleString;
+import org.apache.activemq.artemis.core.message.impl.CoreMessage;
+import org.apache.activemq.artemis.core.server.ActiveMQServer;
+import org.apache.activemq.artemis.core.server.MessageReference;
+import org.apache.activemq.artemis.utils.collections.LinkedListIterator;
+
+public class Exporter {
+ public static void export(String bufferStr, String dirStr, String atomsStr, boolean retainData) {
+ List atomsList = Arrays.asList(atomsStr.split(","));
+ atomsList.replaceAll(String::trim);
+ List atoms =
+ atomsList.stream()
+ .map(
+ s -> {
+ ReportableEntityType atom = ReportableEntityType.fromString(s);
+ if (atom == null) {
+ throw new IllegalArgumentException("invalid atom '" + s + "'");
+ }
+ return atom;
+ })
+ .collect(Collectors.toList());
+ File dir = new File(dirStr);
+
+ DiskBufferConfig config = new DiskBufferConfig();
+ config.buffer = new File(bufferStr);
+ DiskBuffer buffer = new DiskBuffer(1, "disk", config);
+ atoms.forEach(
+ atom -> {
+ ActiveMQServer amq = buffer.activeMQServer;
+ try {
+ File outFile = new File(dir, atom.toString().toLowerCase() + ".txt");
+ System.out.println(
+ "Exporting '" + atom + "' from '" + dirStr + "' to '" + outFile + "'");
+ AtomicInteger c = new AtomicInteger();
+ BufferedWriter out = new BufferedWriter(new FileWriter(outFile));
+ amq.getPostOffice()
+ .listQueuesForAddress(SimpleString.toSimpleString(atom.name()))
+ .forEach(
+ queue -> {
+ LinkedListIterator it = queue.browserIterator();
+ while (it.hasNext()) {
+ CoreMessage msg = (CoreMessage) it.next().getMessage();
+ String str = "";
+ if (msg.getIntProperty(MSG_GZIPBYTES) != 0) {
+ str = GZIP.decompress(msg);
+ } else {
+ str = msg.getReadOnlyBodyBuffer().readString();
+ }
+ List points = Arrays.asList(str.split("\n"));
+ try {
+ out.write(String.join("\n", points));
+ out.write("\n");
+ } catch (IOException e) {
+ throw new RuntimeException("Error writing on the output file.", e);
+ }
+ if (!retainData) {
+ try {
+ queue.deleteReference(msg.getMessageID());
+ } catch (Exception e) {
+ throw new RuntimeException("Error deleting data from the buffer", e);
+ }
+ }
+ if (c.addAndGet(points.size()) % 100_000 == 0) {
+ System.out.print(".");
+ }
+ }
+ });
+ out.flush();
+ out.close();
+ System.out.println(
+ ".\nDone, exported "
+ + (retainData ? "" : "and deleted ")
+ + c
+ + " "
+ + atom.toString()
+ + "\n");
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ });
+ buffer.shutdown();
+ }
+}
diff --git a/proxy/src/main/java/com/wavefront/agent/core/buffers/GZIP.java b/proxy/src/main/java/com/wavefront/agent/core/buffers/GZIP.java
new file mode 100644
index 000000000..6a1e48c17
--- /dev/null
+++ b/proxy/src/main/java/com/wavefront/agent/core/buffers/GZIP.java
@@ -0,0 +1,57 @@
+package com.wavefront.agent.core.buffers;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import com.yammer.metrics.Metrics;
+import com.yammer.metrics.core.Histogram;
+import com.yammer.metrics.core.MetricName;
+import java.io.*;
+import java.util.zip.GZIPInputStream;
+import java.util.zip.GZIPOutputStream;
+import org.apache.activemq.artemis.api.core.ICoreMessage;
+import org.apache.commons.io.IOUtils;
+
+class GZIP {
+ private static final Histogram compressTime =
+ Metrics.newHistogram(new MetricName("buffer.gzip.compress", "", "time"));
+ private static final Histogram decompressTime =
+ Metrics.newHistogram(new MetricName("buffer.gzip.decompress", "", "time"));
+
+ protected static byte[] compress(final String stringToCompress) {
+ long start = System.currentTimeMillis();
+ try (final ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ final GZIPOutputStream gzipOutput = new GZIPOutputStream(baos)) {
+ gzipOutput.write(stringToCompress.getBytes(UTF_8));
+ gzipOutput.finish();
+ return baos.toByteArray();
+ } catch (IOException e) {
+ throw new UncheckedIOException("Error while compression!", e);
+ } finally {
+ compressTime.update(System.currentTimeMillis() - start);
+ }
+ }
+
+ protected static String decompress(ICoreMessage msg) {
+ byte[] array = msg.getBodyBuffer().byteBuf().array();
+
+ long start = System.currentTimeMillis();
+ try (final ByteArrayInputStream is = new ByteArrayInputStream(array)) {
+ is.read(); // First 4 byte are the message length
+ is.read();
+ is.read();
+ is.read();
+ try (final GZIPInputStream gzipInput = new GZIPInputStream(is);
+ final StringWriter stringWriter = new StringWriter()) {
+ IOUtils.copy(gzipInput, stringWriter, UTF_8);
+ return stringWriter.toString();
+ } catch (IOException e) {
+ throw new UncheckedIOException("Error while decompression!", e);
+ } finally {
+ System.out.println("-->" + (System.currentTimeMillis() - start));
+ decompressTime.update(System.currentTimeMillis() - start);
+ }
+ } catch (IOException e) {
+ throw new UncheckedIOException("Error while decompression!", e);
+ }
+ }
+}
diff --git a/proxy/src/main/java/com/wavefront/agent/core/buffers/MemoryBuffer.java b/proxy/src/main/java/com/wavefront/agent/core/buffers/MemoryBuffer.java
new file mode 100644
index 000000000..4ff068b6a
--- /dev/null
+++ b/proxy/src/main/java/com/wavefront/agent/core/buffers/MemoryBuffer.java
@@ -0,0 +1,161 @@
+package com.wavefront.agent.core.buffers;
+
+import static org.apache.activemq.artemis.core.settings.impl.AddressFullMessagePolicy.FAIL;
+
+import com.wavefront.agent.core.queues.QueueInfo;
+import com.wavefront.agent.core.queues.QueueStats;
+import com.wavefront.common.NamedThreadFactory;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.concurrent.*;
+import org.apache.activemq.artemis.api.core.ActiveMQAddressFullException;
+import org.apache.activemq.artemis.api.core.management.QueueControl;
+import org.apache.activemq.artemis.core.settings.impl.AddressSettings;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class MemoryBuffer extends ActiveMQBuffer {
+ private static final Logger log = LoggerFactory.getLogger(MemoryBuffer.class.getCanonicalName());
+ private static final Logger slowLog = log;
+ // new MessageDedupingLogger(LoggerFactory.getLogger(MemoryBuffer.class.getCanonicalName()),
+ // 1000, 1);
+ private static final Logger droppedPointsLogger = LoggerFactory.getLogger("RawDroppedPoints");
+
+ private static final Map> midBuffers =
+ new ConcurrentHashMap<>();
+ private final ScheduledExecutorService executor;
+ private final MemoryBufferConfig cfg;
+
+ public MemoryBuffer(int level, String name, MemoryBufferConfig cfg) {
+ super(level, name, false, null, cfg.maxMemory);
+ this.cfg = cfg;
+ this.compress = false;
+ executor =
+ Executors.newScheduledThreadPool(
+ Runtime.getRuntime().availableProcessors(),
+ new NamedThreadFactory("memory-buffer-receiver"));
+ }
+
+ public String getName() {
+ return "Memory";
+ }
+
+ @Override
+ public int getPriority() {
+ return Thread.MAX_PRIORITY;
+ }
+
+ public void shutdown() {
+ executor.shutdown();
+ try {
+ executor.awaitTermination(1, TimeUnit.MINUTES);
+ } catch (InterruptedException e) {
+ log.error("Error during MemoryBuffer shutdown. " + e);
+ }
+
+ // TODO: implement dump to external queue
+ if (this.nextBuffer instanceof DiskBuffer) {
+ if (((DiskBuffer) nextBuffer).isFull()) {
+ return;
+ }
+ }
+
+ int counter = 0;
+ try {
+ Object[] queues = activeMQServer.getManagementService().getResources(QueueControl.class);
+ for (Object obj : queues) {
+ QueueControl queue = (QueueControl) obj;
+ int c = queue.expireMessages("");
+ counter += c;
+ }
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+
+ if (counter != 0) {
+ log.info("'" + counter + "' points sent to the buffer disk");
+ }
+
+ super.shutdown();
+ }
+
+ public void sendPoint(QueueInfo queue, String strPoint) {
+ QueueStats.get(queue.getName()).msgLength.update(strPoint.length());
+ LinkedTransferQueue midBuffer =
+ midBuffers.computeIfAbsent(queue.getName(), s -> new LinkedTransferQueue<>());
+ midBuffer.add(strPoint);
+ }
+
+ @Override
+ public void registerNewQueueInfo(QueueInfo queue) {
+ super.registerNewQueueInfo(queue);
+ for (int i = 0; i < queue.getNumberThreads(); i++) {
+ executor.scheduleAtFixedRate(new sender(queue), 1, 1, TimeUnit.SECONDS);
+ }
+ }
+
+ protected void createBridge(DiskBuffer diskBuffer) {
+ setNextBuffer(diskBuffer);
+ activeMQServer.registerBrokerPlugin(new Bridge(this, diskBuffer));
+ enableBridge();
+ }
+
+ protected void enableBridge() {
+ log.info("bridge enabled");
+ AddressSettings addressSetting = activeMQServer.getAddressSettingsRepository().getDefault();
+ addressSetting.setMaxExpiryDelay(cfg.msgExpirationTime);
+ addressSetting.setMaxDeliveryAttempts(cfg.msgRetry);
+ addressSetting.setMaxSizeBytes(cfg.maxMemory);
+ addressSetting.setAddressFullMessagePolicy(FAIL);
+ activeMQServer.getAddressSettingsRepository().setDefault(addressSetting);
+ }
+
+ protected void disableBridge() {
+ log.info("bridge disabled");
+ AddressSettings addressSetting = activeMQServer.getAddressSettingsRepository().getDefault();
+ addressSetting.setMaxExpiryDelay(-1L);
+ addressSetting.setMaxDeliveryAttempts(-1);
+ addressSetting.setMaxSizeBytes(cfg.maxMemory);
+ addressSetting.setAddressFullMessagePolicy(FAIL);
+ activeMQServer.getAddressSettingsRepository().setDefault(addressSetting);
+ }
+
+ protected void flush(QueueInfo queue) {
+ new sender(queue).run();
+ }
+
+ private class sender implements Runnable {
+ private final QueueInfo queue;
+
+ private sender(QueueInfo queue) {
+ this.queue = queue;
+ }
+
+ @Override
+ public void run() {
+ LinkedTransferQueue midBuffer = midBuffers.get(queue.getName());
+ if ((midBuffer != null) && (midBuffer.size() != 0)) {
+ boolean done = false;
+ while (!done) {
+ ArrayList metrics = new ArrayList<>();
+ if (midBuffer.drainTo(metrics, queue.getMaxItemsPerMessage()) != 0) {
+ try {
+ sendPoints(queue.getName(), metrics);
+ } catch (ActiveMQAddressFullException e) {
+ slowLog.error("All Queues full, dropping " + metrics.size() + " points.");
+ if (slowLog.isDebugEnabled()) {
+ slowLog.error("", e);
+ }
+ QueueStats.get(queue.getName()).dropped.inc(metrics.size());
+ if (droppedPointsLogger.isInfoEnabled()) {
+ metrics.forEach(point -> droppedPointsLogger.info(point, queue.getEntityType()));
+ }
+ }
+ } else {
+ done = true;
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/proxy/src/main/java/com/wavefront/agent/core/buffers/MemoryBufferConfig.java b/proxy/src/main/java/com/wavefront/agent/core/buffers/MemoryBufferConfig.java
new file mode 100644
index 000000000..f4285b071
--- /dev/null
+++ b/proxy/src/main/java/com/wavefront/agent/core/buffers/MemoryBufferConfig.java
@@ -0,0 +1,7 @@
+package com.wavefront.agent.core.buffers;
+
/** Configuration for {@link MemoryBuffer}. */
public class MemoryBufferConfig {
  // Max delivery attempts before a message is considered failed (broker maxDeliveryAttempts).
  public int msgRetry = 3;
  // Max expiry delay applied to messages; -1 disables expiration.
  public long msgExpirationTime = -1;
  // Max size in bytes for the in-memory broker address; -1 means no limit.
  public long maxMemory = -1;
}
diff --git a/proxy/src/main/java/com/wavefront/agent/core/buffers/OnMsgDelegate.java b/proxy/src/main/java/com/wavefront/agent/core/buffers/OnMsgDelegate.java
new file mode 100644
index 000000000..0df2ee517
--- /dev/null
+++ b/proxy/src/main/java/com/wavefront/agent/core/buffers/OnMsgDelegate.java
@@ -0,0 +1,11 @@
+package com.wavefront.agent.core.buffers;
+
+import java.util.List;
+
/** Callback used by buffers to hand point batches to a sender and control batch growth. */
public interface OnMsgDelegate {
  /** Processes one batch of points; may throw to signal the batch was not handled. */
  void processBatch(List<String> batch) throws Exception;

  /**
   * Returns true while the batch may grow by {@code newItems}/{@code newBytes} on top of
   * the current {@code items}/{@code bytes}.
   */
  boolean checkBatchSize(int items, int bytes, int newItems, int newBytes);

  /** Returns true when rate limits allow sending {@code newItems}/{@code newBytes} more. */
  boolean checkRates(int newItems, int newBytes);
}
diff --git a/proxy/src/main/java/com/wavefront/agent/core/buffers/PointsGauge.java b/proxy/src/main/java/com/wavefront/agent/core/buffers/PointsGauge.java
new file mode 100644
index 000000000..b61e40983
--- /dev/null
+++ b/proxy/src/main/java/com/wavefront/agent/core/buffers/PointsGauge.java
@@ -0,0 +1,68 @@
+package com.wavefront.agent.core.buffers;
+
+import static com.wavefront.agent.core.buffers.ActiveMQBuffer.MSG_ITEMS;
+
+import com.wavefront.agent.PushAgent;
+import com.wavefront.agent.core.queues.QueueInfo;
+import com.wavefront.common.NamedThreadFactory;
+import com.yammer.metrics.core.Gauge;
+import java.util.Map;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import org.apache.activemq.artemis.api.core.management.AddressControl;
+import org.apache.activemq.artemis.api.core.management.QueueControl;
+import org.apache.activemq.artemis.api.core.management.ResourceNames;
+import org.apache.activemq.artemis.core.server.ActiveMQServer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class PointsGauge extends Gauge {
+ private static final Logger log = LoggerFactory.getLogger(PointsGauge.class.getCanonicalName());
+ private static final ScheduledExecutorService executor =
+ Executors.newScheduledThreadPool(2, new NamedThreadFactory("PointsGauge"));
+ private Long pointsCount = 0L;
+ private final QueueInfo queue;
+ private final ActiveMQServer amq;
+
+ public PointsGauge(QueueInfo queue, ActiveMQServer amq) {
+ this.queue = queue;
+ this.amq = amq;
+ executor.scheduleAtFixedRate(() -> doCount(), 1, 1, TimeUnit.MINUTES);
+ }
+
+ @Override
+ public Long value() {
+ return pointsCount;
+ }
+
+ long doCount() {
+ long count = 0;
+
+ AddressControl address =
+ (AddressControl)
+ amq.getManagementService().getResource(ResourceNames.ADDRESS + queue.getName());
+
+ try {
+ for (String queueName : address.getQueueNames()) {
+ QueueControl queueControl =
+ (QueueControl) amq.getManagementService().getResource(ResourceNames.QUEUE + queueName);
+ Map[] messages = queueControl.listMessages("");
+ for (Map message : messages) {
+ int p = (int) message.get(MSG_ITEMS);
+ count += p;
+ }
+ }
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ PushAgent.stats.info(
+ "[buffer."
+ + amq.getConfiguration().getName()
+ + "."
+ + queue.getName()
+ + "] points: "
+ + pointsCount);
+ return pointsCount = count;
+ }
+}
diff --git a/proxy/src/main/java/com/wavefront/agent/core/buffers/SQSBuffer.java b/proxy/src/main/java/com/wavefront/agent/core/buffers/SQSBuffer.java
new file mode 100644
index 000000000..2659bc24f
--- /dev/null
+++ b/proxy/src/main/java/com/wavefront/agent/core/buffers/SQSBuffer.java
@@ -0,0 +1,131 @@
+package com.wavefront.agent.core.buffers;
+
+import com.amazonaws.AmazonClientException;
+import com.amazonaws.services.sqs.AmazonSQS;
+import com.amazonaws.services.sqs.AmazonSQSClientBuilder;
+import com.amazonaws.services.sqs.model.*;
+import com.wavefront.agent.core.queues.QueueInfo;
+import java.util.*;
+import org.apache.activemq.artemis.api.core.ActiveMQAddressFullException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class SQSBuffer implements Buffer {
+ private static final Logger log = LoggerFactory.getLogger(SQSBuffer.class.getCanonicalName());
+
+ private final String template;
+ private final AmazonSQS client;
+ private final Map queuesUrls = new HashMap<>();
+ private final String visibilityTimeOut;
+
+ public SQSBuffer(SQSBufferConfig cfg) {
+ this.template = cfg.template;
+ this.client = AmazonSQSClientBuilder.standard().withRegion(cfg.region).build();
+ visibilityTimeOut = String.valueOf(cfg.vto);
+ }
+
+ public String getName() {
+ return "SQS";
+ }
+
+ @Override
+ public int getPriority() {
+ return Thread.NORM_PRIORITY;
+ }
+
+ @Override
+ public void registerNewQueueInfo(QueueInfo queue) {
+ String queueName = queue.getName();
+ String queueUrl = null;
+
+ try {
+ GetQueueUrlResult queueUrlResult =
+ client.getQueueUrl(new GetQueueUrlRequest().withQueueName(queueName));
+ queueUrl = queueUrlResult.getQueueUrl();
+ } catch (QueueDoesNotExistException e) {
+ log.info("Queue " + queueName + " does not exist...creating for first time");
+ } catch (AmazonClientException e) {
+ log.error("Unable to lookup queue by name in aws " + queueName, e);
+ }
+
+ if (queueUrl == null) {
+ try {
+ CreateQueueRequest request = new CreateQueueRequest();
+ request
+ .addAttributesEntry(
+ QueueAttributeName.MessageRetentionPeriod.toString(), "1209600") // 14 days
+ .addAttributesEntry(QueueAttributeName.ReceiveMessageWaitTimeSeconds.toString(), "20")
+ .addAttributesEntry(
+ QueueAttributeName.VisibilityTimeout.toString(), visibilityTimeOut) // 1 minute
+ .setQueueName(queueName);
+ CreateQueueResult result = client.createQueue(request);
+ queueUrl = result.getQueueUrl();
+ log.info("queue " + queueName + " created. url:" + queueUrl);
+ } catch (AmazonClientException e) {
+ log.error("Error creating queue in AWS " + queueName, e);
+ }
+ }
+
+ queuesUrls.put(queue.getName(), queueUrl);
+ }
+
+ @Override
+ public void onMsgBatch(QueueInfo queue, int idx, OnMsgDelegate func) {
+
+ String queueUrl = queuesUrls.get(queue.getName());
+ long start = System.currentTimeMillis();
+ List batch = new ArrayList<>();
+ List messagesToDelete = new ArrayList<>();
+ boolean done = false;
+ while (!done && ((System.currentTimeMillis() - start) < 1000)) {
+ ReceiveMessageRequest receiveRequest = new ReceiveMessageRequest(queueUrl);
+ receiveRequest.setMaxNumberOfMessages(1);
+ receiveRequest.setWaitTimeSeconds(1);
+ ReceiveMessageResult result = client.receiveMessage(receiveRequest);
+ List messages = result.getMessages();
+ if (messages.size() == 1) {
+ List points = Arrays.asList(messages.get(0).getBody().split("\n"));
+ batch.addAll(points);
+ messagesToDelete.addAll(messages);
+ done = !func.checkBatchSize(batch.size(), 0, 0, 0);
+ } else {
+ done = true;
+ }
+ }
+
+ try {
+ if (batch.size() > 0) {
+ func.processBatch(batch);
+ }
+ messagesToDelete.forEach(
+ message -> {
+ client.deleteMessage(queueUrl, message.getReceiptHandle());
+ });
+ } catch (Exception e) {
+ log.error(e.getMessage());
+ if (log.isDebugEnabled()) {
+ log.error("error", e);
+ }
+ }
+ }
+
+ @Override
+ public void sendPoints(String queue, List points) throws ActiveMQAddressFullException {
+ try {
+ SendMessageRequest request = new SendMessageRequest();
+ request.setMessageBody(String.join("\n", points));
+ request.setQueueUrl(queuesUrls.get(queue));
+ client.sendMessage(request);
+ } catch (AmazonClientException e) {
+ throw new RuntimeException("Error sending message to queue '" + queue + "'", e);
+ }
+ }
+
+ public void truncateQueue(String queue) {
+ try {
+ client.purgeQueue(new PurgeQueueRequest(queuesUrls.get(queue)));
+ } catch (AmazonClientException e) {
+ log.error("Error truncating queue '" + queue + "'", e);
+ }
+ }
+}
diff --git a/proxy/src/main/java/com/wavefront/agent/core/buffers/SQSBufferConfig.java b/proxy/src/main/java/com/wavefront/agent/core/buffers/SQSBufferConfig.java
new file mode 100644
index 000000000..c8cc01395
--- /dev/null
+++ b/proxy/src/main/java/com/wavefront/agent/core/buffers/SQSBufferConfig.java
@@ -0,0 +1,22 @@
+package com.wavefront.agent.core.buffers;
+
+import org.apache.commons.lang.StringUtils;
+
+public class SQSBufferConfig {
+ public String template;
+ public String region;
+ public String id;
+ public int vto = 60;
+
+ public void validate() {
+ if (StringUtils.isBlank(id)) {
+ throw new IllegalArgumentException(
+ "sqsQueueIdentifier cannot be blank! Please correct " + "your configuration settings.");
+ }
+
+ if (!(template.contains("{{id}}") && template.contains("{{entity}}"))) {
+ throw new IllegalArgumentException(
+ "sqsQueueNameTemplate is invalid! Must contain " + "{{id}} and {{entity}} replacements.");
+ }
+ }
+}
diff --git a/proxy/src/main/java/com/wavefront/agent/core/handlers/AbstractReportableEntityHandler.java b/proxy/src/main/java/com/wavefront/agent/core/handlers/AbstractReportableEntityHandler.java
new file mode 100644
index 000000000..f604167f2
--- /dev/null
+++ b/proxy/src/main/java/com/wavefront/agent/core/handlers/AbstractReportableEntityHandler.java
@@ -0,0 +1,201 @@
+package com.wavefront.agent.core.handlers;
+
+import com.google.common.util.concurrent.RateLimiter;
+import com.wavefront.agent.PushAgent;
+import com.wavefront.agent.core.queues.QueueInfo;
+import com.yammer.metrics.Metrics;
+import com.yammer.metrics.core.*;
+import java.util.Timer;
+import java.util.TimerTask;
+import java.util.function.Function;
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import org.jetbrains.annotations.NotNull;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Base class for all {@link ReportableEntityHandler} implementations.
+ *
+ * @param the type of input objects handled
+ * @param the type of the output object as handled by {@link SenderTask}
+ */
+abstract class AbstractReportableEntityHandler implements ReportableEntityHandler {
+ protected static final String MULTICASTING_TENANT_TAG_KEY = "multicastingTenantName";
+ private static final Logger logger =
+ LoggerFactory.getLogger(AbstractReportableEntityHandler.class.getCanonicalName());
+ final QueueInfo queue;
+ final String handler;
+
+ final Function serializer;
+ final String rateUnit;
+ final BurstRateTrackingCounter receivedStats;
+ private final Logger blockedItemsLogger;
+ private final Counter receivedCounter;
+ private final Counter receivedBytesCounter;
+ private final Counter blockedCounter;
+ private final Counter rejectedCounter;
+ private final Timer timer;
+
+ @SuppressWarnings("UnstableApiUsage")
+ private final RateLimiter noDataStatsRateLimiter = RateLimiter.create(1.0d / 60);
+
+ /**
+ * @param queue metrics pipeline key (entity type + port number)
+ * @param serializer helper function to convert objects to string. Used when writing blocked
+ * points to logs.
+ * @param blockedItemsLogger a {@link Logger} instance for blocked items
+ */
+ AbstractReportableEntityHandler(
+ String handler,
+ @NotNull QueueInfo queue,
+ final Function serializer,
+ @Nullable final Logger blockedItemsLogger) {
+ this.handler = handler;
+ this.queue = queue;
+ //noinspection UnstableApiUsage
+ this.serializer = serializer;
+ this.rateUnit = queue.getEntityType().getRateUnit();
+ this.blockedItemsLogger = blockedItemsLogger;
+
+ MetricsRegistry registry = Metrics.defaultRegistry();
+ String metricPrefix = queue.getName() + "." + this.handler;
+ MetricName receivedMetricName = new MetricName(metricPrefix, "", "received");
+ this.receivedCounter = registry.newCounter(receivedMetricName);
+ this.receivedBytesCounter =
+ registry.newCounter(new MetricName(metricPrefix, "", "received.bytes"));
+ this.blockedCounter = registry.newCounter(new MetricName(metricPrefix, "", "blocked"));
+ this.rejectedCounter = registry.newCounter(new MetricName(metricPrefix, "", "rejected"));
+ this.receivedStats = new BurstRateTrackingCounter(receivedMetricName, registry, 1000);
+
+ registry.newGauge(
+ new MetricName(metricPrefix + ".received", "", "max-burst-rate"),
+ new Gauge() {
+ @Override
+ public Double value() {
+ return receivedStats.getMaxBurstRateAndClear();
+ }
+ });
+
+ timer = new Timer("stats-output-" + queue.getName() + "." + this.handler);
+ timer.scheduleAtFixedRate(
+ new TimerTask() {
+ @Override
+ public void run() {
+ printStats();
+ }
+ },
+ 10_000,
+ 10_000);
+ timer.scheduleAtFixedRate(
+ new TimerTask() {
+ @Override
+ public void run() {
+ printTotal();
+ }
+ },
+ 60_000,
+ 60_000);
+ }
+
+ @Override
+ public void reject(@Nullable T item, @Nullable String message) {
+ rejectedCounter.inc();
+ blockedCounter.inc();
+ if (item != null && blockedItemsLogger != null) {
+ blockedItemsLogger.warn(serializer.apply(item));
+ }
+ if (message != null) {
+ logger.info("[" + this.handler + "] blocked input: [" + message + "]");
+ }
+ }
+
+ @Override
+ public void reject(@Nonnull String line, @Nullable String message) {
+ rejectedCounter.inc();
+ blockedCounter.inc();
+ if (blockedItemsLogger != null) blockedItemsLogger.warn(line);
+ //noinspection UnstableApiUsage
+ if (message != null) {
+ logger.info("[" + this.handler + "] blocked input: [" + message + "]");
+ }
+ }
+
+ @Override
+ public void block(T item) {
+ blockedCounter.inc();
+ if (blockedItemsLogger != null) {
+ blockedItemsLogger.info(serializer.apply(item));
+ }
+ }
+
+ @Override
+ public void block(@Nullable T item, @Nullable String message) {
+ blockedCounter.inc();
+ if (item != null && blockedItemsLogger != null) {
+ blockedItemsLogger.info(serializer.apply(item));
+ }
+ if (message != null && blockedItemsLogger != null) {
+ blockedItemsLogger.info(message);
+ }
+ }
+
+ @Override
+ public void report(T item) {
+ try {
+ reportInternal(item);
+ } catch (IllegalArgumentException e) {
+ this.reject(item, e.getMessage() + " (" + serializer.apply(item) + ")");
+ } catch (Exception ex) {
+ logger.error(
+ "WF-500 Uncaught exception when handling input (" + serializer.apply(item) + ")", ex);
+ }
+ }
+
+ @Override
+ public void shutdown() {
+ if (this.timer != null) timer.cancel();
+ }
+
+ abstract void reportInternal(T item);
+
+ protected void incrementReceivedCounters(int b) {
+ receivedCounter.inc();
+ receivedBytesCounter.inc(b);
+ }
+
+ protected void printStats() {
+ PushAgent.stats.info(
+ "["
+ + this.handler
+ + "] "
+ + queue.getEntityType().toCapitalizedString()
+ + " received rate: "
+ + receivedStats.getOneMinutePrintableRate()
+ + " "
+ + rateUnit
+ + " (1 min), "
+ + receivedStats.getFiveMinutePrintableRate()
+ + " "
+ + rateUnit
+ + " (5 min), "
+ + receivedStats.getCurrentRate()
+ + " "
+ + rateUnit
+ + " (current).");
+ }
+
+ protected void printTotal() {
+ PushAgent.stats.info(
+ "["
+ + this.handler
+ + "] "
+ + queue.getEntityType().toCapitalizedString()
+ + " received since start: "
+ + this.receivedCounter.count()
+ + "; rejected: "
+ + this.rejectedCounter.count()
+ + "; blocked: "
+ + this.blockedCounter.count());
+ }
+}
diff --git a/proxy/src/main/java/com/wavefront/agent/handlers/DelegatingReportableEntityHandlerFactoryImpl.java b/proxy/src/main/java/com/wavefront/agent/core/handlers/DelegatingReportableEntityHandlerFactoryImpl.java
similarity index 63%
rename from proxy/src/main/java/com/wavefront/agent/handlers/DelegatingReportableEntityHandlerFactoryImpl.java
rename to proxy/src/main/java/com/wavefront/agent/core/handlers/DelegatingReportableEntityHandlerFactoryImpl.java
index 0cbe1b7c0..625d82f2e 100644
--- a/proxy/src/main/java/com/wavefront/agent/handlers/DelegatingReportableEntityHandlerFactoryImpl.java
+++ b/proxy/src/main/java/com/wavefront/agent/core/handlers/DelegatingReportableEntityHandlerFactoryImpl.java
@@ -1,12 +1,10 @@
-package com.wavefront.agent.handlers;
+package com.wavefront.agent.core.handlers;
-import javax.annotation.Nonnull;
+import com.wavefront.agent.core.queues.QueueInfo;
/**
* Wrapper for {@link ReportableEntityHandlerFactory} to allow partial overrides for the {@code
* getHandler} method.
- *
- * @author vasily@wavefront.com
*/
public class DelegatingReportableEntityHandlerFactoryImpl
implements ReportableEntityHandlerFactory {
@@ -17,12 +15,12 @@ public DelegatingReportableEntityHandlerFactoryImpl(ReportableEntityHandlerFacto
}
@Override
- public ReportableEntityHandler getHandler(HandlerKey handlerKey) {
- return delegate.getHandler(handlerKey);
+ public ReportableEntityHandler getHandler(String handler, QueueInfo queue) {
+ return delegate.getHandler(handler, queue);
}
@Override
- public void shutdown(@Nonnull String handle) {
+ public void shutdown(int handle) {
delegate.shutdown(handle);
}
}
diff --git a/proxy/src/main/java/com/wavefront/agent/handlers/DeltaCounterAccumulationHandlerImpl.java b/proxy/src/main/java/com/wavefront/agent/core/handlers/DeltaCounterAccumulationHandlerImpl.java
similarity index 63%
rename from proxy/src/main/java/com/wavefront/agent/handlers/DeltaCounterAccumulationHandlerImpl.java
rename to proxy/src/main/java/com/wavefront/agent/core/handlers/DeltaCounterAccumulationHandlerImpl.java
index 9be041ff4..bab443595 100644
--- a/proxy/src/main/java/com/wavefront/agent/handlers/DeltaCounterAccumulationHandlerImpl.java
+++ b/proxy/src/main/java/com/wavefront/agent/core/handlers/DeltaCounterAccumulationHandlerImpl.java
@@ -1,5 +1,6 @@
-package com.wavefront.agent.handlers;
+package com.wavefront.agent.core.handlers;
+import static com.wavefront.agent.PushAgent.isMulticastingActive;
import static com.wavefront.data.Validation.validatePoint;
import static com.wavefront.sdk.common.Utils.metricToLineData;
@@ -8,7 +9,8 @@
import com.github.benmanes.caffeine.cache.RemovalListener;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.util.concurrent.AtomicDouble;
-import com.wavefront.agent.api.APIContainer;
+import com.wavefront.agent.core.buffers.BuffersManager;
+import com.wavefront.agent.core.queues.QueueInfo;
import com.wavefront.api.agent.ValidationConfiguration;
import com.wavefront.common.Clock;
import com.wavefront.common.HostMetricTagsPair;
@@ -16,41 +18,32 @@
import com.wavefront.data.DeltaCounterValueException;
import com.wavefront.ingester.ReportPointSerializer;
import com.yammer.metrics.Metrics;
-import com.yammer.metrics.core.BurstRateTrackingCounter;
-import com.yammer.metrics.core.Counter;
-import com.yammer.metrics.core.DeltaCounter;
-import com.yammer.metrics.core.Gauge;
-import com.yammer.metrics.core.Histogram;
-import com.yammer.metrics.core.MetricName;
-import java.util.Collection;
-import java.util.Map;
+import com.yammer.metrics.core.*;
import java.util.Objects;
import java.util.Timer;
-import java.util.TimerTask;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
-import java.util.function.BiConsumer;
import java.util.function.Supplier;
-import java.util.logging.Level;
-import java.util.logging.Logger;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import wavefront.report.ReportPoint;
/**
* Handler that processes incoming DeltaCounter objects, aggregates them and hands it over to one of
* the {@link SenderTask} threads according to deltaCountersAggregationIntervalSeconds or before
* cache expires.
- *
- * @author djia@vmware.com
*/
public class DeltaCounterAccumulationHandlerImpl
extends AbstractReportableEntityHandler {
- private final ValidationConfiguration validationConfig;
- private final Logger validItemsLogger;
+ private static final Logger log =
+ LoggerFactory.getLogger(DeltaCounterAccumulationHandlerImpl.class.getCanonicalName());
+
final Histogram receivedPointLag;
+ private final ValidationConfiguration validationConfig;
private final BurstRateTrackingCounter reportedStats;
private final Supplier discardedCounterSupplier;
private final Cache aggregatedDeltas;
@@ -59,36 +52,18 @@ public class DeltaCounterAccumulationHandlerImpl
/**
* @param handlerKey metrics pipeline key.
- * @param blockedItemsPerBatch controls sample rate of how many blocked points are written into
- * the main log file.
- * @param senderTaskMap map of tenant name and tasks actually handling data transfer to the
- * Wavefront endpoint corresponding to the tenant name
* @param validationConfig validation configuration.
* @param aggregationIntervalSeconds aggregation interval for delta counters.
- * @param receivedRateSink where to report received rate.
* @param blockedItemLogger logger for blocked items.
- * @param validItemsLogger logger for valid items.
*/
public DeltaCounterAccumulationHandlerImpl(
- final HandlerKey handlerKey,
- final int blockedItemsPerBatch,
- @Nullable final Map>> senderTaskMap,
+ final String handler,
+ final QueueInfo handlerKey,
@Nonnull final ValidationConfiguration validationConfig,
long aggregationIntervalSeconds,
- @Nullable final BiConsumer receivedRateSink,
- @Nullable final Logger blockedItemLogger,
- @Nullable final Logger validItemsLogger) {
- super(
- handlerKey,
- blockedItemsPerBatch,
- new ReportPointSerializer(),
- senderTaskMap,
- true,
- null,
- blockedItemLogger);
- super.initializeCounters();
+ @Nullable final Logger blockedItemLogger) {
+ super(handler, handlerKey, new ReportPointSerializer(), blockedItemLogger);
this.validationConfig = validationConfig;
- this.validItemsLogger = validItemsLogger;
this.aggregatedDeltas =
Caffeine.newBuilder()
@@ -100,7 +75,7 @@ public DeltaCounterAccumulationHandlerImpl(
this.receivedPointLag =
Metrics.newHistogram(
- new MetricName("points." + handlerKey.getHandle() + ".received", "", "lag"), false);
+ new MetricName("points." + handlerKey.getName() + ".received", "", "lag"), false);
reporter.scheduleWithFixedDelay(
this::flushDeltaCounters,
@@ -122,22 +97,19 @@ public Long value() {
return aggregatedDeltas.estimatedSize();
}
});
- if (receivedRateSink == null) {
- this.receivedRateTimer = null;
- } else {
- this.receivedRateTimer = new Timer("delta-counter-timer-" + handlerKey.getHandle());
- this.receivedRateTimer.scheduleAtFixedRate(
- new TimerTask() {
- @Override
- public void run() {
- for (String tenantName : senderTaskMap.keySet()) {
- receivedRateSink.accept(tenantName, receivedStats.getCurrentRate());
- }
- }
- },
- 1000,
- 1000);
- }
+ this.receivedRateTimer = new Timer("delta-counter-timer-" + handlerKey.getName());
+ // TODO: review
+ // this.receivedRateTimer.scheduleAtFixedRate(
+ // new TimerTask() {
+ // @Override
+ // public void run() {
+ // for (String tenantName : senderTaskMap.keySet()) {
+ // receivedRateSink.accept(tenantName, receivedStats.getCurrentRate());
+ // }
+ // }
+ // },
+ // 1000,
+ // 1000);
}
@VisibleForTesting
@@ -161,26 +133,22 @@ private void reportAggregatedDeltaValue(
hostMetricTagsPair.getHost(),
hostMetricTagsPair.getTags(),
"wavefront-proxy");
- getTask(APIContainer.CENTRAL_TENANT_NAME).add(strPoint);
- // check if delta tag contains the tag key indicating this delta point should be multicasted
+
+ incrementReceivedCounters(strPoint.length());
+ BuffersManager.sendMsg(queue, strPoint);
+
if (isMulticastingActive
&& hostMetricTagsPair.getTags() != null
&& hostMetricTagsPair.getTags().containsKey(MULTICASTING_TENANT_TAG_KEY)) {
String[] multicastingTenantNames =
hostMetricTagsPair.getTags().get(MULTICASTING_TENANT_TAG_KEY).trim().split(",");
hostMetricTagsPair.getTags().remove(MULTICASTING_TENANT_TAG_KEY);
- for (String multicastingTenantName : multicastingTenantNames) {
- // if the tenant name indicated in delta point tag is not configured, just ignore
- if (getTask(multicastingTenantName) != null) {
- getTask(multicastingTenantName)
- .add(
- metricToLineData(
- hostMetricTagsPair.metric,
- reportedValue,
- Clock.now(),
- hostMetricTagsPair.getHost(),
- hostMetricTagsPair.getTags(),
- "wavefront-proxy"));
+ for (String tenant : multicastingTenantNames) {
+ QueueInfo tenantQueue = queue.getTenantQueue(tenant);
+ if (tenantQueue != null) {
+ BuffersManager.sendMsg(tenantQueue, strPoint);
+ } else {
+ log.info("Tenant '" + tenant + "' invalid");
}
}
}
@@ -195,16 +163,12 @@ void reportInternal(ReportPoint point) {
discardedCounterSupplier.get().inc();
return;
}
- getReceivedCounter().inc();
double deltaValue = (double) point.getValue();
receivedPointLag.update(Clock.now() - point.getTimestamp());
HostMetricTagsPair hostMetricTagsPair =
new HostMetricTagsPair(point.getHost(), point.getMetric(), point.getAnnotations());
Objects.requireNonNull(aggregatedDeltas.get(hostMetricTagsPair, key -> new AtomicDouble(0)))
.getAndAdd(deltaValue);
- if (validItemsLogger != null && validItemsLogger.isLoggable(Level.FINEST)) {
- validItemsLogger.info(serializer.apply(point));
- }
} else {
reject(point, "Port is not configured to accept non-delta counter data!");
}
diff --git a/proxy/src/main/java/com/wavefront/agent/core/handlers/EventHandlerImpl.java b/proxy/src/main/java/com/wavefront/agent/core/handlers/EventHandlerImpl.java
new file mode 100644
index 000000000..d709aecbc
--- /dev/null
+++ b/proxy/src/main/java/com/wavefront/agent/core/handlers/EventHandlerImpl.java
@@ -0,0 +1,72 @@
+package com.wavefront.agent.core.handlers;
+
+import static com.wavefront.agent.PushAgent.isMulticastingActive;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.wavefront.agent.core.buffers.BuffersManager;
+import com.wavefront.agent.core.queues.QueueInfo;
+import com.wavefront.data.Validation;
+import com.wavefront.dto.Event;
+import java.util.function.Function;
+import javax.annotation.Nullable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import wavefront.report.ReportEvent;
+
+/** This class will validate parsed events and distribute them among SenderTask threads. */
+public class EventHandlerImpl extends AbstractReportableEntityHandler {
+ private static final Logger logger =
+ LoggerFactory.getLogger(AbstractReportableEntityHandler.class.getCanonicalName());
+ private static final Function EVENT_SERIALIZER =
+ value -> new Event(value).toString();
+
+ /**
+ * @param handlerKey pipeline key.
+ * @param blockedEventsLogger logger for blocked events.
+ */
+ public EventHandlerImpl(
+ final String handler,
+ final QueueInfo handlerKey,
+ @Nullable final Logger blockedEventsLogger) {
+ super(handler, handlerKey, EVENT_SERIALIZER, blockedEventsLogger);
+ }
+
+ @VisibleForTesting
+ static boolean annotationKeysAreValid(ReportEvent event) {
+ if (event.getAnnotations() != null) {
+ for (String key : event.getAnnotations().keySet()) {
+ if (!Validation.charactersAreValid(key)) {
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
+ @Override
+ protected void reportInternal(ReportEvent event) {
+ if (!annotationKeysAreValid(event)) {
+ throw new IllegalArgumentException("WF-401: Event annotation key has illegal characters.");
+ }
+
+ String strEvent = event.toString();
+ incrementReceivedCounters(strEvent.length());
+ BuffersManager.sendMsg(queue, strEvent);
+
+ if (isMulticastingActive
+ && event.getAnnotations() != null
+ && event.getAnnotations().containsKey(MULTICASTING_TENANT_TAG_KEY)) {
+ String[] multicastingTenantNames =
+ event.getAnnotations().get(MULTICASTING_TENANT_TAG_KEY).trim().split(",");
+ event.getAnnotations().remove(MULTICASTING_TENANT_TAG_KEY);
+ for (String tenant : multicastingTenantNames) {
+ QueueInfo tenantQueue = queue.getTenantQueue(tenant);
+ if (tenantQueue != null) {
+ BuffersManager.sendMsg(tenantQueue, strEvent);
+ } else {
+ logger.info("Tenant '" + tenant + "' invalid");
+ }
+ }
+ }
+ }
+}
diff --git a/proxy/src/main/java/com/wavefront/agent/handlers/HistogramAccumulationHandlerImpl.java b/proxy/src/main/java/com/wavefront/agent/core/handlers/HistogramAccumulationHandlerImpl.java
similarity index 80%
rename from proxy/src/main/java/com/wavefront/agent/handlers/HistogramAccumulationHandlerImpl.java
rename to proxy/src/main/java/com/wavefront/agent/core/handlers/HistogramAccumulationHandlerImpl.java
index b3482f8c8..7d5800131 100644
--- a/proxy/src/main/java/com/wavefront/agent/handlers/HistogramAccumulationHandlerImpl.java
+++ b/proxy/src/main/java/com/wavefront/agent/core/handlers/HistogramAccumulationHandlerImpl.java
@@ -1,9 +1,10 @@
-package com.wavefront.agent.handlers;
+package com.wavefront.agent.core.handlers;
import static com.wavefront.agent.histogram.HistogramUtils.granularityToString;
import static com.wavefront.common.Utils.lazySupplier;
import static com.wavefront.data.Validation.validatePoint;
+import com.wavefront.agent.core.queues.QueueInfo;
import com.wavefront.agent.histogram.Granularity;
import com.wavefront.agent.histogram.HistogramKey;
import com.wavefront.agent.histogram.HistogramUtils;
@@ -12,20 +13,16 @@
import com.yammer.metrics.Metrics;
import com.yammer.metrics.core.Counter;
import com.yammer.metrics.core.MetricName;
-import java.util.function.BiConsumer;
import java.util.function.Supplier;
-import java.util.logging.Level;
-import java.util.logging.Logger;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
+import org.slf4j.Logger;
import wavefront.report.Histogram;
import wavefront.report.ReportPoint;
/**
* A ReportPointHandler that ships parsed points to a histogram accumulator instead of forwarding
* them to SenderTask.
- *
- * @author vasily@wavefront.com
*/
public class HistogramAccumulationHandlerImpl extends ReportPointHandlerImpl {
private final Accumulator digests;
@@ -43,34 +40,17 @@ public class HistogramAccumulationHandlerImpl extends ReportPointHandlerImpl {
*
* @param handlerKey pipeline handler key
* @param digests accumulator for storing digests
- * @param blockedItemsPerBatch controls sample rate of how many blocked points are written into
- * the main log file.
* @param granularity granularity level
* @param validationConfig Supplier for the ValidationConfiguration
- * @param isHistogramInput Whether expected input data for this handler is histograms.
- * @param receivedRateSink Where to report received rate.
*/
public HistogramAccumulationHandlerImpl(
- final HandlerKey handlerKey,
+ final String handler,
+ final QueueInfo handlerKey,
final Accumulator digests,
- final int blockedItemsPerBatch,
@Nullable Granularity granularity,
@Nonnull final ValidationConfiguration validationConfig,
- boolean isHistogramInput,
- @Nullable final BiConsumer receivedRateSink,
- @Nullable final Logger blockedItemLogger,
- @Nullable final Logger validItemsLogger) {
- super(
- handlerKey,
- blockedItemsPerBatch,
- null,
- validationConfig,
- !isHistogramInput,
- receivedRateSink,
- blockedItemLogger,
- validItemsLogger,
- null);
- super.initializeCounters();
+ @Nullable final Logger blockedItemLogger) {
+ super(handler, handlerKey, validationConfig, blockedItemLogger, null);
this.digests = digests;
this.granularity = granularity;
String metricNamespace = "histogram.accumulator." + granularityToString(granularity);
@@ -136,9 +116,5 @@ protected void reportInternal(ReportPoint point) {
// atomic update
digests.put(histogramKey, value);
}
-
- if (validItemsLogger != null && validItemsLogger.isLoggable(Level.FINEST)) {
- validItemsLogger.info(serializer.apply(point));
- }
}
}
diff --git a/proxy/src/main/java/com/wavefront/agent/handlers/InternalProxyWavefrontClient.java b/proxy/src/main/java/com/wavefront/agent/core/handlers/InternalProxyWavefrontClient.java
similarity index 86%
rename from proxy/src/main/java/com/wavefront/agent/handlers/InternalProxyWavefrontClient.java
rename to proxy/src/main/java/com/wavefront/agent/core/handlers/InternalProxyWavefrontClient.java
index d60127515..0adb66700 100644
--- a/proxy/src/main/java/com/wavefront/agent/handlers/InternalProxyWavefrontClient.java
+++ b/proxy/src/main/java/com/wavefront/agent/core/handlers/InternalProxyWavefrontClient.java
@@ -1,5 +1,6 @@
-package com.wavefront.agent.handlers;
+package com.wavefront.agent.core.handlers;
+import static com.wavefront.agent.ProxyContext.queuesManager;
import static com.wavefront.common.Utils.lazySupplier;
import com.wavefront.common.Clock;
@@ -21,19 +22,22 @@
import wavefront.report.ReportPoint;
public class InternalProxyWavefrontClient implements WavefrontSender {
- private final Supplier> pointHandlerSupplier;
- private final Supplier> histogramHandlerSupplier;
+ private final Supplier> pointHandlerSupplier;
+ private final Supplier> histogramHandlerSupplier;
private final String clientId;
- public InternalProxyWavefrontClient(
- ReportableEntityHandlerFactory handlerFactory, String handle) {
+ public InternalProxyWavefrontClient(ReportableEntityHandlerFactory handlerFactory, int port) {
this.pointHandlerSupplier =
lazySupplier(
- () -> handlerFactory.getHandler(HandlerKey.of(ReportableEntityType.POINT, handle)));
+ () ->
+ handlerFactory.getHandler(
+ port, queuesManager.initQueue(ReportableEntityType.POINT)));
this.histogramHandlerSupplier =
lazySupplier(
- () -> handlerFactory.getHandler(HandlerKey.of(ReportableEntityType.HISTOGRAM, handle)));
- this.clientId = handle;
+ () ->
+ handlerFactory.getHandler(
+ port, queuesManager.initQueue(ReportableEntityType.HISTOGRAM)));
+ this.clientId = String.valueOf(port);
}
@Override
diff --git a/proxy/src/main/java/com/wavefront/agent/handlers/LineDelimitedUtils.java b/proxy/src/main/java/com/wavefront/agent/core/handlers/LineDelimitedUtils.java
similarity index 82%
rename from proxy/src/main/java/com/wavefront/agent/handlers/LineDelimitedUtils.java
rename to proxy/src/main/java/com/wavefront/agent/core/handlers/LineDelimitedUtils.java
index d506aa8c5..ca6e9ceb0 100644
--- a/proxy/src/main/java/com/wavefront/agent/handlers/LineDelimitedUtils.java
+++ b/proxy/src/main/java/com/wavefront/agent/core/handlers/LineDelimitedUtils.java
@@ -1,13 +1,9 @@
-package com.wavefront.agent.handlers;
+package com.wavefront.agent.core.handlers;
import java.util.Collection;
import org.apache.commons.lang.StringUtils;
-/**
- * A collection of helper methods around plaintext newline-delimited payloads.
- *
- * @author vasily@wavefront.com
- */
+/** A collection of helper methods around plaintext newline-delimited payloads. */
public abstract class LineDelimitedUtils {
static final String PUSH_DATA_DELIMITER = "\n";
diff --git a/proxy/src/main/java/com/wavefront/agent/core/handlers/ReportLogHandlerImpl.java b/proxy/src/main/java/com/wavefront/agent/core/handlers/ReportLogHandlerImpl.java
new file mode 100644
index 000000000..f69a7912b
--- /dev/null
+++ b/proxy/src/main/java/com/wavefront/agent/core/handlers/ReportLogHandlerImpl.java
@@ -0,0 +1,70 @@
+package com.wavefront.agent.core.handlers;
+
+import static com.wavefront.data.Validation.validateLog;
+
+import com.wavefront.agent.core.buffers.BuffersManager;
+import com.wavefront.agent.core.queues.QueueInfo;
+import com.wavefront.api.agent.ValidationConfiguration;
+import com.wavefront.common.Clock;
+import com.wavefront.dto.Log;
+import com.yammer.metrics.Metrics;
+import com.yammer.metrics.core.MetricName;
+import com.yammer.metrics.core.MetricsRegistry;
+import java.util.function.Function;
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import org.slf4j.Logger;
+import wavefront.report.Annotation;
+import wavefront.report.ReportLog;
+
+/** This class will validate parsed logs and distribute them among SenderTask threads. */
+public class ReportLogHandlerImpl extends AbstractReportableEntityHandler {
+ private static final Function LOG_SERIALIZER =
+ value -> new Log(value).toString();
+ final ValidationConfiguration validationConfig;
+ final com.yammer.metrics.core.Histogram receivedLogLag;
+ final com.yammer.metrics.core.Histogram receivedTagCount;
+ final com.yammer.metrics.core.Histogram receivedTagLength;
+ final com.yammer.metrics.core.Histogram receivedMessageLength;
+
+ /**
+ * @param handlerKey pipeline key.
+ * @param validationConfig validation configuration.
+ * @param blockedLogsLogger logger for blocked logs.
+ */
+ public ReportLogHandlerImpl(
+ final String handler,
+ final QueueInfo handlerKey,
+ @Nonnull final ValidationConfiguration validationConfig,
+ @Nullable final Logger blockedLogsLogger) {
+ super(handler, handlerKey, LOG_SERIALIZER, blockedLogsLogger);
+ this.validationConfig = validationConfig;
+ MetricsRegistry registry = Metrics.defaultRegistry();
+ this.receivedLogLag =
+ registry.newHistogram(new MetricName(handlerKey.getName() + ".received", "", "lag"), false);
+ this.receivedTagCount =
+ registry.newHistogram(
+ new MetricName(handlerKey.getName() + ".received", "", "tagCount"), false);
+ this.receivedTagLength =
+ registry.newHistogram(
+ new MetricName(handlerKey.getName() + ".received", "", "tagLength"), false);
+ this.receivedMessageLength =
+ registry.newHistogram(
+ new MetricName(handlerKey.getName() + ".received", "", "messageLength"), false);
+ }
+
+ @Override
+ protected void reportInternal(ReportLog log) {
+ receivedTagCount.update(log.getAnnotations().size());
+ receivedMessageLength.update(log.getMessage().length());
+ for (Annotation a : log.getAnnotations()) {
+ receivedTagLength.update(a.getValue().length());
+ }
+ validateLog(log, validationConfig);
+ receivedLogLag.update(Clock.now() - log.getTimestamp());
+ Log logObj = new Log(log);
+ String strLog = logObj.toString();
+ incrementReceivedCounters(strLog.length());
+ BuffersManager.sendMsg(queue, strLog);
+ }
+}
diff --git a/proxy/src/main/java/com/wavefront/agent/handlers/ReportPointHandlerImpl.java b/proxy/src/main/java/com/wavefront/agent/core/handlers/ReportPointHandlerImpl.java
similarity index 57%
rename from proxy/src/main/java/com/wavefront/agent/handlers/ReportPointHandlerImpl.java
rename to proxy/src/main/java/com/wavefront/agent/core/handlers/ReportPointHandlerImpl.java
index e4dc4536c..a52cde766 100644
--- a/proxy/src/main/java/com/wavefront/agent/handlers/ReportPointHandlerImpl.java
+++ b/proxy/src/main/java/com/wavefront/agent/core/handlers/ReportPointHandlerImpl.java
@@ -1,8 +1,10 @@
-package com.wavefront.agent.handlers;
+package com.wavefront.agent.core.handlers;
+import static com.wavefront.agent.PushAgent.isMulticastingActive;
import static com.wavefront.data.Validation.validatePoint;
-import com.wavefront.agent.api.APIContainer;
+import com.wavefront.agent.core.buffers.BuffersManager;
+import com.wavefront.agent.core.queues.QueueInfo;
import com.wavefront.api.agent.ValidationConfiguration;
import com.wavefront.common.Clock;
import com.wavefront.common.Utils;
@@ -12,26 +14,23 @@
import com.yammer.metrics.core.Counter;
import com.yammer.metrics.core.MetricName;
import com.yammer.metrics.core.MetricsRegistry;
-import java.util.Collection;
-import java.util.Map;
-import java.util.function.BiConsumer;
import java.util.function.Function;
import java.util.function.Supplier;
-import java.util.logging.Logger;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import wavefront.report.Histogram;
import wavefront.report.ReportPoint;
/**
* Handler that processes incoming ReportPoint objects, validates them and hands them over to one of
- * the {@link SenderTask} threads.
- *
- * @author vasily@wavefront.com
+ * the SenderTask threads.
*/
class ReportPointHandlerImpl extends AbstractReportableEntityHandler {
+ private static final Logger logger =
+ LoggerFactory.getLogger(ReportPointHandlerImpl.class.getCanonicalName());
- final Logger validItemsLogger;
final ValidationConfiguration validationConfig;
final Function recompressor;
final com.yammer.metrics.core.Histogram receivedPointLag;
@@ -42,46 +41,25 @@ class ReportPointHandlerImpl extends AbstractReportableEntityHandler>> senderTaskMap,
+ final String handler,
+ final QueueInfo handlerKey,
@Nonnull final ValidationConfiguration validationConfig,
- final boolean setupMetrics,
- @Nullable final BiConsumer receivedRateSink,
@Nullable final Logger blockedItemLogger,
- @Nullable final Logger validItemsLogger,
@Nullable final Function recompressor) {
- super(
- handlerKey,
- blockedItemsPerBatch,
- new ReportPointSerializer(),
- senderTaskMap,
- setupMetrics,
- receivedRateSink,
- blockedItemLogger);
- super.initializeCounters();
+ super(handler, handlerKey, new ReportPointSerializer(), blockedItemLogger);
this.validationConfig = validationConfig;
- this.validItemsLogger = validItemsLogger;
this.recompressor = recompressor;
- MetricsRegistry registry = setupMetrics ? Metrics.defaultRegistry() : LOCAL_REGISTRY;
+ MetricsRegistry registry = Metrics.defaultRegistry();
this.receivedPointLag =
- registry.newHistogram(
- new MetricName(handlerKey.toString() + ".received", "", "lag"), false);
+ registry.newHistogram(new MetricName(handlerKey.getName() + ".received", "", "lag"), false);
this.receivedTagCount =
registry.newHistogram(
- new MetricName(handlerKey.toString() + ".received", "", "tagCount"), false);
+ new MetricName(handlerKey.getName() + ".received", "", "tagCount"), false);
this.discardedCounterSupplier =
Utils.lazySupplier(
() -> Metrics.newCounter(new MetricName(handlerKey.toString(), "", "discarded")));
@@ -102,22 +80,24 @@ void reportInternal(ReportPoint point) {
point.setValue(recompressor.apply(histogram));
}
final String strPoint = serializer.apply(point);
- getTask(APIContainer.CENTRAL_TENANT_NAME).add(strPoint);
- getReceivedCounter().inc();
- // check if data points contains the tag key indicating this point should be multicasted
+
+ incrementReceivedCounters(strPoint.length());
+ BuffersManager.sendMsg(queue, strPoint);
+
if (isMulticastingActive
&& point.getAnnotations() != null
&& point.getAnnotations().containsKey(MULTICASTING_TENANT_TAG_KEY)) {
String[] multicastingTenantNames =
point.getAnnotations().get(MULTICASTING_TENANT_TAG_KEY).trim().split(",");
point.getAnnotations().remove(MULTICASTING_TENANT_TAG_KEY);
- for (String multicastingTenantName : multicastingTenantNames) {
- // if the tenant name indicated in point tag is not configured, just ignore
- if (getTask(multicastingTenantName) != null) {
- getTask(multicastingTenantName).add(serializer.apply(point));
+ for (String tenant : multicastingTenantNames) {
+ QueueInfo tenantQueue = queue.getTenantQueue(tenant);
+ if (tenantQueue != null) {
+ BuffersManager.sendMsg(tenantQueue, strPoint);
+ } else {
+ logger.info("Tenant '" + tenant + "' invalid");
}
}
}
- if (validItemsLogger != null) validItemsLogger.info(strPoint);
}
}
diff --git a/proxy/src/main/java/com/wavefront/agent/core/handlers/ReportSourceTagHandlerImpl.java b/proxy/src/main/java/com/wavefront/agent/core/handlers/ReportSourceTagHandlerImpl.java
new file mode 100644
index 000000000..545d50873
--- /dev/null
+++ b/proxy/src/main/java/com/wavefront/agent/core/handlers/ReportSourceTagHandlerImpl.java
@@ -0,0 +1,51 @@
+package com.wavefront.agent.core.handlers;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.ObjectWriter;
+import com.google.common.annotations.VisibleForTesting;
+import com.wavefront.agent.core.buffers.BuffersManager;
+import com.wavefront.agent.core.queues.QueueInfo;
+import com.wavefront.data.Validation;
+import com.wavefront.dto.SourceTag;
+import java.util.function.Function;
+import org.slf4j.Logger;
+import wavefront.report.ReportSourceTag;
+import wavefront.report.SourceOperationType;
+
+/** This class will validate parsed source tags and distribute them among SenderTask threads. */
+class ReportSourceTagHandlerImpl
+ extends AbstractReportableEntityHandler {
+ private static final Function SOURCE_TAG_SERIALIZER =
+ value -> new SourceTag(value).toString();
+
+ public ReportSourceTagHandlerImpl(
+ String handler, QueueInfo handlerKey, final Logger blockedItemLogger) {
+ super(handler, handlerKey, SOURCE_TAG_SERIALIZER, blockedItemLogger);
+ }
+
+ @VisibleForTesting
+ static boolean annotationsAreValid(ReportSourceTag sourceTag) {
+ if (sourceTag.getOperation() == SourceOperationType.SOURCE_DESCRIPTION) return true;
+ return sourceTag.getAnnotations().stream().allMatch(Validation::charactersAreValid);
+ }
+
+ @Override
+ protected void reportInternal(ReportSourceTag sourceTag) {
+ if (!annotationsAreValid(sourceTag)) {
+ throw new IllegalArgumentException(
+ "WF-401: SourceTag annotation key has illegal characters.");
+ }
+
+ try {
+ ObjectWriter ow = new ObjectMapper().writer();
+ String json = ow.writeValueAsString(new SourceTag(sourceTag));
+ incrementReceivedCounters(json.length());
+ BuffersManager.sendMsg(queue, json);
+ } catch (JsonProcessingException e) {
+ throw new RuntimeException(e);
+ }
+
+    // tagK=tagV based multicasting is not supported
+ }
+}
diff --git a/proxy/src/main/java/com/wavefront/agent/handlers/ReportableEntityHandler.java b/proxy/src/main/java/com/wavefront/agent/core/handlers/ReportableEntityHandler.java
similarity index 88%
rename from proxy/src/main/java/com/wavefront/agent/handlers/ReportableEntityHandler.java
rename to proxy/src/main/java/com/wavefront/agent/core/handlers/ReportableEntityHandler.java
index e02fb4782..38d73f9a9 100644
--- a/proxy/src/main/java/com/wavefront/agent/handlers/ReportableEntityHandler.java
+++ b/proxy/src/main/java/com/wavefront/agent/core/handlers/ReportableEntityHandler.java
@@ -1,6 +1,5 @@
-package com.wavefront.agent.handlers;
+package com.wavefront.agent.core.handlers;
-import com.wavefront.agent.formatter.DataFormat;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
@@ -8,10 +7,9 @@
* Handler that processes incoming objects of a single entity type, validates them and hands them
* over to one of the {@link SenderTask} threads.
*
- * @author vasily@wavefront.com
* @param the type of input objects handled.
*/
-public interface ReportableEntityHandler {
+public interface ReportableEntityHandler {
/**
* Validate and accept the input object.
@@ -53,7 +51,8 @@ public interface ReportableEntityHandler {
*/
void reject(@Nonnull String t, @Nullable String message);
- void setLogFormat(DataFormat format);
+ // TODO: 10/5/23 review
+ // void setLogFormat(DataFormat format);
/** Gracefully shutdown the pipeline. */
void shutdown();
diff --git a/proxy/src/main/java/com/wavefront/agent/core/handlers/ReportableEntityHandlerFactory.java b/proxy/src/main/java/com/wavefront/agent/core/handlers/ReportableEntityHandlerFactory.java
new file mode 100644
index 000000000..6e0689fe2
--- /dev/null
+++ b/proxy/src/main/java/com/wavefront/agent/core/handlers/ReportableEntityHandlerFactory.java
@@ -0,0 +1,23 @@
+package com.wavefront.agent.core.handlers;
+
+import com.wavefront.agent.core.queues.QueueInfo;
+
+/** Factory for {@link ReportableEntityHandler} objects. */
+public interface ReportableEntityHandlerFactory {
+
+ /**
+ * Create, or return existing, {@link ReportableEntityHandler}.
+ *
+   * @param handler name of the handler (typically the listening port rendered as a string).
+ * @param queue unique identifier for the handler.
+ * @return new or existing handler.
+ */
+ ReportableEntityHandler getHandler(String handler, QueueInfo queue);
+
+ default ReportableEntityHandler getHandler(int port, QueueInfo queue) {
+ return getHandler(String.valueOf(port), queue);
+ }
+
+ /** Shutdown pipeline for a specific handle. */
+ void shutdown(int handle);
+}
diff --git a/proxy/src/main/java/com/wavefront/agent/core/handlers/ReportableEntityHandlerFactoryImpl.java b/proxy/src/main/java/com/wavefront/agent/core/handlers/ReportableEntityHandlerFactoryImpl.java
new file mode 100644
index 000000000..0a8d2aafc
--- /dev/null
+++ b/proxy/src/main/java/com/wavefront/agent/core/handlers/ReportableEntityHandlerFactoryImpl.java
@@ -0,0 +1,121 @@
+package com.wavefront.agent.core.handlers;
+
+import static com.wavefront.agent.ProxyContext.entityPropertiesFactoryMap;
+import static com.wavefront.agent.ProxyContext.queuesManager;
+import static com.wavefront.data.ReportableEntityType.*;
+
+import com.wavefront.agent.core.queues.QueueInfo;
+import com.wavefront.api.agent.ValidationConfiguration;
+import com.wavefront.common.Utils;
+import com.wavefront.data.ReportableEntityType;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.Function;
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import org.apache.commons.lang.math.NumberUtils;
+import org.slf4j.Logger;
+import wavefront.report.Histogram;
+
+/**
+ * Caching factory for {@link ReportableEntityHandler} objects. Makes sure there's only one handler
+ * for each {@link com.wavefront.agent.core.queues.QueueInfo}, which makes it possible to spin up
+ * handlers on demand at runtime, as well as redirecting traffic to a different pipeline.
+ */
+public class ReportableEntityHandlerFactoryImpl implements ReportableEntityHandlerFactory {
+
+  // Handlers keyed first by "<handler>.<queueName>", then by entity type, so each
+  // (handler, queue) pair maps to exactly one ReportableEntityHandler instance.
+  protected final Map>> handlers =
+      new ConcurrentHashMap<>();
+
+  private final ValidationConfiguration validationConfig;
+  private final Logger blockedPointsLogger;
+  private final Logger blockedHistogramsLogger;
+  private final Logger blockedSpansLogger;
+  private final Logger blockedLogsLogger;
+  private final Function histogramRecompressor;
+
+  /**
+   * Create new instance.
+   *
+   * @param validationConfig validation configuration.
+   * @param blockedPointsLogger logger for blocked points (also used for source tags and events).
+   * @param blockedHistogramsLogger logger for blocked histograms.
+   * @param blockedSpansLogger logger for blocked spans and span logs.
+   * @param histogramRecompressor optional histogram recompressor, may be null.
+   * @param blockedLogsLogger logger for blocked logs.
+   */
+  public ReportableEntityHandlerFactoryImpl(
+      @Nonnull final ValidationConfiguration validationConfig,
+      final Logger blockedPointsLogger,
+      final Logger blockedHistogramsLogger,
+      final Logger blockedSpansLogger,
+      @Nullable Function histogramRecompressor,
+      final Logger blockedLogsLogger) {
+    this.validationConfig = validationConfig;
+    this.blockedPointsLogger = blockedPointsLogger;
+    this.blockedHistogramsLogger = blockedHistogramsLogger;
+    this.blockedSpansLogger = blockedSpansLogger;
+    this.histogramRecompressor = histogramRecompressor;
+    this.blockedLogsLogger = blockedLogsLogger;
+  }
+
+  @SuppressWarnings("unchecked")
+  // TODO: review all implementations of this method
+  @Override
+  public ReportableEntityHandler getHandler(String handler, QueueInfo queue) {
+    return (ReportableEntityHandler)
+        handlers
+            .computeIfAbsent(handler + "." + queue.getName(), h -> new ConcurrentHashMap<>())
+            .computeIfAbsent(
+                queue.getEntityType(),
+                k -> {
+                  switch (queue.getEntityType()) {
+                    case POINT:
+                      return new ReportPointHandlerImpl(
+                          handler, queue, validationConfig, blockedPointsLogger, null);
+                    case HISTOGRAM:
+                      return new ReportPointHandlerImpl(
+                          handler,
+                          queue,
+                          validationConfig,
+                          blockedHistogramsLogger,
+                          histogramRecompressor);
+                    case SOURCE_TAG:
+                      return new ReportSourceTagHandlerImpl(handler, queue, blockedPointsLogger);
+                    case TRACE:
+                      return new SpanHandlerImpl(
+                          handler,
+                          queue,
+                          validationConfig,
+                          blockedSpansLogger,
+                          (tenantName) ->
+                              entityPropertiesFactoryMap
+                                  .get(tenantName)
+                                  .getGlobalProperties()
+                                  .getDropSpansDelayedMinutes(),
+                          Utils.lazySupplier(
+                              () -> getHandler(handler, queuesManager.initQueue(TRACE_SPAN_LOGS))));
+                    case TRACE_SPAN_LOGS:
+                      return new SpanLogsHandlerImpl(handler, queue, blockedSpansLogger);
+                    case EVENT:
+                      return new EventHandlerImpl(handler, queue, blockedPointsLogger);
+                    case LOGS:
+                      return new ReportLogHandlerImpl(
+                          handler, queue, validationConfig, blockedLogsLogger);
+                    default:
+                      throw new IllegalArgumentException(
+                          "Unexpected entity type "
+                              + queue.getEntityType().name()
+                              + " for "
+                              + handler);
+                  }
+                });
+  }
+
+  @Override
+  public void shutdown(int handle) {
+    // getHandler() keys this map as "<handler>.<queueName>", so an exact lookup on the bare
+    // handle string can never match; shut down every handler whose key starts with "<handle>.".
+    String prefix = handle + ".";
+    handlers.forEach(
+        (key, handlerMap) -> {
+          if (key.startsWith(prefix)) {
+            handlerMap.values().forEach(ReportableEntityHandler::shutdown);
+          }
+        });
+  }
+}
diff --git a/proxy/src/main/java/com/wavefront/agent/handlers/SpanHandlerImpl.java b/proxy/src/main/java/com/wavefront/agent/core/handlers/SpanHandlerImpl.java
similarity index 64%
rename from proxy/src/main/java/com/wavefront/agent/handlers/SpanHandlerImpl.java
rename to proxy/src/main/java/com/wavefront/agent/core/handlers/SpanHandlerImpl.java
index a88b0fd20..0fe5ca7b3 100644
--- a/proxy/src/main/java/com/wavefront/agent/handlers/SpanHandlerImpl.java
+++ b/proxy/src/main/java/com/wavefront/agent/core/handlers/SpanHandlerImpl.java
@@ -1,87 +1,83 @@
-package com.wavefront.agent.handlers;
+package com.wavefront.agent.core.handlers;
+import static com.wavefront.agent.PushAgent.isMulticastingActive;
import static com.wavefront.agent.sampler.SpanSampler.SPAN_SAMPLING_POLICY_TAG;
import static com.wavefront.data.Validation.validateSpan;
import com.wavefront.agent.api.APIContainer;
+import com.wavefront.agent.core.buffers.BuffersManager;
+import com.wavefront.agent.core.queues.QueueInfo;
import com.wavefront.api.agent.ValidationConfiguration;
import com.wavefront.common.Clock;
import com.wavefront.data.AnnotationUtils;
import com.wavefront.ingester.SpanSerializer;
import com.yammer.metrics.Metrics;
import com.yammer.metrics.core.MetricName;
-import java.util.Collection;
import java.util.List;
-import java.util.Map;
import java.util.concurrent.TimeUnit;
-import java.util.function.BiConsumer;
import java.util.function.Function;
import java.util.function.Supplier;
-import java.util.logging.Logger;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import wavefront.report.Annotation;
import wavefront.report.Span;
import wavefront.report.SpanLogs;
/**
* Handler that processes incoming Span objects, validates them and hands them over to one of the
- * {@link SenderTask} threads.
- *
- * @author vasily@wavefront.com
+ * SenderTask threads.
*/
public class SpanHandlerImpl extends AbstractReportableEntityHandler {
+ private static final Logger log =
+ LoggerFactory.getLogger(SpanHandlerImpl.class.getCanonicalName());
private final ValidationConfiguration validationConfig;
- private final Logger validItemsLogger;
private final Function dropSpansDelayedMinutes;
private final com.yammer.metrics.core.Histogram receivedTagCount;
private final com.yammer.metrics.core.Counter policySampledSpanCounter;
- private final Supplier> spanLogsHandler;
+ private final Supplier> spanLogsHandler;
/**
* @param handlerKey pipeline hanler key.
- * @param blockedItemsPerBatch controls sample rate of how many blocked points are written into
- * the main log file.
- * @param senderTaskMap map of tenant name and tasks actually handling data transfer to the
- * Wavefront endpoint corresponding to the tenant name
* @param validationConfig parameters for data validation.
- * @param receivedRateSink where to report received rate.
* @param blockedItemLogger logger for blocked items.
- * @param validItemsLogger logger for valid items.
* @param dropSpansDelayedMinutes latency threshold for dropping delayed spans.
* @param spanLogsHandler spanLogs handler.
*/
SpanHandlerImpl(
- final HandlerKey handlerKey,
- final int blockedItemsPerBatch,
- final Map>> senderTaskMap,
+ final String handler,
+ final QueueInfo handlerKey,
@Nonnull final ValidationConfiguration validationConfig,
- @Nullable final BiConsumer receivedRateSink,
@Nullable final Logger blockedItemLogger,
- @Nullable final Logger validItemsLogger,
@Nonnull final Function dropSpansDelayedMinutes,
- @Nonnull final Supplier> spanLogsHandler) {
- super(
- handlerKey,
- blockedItemsPerBatch,
- new SpanSerializer(),
- senderTaskMap,
- true,
- receivedRateSink,
- blockedItemLogger);
- super.initializeCounters();
+ @Nonnull final Supplier> spanLogsHandler) {
+ super(handler, handlerKey, new SpanSerializer(), blockedItemLogger);
this.validationConfig = validationConfig;
- this.validItemsLogger = validItemsLogger;
this.dropSpansDelayedMinutes = dropSpansDelayedMinutes;
this.receivedTagCount =
Metrics.newHistogram(
- new MetricName(handlerKey.toString() + ".received", "", "tagCount"), false);
+ new MetricName(handlerKey.getName() + ".received", "", "tagCount"), false);
this.spanLogsHandler = spanLogsHandler;
this.policySampledSpanCounter =
Metrics.newCounter(new MetricName(handlerKey.toString(), "", "sampler.policy.saved"));
}
+ // MONIT-26010: this is a temp helper function to remove MULTICASTING_TENANT_TAG
+ // TODO: refactor this into AnnotationUtils or figure out a better removing implementation
+ private static void removeSpanAnnotation(List annotations, String key) {
+ Annotation toRemove = null;
+ for (Annotation annotation : annotations) {
+ if (annotation.getKey().equals(key)) {
+ toRemove = annotation;
+ // we should have only one matching
+ break;
+ }
+ }
+ annotations.remove(toRemove);
+ }
+
@Override
protected void reportInternal(Span span) {
receivedTagCount.update(span.getAnnotations().size());
@@ -107,9 +103,10 @@ protected void reportInternal(Span span) {
this.policySampledSpanCounter.inc();
}
final String strSpan = serializer.apply(span);
- getTask(APIContainer.CENTRAL_TENANT_NAME).add(strSpan);
- getReceivedCounter().inc();
- // check if span annotations contains the tag key indicating this span should be multicasted
+
+ incrementReceivedCounters(strSpan.length());
+ BuffersManager.sendMsg(queue, strSpan);
+
if (isMulticastingActive
&& span.getAnnotations() != null
&& AnnotationUtils.getValue(span.getAnnotations(), MULTICASTING_TENANT_TAG_KEY) != null) {
@@ -118,34 +115,14 @@ protected void reportInternal(Span span) {
.trim()
.split(",");
removeSpanAnnotation(span.getAnnotations(), MULTICASTING_TENANT_TAG_KEY);
- for (String multicastingTenantName : multicastingTenantNames) {
- // if the tenant name indicated in span tag is not configured, just ignore
- if (getTask(multicastingTenantName) != null) {
- maxSpanDelay = dropSpansDelayedMinutes.apply(multicastingTenantName);
- if (maxSpanDelay != null
- && span.getStartMillis() + span.getDuration()
- < Clock.now() - TimeUnit.MINUTES.toMillis(maxSpanDelay)) {
- // just ignore, reduce unnecessary cost on multicasting cluster
- continue;
- }
- getTask(multicastingTenantName).add(serializer.apply(span));
+ for (String tenant : multicastingTenantNames) {
+ QueueInfo tenantQueue = queue.getTenantQueue(tenant);
+ if (tenantQueue != null) {
+ BuffersManager.sendMsg(tenantQueue, strSpan);
+ } else {
+ log.info("Tenant '" + tenant + "' invalid");
}
}
}
- if (validItemsLogger != null) validItemsLogger.info(strSpan);
- }
-
- // MONIT-26010: this is a temp helper function to remove MULTICASTING_TENANT_TAG
- // TODO: refactor this into AnnotationUtils or figure out a better removing implementation
- private static void removeSpanAnnotation(List annotations, String key) {
- Annotation toRemove = null;
- for (Annotation annotation : annotations) {
- if (annotation.getKey().equals(key)) {
- toRemove = annotation;
- // we should have only one matching
- break;
- }
- }
- annotations.remove(toRemove);
}
}
diff --git a/proxy/src/main/java/com/wavefront/agent/core/handlers/SpanLogsHandlerImpl.java b/proxy/src/main/java/com/wavefront/agent/core/handlers/SpanLogsHandlerImpl.java
new file mode 100644
index 000000000..b2e7fb64b
--- /dev/null
+++ b/proxy/src/main/java/com/wavefront/agent/core/handlers/SpanLogsHandlerImpl.java
@@ -0,0 +1,35 @@
+package com.wavefront.agent.core.handlers;
+
+import com.wavefront.agent.core.buffers.BuffersManager;
+import com.wavefront.agent.core.queues.QueueInfo;
+import com.wavefront.ingester.SpanLogsSerializer;
+import javax.annotation.Nullable;
+import org.slf4j.Logger;
+import wavefront.report.SpanLogs;
+
+/**
+ * Handler that processes incoming SpanLogs objects, validates them and hands them over to one of
+ * the {@link SenderTask} threads.
+ */
+public class SpanLogsHandlerImpl extends AbstractReportableEntityHandler {
+
+  /**
+   * Create new instance.
+   *
+   * @param handler handler name (typically the listening port as a string).
+   * @param handlerKey pipeline handler key.
+   * @param blockedItemLogger logger for blocked items.
+   */
+  SpanLogsHandlerImpl(
+      final String handler, final QueueInfo handlerKey, @Nullable final Logger blockedItemLogger) {
+    super(handler, handlerKey, new SpanLogsSerializer(), blockedItemLogger);
+  }
+
+  @Override
+  protected void reportInternal(SpanLogs spanLogs) {
+    // Serializer may yield null; such items are silently skipped (no counter incremented).
+    String strSpanLogs = serializer.apply(spanLogs);
+    if (strSpanLogs != null) {
+      incrementReceivedCounters(strSpanLogs.length());
+      BuffersManager.sendMsg(queue, strSpanLogs);
+    }
+  }
+}
diff --git a/proxy/src/main/java/com/wavefront/agent/core/queues/Queue.java b/proxy/src/main/java/com/wavefront/agent/core/queues/Queue.java
new file mode 100644
index 000000000..ff8281ec0
--- /dev/null
+++ b/proxy/src/main/java/com/wavefront/agent/core/queues/Queue.java
@@ -0,0 +1,70 @@
+package com.wavefront.agent.core.queues;
+
+import static com.wavefront.agent.api.APIContainer.CENTRAL_TENANT_NAME;
+
+import com.wavefront.data.ReportableEntityType;
+import java.util.HashMap;
+import java.util.Map;
+
+class Queue implements QueueInfo {
+  private final String name;
+  private final ReportableEntityType entityType;
+  private final String tenant;
+  private final int threads;
+  // Per-tenant sub-queues for multicasting, populated via addTenant().
+  private final Map tenants = new HashMap<>();
+  // Max items packed into one message; exposed through getMaxItemsPerMessage().
+  private final int midBufferItems;
+
+  Queue(ReportableEntityType entityType, String tenant, int threads) {
+    // Central-tenant queues are named after the entity type alone; other tenants
+    // get a ".<tenant>" suffix.
+    this.name = entityType + (tenant.equalsIgnoreCase(CENTRAL_TENANT_NAME) ? "" : "." + tenant);
+    this.entityType = entityType;
+    this.tenant = tenant;
+    this.threads = threads;
+    // Per-entity-type batch sizing for the mid buffer.
+    switch (entityType) {
+      case LOGS:
+        midBufferItems = 10;
+        break;
+      case POINT:
+        midBufferItems = 255;
+        break;
+      default:
+        midBufferItems = 100;
+    }
+    // Side effect: construction registers this queue's metrics with QueueStats.
+    QueueStats.register(this);
+  }
+
+  public String getTenant() {
+    return tenant;
+  }
+
+  @Override
+  public QueueInfo getTenantQueue(String tenant) {
+    return tenants.get(tenant);
+  }
+
+  @Override
+  public Map getTenants() {
+    return tenants;
+  }
+
+  public ReportableEntityType getEntityType() {
+    return entityType;
+  }
+
+  public String getName() {
+    return name;
+  }
+
+  @Override
+  public int getNumberThreads() {
+    return threads;
+  }
+
+  @Override
+  public int getMaxItemsPerMessage() {
+    return midBufferItems;
+  }
+
+  public void addTenant(String tenant, Queue queue) {
+    tenants.put(tenant, queue);
+  }
+}
diff --git a/proxy/src/main/java/com/wavefront/agent/core/queues/QueueInfo.java b/proxy/src/main/java/com/wavefront/agent/core/queues/QueueInfo.java
new file mode 100644
index 000000000..274e17768
--- /dev/null
+++ b/proxy/src/main/java/com/wavefront/agent/core/queues/QueueInfo.java
@@ -0,0 +1,20 @@
+package com.wavefront.agent.core.queues;
+
+import com.wavefront.data.ReportableEntityType;
+import java.util.Map;
+
+public interface QueueInfo {
+  /** Tenant this queue belongs to. */
+  String getTenant();
+
+  /** Sub-queue for the given multicasting tenant, or null if that tenant is not configured. */
+  QueueInfo getTenantQueue(String tenant);
+
+  /** All multicasting tenant sub-queues, keyed by tenant name. */
+  Map getTenants();
+
+  /** Entity type (points, spans, logs, ...) carried by this queue. */
+  ReportableEntityType getEntityType();
+
+  /** Queue name; also used as the metric name prefix. */
+  String getName();
+
+  /** Number of sender threads servicing this queue. */
+  int getNumberThreads();
+
+  /** Maximum number of items packed into a single message. */
+  int getMaxItemsPerMessage();
+}
diff --git a/proxy/src/main/java/com/wavefront/agent/core/queues/QueueStats.java b/proxy/src/main/java/com/wavefront/agent/core/queues/QueueStats.java
new file mode 100644
index 000000000..cd444dd20
--- /dev/null
+++ b/proxy/src/main/java/com/wavefront/agent/core/queues/QueueStats.java
@@ -0,0 +1,113 @@
+package com.wavefront.agent.core.queues;
+
+import com.wavefront.agent.PushAgent;
+import com.wavefront.common.NamedThreadFactory;
+import com.wavefront.common.TaggedMetricName;
+import com.yammer.metrics.Metrics;
+import com.yammer.metrics.core.BurstRateTrackingCounter;
+import com.yammer.metrics.core.Counter;
+import com.yammer.metrics.core.Histogram;
+import com.yammer.metrics.core.MetricName;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
+/** Per-queue delivery metrics plus periodic rate/total logging. One instance per queue name. */
+public class QueueStats {
+  public final Counter dropped;
+  public final Counter delivered;
+  public final Counter deliveredBytes;
+  public final Counter failed;
+  public final Counter sent;
+  public final Counter queuedFailed;
+  public final Counter queuedExpired;
+  public final Histogram msgLength;
+  public final Counter queuedFull;
+  public final Counter internalError;
+
+  private final BurstRateTrackingCounter deliveredStats;
+  private final QueueInfo queue;
+
+  // Registration happens from Queue constructors, which QueuesManagerDefault may run
+  // concurrently — use a concurrent map so computeIfAbsent is atomic.
+  private static final Map stats = new java.util.concurrent.ConcurrentHashMap<>();
+  private static final ScheduledExecutorService executor =
+      Executors.newScheduledThreadPool(2, new NamedThreadFactory("QueueStats"));
+
+  /** Create (or return the existing) stats object for the given queue. Idempotent per name. */
+  protected static QueueStats register(QueueInfo queue) {
+    return stats.computeIfAbsent(queue.getName(), s -> new QueueStats(queue, executor));
+  }
+
+  /** Look up stats by queue name; null if never registered. */
+  public static QueueStats get(String queue) {
+    return stats.get(queue);
+  }
+
+  private QueueStats(QueueInfo queue, ScheduledExecutorService scheduler) {
+    this.queue = queue;
+    MetricName deliveredMetricName = new MetricName(queue.getName(), "", "delivered");
+    this.deliveredBytes =
+        Metrics.newCounter(new MetricName(queue.getName(), "", "delivered.bytes"));
+    this.delivered = Metrics.newCounter(deliveredMetricName);
+    this.deliveredStats =
+        new BurstRateTrackingCounter(deliveredMetricName, Metrics.defaultRegistry(), 1000);
+    this.failed = Metrics.newCounter(new MetricName(queue.getName(), "", "failed"));
+    this.sent = Metrics.newCounter(new MetricName(queue.getName(), "", "sent"));
+    this.dropped = Metrics.newCounter(new MetricName(queue.getName(), "", "dropped"));
+
+    msgLength =
+        Metrics.newHistogram(new MetricName("buffer." + queue.getName(), "", "message_length"));
+
+    queuedFailed =
+        Metrics.newCounter(new TaggedMetricName(queue.getName(), "queued", "reason", "failed"));
+
+    queuedExpired =
+        Metrics.newCounter(new TaggedMetricName(queue.getName(), "queued", "reason", "expired"));
+
+    queuedFull =
+        Metrics.newCounter(new TaggedMetricName(queue.getName(), "queued", "reason", "queue-full"));
+
+    internalError =
+        Metrics.newCounter(
+            new TaggedMetricName(queue.getName(), "queued", "reason", "internal-error"));
+
+    // NOTE(review): tasks are never cancelled, so stats objects live (and log) for the
+    // lifetime of the process — confirm this is intended.
+    scheduler.scheduleAtFixedRate(() -> printStats(), 10, 10, TimeUnit.SECONDS);
+    scheduler.scheduleAtFixedRate(() -> printTotal(), 1, 1, TimeUnit.MINUTES);
+  }
+
+  /** Logs 1-min / 5-min / current delivered rates for this queue. */
+  protected void printStats() {
+    String rateUnit = queue.getEntityType().getRateUnit();
+    PushAgent.stats.info(
+        "["
+            + queue.getName()
+            + "] "
+            + queue.getEntityType().toCapitalizedString()
+            + " delivered rate: "
+            + deliveredStats.getOneMinutePrintableRate()
+            + " "
+            + rateUnit
+            + " (1 min), "
+            + deliveredStats.getFiveMinutePrintableRate()
+            + " "
+            + rateUnit
+            + " (5 min) "
+            + deliveredStats.getCurrentRate()
+            + " "
+            + rateUnit
+            + " (current).");
+  }
+
+  /** Logs cumulative sent/delivered/failed/dropped totals since proxy start. */
+  protected void printTotal() {
+    PushAgent.stats.info(
+        "["
+            + queue.getName()
+            + "] "
+            + queue.getEntityType().toCapitalizedString()
+            + " sent since start: "
+            + this.sent.count()
+            + "; delivered: "
+            + this.delivered.count()
+            + "; failed: "
+            + this.failed.count()
+            + "; dropped: "
+            + this.dropped.count());
+  }
+}
diff --git a/proxy/src/main/java/com/wavefront/agent/core/queues/QueuesManager.java b/proxy/src/main/java/com/wavefront/agent/core/queues/QueuesManager.java
new file mode 100644
index 000000000..f2ec084f5
--- /dev/null
+++ b/proxy/src/main/java/com/wavefront/agent/core/queues/QueuesManager.java
@@ -0,0 +1,7 @@
+package com.wavefront.agent.core.queues;
+
+import com.wavefront.data.ReportableEntityType;
+
+/** Creates and tracks the queues data flows through on its way to the backend. */
+public interface QueuesManager {
+  /** Create, or return the existing, queue for the given entity type. */
+  QueueInfo initQueue(ReportableEntityType entityType);
+}
diff --git a/proxy/src/main/java/com/wavefront/agent/core/queues/QueuesManagerDefault.java b/proxy/src/main/java/com/wavefront/agent/core/queues/QueuesManagerDefault.java
new file mode 100644
index 000000000..7bdc000da
--- /dev/null
+++ b/proxy/src/main/java/com/wavefront/agent/core/queues/QueuesManagerDefault.java
@@ -0,0 +1,54 @@
+package com.wavefront.agent.core.queues;
+
+import static com.wavefront.agent.ProxyContext.entityPropertiesFactoryMap;
+import static com.wavefront.agent.api.APIContainer.CENTRAL_TENANT_NAME;
+
+import com.wavefront.agent.ProxyConfig;
+import com.wavefront.agent.core.buffers.Buffer;
+import com.wavefront.agent.core.buffers.BuffersManager;
+import com.wavefront.agent.core.senders.SenderTasksManager;
+import com.wavefront.data.ReportableEntityType;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+/** Default {@link QueuesManager}: one queue per entity type plus per-multicasting-tenant queues. */
+public class QueuesManagerDefault implements QueuesManager {
+  private final Map queues = new ConcurrentHashMap<>();
+  private final ProxyConfig cfg;
+
+  public QueuesManagerDefault(ProxyConfig cfg) {
+    this.cfg = cfg;
+  }
+
+  /**
+   * Create (or look up) the central-tenant queue for the given entity type and attach one
+   * sub-queue per configured multicasting tenant.
+   */
+  public QueueInfo initQueue(ReportableEntityType entityType) {
+    Queue queue = initQueue(entityType, CENTRAL_TENANT_NAME);
+    cfg.getMulticastingTenantList()
+        .keySet()
+        .forEach(tenant -> queue.addTenant(tenant, initQueue(entityType, tenant)));
+    // The Queue constructor already registers with QueueStats, so no extra
+    // QueueStats.register(queue) call is needed here.
+    return queue;
+  }
+
+  private Queue initQueue(ReportableEntityType entityType, String tenant) {
+    Queue queue =
+        new Queue(
+            entityType,
+            tenant,
+            entityPropertiesFactoryMap.get(tenant).get(entityType).getFlushThreads());
+    // Return the canonical instance held in the map: the previous implementation always
+    // returned the freshly built Queue, even when a queue with the same name had already
+    // been created and set up.
+    return queues.computeIfAbsent(
+        queue.getName(),
+        s -> {
+          setupQueue(queue);
+          return queue;
+        });
+  }
+
+  /** Registers the queue with the buffer layer and spins up its sender tasks. */
+  private static void setupQueue(QueueInfo q) {
+    List buffers = BuffersManager.registerNewQueueIfNeedIt(q);
+    buffers.forEach(buffer -> SenderTasksManager.createSenderTasks(q, buffer));
+  }
+}
diff --git a/proxy/src/main/java/com/wavefront/agent/core/senders/EventSenderTask.java b/proxy/src/main/java/com/wavefront/agent/core/senders/EventSenderTask.java
new file mode 100644
index 000000000..6a7339928
--- /dev/null
+++ b/proxy/src/main/java/com/wavefront/agent/core/senders/EventSenderTask.java
@@ -0,0 +1,38 @@
+package com.wavefront.agent.core.senders;
+
+import com.wavefront.agent.core.buffers.Buffer;
+import com.wavefront.agent.core.queues.QueueInfo;
+import com.wavefront.agent.core.queues.QueueStats;
+import com.wavefront.agent.data.EntityProperties;
+import com.wavefront.api.EventAPI;
+import java.util.List;
+import java.util.UUID;
+import javax.ws.rs.core.Response;
+
+/** Sender task that ships batches of events to the backend as a single JSON array string. */
+class EventSenderTask extends SenderTask {
+  private final EventAPI proxyAPI;
+  private final UUID proxyId;
+
+  /**
+   * @param queue handler key, that serves as an identifier of the metrics pipeline.
+   * @param idx task index within the queue's sender pool.
+   * @param proxyAPI handles interaction with Wavefront servers as well as queueing.
+   * @param proxyId id of the proxy.
+   * @param properties container for mutable proxy settings.
+   * @param buffer buffer this task consumes from.
+   * @param queueStats stats object for this queue.
+   */
+  EventSenderTask(
+      QueueInfo queue,
+      int idx,
+      EventAPI proxyAPI,
+      UUID proxyId,
+      EntityProperties properties,
+      Buffer buffer,
+      QueueStats queueStats) {
+    super(queue, idx, properties, buffer, queueStats);
+    this.proxyAPI = proxyAPI;
+    this.proxyId = proxyId;
+  }
+
+  @Override
+  public Response submit(List events) {
+    return proxyAPI.proxyEventsString(proxyId, "[" + String.join(",", events) + "]");
+  }
+}
diff --git a/proxy/src/main/java/com/wavefront/agent/core/senders/LineDelimitedSenderTask.java b/proxy/src/main/java/com/wavefront/agent/core/senders/LineDelimitedSenderTask.java
new file mode 100644
index 000000000..1b27399b4
--- /dev/null
+++ b/proxy/src/main/java/com/wavefront/agent/core/senders/LineDelimitedSenderTask.java
@@ -0,0 +1,38 @@
+package com.wavefront.agent.core.senders;
+
+import com.wavefront.agent.core.buffers.Buffer;
+import com.wavefront.agent.core.handlers.LineDelimitedUtils;
+import com.wavefront.agent.core.queues.QueueInfo;
+import com.wavefront.agent.core.queues.QueueStats;
+import com.wavefront.agent.data.EntityProperties;
+import com.wavefront.api.ProxyV2API;
+import java.util.List;
+import java.util.UUID;
+import javax.ws.rs.core.Response;
+
+/** Sender task that ships newline-delimited payloads via the generic proxyReport endpoint. */
+class LineDelimitedSenderTask extends SenderTask {
+
+  private final ProxyV2API proxyAPI;
+  private final UUID proxyId;
+  // Wire-format identifier forwarded to the backend with each report.
+  private final String pushFormat;
+
+  /**
+   * @param queue handler key, that serves as an identifier of the pipeline.
+   * @param idx task index within the queue's sender pool.
+   * @param pushFormat wire format of the payload.
+   * @param proxyAPI handles interaction with Wavefront servers as well as queueing.
+   * @param proxyId id of the proxy.
+   * @param properties container for mutable proxy settings.
+   * @param buffer buffer this task consumes from.
+   * @param queueStats stats object for this queue.
+   */
+  LineDelimitedSenderTask(
+      QueueInfo queue,
+      int idx,
+      String pushFormat,
+      ProxyV2API proxyAPI,
+      UUID proxyId,
+      final EntityProperties properties,
+      Buffer buffer,
+      QueueStats queueStats) {
+    super(queue, idx, properties, buffer, queueStats);
+    this.pushFormat = pushFormat;
+    this.proxyId = proxyId;
+    this.proxyAPI = proxyAPI;
+  }
+
+  @Override
+  protected Response submit(List logs) {
+    return proxyAPI.proxyReport(proxyId, pushFormat, LineDelimitedUtils.joinPushData(logs));
+  }
+}
diff --git a/proxy/src/main/java/com/wavefront/agent/core/senders/LogSenderTask.java b/proxy/src/main/java/com/wavefront/agent/core/senders/LogSenderTask.java
new file mode 100644
index 000000000..9f27c15f5
--- /dev/null
+++ b/proxy/src/main/java/com/wavefront/agent/core/senders/LogSenderTask.java
@@ -0,0 +1,71 @@
+package com.wavefront.agent.core.senders;
+
+import com.wavefront.agent.core.buffers.Buffer;
+import com.wavefront.agent.core.queues.QueueInfo;
+import com.wavefront.agent.core.queues.QueueStats;
+import com.wavefront.agent.data.EntityProperties;
+import com.wavefront.api.LogAPI;
+import com.yammer.metrics.Metrics;
+import com.yammer.metrics.core.MetricName;
+import java.util.List;
+import java.util.UUID;
+import javax.ws.rs.core.Response;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/** This class is responsible for accumulating logs and uploading them in batches. */
+public class LogSenderTask extends SenderTask {
+  private static final Logger LOGGER = LoggerFactory.getLogger("LogDataSubmission");
+  public static final String AGENT_PREFIX = "WF-PROXY-AGENT-";
+
+  // NOTE(review): these shadow the superclass's private fields with the same values; kept
+  // local because the superclass fields are not accessible from here.
+  private final QueueInfo queue;
+  private final LogAPI logAPI;
+  private final UUID proxyId;
+  private final EntityProperties properties;
+
+  /**
+   * @param queue handler key, that serves as an identifier of the log pipeline.
+   * @param idx task index within the queue's sender pool.
+   * @param logAPI handles interaction with log systems as well as queueing.
+   * @param proxyId id of the proxy.
+   * @param properties container for mutable proxy settings.
+   * @param buffer buffer this task consumes from.
+   * @param queueStats stats object for this queue.
+   */
+  LogSenderTask(
+      QueueInfo queue,
+      int idx,
+      LogAPI logAPI,
+      UUID proxyId,
+      EntityProperties properties,
+      Buffer buffer,
+      QueueStats queueStats) {
+    super(queue, idx, properties, buffer, queueStats);
+    this.queue = queue;
+    this.logAPI = logAPI;
+    this.proxyId = proxyId;
+    this.properties = properties;
+  }
+
+  // Logs are batched by payload size (bytes) rather than item count, unlike the base class.
+  @Override
+  public boolean checkBatchSize(int items, int bytes, int newItems, int newBytes) {
+    return bytes + newBytes <= properties.getDataPerBatch();
+  }
+
+  @Override
+  protected Response submit(List logs) {
+    if (LOGGER.isDebugEnabled()) {
+      for (String log : logs) {
+        LOGGER.debug("Sending a log to the backend: " + log);
+      }
+    }
+    return logAPI.proxyLogsStr(
+        AGENT_PREFIX + proxyId.toString(), "[" + String.join(",", logs) + "]");
+  }
+
+  // A 429 from VRLIC means that the daily ingestion limit has been reached
+  @Override
+  protected boolean dropOnHTTPError(Response.StatusType statusInfo, int batchSize) {
+    if (statusInfo.getStatusCode() == 429) {
+      Metrics.newCounter(new MetricName(queue.getName(), "", "failed" + ".ingestion_limit_reached"))
+          .inc(batchSize);
+      return true;
+    }
+    return super.dropOnHTTPError(statusInfo, batchSize);
+  }
+}
diff --git a/proxy/src/main/java/com/wavefront/agent/core/senders/SenderTask.java b/proxy/src/main/java/com/wavefront/agent/core/senders/SenderTask.java
new file mode 100644
index 000000000..bbb96ca00
--- /dev/null
+++ b/proxy/src/main/java/com/wavefront/agent/core/senders/SenderTask.java
@@ -0,0 +1,181 @@
+package com.wavefront.agent.core.senders;
+
+import static com.wavefront.common.Utils.isWavefrontResponse;
+
+import com.wavefront.agent.core.buffers.Buffer;
+import com.wavefront.agent.core.buffers.OnMsgDelegate;
+import com.wavefront.agent.core.queues.QueueInfo;
+import com.wavefront.agent.core.queues.QueueStats;
+import com.wavefront.agent.data.EntityProperties;
+import com.wavefront.common.TaggedMetricName;
+import com.yammer.metrics.Metrics;
+import com.yammer.metrics.core.MetricName;
+import com.yammer.metrics.core.TimerContext;
+import java.net.ConnectException;
+import java.net.SocketTimeoutException;
+import java.net.UnknownHostException;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import javax.net.ssl.SSLHandshakeException;
+import javax.ws.rs.ProcessingException;
+import javax.ws.rs.core.Response;
+import org.apache.logging.log4j.core.util.Throwables;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Base class for sender tasks: drains message batches from a {@link Buffer} and submits them
+ * to the backend, translating HTTP/transport failures into {@link SenderTaskException} so the
+ * buffer layer can retry or drop.
+ */
+abstract class SenderTask implements Runnable, OnMsgDelegate {
+  private static final Logger log = LoggerFactory.getLogger(SenderTask.class.getCanonicalName());
+  // new MessageDedupingLogger(LoggerFactory.getLogger(SenderTask.class.getCanonicalName()),
+  // 1000, 1);
+
+  private final QueueInfo queue;
+  private final int idx;
+  private final EntityProperties properties;
+  private final Buffer buffer;
+  private final QueueStats queueStats;
+
+  SenderTask(
+      QueueInfo queue, int idx, EntityProperties properties, Buffer buffer, QueueStats queueStats) {
+    this.queue = queue;
+    this.idx = idx;
+    this.properties = properties;
+    this.buffer = buffer;
+    this.queueStats = queueStats;
+  }
+
+  /** One drain cycle: asks the buffer for a batch; the buffer calls back into this delegate. */
+  @Override
+  public void run() {
+    try {
+      buffer.onMsgBatch(queue, idx, this);
+    } catch (Throwable e) {
+      log.error("error sending " + queue.getEntityType().name(), e);
+    }
+  }
+
+  /** Item-count-based batch limit; subclasses may override (e.g. byte-based for logs). */
+  @Override
+  public boolean checkBatchSize(int items, int bytes, int newItems, int newBytes) {
+    return items + newItems <= properties.getDataPerBatch();
+  }
+
+  /** Rate-limits by item count; acquiring consumes permits even if the send later fails. */
+  @Override
+  public boolean checkRates(int newItems, int newBytes) {
+    return properties.getRateLimiter().tryAcquire(newItems);
+  }
+
+  /**
+   * Submits one batch and updates counters. Throws {@link SenderTaskException} when the batch
+   * should be retried/queued; returns normally when it was delivered or deliberately dropped
+   * (see {@link #dropOnHTTPError}).
+   */
+  @Override
+  public void processBatch(List batch) throws SenderTaskException {
+    TimerContext timer =
+        Metrics.newTimer(
+            new MetricName("push." + queue.getName(), "", "duration"),
+            TimeUnit.MILLISECONDS,
+            TimeUnit.MINUTES)
+            .time();
+
+    try (Response response = submit(batch)) {
+      Metrics.newCounter(
+              new TaggedMetricName(
+                  "push", queue.getName() + ".http." + response.getStatus() + ".count"))
+          .inc();
+      queueStats.sent.inc(batch.size());
+      if (response.getStatus() >= 200 && response.getStatus() < 300) {
+        queueStats.delivered.inc(batch.size());
+        queueStats.deliveredBytes.inc(batch.stream().mapToInt(value -> value.length()).sum());
+      } else {
+        queueStats.failed.inc(batch.size());
+        // Status-specific handling is advisory (logging / rate-limiter pause); the
+        // retry-vs-drop decision is made below via dropOnHTTPError().
+        switch (response.getStatusInfo().toEnum()) {
+          case NOT_ACCEPTABLE: // CollectorApiServer RejectedExecutionException
+          case REQUEST_ENTITY_TOO_LARGE: // CollectorApiServer ReportBundleTooLargeException (PPS
+            // exceeded)
+            properties.getRateLimiter().pause();
+            break;
+          case FORBIDDEN:
+            log.warn(
+                "["
+                    + queue.getName()
+                    + "] HTTP "
+                    + response.getStatus()
+                    + ": Please verify that '"
+                    + queue.getEntityType()
+                    + "' is enabled for your account!");
+            break;
+          case UNAUTHORIZED:
+          case PROXY_AUTHENTICATION_REQUIRED:
+          case REQUEST_TIMEOUT:
+            if (isWavefrontResponse(response)) {
+              log.warn(
+                  "["
+                      + queue.getName()
+                      + "] HTTP "
+                      + response.getStatus()
+                      + " (Unregistered proxy) received while sending data to Wavefront - please verify that your token is valid and has Proxy Management permissions!");
+            } else {
+              log.warn(
+                  "["
+                      + queue.getName()
+                      + "] HTTP "
+                      + response.getStatus()
+                      + " received while sending data to Wavefront - please verify your network/HTTP proxy settings!");
+            }
+            break;
+        }
+        if (!dropOnHTTPError(response.getStatusInfo(), batch.size())) {
+          throw new SenderTaskException(
+              "HTTP error: "
+                  + response.getStatus()
+                  + " "
+                  + response.getStatusInfo().getReasonPhrase());
+        }
+      }
+    } catch (ProcessingException ex) {
+      // Transport-level failure: classify the root cause for a friendlier log line, then
+      // rethrow as SenderTaskException so the batch is re-queued.
+      Throwable rootCause = Throwables.getRootCause(ex);
+      if (rootCause instanceof UnknownHostException) {
+        log.warn(
+            "["
+                + queue.getName()
+                + "] Error sending data to Wavefront: Unknown host "
+                + rootCause.getMessage()
+                + ", please check your network!");
+      } else if (rootCause instanceof ConnectException
+          || rootCause instanceof SocketTimeoutException) {
+        log.warn(
+            "["
+                + queue.getName()
+                + "] Error sending data to Wavefront: "
+                + rootCause.getMessage()
+                + ", please verify your network/HTTP proxy settings!");
+      } else if (ex.getCause() instanceof SSLHandshakeException) {
+        log.warn(
+            "["
+                + queue.getName()
+                + "] Error sending data to Wavefront: "
+                + ex.getCause()
+                + ", please verify that your environment has up-to-date root certificates!");
+      } else {
+        log.warn("[" + queue.getName() + "] Error sending data to Wavefront: " + rootCause);
+      }
+      if (log.isDebugEnabled()) {
+        log.info("Full stacktrace: ", ex);
+      }
+      throw new SenderTaskException(rootCause.getMessage());
+    } catch (Exception ex) {
+      log.warn(
+          "["
+              + queue.getName()
+              + "] Error sending data to Wavefront: "
+              + Throwables.getRootCause(ex));
+      if (log.isDebugEnabled()) {
+        log.info("Full stacktrace: ", ex);
+      }
+      throw new SenderTaskException(ex.getMessage());
+    } finally {
+      timer.stop();
+    }
+  }
+
+  /* return true if the point need to be dropped on a specif HTTP error code */
+  protected boolean dropOnHTTPError(Response.StatusType statusInfo, int batchSize) {
+    return false;
+  }
+
+  /** Performs the actual HTTP submission of one serialized batch. */
+  protected abstract Response submit(List events);
+}
diff --git a/proxy/src/main/java/com/wavefront/agent/core/senders/SenderTaskException.java b/proxy/src/main/java/com/wavefront/agent/core/senders/SenderTaskException.java
new file mode 100644
index 000000000..465426b6f
--- /dev/null
+++ b/proxy/src/main/java/com/wavefront/agent/core/senders/SenderTaskException.java
@@ -0,0 +1,7 @@
+package com.wavefront.agent.core.senders;
+
+public class SenderTaskException extends Exception {
+ public SenderTaskException(String reasonPhrase) {
+ super(reasonPhrase);
+ }
+}
diff --git a/proxy/src/main/java/com/wavefront/agent/core/senders/SenderTasksManager.java b/proxy/src/main/java/com/wavefront/agent/core/senders/SenderTasksManager.java
new file mode 100644
index 000000000..e24a304ad
--- /dev/null
+++ b/proxy/src/main/java/com/wavefront/agent/core/senders/SenderTasksManager.java
@@ -0,0 +1,210 @@
+package com.wavefront.agent.core.senders;
+
+import static com.wavefront.agent.ProxyContext.entityPropertiesFactoryMap;
+import static com.wavefront.api.agent.Constants.*;
+
+import com.wavefront.agent.api.APIContainer;
+import com.wavefront.agent.core.buffers.Buffer;
+import com.wavefront.agent.core.queues.QueueInfo;
+import com.wavefront.agent.core.queues.QueueStats;
+import com.wavefront.agent.data.EntityProperties;
+import com.wavefront.api.ProxyV2API;
+import com.wavefront.data.ReportableEntityType;
+import java.util.Map;
+import java.util.UUID;
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicInteger;
+import javax.annotation.Nonnull;
+import javax.validation.constraints.NotNull;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/** Factory for {@link SenderTask} objects. */
+public class SenderTasksManager {
+ private static final Map executors = new ConcurrentHashMap<>();
+ private static APIContainer apiContainer;
+ private static UUID proxyId;
+ private static final Logger log =
+ LoggerFactory.getLogger(SenderTasksManager.class.getCanonicalName());
+
+ /**
+ * @param apiContainer handles interaction with Wavefront servers as well as queueing.
+ * @param proxyId proxy ID.
+ */
+ public static void init(final APIContainer apiContainer, final UUID proxyId) {
+ SenderTasksManager.apiContainer = apiContainer;
+ SenderTasksManager.proxyId = proxyId;
+ // global `~proxy.buffer.fill-rate` metric aggregated from all task size estimators
+ // TODO: create this metric
+ // Metrics.newGauge(
+ // new TaggedMetricName("buffer", "fill-rate"),
+ // new Gauge() {
+ // @Override
+ // public Long value() {
+ // List sizes =
+ // taskSizeEstimators.values().stream()
+ // .map(TaskSizeEstimator::getBytesPerMinute)
+ // .filter(Objects::nonNull)
+ // .collect(Collectors.toList());
+ // return sizes.size() == 0 ? null : sizes.stream().mapToLong(x -> x).sum();
+ // }
+ // });
+ }
+
+ public static void createSenderTasks(@Nonnull QueueInfo queue, Buffer buffer) {
+ ReportableEntityType entityType = queue.getEntityType();
+ String tenantName = queue.getTenant();
+
+ String name = "submitter-" + buffer.getName() + "-" + tenantName + "-" + queue.getName();
+
+ int numThreads = entityPropertiesFactoryMap.get(tenantName).get(entityType).getFlushThreads();
+ int interval =
+ entityPropertiesFactoryMap.get(tenantName).get(entityType).getPushFlushInterval();
+ ScheduledExecutorService scheduler =
+ executors.computeIfAbsent(
+ name,
+ x ->
+ Executors.newScheduledThreadPool(
+ numThreads, new PriorityNamedThreadFactory(name, buffer.getPriority())));
+
+ QueueStats queueStats = QueueStats.get(queue.getName());
+
+ for (int i = 0; i < numThreads; i++) {
+ SenderTask sender = generateSenderTask(queue, i, buffer, queueStats);
+ scheduler.scheduleAtFixedRate(sender, interval, interval, TimeUnit.MILLISECONDS);
+ }
+ }
+
+ public static void shutdown() {
+ // TODO: stop the executor and flush all points to disk
+ executors.forEach(
+ (s, scheduler) -> {
+ try {
+ System.out.println("Stopping '" + s + "' threads");
+ scheduler.shutdown();
+ scheduler.awaitTermination(1, TimeUnit.MINUTES);
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+ });
+ executors.clear();
+ }
+
+ private static SenderTask generateSenderTask(
+ QueueInfo queue, int idx, Buffer buffer, QueueStats queueStats) {
+ String tenantName = queue.getTenant();
+ ReportableEntityType entityType = queue.getEntityType();
+ ProxyV2API proxyV2API = apiContainer.getProxyV2APIForTenant(tenantName);
+ EntityProperties properties = entityPropertiesFactoryMap.get(tenantName).get(entityType);
+ SenderTask senderTask;
+ switch (entityType) {
+ case POINT:
+ case DELTA_COUNTER:
+ senderTask =
+ new LineDelimitedSenderTask(
+ queue,
+ idx,
+ PUSH_FORMAT_WAVEFRONT,
+ proxyV2API,
+ proxyId,
+ properties,
+ buffer,
+ queueStats);
+ break;
+ case HISTOGRAM:
+ senderTask =
+ new LineDelimitedSenderTask(
+ queue,
+ idx,
+ PUSH_FORMAT_HISTOGRAM,
+ proxyV2API,
+ proxyId,
+ properties,
+ buffer,
+ queueStats);
+ break;
+ case SOURCE_TAG:
+ // In MONIT-25479, SOURCE_TAG does not support tag-based multicasting. We still
+ // generate a task per tenant in case another multicasting mechanism is in use.
+ senderTask =
+ new SourceTagSenderTask(
+ queue,
+ idx,
+ apiContainer.getSourceTagAPIForTenant(tenantName),
+ properties,
+ buffer,
+ queueStats);
+ break;
+ case TRACE:
+ senderTask =
+ new LineDelimitedSenderTask(
+ queue,
+ idx,
+ PUSH_FORMAT_TRACING,
+ proxyV2API,
+ proxyId,
+ properties,
+ buffer,
+ queueStats);
+ break;
+ case TRACE_SPAN_LOGS:
+ // In MONIT-25479, TRACE_SPAN_LOGS does not support tag-based multicasting. We still
+ // generate a task per tenant in case another multicasting mechanism is in use.
+ senderTask =
+ new LineDelimitedSenderTask(
+ queue,
+ idx,
+ PUSH_FORMAT_TRACING_SPAN_LOGS,
+ proxyV2API,
+ proxyId,
+ properties,
+ buffer,
+ queueStats);
+ break;
+ case EVENT:
+ senderTask =
+ new EventSenderTask(
+ queue,
+ idx,
+ apiContainer.getEventAPIForTenant(tenantName),
+ proxyId,
+ properties,
+ buffer,
+ queueStats);
+ break;
+ case LOGS:
+ senderTask =
+ new LogSenderTask(
+ queue,
+ idx,
+ apiContainer.getLogAPI(),
+ proxyId,
+ entityPropertiesFactoryMap.get(tenantName).get(entityType),
+ buffer,
+ queueStats);
+ break;
+ default:
+ throw new IllegalArgumentException(
+ "Unexpected entity type " + queue.getEntityType().name());
+ }
+ return senderTask;
+ }
+
+ private static class PriorityNamedThreadFactory implements ThreadFactory {
+ private final String threadNamePrefix;
+ private final AtomicInteger counter = new AtomicInteger();
+ private final int priority;
+
+ public PriorityNamedThreadFactory(@NotNull String threadNamePrefix, int priority) {
+ this.threadNamePrefix = threadNamePrefix;
+ this.priority = priority;
+ }
+
+ public Thread newThread(@NotNull Runnable r) {
+ Thread toReturn = new Thread(r);
+ toReturn.setName(this.threadNamePrefix + "-" + this.counter.getAndIncrement());
+ toReturn.setPriority(priority);
+ return toReturn;
+ }
+ }
+}
diff --git a/proxy/src/main/java/com/wavefront/agent/core/senders/SourceTagSenderTask.java b/proxy/src/main/java/com/wavefront/agent/core/senders/SourceTagSenderTask.java
new file mode 100644
index 000000000..4ec26d38f
--- /dev/null
+++ b/proxy/src/main/java/com/wavefront/agent/core/senders/SourceTagSenderTask.java
@@ -0,0 +1,120 @@
+package com.wavefront.agent.core.senders;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.wavefront.agent.core.buffers.Buffer;
+import com.wavefront.agent.core.buffers.BuffersManager;
+import com.wavefront.agent.core.queues.QueueInfo;
+import com.wavefront.agent.core.queues.QueueStats;
+import com.wavefront.agent.data.EntityProperties;
+import com.wavefront.api.SourceTagAPI;
+import com.wavefront.dto.SourceTag;
+import java.util.Iterator;
+import java.util.List;
+import javax.ws.rs.core.Response;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class SourceTagSenderTask extends SenderTask {
+ private static final Logger log =
+ LoggerFactory.getLogger(SourceTagSenderTask.class.getCanonicalName());
+
+ private final QueueInfo queue;
+ private final SourceTagAPI proxyAPI;
+
+ SourceTagSenderTask(
+ QueueInfo queue,
+ int idx,
+ SourceTagAPI proxyAPI,
+ EntityProperties properties,
+ Buffer buffer,
+ QueueStats queueStats) {
+ super(queue, idx, properties, buffer, queueStats);
+ this.queue = queue;
+ this.proxyAPI = proxyAPI;
+ }
+
+ @Override
+ protected Response submit(List batch) {
+
+ ObjectMapper objectMapper = new ObjectMapper();
+
+ Iterator iterator = batch.iterator();
+ while (iterator.hasNext()) {
+ String sourceTagStr = iterator.next();
+ try {
+ SourceTag sourceTag = objectMapper.readValue(sourceTagStr, SourceTag.class);
+ Response res = doExecute(sourceTag);
+ if ((res.getStatus() / 100) != 2) {
+ // on a communication problem, send this point and all remaining ones back to the buffer
+ BuffersManager.sendMsg(queue, sourceTagStr);
+ iterator.forEachRemaining(s -> BuffersManager.sendMsg(queue, s));
+ }
+ } catch (JsonProcessingException e) {
+ log.error("Error parsing a SourceTag point. " + e);
+ }
+ }
+ return Response.ok().build();
+ }
+
+ private Response doExecute(SourceTag sourceTag) {
+ switch (sourceTag.getOperation()) {
+ case SOURCE_DESCRIPTION:
+ switch (sourceTag.getAction()) {
+ case DELETE:
+ Response resp = proxyAPI.removeDescription(sourceTag.getSource());
+ if (resp.getStatus() == 404) {
+ log.info(
+ "Attempting to delete description for "
+ + "a non-existent source "
+ + sourceTag.getSource()
+ + ", ignoring");
+ return Response.ok().build();
+ }
+ return resp;
+ case SAVE:
+ case ADD:
+ return proxyAPI.setDescription(
+ sourceTag.getSource(), sourceTag.getAnnotations().get(0));
+ default:
+ throw new IllegalArgumentException("Invalid acton: " + sourceTag.getAction());
+ }
+ case SOURCE_TAG:
+ switch (sourceTag.getAction()) {
+ case ADD:
+ String addTag = sourceTag.getAnnotations().get(0);
+ Response re = proxyAPI.appendTag(sourceTag.getSource(), addTag);
+ if (re.getStatus() == 404) {
+ log.info(
+ "Failed to add tag "
+ + addTag
+ + " for source "
+ + sourceTag.getSource()
+ + ", ignoring");
+ return Response.ok().build();
+ }
+ return re;
+ case DELETE:
+ String tag = sourceTag.getAnnotations().get(0);
+ Response resp = proxyAPI.removeTag(sourceTag.getSource(), tag);
+ if (resp.getStatus() == 404) {
+ log.info(
+ "Attempting to delete non-existing tag "
+ + tag
+ + " for source "
+ + sourceTag.getSource()
+ + ", ignoring");
+ return Response.ok().build();
+ }
+ return resp;
+ case SAVE:
+ return proxyAPI.setTags(sourceTag.getSource(), sourceTag.getAnnotations());
+ default:
+ throw new IllegalArgumentException("Invalid acton: " + sourceTag.getAction());
+ }
+ default:
+ throw new IllegalArgumentException(
+ "Invalid source tag operation: " + sourceTag.getOperation());
+ }
+ }
+}
diff --git a/proxy/src/main/java/com/wavefront/agent/data/AbstractDataSubmissionTask.java b/proxy/src/main/java/com/wavefront/agent/data/AbstractDataSubmissionTask.java
deleted file mode 100644
index be644beaf..000000000
--- a/proxy/src/main/java/com/wavefront/agent/data/AbstractDataSubmissionTask.java
+++ /dev/null
@@ -1,294 +0,0 @@
-package com.wavefront.agent.data;
-
-import static com.wavefront.common.Utils.isWavefrontResponse;
-import static java.lang.Boolean.TRUE;
-
-import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
-import com.fasterxml.jackson.annotation.JsonInclude;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.google.common.base.MoreObjects;
-import com.google.common.base.Throwables;
-import com.wavefront.agent.queueing.TaskQueue;
-import com.wavefront.common.TaggedMetricName;
-import com.wavefront.common.logger.MessageDedupingLogger;
-import com.wavefront.data.ReportableEntityType;
-import com.yammer.metrics.Metrics;
-import com.yammer.metrics.core.Histogram;
-import com.yammer.metrics.core.MetricName;
-import com.yammer.metrics.core.TimerContext;
-import java.io.IOException;
-import java.net.ConnectException;
-import java.net.SocketTimeoutException;
-import java.net.UnknownHostException;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-import java.util.function.Supplier;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-import javax.annotation.Nullable;
-import javax.net.ssl.SSLHandshakeException;
-import javax.ws.rs.ProcessingException;
-import javax.ws.rs.core.Response;
-
-/**
- * A base class for data submission tasks.
- *
- * @param task type
- * @author vasily@wavefront.com.
- */
-@JsonInclude(JsonInclude.Include.NON_NULL)
-@JsonIgnoreProperties(ignoreUnknown = true)
-abstract class AbstractDataSubmissionTask>
- implements DataSubmissionTask {
- private static final int MAX_RETRIES = 15;
- private static final Logger log =
- new MessageDedupingLogger(
- Logger.getLogger(AbstractDataSubmissionTask.class.getCanonicalName()), 1000, 1);
-
- @JsonProperty protected long enqueuedTimeMillis = Long.MAX_VALUE;
- @JsonProperty protected int attempts = 0;
- @JsonProperty protected int serverErrors = 0;
- @JsonProperty protected String handle;
- @JsonProperty protected ReportableEntityType entityType;
- @JsonProperty protected Boolean limitRetries = null;
-
- protected transient Histogram timeSpentInQueue;
- protected transient Supplier timeProvider;
- protected transient EntityProperties properties;
- protected transient TaskQueue backlog;
-
- AbstractDataSubmissionTask() {}
-
- /**
- * @param properties entity-specific wrapper for runtime properties.
- * @param backlog backing queue.
- * @param handle port/handle
- * @param entityType entity type
- * @param timeProvider time provider (in millis)
- */
- AbstractDataSubmissionTask(
- EntityProperties properties,
- TaskQueue backlog,
- String handle,
- ReportableEntityType entityType,
- @Nullable Supplier timeProvider) {
- this.properties = properties;
- this.backlog = backlog;
- this.handle = handle;
- this.entityType = entityType;
- this.timeProvider = MoreObjects.firstNonNull(timeProvider, System::currentTimeMillis);
- }
-
- @Override
- public long getEnqueuedMillis() {
- return enqueuedTimeMillis;
- }
-
- @Override
- public ReportableEntityType getEntityType() {
- return entityType;
- }
-
- abstract Response doExecute() throws DataSubmissionException;
-
- public TaskResult execute() {
- if (enqueuedTimeMillis < Long.MAX_VALUE) {
- if (timeSpentInQueue == null) {
- timeSpentInQueue =
- Metrics.newHistogram(
- new TaggedMetricName(
- "buffer", "queue-time", "port", handle, "content", entityType.toString()));
- }
- timeSpentInQueue.update(timeProvider.get() - enqueuedTimeMillis);
- }
- attempts += 1;
- TimerContext timer =
- Metrics.newTimer(
- new MetricName("push." + handle, "", "duration"),
- TimeUnit.MILLISECONDS,
- TimeUnit.MINUTES)
- .time();
- try (Response response = doExecute()) {
- Metrics.newCounter(
- new TaggedMetricName("push", handle + ".http." + response.getStatus() + ".count"))
- .inc();
- if (response.getStatus() >= 200 && response.getStatus() < 300) {
- Metrics.newCounter(new MetricName(entityType + "." + handle, "", "delivered"))
- .inc(this.weight());
- return TaskResult.DELIVERED;
- }
- switch (response.getStatus()) {
- case 406:
- case 429:
- return handleStatus429();
- case 401:
- case 403:
- log.warning(
- "["
- + handle
- + "] HTTP "
- + response.getStatus()
- + ": "
- + "Please verify that \""
- + entityType
- + "\" is enabled for your account!");
- return checkStatusAndQueue(QueueingReason.AUTH, false);
- case 407:
- case 408:
- if (isWavefrontResponse(response)) {
- log.warning(
- "["
- + handle
- + "] HTTP "
- + response.getStatus()
- + " (Unregistered proxy) "
- + "received while sending data to Wavefront - please verify that your token is "
- + "valid and has Proxy Management permissions!");
- } else {
- log.warning(
- "["
- + handle
- + "] HTTP "
- + response.getStatus()
- + " "
- + "received while sending data to Wavefront - please verify your network/HTTP proxy"
- + " settings!");
- }
- return checkStatusAndQueue(QueueingReason.RETRY, false);
- case 413:
- splitTask(1, properties.getDataPerBatch())
- .forEach(
- x ->
- x.enqueue(
- enqueuedTimeMillis == Long.MAX_VALUE ? QueueingReason.SPLIT : null));
- return TaskResult.PERSISTED_RETRY;
- default:
- serverErrors += 1;
- if (serverErrors > MAX_RETRIES && TRUE.equals(limitRetries)) {
- log.info(
- "["
- + handle
- + "] HTTP "
- + response.getStatus()
- + " received while sending "
- + "data to Wavefront, max retries reached");
- return TaskResult.DELIVERED;
- } else {
- log.info(
- "["
- + handle
- + "] HTTP "
- + response.getStatus()
- + " received while sending "
- + "data to Wavefront, retrying");
- return checkStatusAndQueue(QueueingReason.RETRY, true);
- }
- }
- } catch (DataSubmissionException ex) {
- if (ex instanceof IgnoreStatusCodeException) {
- Metrics.newCounter(new TaggedMetricName("push", handle + ".http.404.count")).inc();
- Metrics.newCounter(new MetricName(entityType + "." + handle, "", "delivered"))
- .inc(this.weight());
- return TaskResult.DELIVERED;
- }
- throw new RuntimeException("Unhandled DataSubmissionException", ex);
- } catch (ProcessingException ex) {
- Throwable rootCause = Throwables.getRootCause(ex);
- if (rootCause instanceof UnknownHostException) {
- log.warning(
- "["
- + handle
- + "] Error sending data to Wavefront: Unknown host "
- + rootCause.getMessage()
- + ", please check your network!");
- } else if (rootCause instanceof ConnectException
- || rootCause instanceof SocketTimeoutException) {
- log.warning(
- "["
- + handle
- + "] Error sending data to Wavefront: "
- + rootCause.getMessage()
- + ", please verify your network/HTTP proxy settings!");
- } else if (ex.getCause() instanceof SSLHandshakeException) {
- log.warning(
- "["
- + handle
- + "] Error sending data to Wavefront: "
- + ex.getCause()
- + ", please verify that your environment has up-to-date root certificates!");
- } else {
- log.warning("[" + handle + "] Error sending data to Wavefront: " + rootCause);
- }
- if (log.isLoggable(Level.FINE)) {
- log.log(Level.FINE, "Full stacktrace: ", ex);
- }
- return checkStatusAndQueue(QueueingReason.RETRY, false);
- } catch (Exception ex) {
- log.warning(
- "[" + handle + "] Error sending data to Wavefront: " + Throwables.getRootCause(ex));
- if (log.isLoggable(Level.FINE)) {
- log.log(Level.FINE, "Full stacktrace: ", ex);
- }
- return checkStatusAndQueue(QueueingReason.RETRY, true);
- } finally {
- timer.stop();
- }
- }
-
- @SuppressWarnings("unchecked")
- @Override
- public void enqueue(@Nullable QueueingReason reason) {
- enqueuedTimeMillis = timeProvider.get();
- try {
- backlog.add((T) this);
- if (reason != null) {
- Metrics.newCounter(
- new TaggedMetricName(
- entityType + "." + handle, "queued", "reason", reason.toString()))
- .inc(this.weight());
- }
- } catch (IOException e) {
- Metrics.newCounter(new TaggedMetricName("buffer", "failures", "port", handle)).inc();
- log.severe(
- "["
- + handle
- + "] CRITICAL (Losing data): WF-1: Error adding task to the queue: "
- + e.getMessage());
- }
- }
-
- private TaskResult checkStatusAndQueue(QueueingReason reason, boolean requeue) {
- if (reason == QueueingReason.AUTH) return TaskResult.REMOVED;
- if (enqueuedTimeMillis == Long.MAX_VALUE) {
- if (properties.getTaskQueueLevel().isLessThan(TaskQueueLevel.ANY_ERROR)) {
- return TaskResult.RETRY_LATER;
- }
- enqueue(reason);
- return TaskResult.PERSISTED;
- }
- if (requeue) {
- enqueue(null);
- return TaskResult.PERSISTED_RETRY;
- } else {
- return TaskResult.RETRY_LATER;
- }
- }
-
- protected TaskResult handleStatus429() {
- if (enqueuedTimeMillis == Long.MAX_VALUE) {
- if (properties.getTaskQueueLevel().isLessThan(TaskQueueLevel.PUSHBACK)) {
- return TaskResult.RETRY_LATER;
- }
- enqueue(QueueingReason.PUSHBACK);
- return TaskResult.PERSISTED;
- }
- if (properties.isSplitPushWhenRateLimited()) {
- List splitTasks =
- splitTask(properties.getMinBatchSplitSize(), properties.getDataPerBatch());
- if (splitTasks.size() == 1) return TaskResult.RETRY_LATER;
- splitTasks.forEach(x -> x.enqueue(null));
- return TaskResult.PERSISTED;
- }
- return TaskResult.RETRY_LATER;
- }
-}
diff --git a/proxy/src/main/java/com/wavefront/agent/data/DataSubmissionException.java b/proxy/src/main/java/com/wavefront/agent/data/DataSubmissionException.java
deleted file mode 100644
index 5fa2f3e38..000000000
--- a/proxy/src/main/java/com/wavefront/agent/data/DataSubmissionException.java
+++ /dev/null
@@ -1,12 +0,0 @@
-package com.wavefront.agent.data;
-
-/**
- * Exception to bypass standard handling for response status codes.
- *
- * @author vasily@wavefront.com
- */
-public abstract class DataSubmissionException extends Exception {
- public DataSubmissionException(String message) {
- super(message);
- }
-}
diff --git a/proxy/src/main/java/com/wavefront/agent/data/DataSubmissionTask.java b/proxy/src/main/java/com/wavefront/agent/data/DataSubmissionTask.java
deleted file mode 100644
index 9e4a0a1a4..000000000
--- a/proxy/src/main/java/com/wavefront/agent/data/DataSubmissionTask.java
+++ /dev/null
@@ -1,61 +0,0 @@
-package com.wavefront.agent.data;
-
-import com.fasterxml.jackson.annotation.JsonTypeInfo;
-import com.wavefront.data.ReportableEntityType;
-import java.io.Serializable;
-import java.util.List;
-import javax.annotation.Nullable;
-
-/**
- * A serializable data submission task.
- *
- * @param task type
- * @author vasily@wavefront.com
- */
-@JsonTypeInfo(use = JsonTypeInfo.Id.CLASS, property = "__CLASS")
-public interface DataSubmissionTask> extends Serializable {
-
- /**
- * Returns a task weight.
- *
- * @return task weight
- */
- int weight();
-
- /**
- * Returns task enqueue time in milliseconds.
- *
- * @return enqueue time in milliseconds
- */
- long getEnqueuedMillis();
-
- /**
- * Execute this task
- *
- * @return operation result
- */
- TaskResult execute();
-
- /**
- * Persist task in the queue
- *
- * @param reason reason for queueing. used to increment metrics, if specified.
- */
- void enqueue(@Nullable QueueingReason reason);
-
- /**
- * Returns entity type handled.
- *
- * @return entity type
- */
- ReportableEntityType getEntityType();
-
- /**
- * Split the task into smaller tasks.
- *
- * @param minSplitSize Don't split the task if its weight is smaller than this number.
- * @param maxSplitSize Split tasks size cap.
- * @return tasks
- */
- List splitTask(int minSplitSize, int maxSplitSize);
-}
diff --git a/proxy/src/main/java/com/wavefront/agent/data/EntityProperties.java b/proxy/src/main/java/com/wavefront/agent/data/EntityProperties.java
index 6d60f81cc..9a072ef33 100644
--- a/proxy/src/main/java/com/wavefront/agent/data/EntityProperties.java
+++ b/proxy/src/main/java/com/wavefront/agent/data/EntityProperties.java
@@ -3,11 +3,7 @@
import com.google.common.util.concurrent.RecyclableRateLimiter;
import javax.annotation.Nullable;
-/**
- * Unified interface for dynamic entity-specific dynamic properties, that may change at runtime
- *
- * @author vasily@wavefront.com
- */
+/** Unified interface for dynamic entity-specific dynamic properties, that may change at runtime */
public interface EntityProperties {
// what we consider "unlimited"
int NO_RATE_LIMIT = 10_000_000;
@@ -40,13 +36,6 @@ public interface EntityProperties {
*/
int getDataPerBatchOriginal();
- /**
- * Whether we should split batches into smaller ones after getting HTTP 406 response from server.
- *
- * @return true if we should split on pushback
- */
- boolean isSplitPushWhenRateLimited();
-
/**
* Get initially configured rate limit (per second).
*
@@ -66,7 +55,7 @@ public interface EntityProperties {
*
* @return rate limiter
*/
- RecyclableRateLimiter getRateLimiter();
+ EntityRateLimiter getRateLimiter();
/**
* Get the number of worker threads.
@@ -96,32 +85,6 @@ public interface EntityProperties {
*/
void setDataPerBatch(@Nullable Integer dataPerBatch);
- /**
- * Do not split the batch if its size is less than this value. Only applicable when {@link
- * #isSplitPushWhenRateLimited()} is true.
- *
- * @return smallest allowed batch size
- */
- int getMinBatchSplitSize();
-
- /**
- * Max number of items that can stay in memory buffers before spooling to disk. Defaults to 16 *
- * {@link #getDataPerBatch()}, minimum size: {@link #getDataPerBatch()}. Setting this value lower
- * than default reduces memory usage, but will force the proxy to spool to disk more frequently if
- * you have points arriving at the proxy in short bursts, and/or your network latency is on the
- * higher side.
- *
- * @return memory buffer limit
- */
- int getMemoryBufferLimit();
-
- /**
- * Get current queueing behavior - defines conditions that trigger queueing.
- *
- * @return queueing behavior level
- */
- TaskQueueLevel getTaskQueueLevel();
-
/**
* Checks whether data flow for this entity type is disabled.
*
@@ -135,24 +98,4 @@ public interface EntityProperties {
* @param featureDisabled if "true", data flow for this entity type is disabled.
*/
void setFeatureDisabled(boolean featureDisabled);
-
- /**
- * Get aggregated backlog size across all ports for this entity type.
- *
- * @return backlog size
- */
- int getTotalBacklogSize();
-
- /** Updates backlog size for specific port. */
- void reportBacklogSize(String handle, int backlogSize);
-
- /**
- * Get aggregated received rate across all ports for this entity type.
- *
- * @return received rate
- */
- long getTotalReceivedRate();
-
- /** Updates received rate for specific port. */
- void reportReceivedRate(String handle, long receivedRate);
}
diff --git a/proxy/src/main/java/com/wavefront/agent/data/EntityPropertiesFactory.java b/proxy/src/main/java/com/wavefront/agent/data/EntityPropertiesFactory.java
index 2597000e7..8d052852c 100644
--- a/proxy/src/main/java/com/wavefront/agent/data/EntityPropertiesFactory.java
+++ b/proxy/src/main/java/com/wavefront/agent/data/EntityPropertiesFactory.java
@@ -2,11 +2,7 @@
import com.wavefront.data.ReportableEntityType;
-/**
- * Generates entity-specific wrappers for dynamic proxy settings.
- *
- * @author vasily@wavefront.com
- */
+/** Generates entity-specific wrappers for dynamic proxy settings. */
public interface EntityPropertiesFactory {
/**
diff --git a/proxy/src/main/java/com/wavefront/agent/data/EntityPropertiesFactoryImpl.java b/proxy/src/main/java/com/wavefront/agent/data/EntityPropertiesFactoryImpl.java
index 245504842..7d72845c1 100644
--- a/proxy/src/main/java/com/wavefront/agent/data/EntityPropertiesFactoryImpl.java
+++ b/proxy/src/main/java/com/wavefront/agent/data/EntityPropertiesFactoryImpl.java
@@ -6,9 +6,6 @@
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.LoadingCache;
import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.RecyclableRateLimiter;
-import com.google.common.util.concurrent.RecyclableRateLimiterImpl;
-import com.google.common.util.concurrent.RecyclableRateLimiterWithMetrics;
import com.wavefront.agent.ProxyConfig;
import com.wavefront.data.ReportableEntityType;
import java.util.Map;
@@ -17,17 +14,15 @@
import java.util.concurrent.atomic.AtomicLong;
import javax.annotation.Nullable;
-/**
- * Generates entity-specific wrappers for dynamic proxy settings.
- *
- * @author vasily@wavefront.com
- */
+/** Generates entity-specific wrappers for dynamic proxy settings. */
public class EntityPropertiesFactoryImpl implements EntityPropertiesFactory {
private final Map wrappers;
private final GlobalProperties global;
- /** @param proxyConfig proxy settings container */
+ /**
+ * @param proxyConfig proxy settings container
+ */
public EntityPropertiesFactoryImpl(ProxyConfig proxyConfig) {
global = new GlobalPropertiesImpl(proxyConfig);
EntityProperties pointProperties = new PointsProperties(proxyConfig);
@@ -56,24 +51,25 @@ public GlobalProperties getGlobalProperties() {
/** Common base for all wrappers (to avoid code duplication) */
private abstract static class AbstractEntityProperties implements EntityProperties {
- private Integer dataPerBatch = null;
protected final ProxyConfig wrapped;
- private final RecyclableRateLimiter rateLimiter;
+ private final EntityRateLimiter rateLimiter;
private final LoadingCache backlogSizeCache =
Caffeine.newBuilder()
.expireAfterAccess(10, TimeUnit.SECONDS)
.build(x -> new AtomicInteger());
private final LoadingCache receivedRateCache =
Caffeine.newBuilder().expireAfterAccess(10, TimeUnit.SECONDS).build(x -> new AtomicLong());
+ private Integer dataPerBatch = null;
public AbstractEntityProperties(ProxyConfig wrapped) {
this.wrapped = wrapped;
- this.rateLimiter =
- getRateLimit() > 0
- ? new RecyclableRateLimiterWithMetrics(
- RecyclableRateLimiterImpl.create(getRateLimit(), getRateLimitMaxBurstSeconds()),
- getRateLimiterName())
- : null;
+ // this.rateLimiter = new RecyclableRateLimiterWithMetrics(
+ // RecyclableRateLimiterImpl.create(getRateLimit(),
+ // getRateLimitMaxBurstSeconds()),
+ // getRateLimiterName());
+ rateLimiter =
+ new EntityRateLimiter(
+ getRateLimit(), getRateLimitMaxBurstSeconds(), getRateLimiterName());
reportSettingAsGauge(this::getPushFlushInterval, "dynamic.pushFlushInterval");
}
@@ -88,18 +84,13 @@ public void setDataPerBatch(@Nullable Integer dataPerBatch) {
this.dataPerBatch = dataPerBatch;
}
- @Override
- public boolean isSplitPushWhenRateLimited() {
- return wrapped.isSplitPushWhenRateLimited();
- }
-
@Override
public int getRateLimitMaxBurstSeconds() {
return wrapped.getPushRateLimitMaxBurstSeconds();
}
@Override
- public RecyclableRateLimiter getRateLimiter() {
+ public EntityRateLimiter getRateLimiter() {
return rateLimiter;
}
@@ -114,41 +105,6 @@ public int getFlushThreads() {
public int getPushFlushInterval() {
return wrapped.getPushFlushInterval();
}
-
- @Override
- public int getMinBatchSplitSize() {
- return DEFAULT_MIN_SPLIT_BATCH_SIZE;
- }
-
- @Override
- public int getMemoryBufferLimit() {
- return wrapped.getPushMemoryBufferLimit();
- }
-
- @Override
- public TaskQueueLevel getTaskQueueLevel() {
- return wrapped.getTaskQueueLevel();
- }
-
- @Override
- public int getTotalBacklogSize() {
- return backlogSizeCache.asMap().values().stream().mapToInt(AtomicInteger::get).sum();
- }
-
- @Override
- public void reportBacklogSize(String handle, int backlogSize) {
- backlogSizeCache.get(handle).set(backlogSize);
- }
-
- @Override
- public long getTotalReceivedRate() {
- return receivedRateCache.asMap().values().stream().mapToLong(AtomicLong::get).sum();
- }
-
- @Override
- public void reportReceivedRate(String handle, long receivedRate) {
- receivedRateCache.get(handle).set(receivedRate);
- }
}
/** Base class for entity types that do not require separate subscriptions. */
@@ -195,7 +151,6 @@ private static final class PointsProperties extends CoreEntityProperties {
public PointsProperties(ProxyConfig wrapped) {
super(wrapped);
reportSettingAsGauge(this::getDataPerBatch, "dynamic.pushFlushMaxPoints");
- reportSettingAsGauge(this::getMemoryBufferLimit, "dynamic.pushMemoryBufferLimit");
}
@Override
@@ -219,7 +174,6 @@ private static final class HistogramsProperties extends SubscriptionBasedEntityP
public HistogramsProperties(ProxyConfig wrapped) {
super(wrapped);
reportSettingAsGauge(this::getDataPerBatch, "dynamic.pushFlushMaxHistograms");
- reportSettingAsGauge(this::getMemoryBufferLimit, "dynamic.pushMemoryBufferLimit");
}
@Override
@@ -243,7 +197,6 @@ private static final class SourceTagsProperties extends CoreEntityProperties {
public SourceTagsProperties(ProxyConfig wrapped) {
super(wrapped);
reportSettingAsGauge(this::getDataPerBatch, "dynamic.pushFlushMaxSourceTags");
- reportSettingAsGauge(this::getMemoryBufferLimit, "dynamic.pushMemoryBufferLimitSourceTags");
}
@Override
@@ -261,11 +214,6 @@ public double getRateLimit() {
return wrapped.getPushRateLimitSourceTags();
}
- @Override
- public int getMemoryBufferLimit() {
- return 16 * wrapped.getPushFlushMaxSourceTags();
- }
-
@Override
public int getFlushThreads() {
return wrapped.getFlushThreadsSourceTags();
@@ -277,7 +225,6 @@ private static final class SpansProperties extends SubscriptionBasedEntityProper
public SpansProperties(ProxyConfig wrapped) {
super(wrapped);
reportSettingAsGauge(this::getDataPerBatch, "dynamic.pushFlushMaxSpans");
- reportSettingAsGauge(this::getMemoryBufferLimit, "dynamic.pushMemoryBufferLimit");
}
@Override
@@ -301,7 +248,6 @@ private static final class SpanLogsProperties extends SubscriptionBasedEntityPro
public SpanLogsProperties(ProxyConfig wrapped) {
super(wrapped);
reportSettingAsGauge(this::getDataPerBatch, "dynamic.pushFlushMaxSpanLogs");
- reportSettingAsGauge(this::getMemoryBufferLimit, "dynamic.pushMemoryBufferLimit");
}
@Override
@@ -325,7 +271,6 @@ private static final class EventsProperties extends CoreEntityProperties {
public EventsProperties(ProxyConfig wrapped) {
super(wrapped);
reportSettingAsGauge(this::getDataPerBatch, "dynamic.pushFlushMaxEvents");
- reportSettingAsGauge(this::getMemoryBufferLimit, "dynamic.pushMemoryBufferLimitEvents");
}
@Override
@@ -343,11 +288,6 @@ public double getRateLimit() {
return wrapped.getPushRateLimitEvents();
}
- @Override
- public int getMemoryBufferLimit() {
- return 16 * wrapped.getPushFlushMaxEvents();
- }
-
@Override
public int getFlushThreads() {
return wrapped.getFlushThreadsEvents();
@@ -359,7 +299,6 @@ private static final class LogsProperties extends SubscriptionBasedEntityPropert
public LogsProperties(ProxyConfig wrapped) {
super(wrapped);
reportSettingAsGauge(this::getDataPerBatch, "dynamic.pushFlushMaxLogs");
- reportSettingAsGauge(this::getMemoryBufferLimit, "dynamic.pushMemoryBufferLimitLogs");
}
@Override
@@ -372,11 +311,6 @@ public int getDataPerBatchOriginal() {
return wrapped.getPushFlushMaxLogs();
}
- @Override
- public int getMemoryBufferLimit() {
- return wrapped.getPushMemoryBufferLimitLogs();
- }
-
@Override
public double getRateLimit() {
return wrapped.getPushRateLimitLogs();
diff --git a/proxy/src/main/java/com/wavefront/agent/data/EntityRateLimiter.java b/proxy/src/main/java/com/wavefront/agent/data/EntityRateLimiter.java
new file mode 100644
index 000000000..49246fab1
--- /dev/null
+++ b/proxy/src/main/java/com/wavefront/agent/data/EntityRateLimiter.java
@@ -0,0 +1,61 @@
+package com.wavefront.agent.data;
+
+import static java.util.concurrent.TimeUnit.MINUTES;
+
+import com.google.common.util.concurrent.RecyclableRateLimiterImpl;
+import com.google.common.util.concurrent.RecyclableRateLimiterWithMetrics;
+import java.util.concurrent.atomic.AtomicBoolean;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/** Rate limiter for a single entity type, supporting a temporary one-minute pause. */
+public class EntityRateLimiter {
+  // Loggers are stateless and thread-safe; share a single per-class instance.
+  private static final Logger log = LoggerFactory.getLogger(EntityRateLimiter.class);
+
+  private final RecyclableRateLimiterWithMetrics pointsLimit;
+  private final AtomicBoolean paused = new AtomicBoolean(false);
+
+  /** Creates an effectively unlimited rate limiter. */
+  public EntityRateLimiter() {
+    this(Double.MAX_VALUE, Integer.MAX_VALUE, "unlimited");
+  }
+
+  public EntityRateLimiter(double rateLimit, int rateLimitMaxBurstSeconds, String prefix) {
+    pointsLimit =
+        new RecyclableRateLimiterWithMetrics(
+            RecyclableRateLimiterImpl.create(rateLimit, rateLimitMaxBurstSeconds), prefix);
+  }
+
+  /**
+   * Pauses this limiter for one minute; {@link #tryAcquire(int)} returns false while paused.
+   * Blocks the calling thread for the duration of the pause. No-op if already paused.
+   */
+  public void pause() {
+    // compareAndSet makes the check-then-pause atomic: without it, two concurrent
+    // callers could both observe "not paused" and both block here for a minute.
+    if (paused.compareAndSet(false, true)) {
+      try {
+        Thread.sleep(MINUTES.toMillis(1));
+      } catch (InterruptedException e) {
+        Thread.currentThread().interrupt(); // preserve interrupt status for callers
+        log.error("error", e);
+      } finally {
+        paused.set(false); // always unpause, whether the sleep completed or was interrupted
+      }
+    }
+  }
+
+  public void setRate(double rate) {
+    pointsLimit.setRate(rate);
+  }
+
+  public double getRate() {
+    return pointsLimit.getRate();
+  }
+
+  /** Tries to consume {@code points} permits; always fails while the limiter is paused. */
+  public boolean tryAcquire(int points) {
+    return !paused.get() && pointsLimit.tryAcquire(points);
+  }
+}
diff --git a/proxy/src/main/java/com/wavefront/agent/data/EventDataSubmissionTask.java b/proxy/src/main/java/com/wavefront/agent/data/EventDataSubmissionTask.java
deleted file mode 100644
index 5a9c13a32..000000000
--- a/proxy/src/main/java/com/wavefront/agent/data/EventDataSubmissionTask.java
+++ /dev/null
@@ -1,105 +0,0 @@
-package com.wavefront.agent.data;
-
-import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.fasterxml.jackson.annotation.JsonTypeInfo;
-import com.google.common.collect.ImmutableList;
-import com.wavefront.agent.queueing.TaskQueue;
-import com.wavefront.api.EventAPI;
-import com.wavefront.data.ReportableEntityType;
-import com.wavefront.dto.Event;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.UUID;
-import java.util.function.Supplier;
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-import javax.ws.rs.core.Response;
-
-/**
- * A {@link DataSubmissionTask} that handles event payloads.
- *
- * @author vasily@wavefront.com
- */
-@JsonIgnoreProperties(ignoreUnknown = true)
-@JsonTypeInfo(use = JsonTypeInfo.Id.CLASS, property = "__CLASS")
-public class EventDataSubmissionTask extends AbstractDataSubmissionTask {
- private transient EventAPI api;
- private transient UUID proxyId;
-
- @JsonProperty private List events;
-
- @SuppressWarnings("unused")
- EventDataSubmissionTask() {}
-
- /**
- * @param api API endpoint.
- * @param proxyId Proxy identifier. Used to authenticate proxy with the API.
- * @param properties entity-specific wrapper over mutable proxy settings' container.
- * @param backlog task queue.
- * @param handle Handle (usually port number) of the pipeline where the data came from.
- * @param events Data payload.
- * @param timeProvider Time provider (in millis).
- */
- public EventDataSubmissionTask(
- EventAPI api,
- UUID proxyId,
- EntityProperties properties,
- TaskQueue backlog,
- String handle,
- @Nonnull List events,
- @Nullable Supplier timeProvider) {
- super(properties, backlog, handle, ReportableEntityType.EVENT, timeProvider);
- this.api = api;
- this.proxyId = proxyId;
- this.events = new ArrayList<>(events);
- }
-
- @Override
- public Response doExecute() {
- return api.proxyEvents(proxyId, events);
- }
-
- public List splitTask(int minSplitSize, int maxSplitSize) {
- if (events.size() > Math.max(1, minSplitSize)) {
- List result = new ArrayList<>();
- int stride = Math.min(maxSplitSize, (int) Math.ceil((float) events.size() / 2.0));
- int endingIndex = 0;
- for (int startingIndex = 0; endingIndex < events.size() - 1; startingIndex += stride) {
- endingIndex = Math.min(events.size(), startingIndex + stride) - 1;
- result.add(
- new EventDataSubmissionTask(
- api,
- proxyId,
- properties,
- backlog,
- handle,
- events.subList(startingIndex, endingIndex + 1),
- timeProvider));
- }
- return result;
- }
- return ImmutableList.of(this);
- }
-
- public List payload() {
- return events;
- }
-
- @Override
- public int weight() {
- return events.size();
- }
-
- public void injectMembers(
- EventAPI api,
- UUID proxyId,
- EntityProperties properties,
- TaskQueue backlog) {
- this.api = api;
- this.proxyId = proxyId;
- this.properties = properties;
- this.backlog = backlog;
- this.timeProvider = System::currentTimeMillis;
- }
-}
diff --git a/proxy/src/main/java/com/wavefront/agent/data/GlobalProperties.java b/proxy/src/main/java/com/wavefront/agent/data/GlobalProperties.java
index 05ef682b7..bcb09136a 100644
--- a/proxy/src/main/java/com/wavefront/agent/data/GlobalProperties.java
+++ b/proxy/src/main/java/com/wavefront/agent/data/GlobalProperties.java
@@ -4,26 +4,8 @@
import java.util.List;
import javax.annotation.Nullable;
-/**
- * Unified interface for non-entity specific dynamic properties, that may change at runtime.
- *
- * @author vasily@wavefront.com
- */
+/** Unified interface for non-entity specific dynamic properties, that may change at runtime. */
public interface GlobalProperties {
- /**
- * Get base in seconds for retry thread exponential backoff.
- *
- * @return exponential backoff base value
- */
- double getRetryBackoffBaseSeconds();
-
- /**
- * Sets base in seconds for retry thread exponential backoff.
- *
- * @param retryBackoffBaseSeconds new value for exponential backoff base value. if null is
- * provided, reverts to originally configured value.
- */
- void setRetryBackoffBaseSeconds(@Nullable Double retryBackoffBaseSeconds);
/**
* Get histogram storage accuracy, as specified by the back-end.
diff --git a/proxy/src/main/java/com/wavefront/agent/data/GlobalPropertiesImpl.java b/proxy/src/main/java/com/wavefront/agent/data/GlobalPropertiesImpl.java
index 2c2bf8185..f25715481 100644
--- a/proxy/src/main/java/com/wavefront/agent/data/GlobalPropertiesImpl.java
+++ b/proxy/src/main/java/com/wavefront/agent/data/GlobalPropertiesImpl.java
@@ -1,18 +1,11 @@
package com.wavefront.agent.data;
-import static com.wavefront.agent.config.ReportableConfig.reportSettingAsGauge;
-import static org.apache.commons.lang3.ObjectUtils.firstNonNull;
-
import com.wavefront.agent.ProxyConfig;
import com.wavefront.api.agent.SpanSamplingPolicy;
import java.util.List;
import javax.annotation.Nullable;
-/**
- * Dynamic non-entity specific properties, that may change at runtime.
- *
- * @author vasily@wavefront.com
- */
+/** Dynamic non-entity specific properties, that may change at runtime. */
public final class GlobalPropertiesImpl implements GlobalProperties {
private final ProxyConfig wrapped;
private Double retryBackoffBaseSeconds = null;
@@ -23,17 +16,6 @@ public final class GlobalPropertiesImpl implements GlobalProperties {
public GlobalPropertiesImpl(ProxyConfig wrapped) {
this.wrapped = wrapped;
- reportSettingAsGauge(this::getRetryBackoffBaseSeconds, "dynamic.retryBackoffBaseSeconds");
- }
-
- @Override
- public double getRetryBackoffBaseSeconds() {
- return firstNonNull(retryBackoffBaseSeconds, wrapped.getRetryBackoffBaseSeconds());
- }
-
- @Override
- public void setRetryBackoffBaseSeconds(@Nullable Double retryBackoffBaseSeconds) {
- this.retryBackoffBaseSeconds = retryBackoffBaseSeconds;
}
@Override
diff --git a/proxy/src/main/java/com/wavefront/agent/data/IgnoreStatusCodeException.java b/proxy/src/main/java/com/wavefront/agent/data/IgnoreStatusCodeException.java
deleted file mode 100644
index 4661d0596..000000000
--- a/proxy/src/main/java/com/wavefront/agent/data/IgnoreStatusCodeException.java
+++ /dev/null
@@ -1,12 +0,0 @@
-package com.wavefront.agent.data;
-
-/**
- * Exception used to ignore 404s for DELETE API calls for sourceTags.
- *
- * @author vasily@wavefront.com
- */
-public class IgnoreStatusCodeException extends DataSubmissionException {
- public IgnoreStatusCodeException(String message) {
- super(message);
- }
-}
diff --git a/proxy/src/main/java/com/wavefront/agent/data/LineDelimitedDataSubmissionTask.java b/proxy/src/main/java/com/wavefront/agent/data/LineDelimitedDataSubmissionTask.java
deleted file mode 100644
index 58b2a0f5c..000000000
--- a/proxy/src/main/java/com/wavefront/agent/data/LineDelimitedDataSubmissionTask.java
+++ /dev/null
@@ -1,117 +0,0 @@
-package com.wavefront.agent.data;
-
-import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.fasterxml.jackson.annotation.JsonTypeInfo;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.ImmutableList;
-import com.wavefront.agent.handlers.LineDelimitedUtils;
-import com.wavefront.agent.queueing.TaskQueue;
-import com.wavefront.api.ProxyV2API;
-import com.wavefront.data.ReportableEntityType;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.UUID;
-import java.util.function.Supplier;
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-import javax.ws.rs.core.Response;
-
-/**
- * A {@link DataSubmissionTask} that handles plaintext payloads in the newline-delimited format.
- *
- * @author vasily@wavefront.com
- */
-@JsonIgnoreProperties(ignoreUnknown = true)
-@JsonTypeInfo(use = JsonTypeInfo.Id.CLASS, property = "__CLASS")
-public class LineDelimitedDataSubmissionTask
- extends AbstractDataSubmissionTask {
-
- private transient ProxyV2API api;
- private transient UUID proxyId;
-
- @JsonProperty private String format;
- @VisibleForTesting @JsonProperty protected List payload;
-
- @SuppressWarnings("unused")
- LineDelimitedDataSubmissionTask() {}
-
- /**
- * @param api API endpoint
- * @param proxyId Proxy identifier. Used to authenticate proxy with the API.
- * @param properties entity-specific wrapper over mutable proxy settings' container.
- * @param backlog task queue.
- * @param format Data format (passed as an argument to the API)
- * @param entityType Entity type handled
- * @param handle Handle (usually port number) of the pipeline where the data came from.
- * @param payload Data payload
- * @param timeProvider Time provider (in millis)
- */
- public LineDelimitedDataSubmissionTask(
- ProxyV2API api,
- UUID proxyId,
- EntityProperties properties,
- TaskQueue backlog,
- String format,
- ReportableEntityType entityType,
- String handle,
- @Nonnull List payload,
- @Nullable Supplier timeProvider) {
- super(properties, backlog, handle, entityType, timeProvider);
- this.api = api;
- this.proxyId = proxyId;
- this.format = format;
- this.payload = new ArrayList<>(payload);
- }
-
- @Override
- Response doExecute() {
- return api.proxyReport(proxyId, format, LineDelimitedUtils.joinPushData(payload));
- }
-
- @Override
- public int weight() {
- return this.payload.size();
- }
-
- @Override
- public List splitTask(int minSplitSize, int maxSplitSize) {
- if (payload.size() > Math.max(1, minSplitSize)) {
- List result = new ArrayList<>();
- int stride = Math.min(maxSplitSize, (int) Math.ceil((float) payload.size() / 2.0));
- int endingIndex = 0;
- for (int startingIndex = 0; endingIndex < payload.size() - 1; startingIndex += stride) {
- endingIndex = Math.min(payload.size(), startingIndex + stride) - 1;
- result.add(
- new LineDelimitedDataSubmissionTask(
- api,
- proxyId,
- properties,
- backlog,
- format,
- getEntityType(),
- handle,
- payload.subList(startingIndex, endingIndex + 1),
- timeProvider));
- }
- return result;
- }
- return ImmutableList.of(this);
- }
-
- public List payload() {
- return payload;
- }
-
- public void injectMembers(
- ProxyV2API api,
- UUID proxyId,
- EntityProperties properties,
- TaskQueue backlog) {
- this.api = api;
- this.proxyId = proxyId;
- this.properties = properties;
- this.backlog = backlog;
- this.timeProvider = System::currentTimeMillis;
- }
-}
diff --git a/proxy/src/main/java/com/wavefront/agent/data/LogDataSubmissionTask.java b/proxy/src/main/java/com/wavefront/agent/data/LogDataSubmissionTask.java
deleted file mode 100644
index a34092b34..000000000
--- a/proxy/src/main/java/com/wavefront/agent/data/LogDataSubmissionTask.java
+++ /dev/null
@@ -1,127 +0,0 @@
-package com.wavefront.agent.data;
-
-import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.fasterxml.jackson.annotation.JsonTypeInfo;
-import com.google.common.collect.ImmutableList;
-import com.google.gson.Gson;
-import com.wavefront.agent.queueing.TaskQueue;
-import com.wavefront.api.LogAPI;
-import com.wavefront.data.ReportableEntityType;
-import com.wavefront.dto.Log;
-import com.yammer.metrics.Metrics;
-import com.yammer.metrics.core.MetricName;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.UUID;
-import java.util.function.Supplier;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-import javax.ws.rs.core.Response;
-
-/**
- * A {@link DataSubmissionTask} that handles log payloads.
- *
- * @author amitw@vmware.com
- */
-@JsonIgnoreProperties(ignoreUnknown = true)
-@JsonTypeInfo(use = JsonTypeInfo.Id.CLASS, property = "__CLASS")
-public class LogDataSubmissionTask extends AbstractDataSubmissionTask {
- private static final Logger LOGGER = Logger.getLogger("LogDataSubmission");
- public static final String AGENT_PREFIX = "WF-PROXY-AGENT-";
- private transient LogAPI api;
- private transient UUID proxyId;
-
- @JsonProperty private List logs;
- @JsonProperty private int weight;
-
- @SuppressWarnings("unused")
- LogDataSubmissionTask() {}
-
- /**
- * @param api API endpoint.
- * @param proxyId Proxy identifier
- * @param properties entity-specific wrapper over mutable proxy settings' container.
- * @param backlog task queue.
- * @param handle Handle (usually port number) of the pipeline where the data came from.
- * @param logs Data payload.
- * @param timeProvider Time provider (in millis).
- */
- public LogDataSubmissionTask(
- LogAPI api,
- UUID proxyId,
- EntityProperties properties,
- TaskQueue backlog,
- String handle,
- @Nonnull List logs,
- @Nullable Supplier timeProvider) {
- super(properties, backlog, handle, ReportableEntityType.LOGS, timeProvider);
- this.api = api;
- this.proxyId = proxyId;
- this.logs = new ArrayList<>(logs);
- for (Log l : logs) {
- weight += l.getDataSize();
- }
- }
-
- @Override
- Response doExecute() {
- try {
- LOGGER.finest(() -> ("Logs batch sent to vRLIC: " + new Gson().toJson(logs)));
- } catch (Exception e) {
- LOGGER.log(
- Level.WARNING, "Error occurred while logging the batch sent to vRLIC: " + e.getMessage());
- }
- return api.proxyLogs(AGENT_PREFIX + proxyId.toString(), logs);
- }
-
- @Override
- protected TaskResult handleStatus429() {
- Metrics.newCounter(
- new MetricName(entityType + "." + handle, "", "failed" + ".ingestion_limit_reached"))
- .inc(this.weight());
- return TaskResult.REMOVED;
- }
-
- @Override
- public int weight() {
- return weight;
- }
-
- @Override
- public List splitTask(int minSplitSize, int maxSplitSize) {
- if (logs.size() > Math.max(1, minSplitSize)) {
- List