-
Notifications
You must be signed in to change notification settings - Fork 16
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
feat: adds pull based metrics exporter #271
Changes from 8 commits
266d740
391d144
8cad5b6
844a632
552a77b
8f467d5
8d40246
b944b9e
24c920a
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,3 @@ | ||
// Apply a common Maven group id to every subproject of the metrics exporter module.
subprojects {
  group = "org.hypertrace.metrics.exporter"
}
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,50 @@ | ||
plugins {
  java
  application
  jacoco
  id("org.hypertrace.docker-java-application-plugin")
  id("org.hypertrace.docker-publish-plugin")
  id("org.hypertrace.jacoco-report-plugin")
}

application {
  // The platform service framework launcher bootstraps MetricsExporterService.
  mainClass.set("org.hypertrace.core.serviceframework.PlatformServiceLauncher")
}

hypertraceDocker {
  defaultImage {
    javaApplication {
      serviceName.set("${project.name}")
      adminPort.set(8099)
    }
  }
}

tasks.test {
  useJUnitPlatform()
}

dependencies {
  // common and framework
  implementation("org.hypertrace.core.serviceframework:platform-service-framework:0.1.30")
  implementation("org.hypertrace.core.serviceframework:platform-metrics:0.1.30")

  // open telemetry
  // FIX: corrected version typo "1.7.0-alpah" -> "1.7.0-alpha" so the artifact resolves.
  implementation("io.opentelemetry:opentelemetry-sdk-metrics:1.7.0-alpha")
  // TODO: Upgrade opentelemetry-exporter-prometheus to 1.8.0 release when available
  // to include time stamp related changes
  // https://github.com/open-telemetry/opentelemetry-java/pull/3700
  // For now, the exported time stamp will be the current time stamp.
  implementation("io.opentelemetry:opentelemetry-exporter-prometheus:1.7.0-alpha")

  // open telemetry proto
  implementation("io.opentelemetry:opentelemetry-proto:1.6.0-alpha")

  // kafka
  implementation("org.apache.kafka:kafka-clients:2.6.0")

  // test
  testImplementation("org.junit.jupiter:junit-jupiter:5.7.1")
  testImplementation("org.mockito:mockito-core:3.8.0")
  testImplementation("com.google.code.gson:gson:2.8.7")
}
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,62 @@ | ||
package org.hypertrace.metrics.exporter; | ||
|
||
import com.typesafe.config.Config; | ||
import io.opentelemetry.exporter.prometheus.PrometheusCollector; | ||
import org.hypertrace.core.serviceframework.PlatformService; | ||
import org.hypertrace.core.serviceframework.config.ConfigClient; | ||
import org.hypertrace.metrics.exporter.consumer.MetricsKafkaConsumer; | ||
import org.hypertrace.metrics.exporter.producer.InMemoryMetricsProducer; | ||
import org.slf4j.Logger; | ||
import org.slf4j.LoggerFactory; | ||
|
||
public class MetricsExporterService extends PlatformService { | ||
private static final Logger LOGGER = LoggerFactory.getLogger(MetricsExporterService.class); | ||
|
||
private Config config; | ||
private MetricsKafkaConsumer metricsKafkaConsumer; | ||
private Thread metricsConsumerThread; | ||
private InMemoryMetricsProducer inMemoryMetricsProducer; | ||
|
||
public MetricsExporterService(ConfigClient configClient) { | ||
super(configClient); | ||
} | ||
|
||
public void setConfig(Config config) { | ||
this.config = config; | ||
} | ||
|
||
@Override | ||
public void doInit() { | ||
config = (config != null) ? config : getAppConfig(); | ||
inMemoryMetricsProducer = new InMemoryMetricsProducer(config); | ||
metricsKafkaConsumer = new MetricsKafkaConsumer(config, inMemoryMetricsProducer); | ||
metricsConsumerThread = new Thread(metricsKafkaConsumer); | ||
// TODO: Upgrade opentelemetry-exporter-prometheus to 1.8.0 release when available | ||
// to include time stamp related changes | ||
// https://github.com/open-telemetry/opentelemetry-java/pull/3700 | ||
// For now, the exported time stamp will be the current time stamp. | ||
PrometheusCollector.create().apply(inMemoryMetricsProducer); | ||
} | ||
|
||
@Override | ||
public void doStart() { | ||
metricsConsumerThread = new Thread(metricsKafkaConsumer); | ||
metricsConsumerThread.start(); | ||
try { | ||
metricsConsumerThread.join(); | ||
} catch (InterruptedException e) { | ||
LOGGER.error("Exception in starting the thread:", e); | ||
} | ||
} | ||
|
||
@Override | ||
public void doStop() { | ||
metricsConsumerThread.interrupt(); | ||
metricsKafkaConsumer.close(); | ||
} | ||
|
||
@Override | ||
public boolean healthCheck() { | ||
return true; | ||
} | ||
} |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,115 @@ | ||
package org.hypertrace.metrics.exporter.consumer; | ||
|
||
import com.google.protobuf.InvalidProtocolBufferException; | ||
import com.typesafe.config.Config; | ||
import io.opentelemetry.proto.metrics.v1.ResourceMetrics; | ||
import io.opentelemetry.sdk.metrics.data.MetricData; | ||
import java.time.Duration; | ||
import java.util.ArrayList; | ||
import java.util.Collections; | ||
import java.util.HashMap; | ||
import java.util.List; | ||
import java.util.Map; | ||
import java.util.Properties; | ||
import org.apache.kafka.clients.consumer.ConsumerConfig; | ||
import org.apache.kafka.clients.consumer.ConsumerRecords; | ||
import org.apache.kafka.clients.consumer.KafkaConsumer; | ||
import org.hypertrace.metrics.exporter.producer.InMemoryMetricsProducer; | ||
import org.hypertrace.metrics.exporter.utils.OtlpProtoToMetricDataConverter; | ||
import org.slf4j.Logger; | ||
import org.slf4j.LoggerFactory; | ||
|
||
public class MetricsKafkaConsumer implements Runnable { | ||
private static final Logger LOGGER = LoggerFactory.getLogger(MetricsKafkaConsumer.class); | ||
|
||
private static final int CONSUMER_POLL_TIMEOUT_MS = 100; | ||
private static final int QUEUE_WAIT_TIME_MS = 500; | ||
private static final int WAIT_TIME_MS = 100; | ||
|
||
private static final String KAFKA_CONFIG_KEY = "kafka.config"; | ||
private static final String INPUT_TOPIC_KEY = "input.topic"; | ||
|
||
private final KafkaConsumer<byte[], byte[]> consumer; | ||
private final InMemoryMetricsProducer inMemoryMetricsProducer; | ||
|
||
public MetricsKafkaConsumer(Config config, InMemoryMetricsProducer inMemoryMetricsProducer) { | ||
consumer = new KafkaConsumer<>(prepareProperties(config.getConfig(KAFKA_CONFIG_KEY))); | ||
consumer.subscribe(Collections.singletonList(config.getString(INPUT_TOPIC_KEY))); | ||
this.inMemoryMetricsProducer = inMemoryMetricsProducer; | ||
} | ||
|
||
public void run() { | ||
while (true && !Thread.currentThread().isInterrupted()) { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. is true needed ? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. will address this in follow up PR. |
||
List<ResourceMetrics> resourceMetrics = new ArrayList<>(); | ||
|
||
ConsumerRecords<byte[], byte[]> records = | ||
consumer.poll(Duration.ofMillis(CONSUMER_POLL_TIMEOUT_MS)); | ||
|
||
records.forEach( | ||
record -> { | ||
try { | ||
resourceMetrics.add(ResourceMetrics.parseFrom(record.value())); | ||
} catch (InvalidProtocolBufferException e) { | ||
LOGGER.warn("Skipping record due to error", e); | ||
} | ||
}); | ||
|
||
resourceMetrics.forEach( | ||
rm -> { | ||
try { | ||
List<MetricData> metricData = OtlpProtoToMetricDataConverter.toMetricData(rm); | ||
boolean result = false; | ||
while (!result) { | ||
result = inMemoryMetricsProducer.addMetricData(metricData); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. if in memory queue is full then wait and loop until the queue is drained during a prometheus pull ? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Yes, we are waiting for enough space to add all metrics data associated with current ResourceMetrics. this can delay the poll at least for pull duration from Prometheus. So, for now, we need to set poll session time out higher than Prometheus poll (1/3 of session time out). |
||
waitForSec(QUEUE_WAIT_TIME_MS); | ||
} | ||
} catch (Exception e) { | ||
LOGGER.debug("skipping the resource metrics due to error: {}", e); | ||
} | ||
}); | ||
|
||
waitForSec(WAIT_TIME_MS); | ||
} | ||
} | ||
|
||
public void close() { | ||
consumer.close(); | ||
} | ||
|
||
private Properties prepareProperties(Config config) { | ||
Map<String, Object> overrideProperties = new HashMap(); | ||
config.entrySet().stream() | ||
.forEach(e -> overrideProperties.put(e.getKey(), config.getString(e.getKey()))); | ||
|
||
Map<String, Object> baseProperties = getBaseProperties(); | ||
overrideProperties.forEach(baseProperties::put); | ||
|
||
Properties properties = new Properties(); | ||
properties.putAll(baseProperties); | ||
return properties; | ||
} | ||
|
||
private Map<String, Object> getBaseProperties() { | ||
Map<String, Object> baseProperties = new HashMap<>(); | ||
baseProperties.put(ConsumerConfig.GROUP_ID_CONFIG, "hypertrace-metrics-exporter"); | ||
baseProperties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true"); | ||
baseProperties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000"); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. As mentioned in the description that we will handle the manual coming in the second iteration. |
||
baseProperties.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000"); | ||
baseProperties.put( | ||
ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, | ||
"org.apache.kafka.common.serialization.ByteArrayDeserializer"); | ||
baseProperties.put( | ||
ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, | ||
"org.apache.kafka.common.serialization.ByteArrayDeserializer"); | ||
return baseProperties; | ||
} | ||
|
||
private void waitForSec(long millis) { | ||
try { | ||
Thread.sleep(millis); | ||
} catch (InterruptedException e) { | ||
Thread.currentThread().interrupt(); | ||
LOGGER.debug("While waiting, the consumer thread has interrupted"); | ||
} | ||
} | ||
} |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,47 @@ | ||
package org.hypertrace.metrics.exporter.producer; | ||
|
||
import com.typesafe.config.Config; | ||
import io.opentelemetry.sdk.metrics.data.MetricData; | ||
import io.opentelemetry.sdk.metrics.export.MetricProducer; | ||
import java.util.ArrayList; | ||
import java.util.Collection; | ||
import java.util.List; | ||
import java.util.concurrent.ArrayBlockingQueue; | ||
import java.util.concurrent.BlockingQueue; | ||
|
||
public class InMemoryMetricsProducer implements MetricProducer { | ||
|
||
private static final String BUFFER_CONFIG_KEY = "buffer.config"; | ||
private static final String MAX_QUEUE_SIZE = "max.queue.size"; | ||
private static final String MAX_BATCH_SIZE = "max.batch.size"; | ||
|
||
private BlockingQueue<MetricData> metricDataQueue; | ||
private int maxQueueSize; | ||
private int maxBatchSize; | ||
|
||
public InMemoryMetricsProducer(Config config) { | ||
maxQueueSize = config.getConfig(BUFFER_CONFIG_KEY).getInt(MAX_QUEUE_SIZE); | ||
maxBatchSize = config.getConfig(BUFFER_CONFIG_KEY).getInt(MAX_BATCH_SIZE); | ||
this.metricDataQueue = new ArrayBlockingQueue<>(maxQueueSize); | ||
} | ||
|
||
public boolean addMetricData(List<MetricData> metricData) { | ||
if (this.metricDataQueue.size() + metricData.size() > maxQueueSize) { | ||
return false; | ||
} | ||
|
||
for (MetricData md : metricData) { | ||
this.metricDataQueue.offer(md); | ||
} | ||
|
||
return true; | ||
} | ||
|
||
public Collection<MetricData> collectAllMetrics() { | ||
List<MetricData> metricData = new ArrayList<>(); | ||
while (metricData.size() < maxBatchSize && this.metricDataQueue.peek() != null) { | ||
metricData.add(this.metricDataQueue.poll()); | ||
} | ||
return metricData; | ||
} | ||
} |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I am thinking of adding a TestContainers-based test as a follow-up. For now, I added a test for the most important component.