Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: adds pull based metrics exporter #271

Merged
merged 9 commits into from
Oct 28, 2021
17 changes: 13 additions & 4 deletions hypertrace-ingester/build.gradle.kts
Original file line number Diff line number Diff line change
Expand Up @@ -26,8 +26,8 @@ hypertraceDocker {

dependencies {
implementation("org.hypertrace.core.kafkastreams.framework:kafka-streams-framework:0.1.21")
implementation("org.hypertrace.core.serviceframework:platform-service-framework:0.1.26")
implementation("org.hypertrace.core.serviceframework:platform-metrics:0.1.26")
implementation("org.hypertrace.core.serviceframework:platform-service-framework:0.1.30")
implementation("org.hypertrace.core.serviceframework:platform-metrics:0.1.30")
implementation("org.hypertrace.core.datamodel:data-model:0.1.19")
implementation("org.hypertrace.core.viewgenerator:view-generator-framework:0.3.1")
implementation("com.typesafe:config:1.4.1")
Expand All @@ -41,6 +41,7 @@ dependencies {
implementation(project(":hypertrace-trace-enricher:hypertrace-trace-enricher"))
implementation(project(":hypertrace-view-generator:hypertrace-view-generator"))
implementation(project(":hypertrace-metrics-processor:hypertrace-metrics-processor"))
implementation(project(":hypertrace-metrics-exporter:hypertrace-metrics-exporter"))

testImplementation("org.junit.jupiter:junit-jupiter:5.7.1")
testImplementation("org.mockito:mockito-core:3.8.0")
Expand Down Expand Up @@ -81,7 +82,11 @@ tasks.register<Copy>("copyServiceConfigs") {
createCopySpec("hypertrace-metrics-processor",
"hypertrace-metrics-processor",
"main",
"common")
"common"),
createCopySpec("hypertrace-metrics-exporter",
"hypertrace-metrics-exporter",
"main",
"common")
).into("./build/resources/main/configs/")
}

Expand Down Expand Up @@ -137,7 +142,11 @@ tasks.register<Copy>("copyServiceConfigsTest") {
createCopySpec("hypertrace-metrics-processor",
"hypertrace-metrics-processor",
"test",
"hypertrace-metrics-processor")
"hypertrace-metrics-processor"),
createCopySpec("hypertrace-metrics-exporter",
"hypertrace-metrics-exporter",
"test",
"hypertrace-metrics-exporter")
).into("./build/resources/test/configs/")
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
import org.hypertrace.core.serviceframework.config.ConfigUtils;
import org.hypertrace.core.spannormalizer.SpanNormalizer;
import org.hypertrace.core.viewgenerator.service.MultiViewGeneratorLauncher;
import org.hypertrace.metrics.exporter.MetricsExporterService;
import org.hypertrace.metrics.processor.MetricsProcessor;
import org.hypertrace.traceenricher.trace.enricher.TraceEnricher;
import org.slf4j.Logger;
Expand All @@ -30,9 +31,12 @@ public class HypertraceIngester extends KafkaStreamsApp {
private static final String HYPERTRACE_INGESTER_JOB_CONFIG = "hypertrace-ingester-job-config";

private Map<String, Pair<String, KafkaStreamsApp>> jobNameToSubTopology = new HashMap<>();
MetricsExporterService metricsExporterService;

// Builds the ingester and wires in the pull-based metrics exporter side-service,
// pointing it at the "hypertrace-metrics-exporter" section of this job's config.
public HypertraceIngester(ConfigClient configClient) {
super(configClient);
metricsExporterService = new MetricsExporterService(configClient);
// Use the sub-job config rather than the exporter's own app config so the
// exporter runs embedded under the ingester's configuration tree.
metricsExporterService.setConfig(getSubJobConfig("hypertrace-metrics-exporter"));
}

private KafkaStreamsApp getSubTopologyInstance(String name) {
Expand Down Expand Up @@ -118,6 +122,24 @@ public List<String> getOutputTopics(Map<String, Object> properties) {
return outputTopics.stream().collect(Collectors.toList());
}

@Override
protected void doInit() {
// Initialize the Kafka Streams topology first, then the embedded exporter,
// so the exporter lifecycle always trails the main app's.
super.doInit();
this.metricsExporterService.doInit();
}

@Override
protected void doStart() {
// Start the streams app, then the metrics exporter's consumer loop.
super.doStart();
this.metricsExporterService.doStart();
}

@Override
protected void doStop() {
// Stop the streams app first, then tear down the exporter (interrupts its
// consumer thread and closes its Kafka consumer).
super.doStop();
this.metricsExporterService.doStop();
}

// Reads the list of sub-topology names to run from the job config key
// "sub.topology.names".
private List<String> getSubTopologiesNames(Map<String, Object> properties) {
return getJobConfig(properties).getStringList("sub.topology.names");
}
Expand Down
3 changes: 3 additions & 0 deletions hypertrace-metrics-exporter/build.gradle.kts
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
// All modules under hypertrace-metrics-exporter publish under this Maven group id.
subprojects {
group = "org.hypertrace.metrics.exporter"
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
plugins {
  java
  application
  jacoco
  id("org.hypertrace.docker-java-application-plugin")
  id("org.hypertrace.docker-publish-plugin")
  id("org.hypertrace.jacoco-report-plugin")
}

application {
  // Launched through the shared platform-service framework entry point.
  mainClass.set("org.hypertrace.core.serviceframework.PlatformServiceLauncher")
}

hypertraceDocker {
  defaultImage {
    javaApplication {
      serviceName.set("${project.name}")
      adminPort.set(8099)
    }
  }
}

tasks.test {
  useJUnitPlatform()
}

dependencies {
  // common and framework
  implementation("org.hypertrace.core.serviceframework:platform-service-framework:0.1.30")
  implementation("org.hypertrace.core.serviceframework:platform-metrics:0.1.30")

  // open telemetry
  // Fixed typo: was "1.7.0-alpah", which is not a published version and fails resolution.
  implementation("io.opentelemetry:opentelemetry-sdk-metrics:1.7.0-alpha")
  // TODO: Upgrade opentelemetry-exporter-prometheus to 1.8.0 release when available
  // to include time stamp related changes
  // https://github.com/open-telemetry/opentelemetry-java/pull/3700
  // For now, the exported time stamp will be the current time stamp.
  implementation("io.opentelemetry:opentelemetry-exporter-prometheus:1.7.0-alpha")

  // open telemetry proto
  implementation("io.opentelemetry:opentelemetry-proto:1.6.0-alpha")

  // kafka
  implementation("org.apache.kafka:kafka-clients:2.6.0")

  // test
  testImplementation("org.junit.jupiter:junit-jupiter:5.7.1")
  testImplementation("org.mockito:mockito-core:3.8.0")
  testImplementation("com.google.code.gson:gson:2.8.7")
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
package org.hypertrace.metrics.exporter;

import com.typesafe.config.Config;
import io.opentelemetry.exporter.prometheus.PrometheusCollector;
import org.hypertrace.core.serviceframework.PlatformService;
import org.hypertrace.core.serviceframework.config.ConfigClient;
import org.hypertrace.metrics.exporter.consumer.MetricsKafkaConsumer;
import org.hypertrace.metrics.exporter.producer.InMemoryMetricsProducer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Platform service that pulls OTLP metrics off Kafka into an in-memory buffer and
 * exposes them to Prometheus via the registered {@code PrometheusCollector}.
 *
 * <p>Can run standalone (config from {@code getAppConfig()}) or embedded in another
 * service that injects its config via {@link #setConfig(Config)} before init.
 */
public class MetricsExporterService extends PlatformService {
  private static final Logger LOGGER = LoggerFactory.getLogger(MetricsExporterService.class);

  private Config config;
  private MetricsKafkaConsumer metricsKafkaConsumer;
  private Thread metricsConsumerThread;
  private InMemoryMetricsProducer inMemoryMetricsProducer;

  public MetricsExporterService(ConfigClient configClient) {
    super(configClient);
  }

  /** Overrides the service's own app config; used when embedded in another service. */
  public void setConfig(Config config) {
    this.config = config;
  }

  @Override
  public void doInit() {
    // Injected config (embedded mode) wins over the service's own app config.
    config = (config != null) ? config : getAppConfig();
    inMemoryMetricsProducer = new InMemoryMetricsProducer(config);
    metricsKafkaConsumer = new MetricsKafkaConsumer(config, inMemoryMetricsProducer);
    // NOTE: the consumer thread is created in doStart(); the extra Thread previously
    // allocated here was never started and was immediately discarded by doStart().
    // TODO: Upgrade opentelemetry-exporter-prometheus to 1.8.0 release when available
    // to include time stamp related changes
    // https://github.com/open-telemetry/opentelemetry-java/pull/3700
    // For now, the exported time stamp will be the current time stamp.
    PrometheusCollector.create().apply(inMemoryMetricsProducer);
  }

  @Override
  public void doStart() {
    metricsConsumerThread = new Thread(metricsKafkaConsumer);
    metricsConsumerThread.start();
    try {
      // Block the service's start on the consumer loop, which runs until interrupted.
      metricsConsumerThread.join();
    } catch (InterruptedException e) {
      // Restore the interrupt flag so the framework can observe the interruption.
      Thread.currentThread().interrupt();
      LOGGER.error("Exception in starting the thread:", e);
    }
  }

  @Override
  public void doStop() {
    // Interrupt ends the consumer's poll loop; then release the Kafka consumer.
    metricsConsumerThread.interrupt();
    metricsKafkaConsumer.close();
  }

  @Override
  public boolean healthCheck() {
    return true;
  }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,115 @@
package org.hypertrace.metrics.exporter.consumer;

import com.google.protobuf.InvalidProtocolBufferException;
import com.typesafe.config.Config;
import io.opentelemetry.proto.metrics.v1.ResourceMetrics;
import io.opentelemetry.sdk.metrics.data.MetricData;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.hypertrace.metrics.exporter.producer.InMemoryMetricsProducer;
import org.hypertrace.metrics.exporter.utils.OtlpProtoToMetricDataConverter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class MetricsKafkaConsumer implements Runnable {
private static final Logger LOGGER = LoggerFactory.getLogger(MetricsKafkaConsumer.class);

private static final int CONSUMER_POLL_TIMEOUT_MS = 100;
private static final int QUEUE_WAIT_TIME_MS = 500;
private static final int WAIT_TIME_MS = 100;

private static final String KAFKA_CONFIG_KEY = "kafka.config";
private static final String INPUT_TOPIC_KEY = "input.topic";

private final KafkaConsumer<byte[], byte[]> consumer;
private final InMemoryMetricsProducer inMemoryMetricsProducer;

// Creates a Kafka consumer from base properties overlaid with "kafka.config"
// overrides, subscribes it to the configured input topic, and remembers the
// in-memory buffer that polled metrics are handed off to.
public MetricsKafkaConsumer(Config config, InMemoryMetricsProducer inMemoryMetricsProducer) {
  this.consumer = new KafkaConsumer<>(prepareProperties(config.getConfig(KAFKA_CONFIG_KEY)));
  this.consumer.subscribe(Collections.singletonList(config.getString(INPUT_TOPIC_KEY)));
  this.inMemoryMetricsProducer = inMemoryMetricsProducer;
}

public void run() {
while (true && !Thread.currentThread().isInterrupted()) {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

is true needed ?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

will address this in follow up PR.

List<ResourceMetrics> resourceMetrics = new ArrayList<>();

ConsumerRecords<byte[], byte[]> records =
consumer.poll(Duration.ofMillis(CONSUMER_POLL_TIMEOUT_MS));

records.forEach(
record -> {
try {
resourceMetrics.add(ResourceMetrics.parseFrom(record.value()));
} catch (InvalidProtocolBufferException e) {
LOGGER.warn("Skipping record due to error", e);
}
});

resourceMetrics.forEach(
rm -> {
try {
List<MetricData> metricData = OtlpProtoToMetricDataConverter.toMetricData(rm);
boolean result = false;
while (!result) {
result = inMemoryMetricsProducer.addMetricData(metricData);
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

if in memory queue is full then wait and loop until the queue is drained during a prometheus pull ?

Copy link
Contributor Author

@kotharironak kotharironak Oct 28, 2021

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes, we are waiting for enough space to add all metrics data associated with current ResourceMetrics. this can delay the poll at least for pull duration from Prometheus. So, for now, we need to set poll session time out higher than Prometheus poll (1/3 of session time out).

waitForSec(QUEUE_WAIT_TIME_MS);
}
} catch (Exception e) {
LOGGER.debug("skipping the resource metrics due to error: {}", e);
}
});

waitForSec(WAIT_TIME_MS);
}
}

// Releases the Kafka consumer's resources; called after the polling thread is interrupted.
public void close() {
consumer.close();
}

/**
 * Builds the consumer {@link Properties}: starts from the hard-coded base settings,
 * then overlays every key present in the supplied {@code kafka.config} section, so
 * user-provided config always wins.
 */
private Properties prepareProperties(Config config) {
  // Fixed: was a raw "new HashMap()" (unchecked); also dropped the needless
  // intermediate override-map — putting config keys last has the same precedence.
  Map<String, Object> merged = getBaseProperties();
  config.entrySet().forEach(entry -> merged.put(entry.getKey(), config.getString(entry.getKey())));

  Properties properties = new Properties();
  properties.putAll(merged);
  return properties;
}

private Map<String, Object> getBaseProperties() {
Map<String, Object> baseProperties = new HashMap<>();
baseProperties.put(ConsumerConfig.GROUP_ID_CONFIG, "hypertrace-metrics-exporter");
baseProperties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
baseProperties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

As mentioned in the description that we will handle the manual coming in the second iteration.

baseProperties.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000");
baseProperties.put(
ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
"org.apache.kafka.common.serialization.ByteArrayDeserializer");
baseProperties.put(
ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
"org.apache.kafka.common.serialization.ByteArrayDeserializer");
return baseProperties;
}

// Sleep helper — the argument is MILLISECONDS despite the "Sec" in the name.
// Restores the interrupt flag so the run() loop's isInterrupted() check can
// observe the interruption and exit.
private void waitForSec(long millis) {
try {
Thread.sleep(millis);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
LOGGER.debug("While waiting, the consumer thread has interrupted");
}
}
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
package org.hypertrace.metrics.exporter.producer;

import com.typesafe.config.Config;
import io.opentelemetry.sdk.metrics.data.MetricData;
import io.opentelemetry.sdk.metrics.export.MetricProducer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

/**
 * Bounded in-memory buffer bridging the Kafka consumer thread and the Prometheus
 * collector: the consumer adds {@code MetricData} batches, and each Prometheus pull
 * drains up to {@code max.batch.size} entries via {@link #collectAllMetrics()}.
 */
public class InMemoryMetricsProducer implements MetricProducer {

  private static final String BUFFER_CONFIG_KEY = "buffer.config";
  private static final String MAX_QUEUE_SIZE = "max.queue.size";
  private static final String MAX_BATCH_SIZE = "max.batch.size";

  // Made final: these are assigned once in the constructor and never change.
  private final BlockingQueue<MetricData> metricDataQueue;
  private final int maxQueueSize;
  private final int maxBatchSize;

  public InMemoryMetricsProducer(Config config) {
    this.maxQueueSize = config.getConfig(BUFFER_CONFIG_KEY).getInt(MAX_QUEUE_SIZE);
    this.maxBatchSize = config.getConfig(BUFFER_CONFIG_KEY).getInt(MAX_BATCH_SIZE);
    this.metricDataQueue = new ArrayBlockingQueue<>(maxQueueSize);
  }

  /**
   * All-or-nothing admission: returns false without adding anything when the whole
   * batch does not fit, so the caller can retry once the queue drains.
   *
   * <p>NOTE(review): the size check and the offers are not atomic — this is only safe
   * with a single producer thread, as used by MetricsKafkaConsumer; confirm before
   * adding more producers.
   */
  public boolean addMetricData(List<MetricData> metricData) {
    if (this.metricDataQueue.size() + metricData.size() > maxQueueSize) {
      return false;
    }

    for (MetricData md : metricData) {
      this.metricDataQueue.offer(md);
    }

    return true;
  }

  /** Drains and returns up to {@code maxBatchSize} buffered metrics (possibly empty). */
  public Collection<MetricData> collectAllMetrics() {
    // drainTo replaces the previous peek()/poll() loop with the same semantics.
    List<MetricData> metricData = new ArrayList<>(maxBatchSize);
    this.metricDataQueue.drainTo(metricData, maxBatchSize);
    return metricData;
  }
}
Loading