record,
+ FieldFormat keyFormat,
+ FieldFormat valueFormat,
+ long afterMs
+ ) {
+ return new EmulatorRecord(
+ record.topic(),
+ record.partition(),
+ record.offset(),
+ record.timestamp(),
+ afterMs,
+ keyFormat,
+ record.key(),
+ valueFormat,
+ record.value()
+ );
+ }
+ }
+
+ enum FieldFormat {
+ STRING,
+ LONG,
+ INTEGER,
+ BYTES,
+ }
+}
diff --git a/emulator/src/main/java/kafka/emulator/KafkaEmulator.java b/emulator/src/main/java/kafka/emulator/KafkaEmulator.java
new file mode 100644
index 0000000..1adca65
--- /dev/null
+++ b/emulator/src/main/java/kafka/emulator/KafkaEmulator.java
@@ -0,0 +1,325 @@
+package kafka.emulator;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.OptionalInt;
+import java.util.OptionalLong;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Executors;
+import java.util.stream.Collectors;
+import kafka.context.KafkaContext;
+import kafka.context.KafkaContexts;
+import org.apache.kafka.clients.admin.AdminClient;
+import org.apache.kafka.clients.admin.NewTopic;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.serialization.ByteArrayDeserializer;
+import org.apache.kafka.common.serialization.ByteArraySerializer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class KafkaEmulator {
+
+ static final Logger LOG = LoggerFactory.getLogger(KafkaEmulator.class);
+ final ArchiveStore archiveLoader;
+
+ public KafkaEmulator(ArchiveStore archiveLoader) {
+ this.archiveLoader = archiveLoader;
+ }
+
+ /**
+ * Read and package topic records into an archive.
+ *
+ * Based on the start and end conditions, start a consumer and poll records per partition,
+ * transform them into archive records, and append them to the archive. Once the end
+ * condition is met, the results are flushed to the archive store.
+ *
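+ * A minimal usage sketch (context name, topic, and record count are illustrative, not part
+ * of this change):
+ * <pre>{@code
+ * var emulator = new KafkaEmulator(new ArchiveStore.SqliteArchiveLoader(Path.of("archive.db")));
+ * var context = KafkaContexts.load().get("local");
+ * emulator.record(
+ *   context,
+ *   List.of("orders"),
+ *   EmulatorArchive.FieldFormat.STRING,
+ *   EmulatorArchive.FieldFormat.STRING,
+ *   StartFromOption.of(),   // start from the beginning
+ *   EndAtOption.of(100)     // stop after 100 records per partition
+ * );
+ * }</pre>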
+ */
+ public void record(
+ KafkaContext kafkaContext,
+ List<String> topics,
+ EmulatorArchive.FieldFormat keyFormat,
+ EmulatorArchive.FieldFormat valueFormat,
+ StartFromOption startFrom,
+ EndAtOption endAt
+ ) throws IOException {
+ final var endTime = System.currentTimeMillis();
+ // create consumer
+ final var properties = kafkaContext.properties();
+ properties.put("group.id", "emulator-" + endTime);
+ final var keyDeserializer = new ByteArrayDeserializer();
+ final var valueDeserializer = new ByteArrayDeserializer();
+ var consumer = new KafkaConsumer<>(properties, keyDeserializer, valueDeserializer);
+ // set offsets from
+ // consumer loop
+ var latestTimestamps = new HashMap<TopicPartition, Long>();
+ final var archive = EmulatorArchive.create();
+ var listTopics = consumer.listTopics();
+ var topicPartitions = new ArrayList<TopicPartition>();
+ for (var topic : topics) {
+ var partitionsInfo = listTopics.get(topic);
+ topicPartitions.addAll(
+ partitionsInfo
+ .stream()
+ .map(info -> new TopicPartition(info.topic(), info.partition()))
+ .toList()
+ );
+ }
+ consumer.assign(topicPartitions);
+ if (!startFrom.offsets().isEmpty()) {
+ startFrom.offsets().forEach(consumer::seek);
+ } else if (startFrom.timestamp().isPresent()) {
+ final var ts = startFrom.timestamp().getAsLong();
+ final var offsets = consumer.offsetsForTimes(
+ topicPartitions.stream().collect(Collectors.toMap(tp -> tp, tp -> ts))
+ );
+ offsets.forEach((tp, offsetAndTimestamp) ->
+ consumer.seek(tp, offsetAndTimestamp.offset())
+ );
+ } else {
+ consumer.seekToBeginning(topicPartitions);
+ }
+
+ var allDone = false;
+ var done = topicPartitions.stream().collect(Collectors.toMap(tp -> tp, tp -> false));
+ while (!allDone) {
+ // set offsets to
+ // break by topic-partition
+ var records = consumer.poll(Duration.ofSeconds(5));
+ for (var partition : records.partitions()) {
+ if (done.get(partition)) continue;
+ // start: per partition
+ var perPartition = records.records(partition);
+ for (var record : perPartition) {
+ // transform to ZipRecord
+ var latestTimestamp = latestTimestamps.getOrDefault(partition, -1L);
+ final var currentTimestamp = record.timestamp();
+ if (currentTimestamp >= endTime) {
+ break;
+ }
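+ // afterMs is the gap between this record and the previous one in the same partition;
+ // replay uses it to reproduce the original pacing (0 for the first record seen).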
+ final long afterMs;
+ if (latestTimestamp < 0) {
+ afterMs = 0;
+ } else {
+ afterMs = currentTimestamp - latestTimestamp;
+ }
+ var zipRecord = EmulatorArchive.EmulatorRecord.from(
+ record,
+ keyFormat,
+ valueFormat,
+ afterMs
+ );
+ // append to topic-partition file
+ archive.append(partition, zipRecord);
+ latestTimestamps.put(partition, currentTimestamp);
+ if (isDone(partition, archive, endAt)) {
+ done.put(partition, true);
+ break;
+ }
+ }
+ // end: per partition
+ }
+ if (records.isEmpty()) {
+ allDone = true;
+ } else {
+ allDone = done.values().stream().allMatch(Boolean::booleanValue);
+ }
+ }
+ archiveLoader.save(archive);
+ }
+
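+ // End conditions are evaluated per partition: record count, then target offsets, then
+ // target timestamp. When endAt.now() is set, recording stops only once polling returns
+ // no more records (i.e., the consumer has caught up).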
+ private boolean isDone(TopicPartition tp, EmulatorArchive archive, EndAtOption endAt) {
+ if (!endAt.now()) {
+ if (endAt.recordsPerPartition().isPresent()) {
+ return archive.records(tp).size() >= endAt.recordsPerPartition().getAsInt();
+ }
+
+ if (!endAt.offsets().isEmpty()) {
+ return archive.oldestOffsets(tp) >= endAt.offsets().get(tp);
+ }
+
+ if (endAt.timestamp().isPresent()) {
+ return archive.oldestTimestamps(tp) >= endAt.timestamp().getAsLong();
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Read the archive and produce records into Kafka topics, reproducing the inter-record
+ * delays captured in the archive.
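+ *
+ * A minimal usage sketch (topic names below are illustrative, not part of this change):
+ * <pre>{@code
+ * var emulator = new KafkaEmulator(new ArchiveStore.SqliteArchiveLoader(Path.of("archive.db")));
+ * var context = KafkaContexts.load().get("local");
+ * // replay "orders" into "orders-replayed", keeping the recorded pacing
+ * emulator.replay(context, Map.of("orders", "orders-replayed"), false);
+ * }</pre>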
+ */
+ public void replay(
+ KafkaContext kafkaContext,
+ Map<String, String> topicMap,
+ boolean noWait
+ ) throws InterruptedException {
+ // load archive
+ var archive = archiveLoader.load();
+ // create producer
+ var keySerializer = new ByteArraySerializer();
+ var valueSerializer = new ByteArraySerializer();
+ final var props = kafkaContext.properties();
+ props.put("acks", "1");
+ var producer = new KafkaProducer<>(props, keySerializer, valueSerializer);
+ // per partition
+ final var topicPartitionNumber = archive.topicPartitionNumber();
+ // prepare topics
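+ // create any missing destination topics, using the partition count captured in the archive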
+ try (var admin = AdminClient.create(kafkaContext.properties())) {
+ var topics = admin.listTopics().names().get();
+ var newTopics = new ArrayList<NewTopic>();
+ for (var t : topicPartitionNumber.keySet()) {
+ final var topicName = topicMap.getOrDefault(t, t);
+ if (!topics.contains(topicName)) {
+ final var newTopic = new NewTopic(
+ topicName,
+ Optional.of(topicPartitionNumber.get(t)),
+ Optional.empty()
+ );
+ newTopics.add(newTopic);
+ }
+ }
+ admin.createTopics(newTopics).all().get();
+ } catch (ExecutionException | InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+
+ final var size = archive.topicPartitions().size();
+ var executor = Executors.newFixedThreadPool(size);
+ var countDownLatch = new CountDownLatch(size);
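+ // one producer task per topic-partition, so each partition's ordering and pacing are
+ // reproduced independently; the latch waits for all partitions to finish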
+ for (var topicPartition : archive.topicPartitions()) {
+ var rs = archive.records(topicPartition);
+ executor.submit(() -> {
+ long prevTime = 0L;
+ // per record
+ for (var r : rs) {
+ // prepare record
+ final var topicName = topicMap.getOrDefault(r.topic(), r.topic());
+ var record = new ProducerRecord<>(topicName, r.partition(), r.key(), r.value());
+ try {
+ // wait
+ var wait = (prevTime + r.afterMs()) - System.currentTimeMillis();
+ if (!noWait && wait > 0) {
+ LOG.info("{}:{}: waiting {} ms.", topicPartition, r.offset(), r.afterMs());
+ Thread.sleep(r.afterMs());
+ } else {
+ LOG.info(
+ "{}:{}: no waiting (after: {} ms.)",
+ topicPartition,
+ r.offset(),
+ r.afterMs()
+ );
+ }
+ var meta = producer.send(record).get();
+ prevTime = meta.timestamp();
+ } catch (InterruptedException | ExecutionException e) {
+ throw new RuntimeException(e);
+ }
+ }
+ countDownLatch.countDown();
+ LOG.info("Replay for {} finished", topicPartition);
+ });
+ }
+ countDownLatch.await();
+ LOG.info("Replay finished");
+ executor.shutdown();
+ }
+
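+ /**
+ * Where recording starts: from the beginning, from explicit per-partition offsets, or from
+ * a timestamp. Explicit offsets take precedence over the timestamp when both are set.
+ */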
+ record StartFromOption(
+ boolean beginning,
+ Map<TopicPartition, Long> offsets,
+ OptionalLong timestamp
+ ) {
+ public static StartFromOption of() {
+ return new StartFromOption(true, Map.of(), OptionalLong.empty());
+ }
+ public static StartFromOption of(Map<TopicPartition, Long> offsets) {
+ return new StartFromOption(true, offsets, OptionalLong.empty());
+ }
+ public static StartFromOption of(Map<TopicPartition, Long> offsets, long timestamp) {
+ return new StartFromOption(false, offsets, OptionalLong.of(timestamp));
+ }
+ public static StartFromOption of(Instant timestamp) {
+ return new StartFromOption(
+ false,
+ Map.of(),
+ OptionalLong.of(timestamp.toEpochMilli())
+ );
+ }
+ }
+
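+ /**
+ * When recording stops: as soon as polling catches up ("now"), after a number of records
+ * per partition, at given per-partition offsets, or at a timestamp.
+ */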
+ record EndAtOption(
+ boolean now,
+ OptionalInt recordsPerPartition,
+ Map<TopicPartition, Long> offsets,
+ OptionalLong timestamp
+ ) {
+ public static EndAtOption of() {
+ return new EndAtOption(true, OptionalInt.empty(), Map.of(), OptionalLong.empty());
+ }
+ public static EndAtOption of(int recordsPerPartition) {
+ return new EndAtOption(
+ false,
+ OptionalInt.of(recordsPerPartition),
+ Map.of(),
+ OptionalLong.empty()
+ );
+ }
+
+ public static EndAtOption of(Instant timestamp) {
+ return new EndAtOption(
+ false,
+ OptionalInt.empty(),
+ Map.of(),
+ OptionalLong.of(timestamp.toEpochMilli())
+ );
+ }
+ }
+
+ public static void main(String[] args) throws IOException, InterruptedException {
+ var archiveLoader = new ArchiveStore.SqliteArchiveLoader(Path.of("test.db"));
+ var emulator = new KafkaEmulator(archiveLoader);
+ var context = KafkaContexts.load().get("local");
+
+ emulator.record(
+ context,
+ List.of("t5"),
+ EmulatorArchive.FieldFormat.STRING,
+ EmulatorArchive.FieldFormat.STRING,
+ StartFromOption.of(),
+ EndAtOption.of(10)
+ );
+
+ emulator.replay(context, Map.of("t5", "t14"), true);
+ // Check Thread handling by parallel stream
+ // final var map = Map.of("s1", "a", "s2", "b", "s3", "c");
+ // map.keySet().parallelStream()
+ // .forEach(
+ // k -> {
+ // System.out.println(Thread.currentThread().getName());
+ // try {
+ // Thread.sleep(1000);
+ // } catch (InterruptedException e) {
+ // throw new RuntimeException(e);
+ // }
+ // System.out.println(map.get(k));
+ // });
+ }
+}
diff --git a/emulator/src/main/java/kafka/zip/ArchiveLoader.java b/emulator/src/main/java/kafka/zip/ArchiveLoader.java
deleted file mode 100644
index f2e25a6..0000000
--- a/emulator/src/main/java/kafka/zip/ArchiveLoader.java
+++ /dev/null
@@ -1,6 +0,0 @@
-package kafka.zip;
-
-public interface ArchiveLoader {
- EmulatorArchive open();
- void save();
-}
diff --git a/emulator/src/main/java/kafka/zip/Cli.java b/emulator/src/main/java/kafka/zip/Cli.java
deleted file mode 100644
index 238c78a..0000000
--- a/emulator/src/main/java/kafka/zip/Cli.java
+++ /dev/null
@@ -1,124 +0,0 @@
-package kafka.zip;
-
-import static java.lang.System.err;
-
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.util.List;
-import java.util.Optional;
-import java.util.Properties;
-import kafka.context.KafkaContexts;
-import kafka.context.SchemaRegistryContexts;
-import picocli.CommandLine;
-
-public class Cli {
- static class PackCommand {
- PropertiesOption propertiesOption;
- List<String> topics;
- }
-
- static class UnpackCommand {
- PropertiesOption propertiesOption;
- boolean dryRun; // false
- RepeatOptions repeatOptions;
-
- static class RepeatOptions {
- boolean repeat; // false
- long afterMs;
- }
- }
-
- static class PropertiesOption {
-
- @CommandLine.Option(
- names = {"-c", "--config"},
- description = "Client configuration properties file." + "Must include connection to Kafka")
- Optional<Path> configPath;
-
- @CommandLine.ArgGroup(exclusive = false)
- ContextOption contextOption;
-
- public Properties load() {
- return configPath
- .map(
- path -> {
- try {
- final var p = new Properties();
- p.load(Files.newInputStream(path));
- return p;
- } catch (Exception e) {
- throw new IllegalArgumentException(
- "ERROR: properties file at %s is failing to load".formatted(path));
- }
- })
- .orElseGet(
- () -> {
- try {
- return contextOption.load();
- } catch (IOException e) {
- throw new IllegalArgumentException("ERROR: loading contexts");
- }
- });
- }
- }
-
- static class ContextOption {
-
- @CommandLine.Option(names = "--kafka", description = "Kafka context name", required = true)
- String kafkaContextName;
-
- @CommandLine.Option(names = "--sr", description = "Schema Registry context name")
- Optional<String> srContextName;
-
- public Properties load() throws IOException {
- final var kafkas = KafkaContexts.load();
- final var props = new Properties();
- if (kafkas.has(kafkaContextName)) {
- final var kafka = kafkas.get(kafkaContextName);
- final var kafkaProps = kafka.properties();
- props.putAll(kafkaProps);
-
- if (srContextName.isPresent()) {
- final var srs = SchemaRegistryContexts.load();
- final var srName = srContextName.get();
- if (srs.has(srName)) {
- final var sr = srs.get(srName);
- final var srProps = sr.properties();
- props.putAll(srProps);
- } else {
- err.printf(
- "WARN: Schema Registry context `%s` not found. Proceeding without it.%n", srName);
- }
- }
-
- return props;
- } else {
- err.printf(
- "ERROR: Kafka context `%s` not found. Check that context already exist.%n",
- kafkaContextName);
- return null;
- }
- }
- }
-
- static class VersionProviderWithConfigProvider implements CommandLine.IVersionProvider {
-
- @Override
- public String[] getVersion() throws IOException {
- final var url =
- VersionProviderWithConfigProvider.class.getClassLoader().getResource("cli.properties");
- if (url == null) {
- return new String[] {
- "No cli.properties file found in the classpath.",
- };
- }
- final var properties = new Properties();
- properties.load(url.openStream());
- return new String[] {
- properties.getProperty("appName") + " version " + properties.getProperty("appVersion") + "",
- "Built: " + properties.getProperty("appBuildTime"),
- };
- }
- }
-}
diff --git a/emulator/src/main/java/kafka/zip/EmulatorArchive.java b/emulator/src/main/java/kafka/zip/EmulatorArchive.java
deleted file mode 100644
index fc7accc..0000000
--- a/emulator/src/main/java/kafka/zip/EmulatorArchive.java
+++ /dev/null
@@ -1,93 +0,0 @@
-package kafka.zip;
-
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.StandardOpenOption;
-import java.util.ArrayList;
-import java.util.Base64;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.common.TopicPartition;
-
-public class EmulatorArchive {
- Map<TopicPartition, List<EmulatorRecord>> records = new HashMap<>();
-
- public static EmulatorArchive load(Path directory) throws IOException {
- try (var list = Files.list(directory)) {
- var tpToPath = list
- .filter(p -> p.endsWith(".txt"))
- .filter(p -> p.getFileName().toString().contains("-"))
- .collect(Collectors.toMap(p -> {
- var filename = p.getFileName().toString();
- var topic = filename.substring(0, filename.lastIndexOf("-") - 1);
- var partition = Integer.parseInt(
- filename.substring(filename.lastIndexOf("-"), filename.lastIndexOf(".")));
- return new TopicPartition(topic, partition);
- }, p -> p));
-
- }
- return null;
- }
-
- public static EmulatorArchive create() {
- return new EmulatorArchive();
- }
-
- public void append(TopicPartition topicPartition, EmulatorRecord zipRecord) {
- records.computeIfPresent(topicPartition, (topicPartition1, zipRecords) -> {
- zipRecords.add(zipRecord);
- return zipRecords;
- });
- records.computeIfAbsent(topicPartition, tp -> {
- final var zipRecords = new ArrayList<EmulatorRecord>();
- zipRecords.add(zipRecord);
- return zipRecords;
- });
- }
-
- public boolean isDone() {
- return false;
- }
-
- public void save() throws IOException {
- for (var tp : records.keySet()) {
- var tpPath = Path.of(tp.topic() + "-" + tp.partition() + ".csv");
- Files.writeString(tpPath, "after_ms,key,value\n", StandardOpenOption.CREATE);
- for (var record : records.get(tp)) {
- Files.writeString(tpPath, record.toLine(), StandardOpenOption.APPEND);
- }
- }
- }
-
- record EmulatorRecord(
- String topic,
- int partition,
- long afterMs,
- FieldFormat keyFormat,
- byte[] key,
- FieldFormat valueFormat,
- byte[] value) {
- public static EmulatorRecord from(ConsumerRecord<byte[], byte[]> record, long afterMs) {
- return new EmulatorRecord(record.topic(), record.partition(), afterMs, FieldFormat.BYTES, record.key(), FieldFormat.BYTES, record.value());
- }
-
- public String toLine () {
- var k = key == null ? "" : Base64.getEncoder().encodeToString(key);
- var v = value == null ? "" : Base64.getEncoder().encodeToString(value);
- return afterMs
- + "," + k
- + "," + v
- + "\n";
- }
- }
-
- enum FieldFormat {
- STRING,
- LONG,
- BYTES
- }
-}
diff --git a/emulator/src/main/java/kafka/zip/KafkaEmulator.java b/emulator/src/main/java/kafka/zip/KafkaEmulator.java
deleted file mode 100644
index e7148e3..0000000
--- a/emulator/src/main/java/kafka/zip/KafkaEmulator.java
+++ /dev/null
@@ -1,134 +0,0 @@
-package kafka.zip;
-
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.time.Duration;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import kafka.context.KafkaContexts;
-import org.apache.kafka.clients.consumer.KafkaConsumer;
-import org.apache.kafka.clients.producer.KafkaProducer;
-import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.serialization.ByteArrayDeserializer;
-import org.apache.kafka.common.serialization.ByteArraySerializer;
-
-public class KafkaEmulator {
-
- /**
- * Read and package topics records into an archive.
- *
- * Based on the start and end conditions, start a consumer and poll records per partition.
- * Transform into archive records and append to the archive. Once the end condition is given, the
- * results are flushed into the zip archive files.
- *
- * @param kafkaContext
- * @param topics
- * @param fromBeginning // TODO replace with startCondition // TODO add endCondition
- * @return
- */
- public RecordingResult record(
- KafkaContexts.KafkaContext kafkaContext, List<String> topics, boolean fromBeginning)
- throws IOException {
- final var endTime = System.currentTimeMillis();
- // create consumer
- final var properties = kafkaContext.properties();
- properties.put("group.id", "emulator-" + endTime);
- final var keyDeserializer = new ByteArrayDeserializer();
- final var valueDeserializer = new ByteArrayDeserializer();
- var consumer = new KafkaConsumer<>(properties, keyDeserializer, valueDeserializer);
- // set offsets from
- // consumer loop
- var done = false;
- var latestTimestamps = new HashMap<TopicPartition, Long>();
- final var archive = EmulatorArchive.create();
- var listTopics = consumer.listTopics();
- var topicPartitions = new ArrayList<TopicPartition>();
- for (var topic : topics) {
- var partitionsInfo = listTopics.get(topic);
- topicPartitions.addAll(
- partitionsInfo.stream()
- .map(info -> new TopicPartition(info.topic(), info.partition()))
- .toList());
- }
- consumer.assign(topicPartitions);
- consumer.seekToBeginning(topicPartitions);
- while (!done) {
- // set offsets to
- // break by topic-partition
- var records = consumer.poll(Duration.ofSeconds(5));
- for (var partition : records.partitions()) {
- // start: per partition
- var perPartition = records.records(partition);
- for (var record : perPartition) {
- // transform to ZipRecord
- var latestTimestamp = latestTimestamps.getOrDefault(partition, -1L);
- final var currentTimestamp = record.timestamp();
- if (currentTimestamp >= endTime) {
- break;
- }
- final long afterMs;
- if (latestTimestamp < 0) {
- afterMs = 0;
- } else {
- afterMs = currentTimestamp - latestTimestamp;
- }
- var zipRecord = EmulatorArchive.EmulatorRecord.from(record, afterMs);
- // append to topic-partition file
- archive.append(partition, zipRecord);
- latestTimestamps.put(partition, currentTimestamp);
- }
- if (isDone(archive)) {
- done = true;
- break;
- }
- // end: per partition
- }
- if (records.isEmpty()) {
- done = true;
- }
- }
- return new RecordingResult(archive);
- }
-
- private boolean isDone(EmulatorArchive archive) {
- return false;
- }
-
- /**
- * Read zip archive files and produce records into Kafka topics with the frequency defined in the
- * archive.
- *
- * @param kafkaContext
- * @return
- */
- public ReplayResult replay(KafkaContexts.KafkaContext kafkaContext, Path directory) throws IOException {
- //create producer
- var keySerializer = new ByteArraySerializer();
- var valueSerializer = new ByteArraySerializer();
- var producer = new KafkaProducer<>(kafkaContext.properties(), keySerializer, valueSerializer);
- //per partition
- //per record
- //prepare record
- //wait
- return null;
- }
-
- private class RecordingResult {
- final EmulatorArchive archive;
-
- private RecordingResult(EmulatorArchive archive) {
- this.archive = archive;
- }
- }
-
- private class ReplayResult {}
-
- public static void main(String[] args) throws IOException {
- var zip = new KafkaEmulator();
- var context = KafkaContexts.load().get("local");
- var result = zip.record(context, List.of("t1"), true);
- result.archive.save();
- }
-}
diff --git a/emulator/src/main/java/kafka/zip/SqliteArchiveLoader.java b/emulator/src/main/java/kafka/zip/SqliteArchiveLoader.java
deleted file mode 100644
index a0f8aa6..0000000
--- a/emulator/src/main/java/kafka/zip/SqliteArchiveLoader.java
+++ /dev/null
@@ -1,23 +0,0 @@
-package kafka.zip;
-
-import java.nio.file.Path;
-
-public class SqliteArchiveLoader implements ArchiveLoader {
-
- final Path archivePath;
-
- public SqliteArchiveLoader(Path archivePath) {
- this.archivePath = archivePath;
- }
-
- @Override
- public EmulatorArchive open() {
-
- return null;
- }
-
- @Override
- public void save() {
-
- }
-}
diff --git a/pom.xml b/pom.xml
index 9f86188..bd3206a 100644
--- a/pom.xml
+++ b/pom.xml
@@ -29,9 +29,9 @@
${java.version}
4.6.3
- ${version.clean.plugin}
+ 3.1.0
7.0.1
- 0.2.0
+ 0.3.1-SNAPSHOT
2.13.3
1.7.36
5.8.2
@@ -243,11 +243,6 @@
maven-javadoc-plugin
${version.javadoc.plugin}
-
- org.apache.maven.plugins
- maven-site-plugin
- ${version.site.plugin}
-
org.apache.maven.plugins
maven-source-plugin
@@ -283,11 +278,20 @@
- com.spotify.fmt
- fmt-maven-plugin
- 2.16
+ com.hubspot.maven.plugins
+ prettier-maven-plugin
+ 0.16
+
+ 1.5.0
+ 90
+ 2
+ false
+ true
+ true
+
+ validate
check
diff --git a/producer-datagen/src/main/java/kafka/cli/producer/datagen/Cli.java b/producer-datagen/src/main/java/kafka/cli/producer/datagen/Cli.java
index b34b01d..c25c66f 100644
--- a/producer-datagen/src/main/java/kafka/cli/producer/datagen/Cli.java
+++ b/producer-datagen/src/main/java/kafka/cli/producer/datagen/Cli.java
@@ -23,18 +23,19 @@
import picocli.CommandLine.Option;
@CommandLine.Command(
- name = "kfk-producer-datagen",
- versionProvider = VersionProviderWithConfigProvider.class,
- mixinStandardHelpOptions = true,
- descriptionHeading = "Kafka CLI - Producer Datagen",
- description = "Kafka Producer with Data generation",
- subcommands = {
- PerfCommand.class,
- IntervalCommand.class,
- ProduceOnceCommand.class,
- SampleCommand.class,
- ListTopicsCommand.class,
- })
+ name = "kfk-producer-datagen",
+ versionProvider = VersionProviderWithConfigProvider.class,
+ mixinStandardHelpOptions = true,
+ descriptionHeading = "Kafka CLI - Producer Datagen",
+ description = "Kafka Producer with Data generation",
+ subcommands = {
+ PerfCommand.class,
+ IntervalCommand.class,
+ ProduceOnceCommand.class,
+ SampleCommand.class,
+ ListTopicsCommand.class,
+ }
+)
public class Cli implements Callable<Integer> {
public static void main(String[] args) {
@@ -51,10 +52,10 @@ public Integer call() {
public static class PropertiesOption {
@CommandLine.Option(
- names = {"-c", "--config"},
- description =
- "Client configuration properties file."
- + "Must include connection to Kafka and Schema Registry")
+ names = { "-c", "--config" },
+ description = "Client configuration properties file." +
+ "Must include connection to Kafka and Schema Registry"
+ )
Optional<Path> configPath;
@ArgGroup(exclusive = false)
@@ -62,25 +63,24 @@ public static class PropertiesOption {
public Properties load() {
return configPath
- .map(
- path -> {
- try {
- final var p = new Properties();
- p.load(Files.newInputStream(path));
- return p;
- } catch (Exception e) {
- throw new IllegalArgumentException(
- "ERROR: properties file at %s is failing to load".formatted(path));
- }
- })
- .orElseGet(
- () -> {
- try {
- return contextOption.load();
- } catch (IOException e) {
- throw new IllegalArgumentException("ERROR: loading contexts");
- }
- });
+ .map(path -> {
+ try {
+ final var p = new Properties();
+ p.load(Files.newInputStream(path));
+ return p;
+ } catch (Exception e) {
+ throw new IllegalArgumentException(
+ "ERROR: properties file at %s is failing to load".formatted(path)
+ );
+ }
+ })
+ .orElseGet(() -> {
+ try {
+ return contextOption.load();
+ } catch (IOException e) {
+ throw new IllegalArgumentException("ERROR: loading contexts");
+ }
+ });
}
}
@@ -109,15 +109,18 @@ public Properties load() throws IOException {
props.putAll(srProps);
} else {
err.printf(
- "WARN: Schema Registry context `%s` not found. Proceeding without it.%n", srName);
+ "WARN: Schema Registry context `%s` not found. Proceeding without it.%n",
+ srName
+ );
}
}
return props;
} else {
err.printf(
- "ERROR: Kafka context `%s` not found. Check that context already exist.%n",
- kafkaContextName);
+ "ERROR: Kafka context `%s` not found. Check that context already exist.%n",
+ kafkaContextName
+ );
return null;
}
}
@@ -126,13 +129,15 @@ public Properties load() throws IOException {
public static class SchemaSourceOption {
@Option(
- names = {"-q", "--quickstart"},
- description = "Quickstart name. Valid values: ${COMPLETION-CANDIDATES}")
+ names = { "-q", "--quickstart" },
+ description = "Quickstart name. Valid values: ${COMPLETION-CANDIDATES}"
+ )
public Optional<Quickstart> quickstart;
@Option(
- names = {"-s", "--schema"},
- description = "Path to Avro schema to use for generating records.")
+ names = { "-s", "--schema" },
+ description = "Path to Avro schema to use for generating records."
+ )
public Optional<Path> schemaPath;
}
@@ -141,16 +146,18 @@ static class VersionProviderWithConfigProvider implements IVersionProvider {
@Override
public String[] getVersion() throws IOException {
final var url =
- VersionProviderWithConfigProvider.class.getClassLoader().getResource("cli.properties");
+ VersionProviderWithConfigProvider.class.getClassLoader()
+ .getResource("cli.properties");
if (url == null) {
- return new String[] {
- "No cli.properties file found in the classpath.",
- };
+ return new String[] { "No cli.properties file found in the classpath." };
}
final var properties = new Properties();
properties.load(url.openStream());
return new String[] {
- properties.getProperty("appName") + " version " + properties.getProperty("appVersion") + "",
+ properties.getProperty("appName") +
+ " version " +
+ properties.getProperty("appVersion") +
+ "",
"Built: " + properties.getProperty("appBuildTime"),
};
}
diff --git a/producer-datagen/src/main/java/kafka/cli/producer/datagen/IntervalRunner.java b/producer-datagen/src/main/java/kafka/cli/producer/datagen/IntervalRunner.java
index fcf3abd..59d7a9f 100644
--- a/producer-datagen/src/main/java/kafka/cli/producer/datagen/IntervalRunner.java
+++ b/producer-datagen/src/main/java/kafka/cli/producer/datagen/IntervalRunner.java
@@ -13,10 +13,11 @@ public class IntervalRunner {
final Stats stats;
public IntervalRunner(
- Config config,
- KafkaProducer producer,
- PayloadGenerator payloadGenerator,
- Stats stats) {
+ Config config,
+ KafkaProducer producer,
+ PayloadGenerator payloadGenerator,
+ Stats stats
+ ) {
this.config = config;
this.producer = producer;
this.payloadGenerator = payloadGenerator;
diff --git a/producer-datagen/src/main/java/kafka/cli/producer/datagen/PayloadGenerator.java b/producer-datagen/src/main/java/kafka/cli/producer/datagen/PayloadGenerator.java
index 9d8a671..8543549 100644
--- a/producer-datagen/src/main/java/kafka/cli/producer/datagen/PayloadGenerator.java
+++ b/producer-datagen/src/main/java/kafka/cli/producer/datagen/PayloadGenerator.java
@@ -37,19 +37,18 @@ public PayloadGenerator(Config config) {
this.format = config.format();
this.random = new Random();
config
- .randomSeed()
- .ifPresent(
- r -> {
- random.setSeed(r);
- random.setSeed(random.nextLong());
- });
+ .randomSeed()
+ .ifPresent(r -> {
+ random.setSeed(r);
+ random.setSeed(random.nextLong());
+ });
this.generator =
- new Generator.Builder()
- .random(random)
- .generation(config.count())
- .schema(config.schema())
- .build();
+ new Generator.Builder()
+ .random(random)
+ .generation(config.count())
+ .schema(config.schema())
+ .build();
this.keyFieldName = config.keyFieldName();
}
@@ -57,9 +56,11 @@ public GenericRecord get() {
final Object generatedObject = generator.generate();
if (!(generatedObject instanceof GenericRecord)) {
throw new RuntimeException(
- String.format(
- "Expected Avro Random Generator to return instance of GenericRecord, found %s instead",
- generatedObject.getClass().getName()));
+ String.format(
+ "Expected Avro Random Generator to return instance of GenericRecord, found %s instead",
+ generatedObject.getClass().getName()
+ )
+ );
}
return (GenericRecord) generatedObject;
}
@@ -81,7 +82,9 @@ String toJson(GenericRecord record) {
final var outputStream = new ByteArrayOutputStream();
final var schema = record.getSchema();
final var datumWriter = new GenericDatumWriter(schema);
- final var encoder = EncoderFactory.get().jsonEncoder(record.getSchema(), outputStream);
+ final var encoder = EncoderFactory
+ .get()
+ .jsonEncoder(record.getSchema(), outputStream);
datumWriter.write(record, encoder);
encoder.flush();
return outputStream.toString();
@@ -121,30 +124,30 @@ public String schema() {
}
public record Config(
- Optional<Long> randomSeed,
- Optional<Quickstart> quickstart,
- Optional<Path> schemaPath,
- long count,
- Format format) {
-
+ Optional<Long> randomSeed,
+ Optional<Quickstart> quickstart,
+ Optional<Path> schemaPath,
+ long count,
+ Format format
+ ) {
Schema schema() {
return quickstart
- .map(Quickstart::getSchemaFilename)
- .map(Config::getSchemaFromSchemaFileName)
- .orElse(
- schemaPath
- .map(
- s -> {
- Schema schemaFromSchemaFileName = null;
- try {
- schemaFromSchemaFileName =
- getSchemaFromSchemaFileName(Files.newInputStream(schemaPath.get()));
- } catch (IOException e) {
- e.printStackTrace();
- }
- return schemaFromSchemaFileName;
- })
- .orElse(null));
+ .map(Quickstart::getSchemaFilename)
+ .map(Config::getSchemaFromSchemaFileName)
+ .orElse(
+ schemaPath
+ .map(s -> {
+ Schema schemaFromSchemaFileName = null;
+ try {
+ schemaFromSchemaFileName =
+ getSchemaFromSchemaFileName(Files.newInputStream(schemaPath.get()));
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ return schemaFromSchemaFileName;
+ })
+ .orElse(null)
+ );
}
public static Schema getSchemaFromSchemaFileName(InputStream stream) {
@@ -174,14 +177,20 @@ public enum Format {
AVRO,
}
- public static Serializer