From 020db59aacb80b18fdbe1817032a5045dbccc740 Mon Sep 17 00:00:00 2001 From: Gary Wilber <41303831+GaryWilber@users.noreply.github.com> Date: Tue, 16 Jul 2024 10:22:30 -0700 Subject: [PATCH] Update to librdkafka 2.5.0 (#1086) --- README.md | 10 ++++----- config.d.ts | 53 ++++++++++++++++++++++++++++++++--------------- deps/librdkafka | 2 +- errors.d.ts | 4 +++- lib/error.js | 4 +++- package-lock.json | 4 ++-- package.json | 6 +++--- 7 files changed, 53 insertions(+), 30 deletions(-) diff --git a/README.md b/README.md index b4748c5d..5e3a15f9 100644 --- a/README.md +++ b/README.md @@ -17,7 +17,7 @@ I am looking for *your* help to make this project even better! If you're interes The `node-rdkafka` library is a high-performance NodeJS client for [Apache Kafka](http://kafka.apache.org/) that wraps the native [librdkafka](https://github.com/edenhill/librdkafka) library. All the complexity of balancing writes across partitions and managing (possibly ever-changing) brokers should be encapsulated in the library. -__This library currently uses `librdkafka` version `2.3.0`.__ +__This library currently uses `librdkafka` version `2.5.0`.__ ## Reference Docs @@ -60,7 +60,7 @@ Using Alpine Linux? Check out the [docs](https://github.com/Blizzard/node-rdkafk ### Windows -Windows build **is not** compiled from `librdkafka` source but it is rather linked against the appropriate version of [NuGet librdkafka.redist](https://www.nuget.org/packages/librdkafka.redist/) static binary that gets downloaded from `https://globalcdn.nuget.org/packages/librdkafka.redist.2.3.0.nupkg` during installation. This download link can be changed using the environment variable `NODE_RDKAFKA_NUGET_BASE_URL` that defaults to `https://globalcdn.nuget.org/packages/` when it's no set. 
+Windows build **is not** compiled from `librdkafka` source but it is rather linked against the appropriate version of [NuGet librdkafka.redist](https://www.nuget.org/packages/librdkafka.redist/) static binary that gets downloaded from `https://globalcdn.nuget.org/packages/librdkafka.redist.2.5.0.nupkg` during installation. This download link can be changed using the environment variable `NODE_RDKAFKA_NUGET_BASE_URL` that defaults to `https://globalcdn.nuget.org/packages/` when it's not set. Requirements: * [node-gyp for Windows](https://github.com/nodejs/node-gyp#on-windows) @@ -97,7 +97,7 @@ const Kafka = require('node-rdkafka'); ## Configuration -You can pass many configuration options to `librdkafka`. A full list can be found in `librdkafka`'s [Configuration.md](https://github.com/edenhill/librdkafka/blob/v2.3.0/CONFIGURATION.md) +You can pass many configuration options to `librdkafka`. A full list can be found in `librdkafka`'s [Configuration.md](https://github.com/edenhill/librdkafka/blob/v2.5.0/CONFIGURATION.md) Configuration keys that have the suffix `_cb` are designated as callbacks. Some of these keys are informational and you can choose to opt-in (for example, `dr_cb`). Others are callbacks designed to @@ -132,7 +132,7 @@ You can also get the version of `librdkafka` const Kafka = require('node-rdkafka'); console.log(Kafka.librdkafkaVersion); -// #=> 2.3.0 +// #=> 2.5.0 ``` ## Sending Messages @@ -145,7 +145,7 @@ const producer = new Kafka.Producer({ }); ``` -A `Producer` requires only `metadata.broker.list` (the Kafka brokers) to be created. The values in this list are separated by commas. 
For other configuration options, see the [Configuration.md](https://github.com/edenhill/librdkafka/blob/v2.5.0/CONFIGURATION.md) file described previously. The following example illustrates a list with several `librdkafka` options set. diff --git a/config.d.ts b/config.d.ts index e78a4141..09c69ede 100644 --- a/config.d.ts +++ b/config.d.ts @@ -1,4 +1,4 @@ -// ====== Generated from librdkafka 2.3.0 file CONFIGURATION.md ====== +// ====== Generated from librdkafka 2.5.0 file CONFIGURATION.md ====== // Code that generated this is a derivative work of the code from Nam Nguyen // https://gist.github.com/ntgn81/066c2c8ec5b4238f85d1e9168a04e3fb @@ -620,12 +620,33 @@ export interface GlobalConfig { "client.rack"?: string; /** - * Controls how the client uses DNS lookups. By default, when the lookup returns multiple IP addresses for a hostname, they will all be attempted for connection before the connection is considered failed. This applies to both bootstrap and advertised servers. If the value is set to `resolve_canonical_bootstrap_servers_only`, each entry will be resolved and expanded into a list of canonical names. NOTE: Default here is different from the Java client's default behavior, which connects only to the first IP address returned for a hostname. + * The backoff time in milliseconds before retrying a protocol request, this is the first backoff time, and will be backed off exponentially until number of retries is exhausted, and it's capped by retry.backoff.max.ms. + * + * @default 100 + */ + "retry.backoff.ms"?: number; + + /** + * The max backoff time in milliseconds before retrying a protocol request, this is the atmost backoff allowed for exponentially backed off requests. + * + * @default 1000 + */ + "retry.backoff.max.ms"?: number; + + /** + * Controls how the client uses DNS lookups. By default, when the lookup returns multiple IP addresses for a hostname, they will all be attempted for connection before the connection is considered failed. 
This applies to both bootstrap and advertised servers. If the value is set to `resolve_canonical_bootstrap_servers_only`, each entry will be resolved and expanded into a list of canonical names. **WARNING**: `resolve_canonical_bootstrap_servers_only` must only be used with `GSSAPI` (Kerberos) as `sasl.mechanism`, as it's the only purpose of this configuration value. **NOTE**: Default here is different from the Java client's default behavior, which connects only to the first IP address returned for a hostname. * * @default use_all_dns_ips */ "client.dns.lookup"?: 'use_all_dns_ips' | 'resolve_canonical_bootstrap_servers_only'; + /** + * Whether to enable pushing of client metrics to the cluster, if the cluster has a client metrics subscription which matches this client + * + * @default true + */ + "enable.metrics.push"?: boolean; + /** * Enables or disables `event.*` emitting. * @@ -703,20 +724,6 @@ export interface ProducerGlobalConfig extends GlobalConfig { */ "retries"?: number; - /** - * The backoff time in milliseconds before retrying a protocol request, this is the first backoff time, and will be backed off exponentially until number of retries is exhausted, and it's capped by retry.backoff.max.ms. - * - * @default 100 - */ - "retry.backoff.ms"?: number; - - /** - * The max backoff time in milliseconds before retrying a protocol request, this is the atmost backoff allowed for exponentially backed off requests. - * - * @default 1000 - */ - "retry.backoff.max.ms"?: number; - /** * The threshold of outstanding not yet transmitted broker requests needed to backpressure the producer's message accumulator. If the number of not yet transmitted requests equals or exceeds this number, produce request creation that would have otherwise been triggered (for example, in accordance with linger.ms) will be delayed. A lower number yields larger and more effective batches. A higher value can improve latency when using compression on slow machines. 
* @@ -810,12 +817,24 @@ export interface ConsumerGlobalConfig extends GlobalConfig { "heartbeat.interval.ms"?: number; /** - * Group protocol type. NOTE: Currently, the only supported group protocol type is `consumer`. + * Group protocol type for the `classic` group protocol. NOTE: Currently, the only supported group protocol type is `consumer`. * * @default consumer */ "group.protocol.type"?: string; + /** + * Group protocol to use. Use `classic` for the original protocol and `consumer` for the new protocol introduced in KIP-848. Available protocols: classic or consumer. Default is `classic`, but will change to `consumer` in next releases. + * + * @default classic + */ + "group.protocol"?: 'classic' | 'consumer'; + + /** + * Server side assignor to use. Keep it null to make server select a suitable assignor for the group. Available assignors: uniform or range. Default is null + */ + "group.remote.assignor"?: string; + /** * How often to query for the current client group coordinator. If the currently assigned coordinator is down the configured query interval will be divided by ten to more quickly recover in case of coordinator reassignment. 
* diff --git a/deps/librdkafka b/deps/librdkafka index 95a542c8..6eaf89fb 160000 --- a/deps/librdkafka +++ b/deps/librdkafka @@ -1 +1 @@ -Subproject commit 95a542c87c61d2c45b445f91c73dd5442eb04f3c +Subproject commit 6eaf89fb124c421b66b43b195879d458a3a31f86 diff --git a/errors.d.ts b/errors.d.ts index 439ec72b..3f182d36 100644 --- a/errors.d.ts +++ b/errors.d.ts @@ -1,4 +1,4 @@ -// ====== Generated from librdkafka 2.3.0 file src-cpp/rdkafkacpp.h ====== +// ====== Generated from librdkafka 2.5.0 file src-cpp/rdkafkacpp.h ====== export const CODES: { ERRORS: { /* Internal errors to rdkafka: */ /** Begin internal error codes (**-200**) */ @@ -128,8 +128,10 @@ export const CODES: { ERRORS: { ERR__AUTO_OFFSET_RESET: number, /** Partition log truncation detected (**-139**) */ ERR__LOG_TRUNCATION: number, + /** End internal error codes (**-100**) */ ERR__END: number, + /* Kafka broker errors: */ /** Unknown broker error (**-1**) */ ERR_UNKNOWN: number, diff --git a/lib/error.js b/lib/error.js index 0f14e1e8..ba204896 100644 --- a/lib/error.js +++ b/lib/error.js @@ -27,7 +27,7 @@ LibrdKafkaError.wrap = errorWrap; * @enum {number} * @constant */ -// ====== Generated from librdkafka 2.3.0 file src-cpp/rdkafkacpp.h ====== +// ====== Generated from librdkafka 2.5.0 file src-cpp/rdkafkacpp.h ====== LibrdKafkaError.codes = { /* Internal errors to rdkafka: */ @@ -158,8 +158,10 @@ LibrdKafkaError.codes = { ERR__AUTO_OFFSET_RESET: -140, /** Partition log truncation detected */ ERR__LOG_TRUNCATION: -139, + /** End internal error codes */ ERR__END: -100, + /* Kafka broker errors: */ /** Unknown broker error */ ERR_UNKNOWN: -1, diff --git a/package-lock.json b/package-lock.json index 74b5ad03..ab680e56 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "node-rdkafka", - "version": "v3.0.1", + "version": "v3.1.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "node-rdkafka", - "version": "v3.0.1", + "version": "v3.1.0", 
"hasInstallScript": true, "license": "MIT", "dependencies": { diff --git a/package.json b/package.json index 080a69ce..54497902 100644 --- a/package.json +++ b/package.json @@ -1,8 +1,8 @@ { "name": "node-rdkafka", - "version": "v3.0.1", + "version": "v3.1.0", "description": "Node.js bindings for librdkafka", - "librdkafka": "2.3.0", + "librdkafka": "2.5.0", "main": "lib/index.js", "scripts": { "configure": "node-gyp configure", @@ -45,4 +45,4 @@ "engines": { "node": ">=16" } -} +}