From a17bf43fdd0dc3633819e4b44bd612382b1e84f9 Mon Sep 17 00:00:00 2001
From: Marc Bachmann
Date: Tue, 14 Dec 2021 15:06:21 +0100
Subject: [PATCH] feat: Upgrade to opentelemetry@0.27, keeps compatibility with v0.24

---
 example/index.js                      |   9 +-
 example/package-lock.json             | 216 +++++++++++++++++++-------
 example/package.json                  |   5 +-
 index.js                              |  17 +-
 metrics/eventLoopLag.js               |  30 ++--
 metrics/gc.js                         |   2 +-
 metrics/heapSizeAndUsed.js            |  12 +-
 metrics/heapSpacesSizeAndUsed.js      |  42 +++--
 metrics/osMemoryHeap.js               |   2 +-
 metrics/osMemoryHeapLinux.js          |  12 +-
 metrics/processCpuTotal.js            |   6 +-
 metrics/processHandles.js             |   4 +-
 metrics/processOpenFileDescriptors.js |   2 +-
 metrics/processRequests.js            |   4 +-
 14 files changed, 242 insertions(+), 121 deletions(-)

diff --git a/example/index.js b/example/index.js
index 0ee480f..ec254a7 100644
--- a/example/index.js
+++ b/example/index.js
@@ -1,7 +1,7 @@
-const {MeterProvider} = require('@opentelemetry/metrics')
+const {MeterProvider} = require('@opentelemetry/sdk-metrics-base')
 const {PrometheusExporter} = require('@opentelemetry/exporter-prometheus')
 
-const exporter = new PrometheusExporter({startServer: true}, () => {
+const exporter = new PrometheusExporter({port: 9464, startServer: true}, () => {
   // eslint-disable-next-line no-console
   console.log(
     `Prometheus scrape endpoint: http://localhost:%s%s`,
@@ -16,3 +16,8 @@ const meterProvider = new MeterProvider({
 })
 
 require('opentelemetry-node-metrics')(meterProvider)
+
+
+// With opentelemetry 0.27, the process somehow doesn't keep any open handles
+// and would exit without this interval
+setInterval(() => {}, 1000)
diff --git a/example/package-lock.json b/example/package-lock.json
index f5c41e5..a2de1ad 100644
--- a/example/package-lock.json
+++ b/example/package-lock.json
@@ -1,84 +1,184 @@
 {
   "name": "opentelemetry-node-metrics-example",
   "version": "1.0.0",
-  "lockfileVersion": 1,
+  "lockfileVersion": 2,
   "requires": true,
+  "packages": {
+    "": {
+      "name": "opentelemetry-node-metrics-example",
+      "version": "1.0.0",
+      "license": "ISC",
+      "dependencies": {
+        "@opentelemetry/exporter-prometheus": "^0.27.0",
+        "@opentelemetry/sdk-metrics-base": "^0.27.0",
+        "opentelemetry-node-metrics": "file:.."
+      }
+    },
+    "..": {
+      "version": "0.0.0-placeholder",
+      "license": "Apache-2.0"
+    },
+    "node_modules/@opentelemetry/api": {
+      "version": "1.0.3",
+      "resolved": "https://registry.npmjs.org/@opentelemetry/api/-/api-1.0.3.tgz",
+      "integrity": "sha512-puWxACExDe9nxbBB3lOymQFrLYml2dVOrd7USiVRnSbgXE+KwBu+HxFvxrzfqsiSda9IWsXJG1ef7C1O2/GmKQ==",
+      "peer": true,
+      "engines": {
+        "node": ">=8.0.0"
+      }
+    },
+    "node_modules/@opentelemetry/api-metrics": {
+      "version": "0.27.0",
+      "resolved": "https://registry.npmjs.org/@opentelemetry/api-metrics/-/api-metrics-0.27.0.tgz",
+      "integrity": "sha512-tB79288bwjkdhPNpw4UdOEy3bacVwtol6Que7cAu8KEJ9ULjRfSiwpYEwJY/oER3xZ7zNFz0uiJ7N1jSiotpVA==",
+      "engines": {
+        "node": ">=8.0.0"
+      }
+    },
+    "node_modules/@opentelemetry/core": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-1.0.1.tgz",
+      "integrity": "sha512-90nQ2X6b/8X+xjcLDBYKooAcOsIlwLRYm+1VsxcX5cHl6V4CSVmDpBreQSDH/A21SqROzapk6813008SatmPpQ==",
+      "dependencies": {
+        "@opentelemetry/semantic-conventions": "1.0.1"
+      },
+      "engines": {
+        "node": ">=8.5.0"
+      },
+      "peerDependencies": {
+        "@opentelemetry/api": ">=1.0.0 <1.1.0"
+      }
+    },
+    "node_modules/@opentelemetry/exporter-prometheus": {
+      "version": "0.27.0",
+      "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-prometheus/-/exporter-prometheus-0.27.0.tgz",
+      "integrity": "sha512-fbdV+iQAv/WNhiv57C6+9Lwyhc6yJuDy3eyYSyPSVPgCyclGS3fvyTaWNwPbrkE53W8zZHPm+88rAaeQgEjo0w==",
+      "dependencies": {
+        "@opentelemetry/api-metrics": "0.27.0",
+        "@opentelemetry/core": "1.0.1",
+        "@opentelemetry/sdk-metrics-base": "0.27.0"
+      },
+      "engines": {
+        "node": ">=8.0.0"
+      },
+      "peerDependencies": {
+        "@opentelemetry/api": "^1.0.0"
+      }
+    },
+    "node_modules/@opentelemetry/sdk-metrics-base": {
+      "version": "0.27.0",
+      "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-metrics-base/-/sdk-metrics-base-0.27.0.tgz",
+      "integrity": "sha512-HpiWI4sVNsjp3FGyUlc24KvUY2Whl4PQVwcbA/gWv2kHaLQrDJrWC+3rjUR+87Mrd0nsiqJ85xhGFU6IK8h7gg==",
+      "dependencies": {
+        "@opentelemetry/api-metrics": "0.27.0",
+        "@opentelemetry/core": "1.0.1",
+        "@opentelemetry/resources": "1.0.1",
+        "lodash.merge": "^4.6.2"
+      },
+      "engines": {
+        "node": ">=8.0.0"
+      },
+      "peerDependencies": {
+        "@opentelemetry/api": "^1.0.0"
+      }
+    },
+    "node_modules/@opentelemetry/sdk-metrics-base/node_modules/@opentelemetry/resources": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-1.0.1.tgz",
+      "integrity": "sha512-p8DevOaAEepPucUtImR4cZKHOE2L1jgQAtkdZporV+XnxPA/HqCHPEESyUVuo4f5M0NUlL6k5Pba75KwNJlTRg==",
+      "dependencies": {
+        "@opentelemetry/core": "1.0.1",
+        "@opentelemetry/semantic-conventions": "1.0.1"
+      },
+      "engines": {
+        "node": ">=8.0.0"
+      },
+      "peerDependencies": {
+        "@opentelemetry/api": ">=1.0.0 <1.1.0"
+      }
+    },
+    "node_modules/@opentelemetry/semantic-conventions": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/@opentelemetry/semantic-conventions/-/semantic-conventions-1.0.1.tgz",
+      "integrity": "sha512-7XU1sfQ8uCVcXLxtAHA8r3qaLJ2oq7sKtEwzZhzuEXqYmjW+n+J4yM3kNo0HQo3Xp1eUe47UM6Wy6yuAvIyllg==",
+      "engines": {
+        "node": ">=8.0.0"
+      }
+    },
+    "node_modules/lodash.merge": {
+      "version": "4.6.2",
+      "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz",
+      "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ=="
+    },
+    "node_modules/opentelemetry-node-metrics": {
+      "resolved": "..",
+      "link": true
+    }
+  },
   "dependencies": {
"@opentelemetry/api": { - "version": "0.14.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/api/-/api-0.14.0.tgz", - "integrity": "sha512-L7RMuZr5LzMmZiQSQDy9O1jo0q+DaLy6XpYJfIGfYSfoJA5qzYwUP3sP1uMIQ549DvxAgM3ng85EaPTM/hUHwQ==", - "requires": { - "@opentelemetry/context-base": "^0.14.0" - } + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@opentelemetry/api/-/api-1.0.3.tgz", + "integrity": "sha512-puWxACExDe9nxbBB3lOymQFrLYml2dVOrd7USiVRnSbgXE+KwBu+HxFvxrzfqsiSda9IWsXJG1ef7C1O2/GmKQ==", + "peer": true }, - "@opentelemetry/context-base": { - "version": "0.14.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/context-base/-/context-base-0.14.0.tgz", - "integrity": "sha512-sDOAZcYwynHFTbLo6n8kIbLiVF3a3BLkrmehJUyEbT9F+Smbi47kLGS2gG2g0fjBLR/Lr1InPD7kXL7FaTqEkw==" + "@opentelemetry/api-metrics": { + "version": "0.27.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/api-metrics/-/api-metrics-0.27.0.tgz", + "integrity": "sha512-tB79288bwjkdhPNpw4UdOEy3bacVwtol6Que7cAu8KEJ9ULjRfSiwpYEwJY/oER3xZ7zNFz0uiJ7N1jSiotpVA==" }, "@opentelemetry/core": { - "version": "0.14.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-0.14.0.tgz", - "integrity": "sha512-HJ4VM0cV6c5qjdW7C7koB2IT4ADunCOehxnKFRslQkbDqAEA1w42AZ9679siYALpWYxNqcJyqF2jxCNtfNHa6Q==", + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-1.0.1.tgz", + "integrity": "sha512-90nQ2X6b/8X+xjcLDBYKooAcOsIlwLRYm+1VsxcX5cHl6V4CSVmDpBreQSDH/A21SqROzapk6813008SatmPpQ==", "requires": { - "@opentelemetry/api": "^0.14.0", - "@opentelemetry/context-base": "^0.14.0", - "semver": "^7.1.3" + "@opentelemetry/semantic-conventions": "1.0.1" } }, "@opentelemetry/exporter-prometheus": { - "version": "0.14.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-prometheus/-/exporter-prometheus-0.14.0.tgz", - "integrity": "sha512-pgNqKxk3AHugW5niVqAmcdcIuGTUYjK0ylIN5x2FWxLExVnD1aj9ZEsuNfl7vfr1sxBNJ4jhTVV4ixVirmKFeQ==", + "version": "0.27.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-prometheus/-/exporter-prometheus-0.27.0.tgz", + "integrity": "sha512-fbdV+iQAv/WNhiv57C6+9Lwyhc6yJuDy3eyYSyPSVPgCyclGS3fvyTaWNwPbrkE53W8zZHPm+88rAaeQgEjo0w==", "requires": { - "@opentelemetry/api": "^0.14.0", - "@opentelemetry/core": "^0.14.0", - "@opentelemetry/metrics": "^0.14.0" + "@opentelemetry/api-metrics": "0.27.0", + "@opentelemetry/core": "1.0.1", + "@opentelemetry/sdk-metrics-base": "0.27.0" } }, - "@opentelemetry/metrics": { - "version": "0.14.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/metrics/-/metrics-0.14.0.tgz", - "integrity": "sha512-gIC/ykE7V+5qKybSHr+83ATTaXEZLUdADLjLmd8ZIu1+I7dx8ZM+OJCmWyGyZppD1ZlclvhybQDUQOCRKpWMWQ==", + "@opentelemetry/sdk-metrics-base": { + "version": "0.27.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-metrics-base/-/sdk-metrics-base-0.27.0.tgz", + "integrity": "sha512-HpiWI4sVNsjp3FGyUlc24KvUY2Whl4PQVwcbA/gWv2kHaLQrDJrWC+3rjUR+87Mrd0nsiqJ85xhGFU6IK8h7gg==", "requires": { - "@opentelemetry/api": "^0.14.0", - "@opentelemetry/core": "^0.14.0", - "@opentelemetry/resources": "^0.14.0" + "@opentelemetry/api-metrics": "0.27.0", + "@opentelemetry/core": "1.0.1", + "@opentelemetry/resources": "1.0.1", + "lodash.merge": "^4.6.2" + }, + "dependencies": { + "@opentelemetry/resources": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-1.0.1.tgz", + "integrity": 
"sha512-p8DevOaAEepPucUtImR4cZKHOE2L1jgQAtkdZporV+XnxPA/HqCHPEESyUVuo4f5M0NUlL6k5Pba75KwNJlTRg==", + "requires": { + "@opentelemetry/core": "1.0.1", + "@opentelemetry/semantic-conventions": "1.0.1" + } + } } }, - "@opentelemetry/resources": { - "version": "0.14.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-0.14.0.tgz", - "integrity": "sha512-7XVML4HxvoH6kWY+x0mhMc5m0a2YBvPCTSX7yAqyp9XIGvFpdjzAE2ggJ40DZrL1sPv9f0QYAbnIKFDVLBTfGA==", - "requires": { - "@opentelemetry/api": "^0.14.0", - "@opentelemetry/core": "^0.14.0" - } + "@opentelemetry/semantic-conventions": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@opentelemetry/semantic-conventions/-/semantic-conventions-1.0.1.tgz", + "integrity": "sha512-7XU1sfQ8uCVcXLxtAHA8r3qaLJ2oq7sKtEwzZhzuEXqYmjW+n+J4yM3kNo0HQo3Xp1eUe47UM6Wy6yuAvIyllg==" }, - "lru-cache": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", - "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", - "requires": { - "yallist": "^4.0.0" - } + "lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==" }, "opentelemetry-node-metrics": { "version": "file:.." - }, - "semver": { - "version": "7.3.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.4.tgz", - "integrity": "sha512-tCfb2WLjqFAtXn4KEdxIhalnRtoKFN7nAwj0B3ZXCbQloV2tq5eDbcTmT68JJD3nRJq24/XgxtQKFIpQdtvmVw==", - "requires": { - "lru-cache": "^6.0.0" - } - }, - "yallist": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" } } } diff --git a/example/package.json b/example/package.json index ec6e53e..f4d5088 100644 --- a/example/package.json +++ b/example/package.json @@ -4,11 +4,10 @@ "description": "", "main": "index.js", "dependencies": { - "@opentelemetry/exporter-prometheus": "^0.14.0", - "@opentelemetry/metrics": "^0.14.0", + "@opentelemetry/exporter-prometheus": "^0.27.0", + "@opentelemetry/sdk-metrics-base": "^0.27.0", "opentelemetry-node-metrics": "file:.." }, - "devDependencies": {}, "scripts": { "test": "echo \"Error: no test specified\" && exit 1" }, diff --git a/index.js b/index.js index fde0e88..f69081b 100644 --- a/index.js +++ b/index.js @@ -3,7 +3,20 @@ module.exports = function setupNodeMetrics (meterProvider, config) { config.prefix = config.prefix ? config.prefix : '' config.labels = config.labels ? 
-  const meter = meterProvider.getMeter('opentelemetry-node-metrics')
+  let meter = meterProvider.getMeter('opentelemetry-node-metrics')
+
+  // keep opentelemetry compatibility with v0.24.x
+  if (!meter.createObservableGauge) {
+    meter = {
+      createObservableGauge: meter.createValueObserver.bind(meter),
+      createHistogram: meter.createValueRecorder.bind(meter),
+      createCounter: meter.createCounter.bind(meter),
+      createUpDownCounter: meter.createUpDownCounter.bind(meter)
+    }
+  }
+
+  require('./metrics/version')(meter, config)
+  require('./metrics/processStartTime')(meter, config)
   require('./metrics/eventLoopLag')(meter, config)
   require('./metrics/gc')(meter, config)
   require('./metrics/heapSizeAndUsed')(meter, config)
@@ -14,6 +27,4 @@ module.exports = function setupNodeMetrics (meterProvider, config) {
   require('./metrics/processMaxFileDescriptors')(meter, config)
   require('./metrics/processOpenFileDescriptors')(meter, config)
   require('./metrics/processRequests')(meter, config)
-  require('./metrics/processStartTime')(meter, config)
-  require('./metrics/version')(meter, config)
 }
diff --git a/metrics/eventLoopLag.js b/metrics/eventLoopLag.js
index dc01c4f..9d18270 100644
--- a/metrics/eventLoopLag.js
+++ b/metrics/eventLoopLag.js
@@ -20,47 +20,44 @@ module.exports = (meter, {prefix, labels, eventLoopMonitoringPrecision}) => {
   histogram.enable()
 
-  const lag = meter.createValueObserver(prefix + NODEJS_EVENTLOOP_LAG, {
+  const lag = meter.createObservableGauge(prefix + NODEJS_EVENTLOOP_LAG, {
     description: 'Lag of event loop in seconds.'
   }).bind(labels)
 
-  const lagMin = meter.createValueObserver(prefix + NODEJS_EVENTLOOP_LAG_MIN, {
+  const lagMin = meter.createObservableGauge(prefix + NODEJS_EVENTLOOP_LAG_MIN, {
     description: 'The minimum recorded event loop delay.'
   }).bind(labels)
 
-  const lagMax = meter.createValueObserver(prefix + NODEJS_EVENTLOOP_LAG_MAX, {
+  const lagMax = meter.createObservableGauge(prefix + NODEJS_EVENTLOOP_LAG_MAX, {
     description: 'The maximum recorded event loop delay.'
   }).bind(labels)
 
-  const lagMean = meter.createValueObserver(prefix + NODEJS_EVENTLOOP_LAG_MEAN, {
+  const lagMean = meter.createObservableGauge(prefix + NODEJS_EVENTLOOP_LAG_MEAN, {
    description: 'The mean of the recorded event loop delays.'
   }).bind(labels)
 
-  const lagStddev = meter.createValueObserver(prefix + NODEJS_EVENTLOOP_LAG_STDDEV, {
+  const lagStddev = meter.createObservableGauge(prefix + NODEJS_EVENTLOOP_LAG_STDDEV, {
     description: 'The standard deviation of the recorded event loop delays.'
   }).bind(labels)
 
-  const lagP50 = meter.createValueObserver(prefix + NODEJS_EVENTLOOP_LAG_P50, {
+  const lagP50 = meter.createObservableGauge(prefix + NODEJS_EVENTLOOP_LAG_P50, {
     description: 'The 50th percentile of the recorded event loop delays.'
   }).bind(labels)
 
-  const lagP90 = meter.createValueObserver(prefix + NODEJS_EVENTLOOP_LAG_P90, {
+  const lagP90 = meter.createObservableGauge(prefix + NODEJS_EVENTLOOP_LAG_P90, {
     description: 'The 90th percentile of the recorded event loop delays.'
   }).bind(labels)
 
-  const lagP99 = meter.createValueObserver(prefix + NODEJS_EVENTLOOP_LAG_P99, {
+  const lagP99 = meter.createObservableGauge(prefix + NODEJS_EVENTLOOP_LAG_P99, {
     description: 'The 99th percentile of the recorded event loop delays.'
-  }).bind(labels)
+  }, async () => {
+    const startTime = process.hrtime()
+    await new Promise((resolve) => setImmediate(() => resolve()))
 
-  function reportEventloopLag (start) {
-    const delta = process.hrtime(start)
+    const delta = process.hrtime(startTime)
     const nanosec = (delta[0] * 1e9) + delta[1]
     const seconds = nanosec / 1e9
 
     lag.update(seconds)
-  }
-
-  meter.createBatchObserver((observerBatchResult) => {
-    setImmediate(reportEventloopLag, process.hrtime())
 
     lagMin.update(histogram.min / 1e9)
     lagMax.update(histogram.max / 1e9)
@@ -69,8 +66,9 @@ module.exports = (meter, {prefix, labels, eventLoopMonitoringPrecision}) => {
     lagP50.update(histogram.percentile(50) / 1e9)
     lagP90.update(histogram.percentile(90) / 1e9)
     lagP99.update(histogram.percentile(99) / 1e9)
-  })
+  }).bind(labels)
 
+  lag.update(0)
 }
 
 module.exports.metricNames = [
diff --git a/metrics/gc.js b/metrics/gc.js
index b20f662..b8a85f8 100644
--- a/metrics/gc.js
+++ b/metrics/gc.js
@@ -7,7 +7,7 @@ const DEFAULT_GC_DURATION_BUCKETS = [0.001, 0.01, 0.1, 1, 2, 5]
 module.exports = (meter, {prefix, labels, gcDurationBuckets}) => {
   const boundaries = gcDurationBuckets || DEFAULT_GC_DURATION_BUCKETS
 
-  const histogram = meter.createValueRecorder(prefix + NODEJS_GC_DURATION_SECONDS, {
+  const histogram = meter.createHistogram(prefix + NODEJS_GC_DURATION_SECONDS, {
     description: 'Garbage collection duration by kind, one of major, minor, incremental or weakcb.',
     boundaries
   })
diff --git a/metrics/heapSizeAndUsed.js b/metrics/heapSizeAndUsed.js
index 7b0004e..6534239 100644
--- a/metrics/heapSizeAndUsed.js
+++ b/metrics/heapSizeAndUsed.js
@@ -5,25 +5,23 @@ const NODEJS_HEAP_SIZE_USED = 'nodejs_heap_size_used_bytes'
 const NODEJS_EXTERNAL_MEMORY = 'nodejs_external_memory_bytes'
 
 module.exports = (meter, {labels, prefix}) => {
-  const heapSizeTotal = meter.createValueObserver(prefix + NODEJS_HEAP_SIZE_TOTAL, {
+  const heapSizeTotal = meter.createObservableGauge(prefix + NODEJS_HEAP_SIZE_TOTAL, {
     description: 'Process heap size from Node.js in bytes.'
   }).bind(labels)
 
-  const heapSizeUsed = meter.createValueObserver(prefix + NODEJS_HEAP_SIZE_USED, {
+  const heapSizeUsed = meter.createObservableGauge(prefix + NODEJS_HEAP_SIZE_USED, {
     description: 'Process heap size used from Node.js in bytes.'
   }).bind(labels)
 
-  const externalMemUsed = meter.createValueObserver(prefix + NODEJS_EXTERNAL_MEMORY, {
+  const externalMemUsed = meter.createObservableGauge(prefix + NODEJS_EXTERNAL_MEMORY, {
     description: 'Node.js external memory size in bytes.'
-  }).bind(labels)
-
-  meter.createBatchObserver(() => {
+  }, () => {
     const memUsage = safeMemoryUsage()
     if (!memUsage) return
     heapSizeTotal.update(memUsage.heapTotal)
     heapSizeUsed.update(memUsage.heapUsed)
     if (memUsage.external !== undefined) externalMemUsed.update(memUsage.external)
-  })
+  }).bind(labels)
 }
 
 module.exports.metricNames = [
diff --git a/metrics/heapSpacesSizeAndUsed.js b/metrics/heapSpacesSizeAndUsed.js
index e0351dc..ec67c38 100644
--- a/metrics/heapSpacesSizeAndUsed.js
+++ b/metrics/heapSpacesSizeAndUsed.js
@@ -8,21 +8,35 @@ for (const metricType of METRICS) {
 }
 
 module.exports = (meter, {prefix, labels}) => {
-  const gauges = {}
-  for (const metricType of METRICS) {
-    gauges[metricType] = meter.createValueObserver(prefix + NODEJS_HEAP_SIZE[metricType], {
-      description: `Process heap space size ${metricType} from Node.js in bytes.`
-    })
-  }
-
-  meter.createBatchObserver((observerBatchResult) => {
+  const boundMetricsBySpace = {}
+
+  const total = meter.createObservableGauge(prefix + NODEJS_HEAP_SIZE.total, {
+    description: `Process heap space size total from Node.js in bytes.`
+  })
+
+  const used = meter.createObservableGauge(prefix + NODEJS_HEAP_SIZE.used, {
+    description: `Process heap space size used from Node.js in bytes.`
+  })
+
+  const available = meter.createObservableGauge(prefix + NODEJS_HEAP_SIZE.available, {
+    description: `Process heap space size available from Node.js in bytes.`
+  }, () => {
     for (const space of v8.getHeapSpaceStatistics()) {
-      const spaceName = space.space_name.substr(0, space.space_name.indexOf('_space'))
-      observerBatchResult.observe({space: spaceName, ...labels}, [
-        gauges.total.observation(space.space_size),
-        gauges.used.observation(space.space_used_size),
-        gauges.available.observation(space.space_available_size)
-      ])
+      let bound = boundMetricsBySpace[space.space_name]
+      if (!bound) {
+        const spaceName = space.space_name.substr(0, space.space_name.indexOf('_space'))
+        boundMetricsBySpace[space.space_name] = {
+          total: total.bind({...labels, space: spaceName}),
+          used: used.bind({...labels, space: spaceName}),
+          available: available.bind({...labels, space: spaceName})
+        }
+
+        bound = boundMetricsBySpace[space.space_name]
+      }
+
+      bound.total.update(space.space_size)
+      bound.used.update(space.space_used_size)
+      bound.available.update(space.space_available_size)
     }
   })
 }
diff --git a/metrics/osMemoryHeap.js b/metrics/osMemoryHeap.js
index b3cb3b8..ce4509d 100644
--- a/metrics/osMemoryHeap.js
+++ b/metrics/osMemoryHeap.js
@@ -4,7 +4,7 @@ const safeMemoryUsage = require('./helpers/safeMemoryUsage')
 const PROCESS_RESIDENT_MEMORY = 'process_resident_memory_bytes'
 
 function notLinuxVariant (meter, {prefix, labels}) {
-  const boundMeter = meter.createValueObserver(prefix + PROCESS_RESIDENT_MEMORY, {
+  const boundMeter = meter.createObservableGauge(prefix + PROCESS_RESIDENT_MEMORY, {
     description: 'Resident memory size in bytes.'
   }, () => {
     const memUsage = safeMemoryUsage()
diff --git a/metrics/osMemoryHeapLinux.js b/metrics/osMemoryHeapLinux.js
index e4d8cb4..ca9576e 100644
--- a/metrics/osMemoryHeapLinux.js
+++ b/metrics/osMemoryHeapLinux.js
@@ -29,19 +29,17 @@ function structureOutput (input) {
 }
 
 module.exports = (meter, {prefix, labels}) => {
-  const residentMemGauge = meter.createValueObserver(prefix + PROCESS_RESIDENT_MEMORY, {
+  const residentMemGauge = meter.createObservableGauge(prefix + PROCESS_RESIDENT_MEMORY, {
     description: 'Resident memory size in bytes.'
   }).bind(labels)
 
-  const virtualMemGauge = meter.createValueObserver(prefix + PROCESS_VIRTUAL_MEMORY, {
+  const virtualMemGauge = meter.createObservableGauge(prefix + PROCESS_VIRTUAL_MEMORY, {
     description: 'Virtual memory size in bytes.'
   }).bind(labels)
 
-  const heapSizeMemGauge = meter.createValueObserver(prefix + PROCESS_HEAP, {
+  const heapSizeMemGauge = meter.createObservableGauge(prefix + PROCESS_HEAP, {
     description: 'Process heap size in bytes.'
-  }).bind(labels)
-
-  meter.createBatchObserver(() => {
+  }, () => {
     try {
       // Sync I/O is often problematic, but /proc isn't really I/O, it
       // a virtual filesystem that maps directly to in-kernel data
@@ -58,7 +56,7 @@ module.exports = (meter, {prefix, labels}) => {
     } catch {
       // noop
     }
-  })
+  }).bind(labels)
 }
 
 module.exports.metricNames = [
diff --git a/metrics/processCpuTotal.js b/metrics/processCpuTotal.js
index 7d5c325..8104176 100644
--- a/metrics/processCpuTotal.js
+++ b/metrics/processCpuTotal.js
@@ -16,9 +16,7 @@ module.exports = (meter, {prefix, labels}) => {
 
   const cpuUsageCounter = meter.createCounter(prefix + PROCESS_CPU_SECONDS, {
     description: 'Total user and system CPU time spent in seconds.'
-  }).bind(labels)
-
-  meter.createBatchObserver(() => {
+  }, () => {
     const cpuUsage = process.cpuUsage()
     const userUsageMicros = cpuUsage.user - lastCpuUsage.user
     const systemUsageMicros = cpuUsage.system - lastCpuUsage.system
@@ -27,7 +25,7 @@ module.exports = (meter, {prefix, labels}) => {
     cpuUserUsageCounter.add(userUsageMicros / 1e6)
     cpuSystemUsageCounter.add(systemUsageMicros / 1e6)
     cpuUsageCounter.add((userUsageMicros + systemUsageMicros) / 1e6)
-  })
+  }).bind(labels)
 }
 
 module.exports.metricNames = [
diff --git a/metrics/processHandles.js b/metrics/processHandles.js
index f285579..adf1d01 100644
--- a/metrics/processHandles.js
+++ b/metrics/processHandles.js
@@ -9,13 +9,13 @@ module.exports = (meter, {prefix, labels}) => {
   if (typeof process._getActiveHandles !== 'function') return
 
   const aggregateByObjectName = createAggregatorByObjectName()
-  const activeHandlesMetric = meter.createValueObserver(prefix + NODEJS_ACTIVE_HANDLES, {
+  const activeHandlesMetric = meter.createObservableGauge(prefix + NODEJS_ACTIVE_HANDLES, {
     description: 'Number of active libuv handles grouped by handle type. Every handle type is C++ class name.' // eslint-disable-line max-len
   }, () => {
     aggregateByObjectName(activeHandlesMetric, labels, process._getActiveHandles())
   })
 
-  const boundTotalMetric = meter.createValueObserver(prefix + NODEJS_ACTIVE_HANDLES_TOTAL, {
+  const boundTotalMetric = meter.createObservableGauge(prefix + NODEJS_ACTIVE_HANDLES_TOTAL, {
     description: 'Total number of active handles.'
   }, () => {
     const handles = process._getActiveHandles()
diff --git a/metrics/processOpenFileDescriptors.js b/metrics/processOpenFileDescriptors.js
index d57752f..4ded68e 100644
--- a/metrics/processOpenFileDescriptors.js
+++ b/metrics/processOpenFileDescriptors.js
@@ -6,7 +6,7 @@ const PROCESS_OPEN_FDS = 'process_open_fds'
 module.exports = (meter, {prefix, labels}) => {
   if (process.platform !== 'linux') return
 
-  const boundInstrument = meter.createValueObserver(prefix + PROCESS_OPEN_FDS, {
+  const boundInstrument = meter.createObservableGauge(prefix + PROCESS_OPEN_FDS, {
     description: 'Number of open file descriptors.'
   }, () => {
     try {
diff --git a/metrics/processRequests.js b/metrics/processRequests.js
index 478ae06..fafe01b 100644
--- a/metrics/processRequests.js
+++ b/metrics/processRequests.js
@@ -9,13 +9,13 @@ module.exports = (meter, {prefix, labels}) => {
   if (typeof process._getActiveRequests !== 'function') return
 
   const aggregateByObjectName = createAggregatorByObjectName()
-  const activeRequestsMetric = meter.createValueObserver(prefix + NODEJS_ACTIVE_REQUESTS, {
+  const activeRequestsMetric = meter.createObservableGauge(prefix + NODEJS_ACTIVE_REQUESTS, {
     description: 'Number of active libuv requests grouped by request type. Every request type is C++ class name.' // eslint-disable-line max-len
   }, () => {
     aggregateByObjectName(activeRequestsMetric, labels, process._getActiveRequests())
   })
 
-  const boundTotalRequests = meter.createValueObserver(prefix + NODEJS_ACTIVE_REQUESTS_TOTAL, {
+  const boundTotalRequests = meter.createObservableGauge(prefix + NODEJS_ACTIVE_REQUESTS_TOTAL, {
     description: 'Total number of active requests.'
   }, () => {
     boundTotalRequests.update(process._getActiveRequests().length)