From be103eba4115eadee7d497aac95531a99312df1c Mon Sep 17 00:00:00 2001 From: Anna Henningsen <anna@addaleax.net> Date: Thu, 29 Mar 2018 22:21:17 +0200 Subject: [PATCH] src: re-add `Realloc()` shrink after reading stream data This would otherwise keep a lot of unused memory lying around, and in particular add up to a page per chunk of memory overhead for network reads, potentially opening a DoS vector if the resulting `Buffer` objects are kept around indefinitely (e.g. stored in a list and not concatenated until the socket finishes). This fixes CVE-2018-7164. Refs: https://github.com/nodejs-private/security/issues/186 Refs: https://github.com/nodejs/node/commit/7c4b09b24bbe7d6a8cbad256f47b30a101a909ea PR-URL: https://github.com/nodejs-private/node-private/pull/129 Reviewed-By: Michael Dawson <michael_dawson@ca.ibm.com> Reviewed-By: Evan Lucas <evanlucas@me.com> --- src/stream_base.cc | 3 +- ...t-net-bytes-per-incoming-chunk-overhead.js | 41 +++++++++++++++++++ 2 files changed, 43 insertions(+), 1 deletion(-) create mode 100644 test/sequential/test-net-bytes-per-incoming-chunk-overhead.js diff --git a/src/stream_base.cc b/src/stream_base.cc index 1d1d324841537f..3f2e4183291d35 100644 --- a/src/stream_base.cc +++ b/src/stream_base.cc @@ -376,8 +376,9 @@ void EmitToJSStreamListener::OnStreamRead(ssize_t nread, const uv_buf_t& buf) { } CHECK_LE(static_cast<size_t>(nread), buf.len); + char* base = Realloc(buf.base, nread); - Local<Object> obj = Buffer::New(env, buf.base, nread).ToLocalChecked(); + Local<Object> obj = Buffer::New(env, base, nread).ToLocalChecked(); stream->CallJSOnreadMethod(nread, obj); } diff --git a/test/sequential/test-net-bytes-per-incoming-chunk-overhead.js b/test/sequential/test-net-bytes-per-incoming-chunk-overhead.js new file mode 100644 index 00000000000000..8f766e8c7a4106 --- /dev/null +++ b/test/sequential/test-net-bytes-per-incoming-chunk-overhead.js @@ -0,0 +1,41 @@ +// Flags: --expose-gc +'use strict'; + +const common = require('../common'); +const assert = require('assert'); +const net = require('net'); + +// Tests that, when 
receiving small chunks, we do not keep the full length +// of the original allocation for the libuv read call in memory. + +let client; +let baseRSS; +const receivedChunks = []; +const N = 250000; + +const server = net.createServer(common.mustCall((socket) => { + baseRSS = process.memoryUsage().rss; + + socket.setNoDelay(true); + socket.on('data', (chunk) => { + receivedChunks.push(chunk); + if (receivedChunks.length < N) { + client.write('a'); + } else { + client.end(); + server.close(); + } + }); +})).listen(0, common.mustCall(() => { + client = net.connect(server.address().port); + client.setNoDelay(true); + client.write('hello!'); +})); + +process.on('exit', () => { + global.gc(); + const bytesPerChunk = + (process.memoryUsage().rss - baseRSS) / receivedChunks.length; + // We should always have less than one page (usually ~ 4 kB) per chunk. + assert(bytesPerChunk < 512, `measured ${bytesPerChunk} bytes per chunk`); +});