net: fix timeouts during long writes
Add updateWriteQueueSize, which updates and returns the queue size
(net & tls). Make _onTimeout check whether an active write
is ongoing and, if so, call _unrefTimer rather than emitting
a timeout event.

Add http & https tests that check whether long-lasting (but
active) writes time out or can finish writing as expected.

PR-URL: nodejs#15791
Fixes: nodejs#15082
Reviewed-By: Anna Henningsen <anna@addaleax.net>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Refael Ackermann <refack@gmail.com>
Reviewed-By: Fedor Indutny <fedor.indutny@gmail.com>
apapirovski committed Dec 12, 2017
1 parent 7533f59 commit 3b5ac77
Showing 7 changed files with 210 additions and 5 deletions.
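Before the per-file diffs, here is a rough sketch (not part of the commit) of the failure mode the commit message describes; the payload size and timeouts below are invented, and it only illustrates a server still actively flushing a large response when its socket timeout elapses:

'use strict';
// Illustrative sketch only (not from the commit); sizes and timeouts are made up.
// Before this fix, the server-side socket timeout could fire while a single large
// write was still draining to a slow client, even though the connection was active.
const http = require('http');

const server = http.createServer((req, res) => {
  // One large write that takes longer than the socket timeout to drain.
  res.end(Buffer.alloc(50 * 1024 * 1024, 0x44));
});
server.setTimeout(500);
server.on('timeout', () => {
  // With the fix, this should not fire while the write queue is still draining.
  console.log('server socket timed out mid-write');
});

server.listen(0, () => {
  http.get({ port: server.address().port, agent: false }, (res) => {
    res.pause();                             // simulate a slow client
    setTimeout(() => res.resume(), 1000);    // resume after the server timeout
    res.on('end', () => server.close());
  });
});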
8 changes: 8 additions & 0 deletions lib/net.js
@@ -334,6 +334,14 @@ Socket.prototype.setTimeout = function(msecs, callback) {


Socket.prototype._onTimeout = function() {
  // `.prevWriteQueueSize` !== `.updateWriteQueueSize()` means there is
  // an active write in progress, so we suppress the timeout.
  const prevWriteQueueSize = this._handle.writeQueueSize;
  if (prevWriteQueueSize > 0 &&
      prevWriteQueueSize !== this._handle.updateWriteQueueSize()) {
    this._unrefTimer();
    return;
  }
  debug('_onTimeout');
  this.emit('timeout');
};
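The check above compares two samples of the native write queue size. A standalone sketch of that comparison follows; `fakeHandle` is a made-up mock, not a real libuv handle, and only mimics the two members used here:

'use strict';
// Standalone sketch of the two-sample check above, using a hypothetical mock handle.
function shouldSuppressTimeout(handle) {
  const prevWriteQueueSize = handle.writeQueueSize;
  // updateWriteQueueSize() refreshes the cached size from the native stream;
  // a change between the two samples means bytes were flushed, i.e. progress.
  return prevWriteQueueSize > 0 &&
         prevWriteQueueSize !== handle.updateWriteQueueSize();
}

const fakeHandle = {
  writeQueueSize: 1024,
  updateWriteQueueSize() {
    this.writeQueueSize -= 512;  // pretend half the queue drained since last check
    return this.writeQueueSize;
  }
};

console.log(shouldSuppressTimeout(fakeHandle));  // true -> defer the timeout
console.log(shouldSuppressTimeout({ writeQueueSize: 0, updateWriteQueueSize: () => 0 }));  // false -> emit 'timeout'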
22 changes: 18 additions & 4 deletions src/stream_wrap.cc
@@ -73,6 +73,7 @@ StreamWrap::StreamWrap(Environment* env,
void StreamWrap::AddMethods(Environment* env,
                            v8::Local<v8::FunctionTemplate> target,
                            int flags) {
  env->SetProtoMethod(target, "updateWriteQueueSize", UpdateWriteQueueSize);
  env->SetProtoMethod(target, "setBlocking", SetBlocking);
  StreamBase::AddMethods<StreamWrap>(env, target, flags);
}
@@ -113,11 +114,14 @@ bool StreamWrap::IsIPCPipe() {
}


-void StreamWrap::UpdateWriteQueueSize() {
+uint32_t StreamWrap::UpdateWriteQueueSize() {
  HandleScope scope(env()->isolate());
-  Local<Integer> write_queue_size =
-      Integer::NewFromUnsigned(env()->isolate(), stream()->write_queue_size);
-  object()->Set(env()->write_queue_size_string(), write_queue_size);
+  uint32_t write_queue_size = stream()->write_queue_size;
+  object()->Set(env()->context(),
+                env()->write_queue_size_string(),
+                Integer::NewFromUnsigned(env()->isolate(),
+                                         write_queue_size)).FromJust();
+  return write_queue_size;
}


@@ -242,6 +246,16 @@ void StreamWrap::OnRead(uv_stream_t* handle,
}


void StreamWrap::UpdateWriteQueueSize(
    const FunctionCallbackInfo<Value>& args) {
  StreamWrap* wrap;
  ASSIGN_OR_RETURN_UNWRAP(&wrap, args.Holder());

  uint32_t write_queue_size = wrap->UpdateWriteQueueSize();
  args.GetReturnValue().Set(write_queue_size);
}


void StreamWrap::SetBlocking(const FunctionCallbackInfo<Value>& args) {
  StreamWrap* wrap;
  ASSIGN_OR_RETURN_UNWRAP(&wrap, args.Holder());
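Taken together, the stream_wrap changes let JS-land ask the handle for a fresh queue size instead of relying on the last cached value. A hedged sketch of what that looks like from JavaScript on a Node.js build that includes this change; `_handle`, `writeQueueSize`, and `updateWriteQueueSize()` are undocumented internals, so treat this as illustration only:

'use strict';
// Illustration only: pokes at undocumented `_handle` internals extended by this commit.
const net = require('net');

const server = net.createServer((socket) => {
  socket.write(Buffer.alloc(8 * 1024 * 1024, 0x44));   // large write, much of it queued
  const cached = socket._handle.writeQueueSize;            // value from the last update
  const refreshed = socket._handle.updateWriteQueueSize();  // re-read from libuv now
  // The two may or may not differ, depending on how much has already been flushed.
  console.log({ cached, refreshed });
  socket.end();
  server.close();
});

server.listen(0, () => {
  const client = net.connect(server.address().port);
  client.resume();   // drain the data so the process can exit cleanly
});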
4 changes: 3 additions & 1 deletion src/stream_wrap.h
@@ -67,13 +67,15 @@ class StreamWrap : public HandleWrap, public StreamBase {
}

  AsyncWrap* GetAsyncWrap() override;
-  void UpdateWriteQueueSize();
+  uint32_t UpdateWriteQueueSize();

  static void AddMethods(Environment* env,
                         v8::Local<v8::FunctionTemplate> target,
                         int flags = StreamBase::kFlagNone);

 private:
  static void UpdateWriteQueueSize(
      const v8::FunctionCallbackInfo<v8::Value>& args);
  static void SetBlocking(const v8::FunctionCallbackInfo<v8::Value>& args);

// Callbacks for libuv
10 changes: 10 additions & 0 deletions src/tls_wrap.cc
@@ -911,6 +911,15 @@ int TLSWrap::SelectSNIContextCallback(SSL* s, int* ad, void* arg) {
#endif // SSL_CTRL_SET_TLSEXT_SERVERNAME_CB


void TLSWrap::UpdateWriteQueueSize(const FunctionCallbackInfo<Value>& args) {
  TLSWrap* wrap;
  ASSIGN_OR_RETURN_UNWRAP(&wrap, args.Holder());

  uint32_t write_queue_size = wrap->UpdateWriteQueueSize();
  args.GetReturnValue().Set(write_queue_size);
}


void TLSWrap::Initialize(Local<Object> target,
                         Local<Value> unused,
                         Local<Context> context) {
@@ -932,6 +941,7 @@ void TLSWrap::Initialize(Local<Object> target,
env->SetProtoMethod(t, "enableSessionCallbacks", EnableSessionCallbacks);
env->SetProtoMethod(t, "destroySSL", DestroySSL);
env->SetProtoMethod(t, "enableCertCb", EnableCertCb);
env->SetProtoMethod(t, "updateWriteQueueSize", UpdateWriteQueueSize);

StreamBase::AddMethods<TLSWrap>(env, t, StreamBase::kFlagHasWritev);
SSLWrap<TLSWrap>::AddMethods(env, t);
4 changes: 4 additions & 0 deletions src/tls_wrap.h
@@ -167,6 +167,10 @@ class TLSWrap : public AsyncWrap,
  // If true - delivered EOF to the js-land, either after `close_notify`, or
  // after the `UV_EOF` on socket.
  bool eof_;

 private:
  static void UpdateWriteQueueSize(
      const v8::FunctionCallbackInfo<v8::Value>& args);
};

} // namespace node
80 changes: 80 additions & 0 deletions test/sequential/test-http-keep-alive-large-write.js
@@ -0,0 +1,80 @@
'use strict';
const common = require('../common');
const assert = require('assert');
const http = require('http');

// This test assesses whether long-running writes can complete
// or timeout because the socket is not aware that the backing
// stream is still writing.
// To simulate a slow client, we write a really large chunk and
// then proceed through the following cycle:
// 1) Receive first 'data' event and record currently written size
// 2) Once we've read up to currently written size recorded above,
// we pause the stream and wait longer than the server timeout
// 3) Socket.prototype._onTimeout triggers and should confirm
// that the backing stream is still active and writing
// 4) Our timer fires, we resume the socket and start at 1)

const minReadSize = 250000;
const serverTimeout = common.platformTimeout(500);
let offsetTimeout = common.platformTimeout(100);
let serverConnectionHandle;
let writeSize = 3000000;
let didReceiveData = false;
// This represents each cycle's write size, where the cycle consists
// of `write > read > _onTimeout`
let currentWriteSize = 0;

const server = http.createServer(common.mustCall((req, res) => {
  const content = Buffer.alloc(writeSize, 0x44);

  res.writeHead(200, {
    'Content-Type': 'application/octet-stream',
    'Content-Length': content.length.toString(),
    'Vary': 'Accept-Encoding'
  });

  serverConnectionHandle = res.socket._handle;
  res.write(content);
  res.end();
}));
server.setTimeout(serverTimeout);
server.on('timeout', () => {
  assert.strictEqual(didReceiveData, false, 'Should not timeout');
});

server.listen(0, common.mustCall(() => {
  http.get({
    path: '/',
    port: server.address().port
  }, common.mustCall((res) => {
    const resume = () => res.resume();
    let receivedBufferLength = 0;
    let firstReceivedAt;
    res.on('data', common.mustCallAtLeast((buf) => {
      if (receivedBufferLength === 0) {
        currentWriteSize = Math.max(
          minReadSize,
          writeSize - serverConnectionHandle.writeQueueSize
        );
        didReceiveData = false;
        firstReceivedAt = Date.now();
      }
      receivedBufferLength += buf.length;
      if (receivedBufferLength >= currentWriteSize) {
        didReceiveData = true;
        writeSize = serverConnectionHandle.writeQueueSize;
        receivedBufferLength = 0;
        res.pause();
        setTimeout(
          resume,
          serverTimeout + offsetTimeout - (Date.now() - firstReceivedAt)
        );
        offsetTimeout = 0;
      }
    }, 1));
    res.on('end', common.mustCall(() => {
      server.close();
    }));
  }));
}));
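The per-cycle bookkeeping in the test above is easier to follow with concrete numbers. The sketch below mirrors the `currentWriteSize` and resume-delay calculations using invented values rather than real socket state:

'use strict';
// Worked example with hypothetical numbers; mirrors the arithmetic in the test above.
const minReadSize = 250000;
const serverTimeout = 500;    // ms, as passed to server.setTimeout()
let offsetTimeout = 100;      // extra slack, applied only on the first cycle

// Cycle start: first 'data' event of this cycle.
const writeSize = 3000000;            // what the server queued at the start
const writeQueueSize = 2600000;       // hypothetical bytes still queued server-side
const currentWriteSize = Math.max(minReadSize, writeSize - writeQueueSize);
console.log(currentWriteSize);        // 400000 -> read this much before pausing

// Cycle end: pause long enough to outlast the server timeout, measured from the
// moment the first chunk of the cycle arrived.
const firstChunkToNow = 30;           // hypothetical Date.now() - firstReceivedAt
const resumeDelay = serverTimeout + offsetTimeout - firstChunkToNow;
console.log(resumeDelay);             // 570 ms -> comfortably past the 500 ms timeout
offsetTimeout = 0;                    // later cycles rely on the server timeout alone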
87 changes: 87 additions & 0 deletions test/sequential/test-https-keep-alive-large-write.js
@@ -0,0 +1,87 @@
'use strict';
const common = require('../common');
if (!common.hasCrypto)
  common.skip('missing crypto');
const assert = require('assert');
const fixtures = require('../common/fixtures');
const https = require('https');

// This test assesses whether long-running writes can complete
// or timeout because the socket is not aware that the backing
// stream is still writing.
// To simulate a slow client, we write a really large chunk and
// then proceed through the following cycle:
// 1) Receive first 'data' event and record currently written size
// 2) Once we've read up to currently written size recorded above,
// we pause the stream and wait longer than the server timeout
// 3) Socket.prototype._onTimeout triggers and should confirm
// that the backing stream is still active and writing
// 4) Our timer fires, we resume the socket and start at 1)

const minReadSize = 250000;
const serverTimeout = common.platformTimeout(500);
let offsetTimeout = common.platformTimeout(100);
let serverConnectionHandle;
let writeSize = 2000000;
let didReceiveData = false;
// This represents each cycle's write size, where the cycle consists
// of `write > read > _onTimeout`
let currentWriteSize = 0;

const server = https.createServer({
  key: fixtures.readKey('agent1-key.pem'),
  cert: fixtures.readKey('agent1-cert.pem')
}, common.mustCall((req, res) => {
  const content = Buffer.alloc(writeSize, 0x44);

  res.writeHead(200, {
    'Content-Type': 'application/octet-stream',
    'Content-Length': content.length.toString(),
    'Vary': 'Accept-Encoding'
  });

  serverConnectionHandle = res.socket._handle;
  res.write(content);
  res.end();
}));
server.setTimeout(serverTimeout);
server.on('timeout', () => {
  assert.strictEqual(didReceiveData, false, 'Should not timeout');
});

server.listen(0, common.mustCall(() => {
  https.get({
    path: '/',
    port: server.address().port,
    rejectUnauthorized: false
  }, common.mustCall((res) => {
    const resume = () => res.resume();
    let receivedBufferLength = 0;
    let firstReceivedAt;
    res.on('data', common.mustCallAtLeast((buf) => {
      if (receivedBufferLength === 0) {
        currentWriteSize = Math.max(
          minReadSize,
          writeSize - serverConnectionHandle.writeQueueSize
        );
        didReceiveData = false;
        firstReceivedAt = Date.now();
      }
      receivedBufferLength += buf.length;
      if (receivedBufferLength >= currentWriteSize) {
        didReceiveData = true;
        writeSize = serverConnectionHandle.writeQueueSize;
        receivedBufferLength = 0;
        res.pause();
        setTimeout(
          resume,
          serverTimeout + offsetTimeout - (Date.now() - firstReceivedAt)
        );
        offsetTimeout = 0;
      }
    }, 1));
    res.on('end', common.mustCall(() => {
      server.close();
    }));
  }));
}));
