diff --git a/test/sequential/test-http-pipeline-flood.js b/test/sequential/test-http-pipeline-flood.js
index cb9fc97a865b70..f8de38c7a84a3a 100644
--- a/test/sequential/test-http-pipeline-flood.js
+++ b/test/sequential/test-http-pipeline-flood.js
@@ -2,6 +2,16 @@
 var common = require('../common');
 var assert = require('assert');
 
+// Here we are testing the HTTP server module's flood prevention mechanism.
+// When writable.write() returns false (i.e. the underlying send() indicated
+// the native buffer is full), the HTTP server cork()s the readable part of
+// the stream. This means that new requests will not be read (however,
+// requests which have already been read but are awaiting processing will
+// still be processed).
+
+// Normally, when the writable stream emits a 'drain' event, the server
+// uncorks the readable stream, although we aren't testing that part here.
+
 switch (process.argv[2]) {
   case undefined:
     return parent();
@@ -18,22 +28,31 @@ function parent() {
   var childClosed = false;
   var requests = 0;
   var connections = 0;
+  var backloggedReqs = 0;
 
   var server = http.createServer(function(req, res) {
     requests++;
     res.setHeader('content-length', bigResponse.length);
-    res.end(bigResponse);
+    if (!res.write(bigResponse)) {
+      if (backloggedReqs == 0) {
+        // Once the native buffer fills (i.e. write() returns false), the
+        // flood prevention should kick in.
+        // This means the stream should emit no more 'data' events. However,
+        // we may still be asked to process more requests if they were read
+        // before the mechanism activated.
+        req.socket.on('data', function() { assert(false); });
+      }
+      backloggedReqs++;
+    }
+    res.end();
   });
 
   server.on('connection', function(conn) {
     connections++;
   });
 
-  // kill the connection after a bit, verifying that the
-  // flood of requests was eventually halted.
   server.setTimeout(200, function(conn) {
     gotTimeout = true;
-    conn.destroy();
   });
 
   server.listen(common.PORT, function() {
@@ -53,9 +72,9 @@ function parent() {
     assert.equal(connections, 1);
     // The number of requests we end up processing before the outgoing
     // connection backs up and requires a drain is implementation-dependent.
-    // We can safely assume it is more than 250.
     console.log('server got %d requests', requests);
-    assert(requests >= 250);
+    console.log('server sent %d backlogged requests', backloggedReqs);
+    console.log('ok');
   });
 }
 
@@ -63,7 +82,6 @@ function parent() {
 
 function child() {
   var net = require('net');
-  var gotEpipe = false;
   var conn = net.connect({ port: common.PORT });
 
   var req = 'GET / HTTP/1.1\r\nHost: localhost:' +
@@ -72,17 +90,14 @@ function child() {
   req = new Array(10241).join(req);
 
   conn.on('connect', function() {
+    // Kill the child after one second of flooding.
+    setTimeout(function() { conn.destroy(); }, 1000);
     write();
   });
 
   conn.on('drain', write);
 
-  conn.on('error', function(er) {
-    gotEpipe = true;
-  });
-
   process.on('exit', function() {
-    assert(gotEpipe);
     console.log('ok - child');
   });
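
For context, the flood-prevention idea the new header comment describes can be illustrated with a rough standalone sketch. This is not the http module's internal implementation (which corks/uncorks the parser side of the incoming socket); pause()/resume() on the request socket is used here only as an assumed stand-in to make the backpressure pattern concrete, and the server below is hypothetical, not part of this patch:

// Sketch only: when a response write() backs up, stop reading further
// pipelined requests from the connection until the writable side drains.
var http = require('http');

var bigResponse = new Array(10241).join('x');

var server = http.createServer(function(req, res) {
  res.setHeader('content-length', bigResponse.length);
  if (!res.write(bigResponse)) {
    // The outgoing buffer is full: stop reading new requests from this
    // socket until the response emits 'drain', then start reading again.
    req.socket.pause();
    res.once('drain', function() {
      req.socket.resume();
    });
  }
  res.end();
});

server.listen(0);

The test above checks the same effect from the outside: once a write backs up (backloggedReqs goes past zero), the request socket must not emit any further 'data' events, and the flooded connection is eventually cleaned up by the 200 ms server timeout rather than by an explicit destroy().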