mirror of
https://github.com/nodejs/node.git
synced 2024-11-21 10:59:27 +00:00
1abff07392
This should give a performance boost across the board. Given that the old limit is a decade old and memory capacity has doubled many times since, I think it is appropriate to slightly bump the default limit. PR-URL: https://github.com/nodejs/node/pull/52037 Refs: https://github.com/nodejs/node/pull/46608 Refs: https://github.com/nodejs/node/pull/50120 Reviewed-By: Rafael Gonzaga <rafael.nunu@hotmail.com> Reviewed-By: Matteo Collina <matteo.collina@gmail.com> Reviewed-By: Yagiz Nizipli <yagiz.nizipli@sentry.io> Reviewed-By: Chengzhong Wu <legendecas@gmail.com> Reviewed-By: Moshe Atlow <moshe@atlow.co.il> Reviewed-By: Mohammed Keyvanzadeh <mohammadkeyvanzade94@gmail.com> Reviewed-By: Trivikram Kamat <trivikr.dev@gmail.com> Reviewed-By: Ruben Bridgewater <ruben@bridgewater.de>
134 lines
3.1 KiB
JavaScript
134 lines
3.1 KiB
JavaScript
// Benchmark: connect a raw TCP client to a raw TCP server and write as
// many bytes as possible within the configured duration (`dur` seconds).
'use strict';

const common = require('../common.js');
const util = require('util');

// When --dur=N and --len=N arguments are given, run directly with those
// settings; otherwise the harness queues up a set of child processes,
// one per parameter combination below.
const bench = common.createBenchmark(main, {
  len: [102400, 1024 * 64 * 16],
  type: ['utf', 'asc', 'buf'],
  dur: [5],
}, {
  test: { len: 1024 },
  flags: ['--expose-internals', '--no-warnings'],
});
|
|
|
|
/**
 * Benchmark body: stands up a raw TCP server and client directly on the
 * internal tcp_wrap/stream_wrap bindings, then counts how many bytes the
 * client can push to the server in `dur` seconds.
 *
 * @param {object} opts
 * @param {number} opts.dur - measurement window in seconds
 * @param {number} opts.len - size of each write payload
 * @param {string} opts.type - payload kind: 'buf', 'utf' or 'asc'
 */
function main({ dur, len, type }) {
  const {
    TCP,
    TCPConnectWrap,
    constants: TCPConstants,
  } = common.binding('tcp_wrap');
  const { WriteWrap } = common.binding('stream_wrap');
  const PORT = common.PORT;

  // Convert a libuv error code into a thrown errno-style exception.
  function fail(err, syscall) {
    throw util._errnoException(err, syscall);
  }

  // --- server side ---------------------------------------------------
  const server = new TCP(TCPConstants.SERVER);
  let err = server.bind('127.0.0.1', PORT);
  if (err)
    fail(err, 'bind');

  err = server.listen(511);
  if (err)
    fail(err, 'listen');

  server.onconnection = (err, conn) => {
    if (err)
      fail(err, 'connect');

    // Timing starts the moment the client connection is accepted.
    bench.start();
    let received = 0;

    setTimeout(() => {
      // Report throughput in gigabits.
      bench.end((received * 8) / (1024 * 1024 * 1024));
      process.exit(0);
    }, dur * 1000);

    conn.onread = (buffer) => {
      // EOF is never expected here -- the client just streams forever.
      if (!buffer)
        fail('read');

      // Count raw bytes only; slicing the buffer would add overhead
      // unrelated to what this benchmark isolates.
      received += buffer.byteLength;
    };

    conn.readStart();
  };

  client(type, len);

  // --- client side ---------------------------------------------------
  function client(type, len) {
    let payload;
    switch (type) {
      case 'buf':
        payload = Buffer.alloc(len, 'x');
        break;
      case 'utf':
        payload = 'ü'.repeat(len / 2);
        break;
      case 'asc':
        payload = 'x'.repeat(len);
        break;
      default:
        throw new Error(`invalid type: ${type}`);
    }

    const sock = new TCP(TCPConstants.SOCKET);
    const connectReq = new TCPConnectWrap();
    const connectErr = sock.connect(connectReq, '127.0.0.1', PORT);

    if (connectErr)
      fail(connectErr, 'connect');

    sock.readStart();

    connectReq.oncomplete = (err) => {
      if (err)
        fail(err, 'connect');

      // Prime the write queue; afterWrite keeps it topped up.
      while (sock.writeQueueSize === 0)
        write();
    };

    // Issue one write of the chosen payload kind.
    function write() {
      const req = new WriteWrap();
      req.oncomplete = afterWrite;
      let writeErr;
      switch (type) {
        case 'buf':
          writeErr = sock.writeBuffer(req, payload);
          break;
        case 'utf':
          writeErr = sock.writeUtf8String(req, payload);
          break;
        case 'asc':
          writeErr = sock.writeAsciiString(req, payload);
          break;
      }

      if (writeErr)
        fail(writeErr, 'write');
    }

    // Refill the kernel write queue whenever it drains.
    function afterWrite(err, handle) {
      if (err)
        fail(err, 'write');

      while (sock.writeQueueSize === 0)
        write();
    }
  }
}