aports/testing/electron/0002-http-add-maximum-chunk-extension-size.patch


From 01d3bb793a5ef3bf0a36dde868626869e09fb558 Mon Sep 17 00:00:00 2001
From: Paolo Insogna <paolo@cowtech.it>
Date: Wed, 3 Jan 2024 07:23:15 +0100
Subject: [PATCH] http: add maximum chunk extension size

PR-URL: https://github.com/nodejs-private/node-private/pull/519
Fixes: https://hackerone.com/reports/2233486
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Marco Ippolito <marcoippolito54@gmail.com>
Reviewed-By: Rafael Gonzaga <rafael.nunu@hotmail.com>
CVE-ID: CVE-2024-22019
---
doc/api/errors.md | 12 ++
lib/_http_server.js | 8 ++
src/node_http_parser.cc | 23 ++-
.../test-http-chunk-extensions-limit.js | 131 ++++++++++++++++++
4 files changed, 171 insertions(+), 3 deletions(-)
create mode 100644 test/parallel/test-http-chunk-extensions-limit.js
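
Background: chunked transfer-encoding lets a client append optional chunk
extensions after each chunk size ("SIZE;name=value\r\n"). Before this fix the
parser put no bound on how much extension data a single connection could
stream, which is the resource-exhaustion vector behind CVE-2024-22019: the
server keeps reading extension bytes for as long as the client sends them,
bypassing body-size limits. The sketch below, which mirrors the first case of
the test added by this patch, shows the shape of such a request over a raw
socket (the port and host are assumptions; point it at a disposable local
server):

```js
// Illustrative sketch of the request shape this patch defends against.
// Assumes a local HTTP server listening on 127.0.0.1:8080 (hypothetical).
const net = require('net');

const sock = net.connect(8080, '127.0.0.1');
sock.on('data', (chunk) => process.stdout.write(chunk));
sock.on('end', () => console.log('\n[connection closed]'));

sock.end(
  'GET / HTTP/1.1\r\n' +
  'Host: localhost:8080\r\n' +
  'Transfer-Encoding: chunked\r\n\r\n' +
  // a 2-byte chunk whose size line carries ~20 KiB of chunk-extension data
  '2;' + 'A'.repeat(20000) + '=bar\r\n' +
  'AA\r\n' +
  '0\r\n\r\n'
);
```

A patched server answers with the canned 413 response and closes the
connection instead of continuing to read the extension data.
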
diff --git a/third_party/electron_node/doc/api/errors.md b/third_party/electron_node/doc/api/errors.md
index 95ad3c9c671..9429baff516 100644
--- a/third_party/electron_node/doc/api/errors.md
+++ b/third_party/electron_node/doc/api/errors.md
@@ -3140,6 +3140,18 @@ malconfigured clients, if more than 8 KiB of HTTP header data is received then
HTTP parsing will abort without a request or response object being created, and
an `Error` with this code will be emitted.
+<a id="HPE_CHUNK_EXTENSIONS_OVERFLOW"></a>
+
+### `HPE_CHUNK_EXTENSIONS_OVERFLOW`
+
+<!-- YAML
+added: REPLACEME
+-->
+
+Too much data was received for a chunk extension. In order to protect against
+malicious or malconfigured clients, if more than 16 KiB of data is received
+then an `Error` with this code will be emitted.
+
<a id="HPE_UNEXPECTED_CONTENT_LENGTH"></a>
### `HPE_UNEXPECTED_CONTENT_LENGTH`
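
The new documentation entry above describes the error code; in application
code it surfaces the same way other llhttp parse errors do, through the
server's documented `'clientError'` event. A minimal sketch of checking for
it (the handler body is illustrative, not part of this patch):

```js
// Minimal sketch: observing HPE_CHUNK_EXTENSIONS_OVERFLOW via 'clientError'.
// With no listener attached, the server falls back to the canned 413
// response that this patch adds to _http_server.js.
const http = require('http');

const server = http.createServer((req, res) => {
  req.resume();
  req.on('end', () => res.end('ok'));
});

server.on('clientError', (err, socket) => {
  if (err.code === 'HPE_CHUNK_EXTENSIONS_OVERFLOW') {
    // Parsing aborted: more than 16 KiB of chunk-extension data in one chunk.
    socket.end('HTTP/1.1 413 Payload Too Large\r\nConnection: close\r\n\r\n');
  } else if (socket.writable) {
    socket.end('HTTP/1.1 400 Bad Request\r\nConnection: close\r\n\r\n');
  } else {
    socket.destroy(err);
  }
});

server.listen(0);
```
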
diff --git a/third_party/electron_node/lib/_http_server.js b/third_party/electron_node/lib/_http_server.js
index c62ea175995..c512653e60e 100644
--- a/third_party/electron_node/lib/_http_server.js
+++ b/third_party/electron_node/lib/_http_server.js
@@ -857,6 +857,11 @@ const requestHeaderFieldsTooLargeResponse = Buffer.from(
'Connection: close\r\n\r\n', 'ascii',
);
+const requestChunkExtensionsTooLargeResponse = Buffer.from(
+ `HTTP/1.1 413 ${STATUS_CODES[413]}\r\n` +
+ 'Connection: close\r\n\r\n', 'ascii',
+);
+
function warnUnclosedSocket() {
if (warnUnclosedSocket.emitted) {
return;
@@ -892,6 +897,9 @@ function socketOnError(e) {
case 'HPE_HEADER_OVERFLOW':
response = requestHeaderFieldsTooLargeResponse;
break;
+ case 'HPE_CHUNK_EXTENSIONS_OVERFLOW':
+ response = requestChunkExtensionsTooLargeResponse;
+ break;
case 'ERR_HTTP_REQUEST_TIMEOUT':
response = requestTimeoutResponse;
break;
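
The hunk above maps the new parser error to a preformatted 413 written
straight to the socket: the error is handled at the socket level, like
`HPE_HEADER_OVERFLOW` and request timeouts in the same switch, so a raw
canned response is sent and the connection is closed rather than reused,
hence `Connection: close`. For illustration, re-deriving the canned buffer in
plain JavaScript:

```js
// Re-deriving the canned 413 response for illustration; STATUS_CODES[413]
// is 'Payload Too Large', which matches the assertions in the test below.
const { STATUS_CODES } = require('http');

const canned = `HTTP/1.1 413 ${STATUS_CODES[413]}\r\n` +
               'Connection: close\r\n\r\n';

console.log(JSON.stringify(canned));
// -> "HTTP/1.1 413 Payload Too Large\r\nConnection: close\r\n\r\n"
```
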
diff --git a/third_party/electron_node/src/node_http_parser.cc b/third_party/electron_node/src/node_http_parser.cc
index a12d89c3cd6..c190eace435 100644
--- a/third_party/electron_node/src/node_http_parser.cc
+++ b/third_party/electron_node/src/node_http_parser.cc
@@ -79,6 +79,8 @@ const uint32_t kOnExecute = 5;
const uint32_t kOnTimeout = 6;
// Any more fields than this will be flushed into JS
const size_t kMaxHeaderFieldsCount = 32;
+// Maximum size of chunk extensions
+const size_t kMaxChunkExtensionsSize = 16384;
const uint32_t kLenientNone = 0;
const uint32_t kLenientHeaders = 1 << 0;
@@ -261,6 +263,7 @@ class Parser : public AsyncWrap, public StreamListener {
num_fields_ = num_values_ = 0;
headers_completed_ = false;
+ chunk_extensions_nread_ = 0;
last_message_start_ = uv_hrtime();
url_.Reset();
status_message_.Reset();
@@ -516,9 +519,22 @@ class Parser : public AsyncWrap, public StreamListener {
return 0;
}
- // Reset nread for the next chunk
+ int on_chunk_extension(const char* at, size_t length) {
+ chunk_extensions_nread_ += length;
+
+ if (chunk_extensions_nread_ > kMaxChunkExtensionsSize) {
+ llhttp_set_error_reason(&parser_,
+ "HPE_CHUNK_EXTENSIONS_OVERFLOW:Chunk extensions overflow");
+ return HPE_USER;
+ }
+
+ return 0;
+ }
+
+ // Reset nread for the next chunk and also reset the extensions counter
int on_chunk_header() {
header_nread_ = 0;
+ chunk_extensions_nread_ = 0;
return 0;
}
@@ -986,6 +1002,7 @@ class Parser : public AsyncWrap, public StreamListener {
bool headers_completed_ = false;
bool pending_pause_ = false;
uint64_t header_nread_ = 0;
+ uint64_t chunk_extensions_nread_ = 0;
uint64_t max_http_header_size_;
uint64_t last_message_start_;
ConnectionsList* connectionsList_;
@@ -1164,9 +1181,9 @@ const llhttp_settings_t Parser::settings = {
Proxy<DataCall, &Parser::on_header_value>::Raw,
// on_chunk_extension_name
- nullptr,
+ Proxy<DataCall, &Parser::on_chunk_extension>::Raw,
// on_chunk_extension_value
- nullptr,
+ Proxy<DataCall, &Parser::on_chunk_extension>::Raw,
Proxy<Call, &Parser::on_headers_complete>::Raw,
Proxy<DataCall, &Parser::on_body>::Raw,
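
In the parser changes above, both llhttp chunk-extension callbacks
(`on_chunk_extension_name` and `on_chunk_extension_value`) are routed to the
same `on_chunk_extension` method, which accumulates the bytes received and
fails with a custom `HPE_USER` reason once the 16 KiB budget is exceeded,
while `on_chunk_header` resets the counter so the budget applies per chunk,
not per message. A JavaScript paraphrase of that counting rule (illustrative
only; the real logic is the C++ shown above):

```js
// Illustrative paraphrase of the per-chunk counting rule added above.
// The constant mirrors kMaxChunkExtensionsSize from the C++ change.
const kMaxChunkExtensionsSize = 16384;

function makeChunkExtensionGuard() {
  let nread = 0;
  return {
    // called for every chunk-extension name/value fragment the parser sees
    onExtensionData(length) {
      nread += length;
      if (nread > kMaxChunkExtensionsSize) {
        const err = new Error('Chunk extensions overflow');
        err.code = 'HPE_CHUNK_EXTENSIONS_OVERFLOW';
        throw err;
      }
    },
    // called at the start of each chunk header: the budget resets per chunk
    onChunkHeader() {
      nread = 0;
    },
  };
}
```
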
diff --git a/third_party/electron_node/test/parallel/test-http-chunk-extensions-limit.js b/third_party/electron_node/test/parallel/test-http-chunk-extensions-limit.js
new file mode 100644
index 00000000000..6868b3da6cb
--- /dev/null
+++ b/third_party/electron_node/test/parallel/test-http-chunk-extensions-limit.js
@@ -0,0 +1,131 @@
+'use strict';
+
+const common = require('../common');
+const http = require('http');
+const net = require('net');
+const assert = require('assert');
+
+// Verify that chunk extensions are limited in size when sent all together.
+{
+ const server = http.createServer((req, res) => {
+ req.on('end', () => {
+ res.writeHead(200, { 'Content-Type': 'text/plain' });
+ res.end('bye');
+ });
+
+ req.resume();
+ });
+
+ server.listen(0, () => {
+ const sock = net.connect(server.address().port);
+ let data = '';
+
+ sock.on('data', (chunk) => data += chunk.toString('utf-8'));
+
+ sock.on('end', common.mustCall(function() {
+ assert.strictEqual(data, 'HTTP/1.1 413 Payload Too Large\r\nConnection: close\r\n\r\n');
+ server.close();
+ }));
+
+ sock.end('' +
+ 'GET / HTTP/1.1\r\n' +
+ 'Host: localhost:8080\r\n' +
+ 'Transfer-Encoding: chunked\r\n\r\n' +
+ '2;' + 'A'.repeat(20000) + '=bar\r\nAA\r\n' +
+ '0\r\n\r\n'
+ );
+ });
+}
+
+// Verify that chunk extensions are limited in size when sent in intervals.
+{
+ const server = http.createServer((req, res) => {
+ req.on('end', () => {
+ res.writeHead(200, { 'Content-Type': 'text/plain' });
+ res.end('bye');
+ });
+
+ req.resume();
+ });
+
+ server.listen(0, () => {
+ const sock = net.connect(server.address().port);
+ let remaining = 20000;
+ let data = '';
+
+ const interval = setInterval(
+ () => {
+ if (remaining > 0) {
+ sock.write('A'.repeat(1000));
+ } else {
+ sock.write('=bar\r\nAA\r\n0\r\n\r\n');
+ clearInterval(interval);
+ }
+
+ remaining -= 1000;
+ },
+ common.platformTimeout(20),
+ ).unref();
+
+ sock.on('data', (chunk) => data += chunk.toString('utf-8'));
+
+ sock.on('end', common.mustCall(function() {
+ assert.strictEqual(data, 'HTTP/1.1 413 Payload Too Large\r\nConnection: close\r\n\r\n');
+ server.close();
+ }));
+
+ sock.write('' +
+ 'GET / HTTP/1.1\r\n' +
+ 'Host: localhost:8080\r\n' +
+ 'Transfer-Encoding: chunked\r\n\r\n' +
+ '2;'
+ );
+ });
+}
+
+// Verify that the chunk extension counter is correctly reset after each chunk
+{
+ const server = http.createServer((req, res) => {
+ req.on('end', () => {
+ res.writeHead(200, { 'content-type': 'text/plain', 'connection': 'close', 'date': 'now' });
+ res.end('bye');
+ });
+
+ req.resume();
+ });
+
+ server.listen(0, () => {
+ const sock = net.connect(server.address().port);
+ let data = '';
+
+ sock.on('data', (chunk) => data += chunk.toString('utf-8'));
+
+ sock.on('end', common.mustCall(function() {
+ assert.strictEqual(
+ data,
+ 'HTTP/1.1 200 OK\r\n' +
+ 'content-type: text/plain\r\n' +
+ 'connection: close\r\n' +
+ 'date: now\r\n' +
+ 'Transfer-Encoding: chunked\r\n' +
+ '\r\n' +
+ '3\r\n' +
+ 'bye\r\n' +
+ '0\r\n' +
+ '\r\n',
+ );
+
+ server.close();
+ }));
+
+ sock.end('' +
+ 'GET / HTTP/1.1\r\n' +
+ 'Host: localhost:8080\r\n' +
+ 'Transfer-Encoding: chunked\r\n\r\n' +
+ '2;' + 'A'.repeat(10000) + '=bar\r\nAA\r\n' +
+ '2;' + 'A'.repeat(10000) + '=bar\r\nAA\r\n' +
+ '2;' + 'A'.repeat(10000) + '=bar\r\nAA\r\n' +
+ '0\r\n\r\n'
+ );
+ });
+}
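
Taken together, the three cases above cover: the limit firing when the
oversized extension arrives in a single write; the same bytes trickling in
across many small writes (the counter accumulates across callback
invocations, so slow clients cannot stay under it); and a request whose
chunks each carry less than 16 KiB of extensions completing normally,
confirming that the counter is reset on every chunk header rather than
accumulating over the whole message.
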
--
2.44.0