Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Improve error reporting on oversized messages #572

Merged
merged 5 commits into from
Nov 20, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 16 additions & 1 deletion src/enclave/rpcendpoint.h
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ namespace enclave
return std::make_pair(actor, method);
}

bool handle_data(const std::vector<uint8_t>& data)
bool handle_data(const std::vector<uint8_t>& data) override
{
LOG_DEBUG_FMT(
"Entered handle_data, session {} with {} bytes",
Expand Down Expand Up @@ -129,5 +129,20 @@ namespace enclave

return true;
}

std::vector<uint8_t> oversized_message_error(
  size_t msg_size, size_t max_msg_size) override
{
  // Build a JSON-RPC error payload (rather than the plain-text default)
  // describing the rejected request size, serialised as text.
  const auto description = fmt::format(
    "Requested message ({} bytes) is too large. Maximum allowed is {} "
    "bytes. Closing connection.",
    msg_size,
    max_msg_size);
  const auto response = jsonrpc::error_response(
    0, jsonrpc::StandardErrorCodes::PARSE_ERROR, description);
  return jsonrpc::pack(response, jsonrpc::Pack::Text);
}
};
}
19 changes: 18 additions & 1 deletion src/enclave/tlsendpoint.h
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,6 @@ namespace enclave
std::unique_ptr<ringbuffer::AbstractWriter> to_host;
size_t session_id;

private:
enum Status
{
handshake,
Expand All @@ -27,6 +26,24 @@ namespace enclave
error
};

// Read-only accessor for the session status, so subclasses can react to
// state changes (eg. stop processing data once the session is closed).
Status get_status() const
{
  return status;
}

// Default error payload sent to the client before closing a session whose
// requested message exceeds the size limit. Returns the message as raw
// plain-text bytes; protocol-aware subclasses (eg. JSON-RPC endpoints)
// may override to produce a structured error response instead.
virtual std::vector<uint8_t> oversized_message_error(
  size_t msg_size, size_t max_msg_size)
{
  const auto s = fmt::format(
    "Requested message ({} bytes) is too large. Maximum allowed is {} "
    "bytes. Closing connection.",
    msg_size,
    max_msg_size);
  // Copy via iterators instead of a C-style pointer cast.
  return std::vector<uint8_t>(s.begin(), s.end());
}

private:
std::vector<uint8_t> pending_write;
std::vector<uint8_t> pending_read;
// Decrypted data, read through mbedtls
Expand Down
20 changes: 19 additions & 1 deletion src/enclave/tlsframedendpoint.h
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,8 @@ namespace enclave
uint32_t msg_size;
size_t count;

static constexpr size_t max_msg_size = 2 * 1024 * 1024;

public:
FramedTLSEndpoint(
size_t session_id,
Expand All @@ -24,6 +26,16 @@ namespace enclave

void recv(const uint8_t* data, size_t size)
{
const auto status = get_status();
if (status >= closed)
{
LOG_INFO_FMT(
"Received additional data for {}, ignoring due to status {}",
session_id,
status);
return;
}

recv_buffered(data, size);

while (true)
Expand All @@ -43,8 +55,14 @@ namespace enclave

// Arbitrary limit on RPC size to stop a client from requesting
// a very large allocation.
if (msg_size > 1 << 21)
if (msg_size > max_msg_size)
{
LOG_FAIL_FMT(
"Received oversized message request ({} bytes) - closing session "
"{}",
msg_size,
session_id);
send(oversized_message_error(msg_size, max_msg_size));
close();
return;
}
Expand Down
14 changes: 8 additions & 6 deletions src/host/tcp.h
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ namespace asynchost
friend class close_ptr<TCPImpl>;

static constexpr int backlog = 128;
static constexpr size_t read_size = 1024;
static constexpr size_t max_read_size = 16384;
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This reduces the output spam for large messages (it reduces the number of reads by a factor of 16), and should also improve performance for them. But this may be at the cost of performance for smaller messages, and it was a dead end in tracking down the actual failure, so it should perhaps be removed.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should this be a comment in the code as well?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It seems like this doesn't actually have a noticeable impact on performance either way, so I think we can just ignore this for now (and hopefully remember it as a potentially bad choice if it comes back to bite us in future?).

I think it should actually be possible to just keep a single buffer in this object, rather than reallocating on every request, but I'll also leave that investigation on the backburner atm.


enum Status
{
Expand Down Expand Up @@ -456,15 +456,17 @@ namespace asynchost
return true;
}

static void on_alloc(uv_handle_t* handle, size_t, uv_buf_t* buf)
static void on_alloc(
uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf)
{
static_cast<TCPImpl*>(handle->data)->on_alloc(buf);
static_cast<TCPImpl*>(handle->data)->on_alloc(suggested_size, buf);
}

void on_alloc(uv_buf_t* buf)
void on_alloc(size_t suggested_size, uv_buf_t* buf)
{
buf->base = new char[read_size];
buf->len = read_size;
auto alloc_size = std::min(suggested_size, max_read_size);
buf->base = new char[alloc_size];
buf->len = alloc_size;
}

void on_free(const uv_buf_t* buf)
Expand Down
11 changes: 11 additions & 0 deletions tests/infra/jsonrpc.py
Original file line number Diff line number Diff line change
Expand Up @@ -123,6 +123,15 @@ def from_json(self, data):
self._from_parsed(parsed)


def human_readable_size(n):
    """Format a byte count with a binary-scaled suffix, eg. '2.00 MB'.

    Scales by factors of 1024 up to GB; values of 1 TB or more are still
    reported in GB.
    """
    units = ("B", "KB", "MB", "GB")
    value = float(n)
    index = 0
    while value >= 1024 and index < len(units) - 1:
        value /= 1024.0
        index += 1
    return f"{value:,.2f} {units[index]}"


class FramedTLSClient:
def __init__(self, host, port, server_hostname, cert=None, key=None, cafile=None):
self.host = host
Expand Down Expand Up @@ -159,11 +168,13 @@ def connect(self):
self.conn.connect((self.host, self.port))

def send(self, msg):
    # Send msg over the TLS connection, framed with a 4-byte header.
    LOG.trace(f"Sending {human_readable_size(len(msg))} message")
    # Frame format: little-endian uint32 payload length, then the payload.
    frame = struct.pack("<I", len(msg)) + msg
    self.conn.sendall(frame)

def _read(self):
(size,) = struct.unpack("<I", self.conn.recv(4))
LOG.trace(f"Reading {human_readable_size(size)} response")
data = self.conn.recv(size)
while len(data) < size:
data += self.conn.recv(size - len(data))
Expand Down