Skip to content

Commit

Permalink
[wasm] Fix out-of-bound behavior for bulk ops
Browse files Browse the repository at this point in the history
The bulk memory operations should not bounds check ahead of time, but
instead should write as many bytes as possible until the first
out-of-bounds access.

Bug: v8:8890
Change-Id: Ia8179fe268fc65816c34a8f3461ed0a0d35600aa
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1497520
Commit-Queue: Ben Smith <binji@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#60040}
  • Loading branch information
binji authored and Commit Bot committed Mar 5, 2019
1 parent d077f9b commit 5f4f57e
Show file tree
Hide file tree
Showing 8 changed files with 259 additions and 59 deletions.
97 changes: 61 additions & 36 deletions src/compiler/wasm-compiler.cc
Original file line number Diff line number Diff line change
Expand Up @@ -3405,37 +3405,32 @@ Node* WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
return index;
}

// Check that the range [start, start + size) is in the range [0, max).
void WasmGraphBuilder::BoundsCheckRange(Node* start, Node* size, Node* max,
wasm::WasmCodePosition position) {
// The accessed memory is [start, end), where {end} is {start + size}. We
// want to check that {start + size <= max}, making sure that {start + size}
// doesn't overflow. This can be expressed as {start <= max - size} as long
// as {max - size} isn't negative, which is true if {size <= max}.
Node* WasmGraphBuilder::BoundsCheckRange(Node* start, Node** size, Node* max,
wasm::WasmCodePosition position) {
auto m = mcgraph()->machine();
Node* cond = graph()->NewNode(m->Uint32LessThanOrEqual(), size, max);
TrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);

// This produces a positive number, since {size <= max}.
Node* effective_size = graph()->NewNode(m->Int32Sub(), max, size);

// Introduce the actual bounds check.
Node* check =
graph()->NewNode(m->Uint32LessThanOrEqual(), start, effective_size);
TrapIfFalse(wasm::kTrapMemOutOfBounds, check, position);

// TODO(binji): Does this need an additional untrusted_code_mitigations_ mask
// like BoundsCheckMem above?
// The region we are trying to access is [start, start+size). If
// {start} > {max}, none of this region is valid, so we trap. Otherwise,
// there may be a subset of the region that is valid. {max - start} is the
// maximum valid size, so if {max - start < size}, then the region is
// partially out-of-bounds.
TrapIfTrue(wasm::kTrapMemOutOfBounds,
graph()->NewNode(m->Uint32LessThan(), max, start), position);
Node* sub = graph()->NewNode(m->Int32Sub(), max, start);
Node* fail = graph()->NewNode(m->Uint32LessThan(), sub, *size);
Diamond d(graph(), mcgraph()->common(), fail, BranchHint::kFalse);
d.Chain(Control());
*size = d.Phi(MachineRepresentation::kWord32, sub, *size);
return fail;
}

Node* WasmGraphBuilder::BoundsCheckMemRange(Node* start, Node* size,
Node* WasmGraphBuilder::BoundsCheckMemRange(Node** start, Node** size,
wasm::WasmCodePosition position) {
// TODO(binji): Support trap handler.
if (!FLAG_wasm_no_bounds_checks) {
BoundsCheckRange(start, size, instance_cache_->mem_size, position);
}
return graph()->NewNode(mcgraph()->machine()->IntAdd(), MemBuffer(0),
Uint32ToUintptr(start));
// TODO(binji): Support trap handler and no bounds check mode.
Node* fail =
BoundsCheckRange(*start, size, instance_cache_->mem_size, position);
*start = graph()->NewNode(mcgraph()->machine()->IntAdd(), MemBuffer(0),
Uint32ToUintptr(*start));
return fail;
}

const Operator* WasmGraphBuilder::GetSafeLoadOperator(int offset,
Expand Down Expand Up @@ -4377,10 +4372,11 @@ Node* WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst,
Node* src, Node* size,
wasm::WasmCodePosition position) {
CheckDataSegmentIsPassiveAndNotDropped(data_segment_index, position);
dst = BoundsCheckMemRange(dst, size, position);
MachineOperatorBuilder* m = mcgraph()->machine();
Node* dst_fail = BoundsCheckMemRange(&dst, &size, position);
auto m = mcgraph()->machine();

Node* seg_index = Uint32Constant(data_segment_index);
Node* src_fail;

{
// Load segment size from WasmInstanceObject::data_segment_sizes.
Expand All @@ -4394,7 +4390,7 @@ Node* WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst,
Effect(), Control()));

// Bounds check the src index against the segment size.
BoundsCheckRange(src, size, seg_size, position);
src_fail = BoundsCheckRange(src, &size, seg_size, position);
}

{
Expand All @@ -4418,7 +4414,10 @@ Node* WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst,
MachineType sig_types[] = {MachineType::Pointer(), MachineType::Pointer(),
MachineType::Uint32()};
MachineSignature sig(0, 3, sig_types);
return BuildCCall(&sig, function, dst, src, size);
BuildCCall(&sig, function, dst, src, size);
return TrapIfTrue(wasm::kTrapMemOutOfBounds,
graph()->NewNode(m->Word32Or(), dst_fail, src_fail),
position);
}

Node* WasmGraphBuilder::DataDrop(uint32_t data_segment_index,
Expand All @@ -4435,25 +4434,51 @@ Node* WasmGraphBuilder::DataDrop(uint32_t data_segment_index,

Node* WasmGraphBuilder::MemoryCopy(Node* dst, Node* src, Node* size,
wasm::WasmCodePosition position) {
dst = BoundsCheckMemRange(dst, size, position);
src = BoundsCheckMemRange(src, size, position);
auto m = mcgraph()->machine();
// The data must be copied backward if the regions overlap and src < dst. The
// regions overlap if {src + size > dst && dst + size > src}. Since we already
// test that {src < dst}, we know that {dst + size > src}, so this simplifies
// to just {src + size > dst}. That sum can overflow, but if we subtract
// {size} from both sides of the inequality we get the equivalent test
// {size > dst - src}.
Node* copy_backward = graph()->NewNode(
m->Word32And(), graph()->NewNode(m->Uint32LessThan(), src, dst),
graph()->NewNode(m->Uint32LessThan(),
graph()->NewNode(m->Int32Sub(), dst, src), size));

Node* dst_fail = BoundsCheckMemRange(&dst, &size, position);

// Trap without copying any bytes if we are copying backward and the copy is
// partially out-of-bounds. We only need to check that the dst region is
// out-of-bounds, because we know that {src < dst}, so the src region is
// always out of bounds if the dst region is.
TrapIfTrue(wasm::kTrapMemOutOfBounds,
graph()->NewNode(m->Word32And(), dst_fail, copy_backward),
position);

Node* src_fail = BoundsCheckMemRange(&src, &size, position);

Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(
ExternalReference::wasm_memory_copy()));
MachineType sig_types[] = {MachineType::Pointer(), MachineType::Pointer(),
MachineType::Uint32()};
MachineSignature sig(0, 3, sig_types);
return BuildCCall(&sig, function, dst, src, size);
BuildCCall(&sig, function, dst, src, size);
return TrapIfTrue(wasm::kTrapMemOutOfBounds,
graph()->NewNode(m->Word32Or(), dst_fail, src_fail),
position);
}

Node* WasmGraphBuilder::MemoryFill(Node* dst, Node* value, Node* size,
wasm::WasmCodePosition position) {
dst = BoundsCheckMemRange(dst, size, position);
Node* fail = BoundsCheckMemRange(&dst, &size, position);
Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(
ExternalReference::wasm_memory_fill()));
MachineType sig_types[] = {MachineType::Pointer(), MachineType::Uint32(),
MachineType::Uint32()};
MachineSignature sig(0, 3, sig_types);
return BuildCCall(&sig, function, dst, value, size);
BuildCCall(&sig, function, dst, value, size);
return TrapIfTrue(wasm::kTrapMemOutOfBounds, fail, position);
}

Node* WasmGraphBuilder::CheckElemSegmentIsPassiveAndNotDropped(
Expand Down
15 changes: 10 additions & 5 deletions src/compiler/wasm-compiler.h
Original file line number Diff line number Diff line change
Expand Up @@ -441,11 +441,16 @@ class WasmGraphBuilder {
Node* BoundsCheckMem(uint8_t access_size, Node* index, uint32_t offset,
wasm::WasmCodePosition, EnforceBoundsCheck);
// Check that the range [start, start + size) is in the range [0, max).
void BoundsCheckRange(Node* start, Node* size, Node* max,
wasm::WasmCodePosition);
// BoundsCheckMemRange receives a uint32 {start} and {size} and returns
// a pointer into memory at that index, if it is in bounds.
Node* BoundsCheckMemRange(Node* start, Node* size, wasm::WasmCodePosition);
// Also updates *size with the valid range. Returns true if the range is
// partially out-of-bounds, traps if it is completely out-of-bounds.
Node* BoundsCheckRange(Node* start, Node** size, Node* max,
wasm::WasmCodePosition);
// BoundsCheckMemRange receives a uint32 {start} and {size}, and checks if it
// is in bounds. Also updates *size with the valid range, and converts *start
// to a pointer into memory at that index. Returns true if the range is
// partially out-of-bounds, traps if it is completely out-of-bounds.
Node* BoundsCheckMemRange(Node** start, Node** size, wasm::WasmCodePosition);

Node* CheckBoundsAndAlignment(uint8_t access_size, Node* index,
uint32_t offset, wasm::WasmCodePosition);

Expand Down
15 changes: 15 additions & 0 deletions src/utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -74,6 +74,21 @@ inline constexpr bool IsInBounds(size_t index, size_t length, size_t max) {
return length <= max && index <= (max - length);
}

// Checks whether the half-open range [index, index+length) lies within
// [0, max). If it does not, {*length} is clamped down to the largest valid
// length (possibly 0 when {index > max}).
//
// Returns true if the original range was fully in bounds, false if any
// clamping was required. The check is overflow-safe: it never computes
// {index + length}, so it works even when that sum would wrap around.
//
// Marked constexpr for consistency with IsInBounds above (C++14 relaxed
// constexpr permits mutation through the pointer parameter).
template <typename T>
inline constexpr bool ClampToBounds(T index, T* length, T max) {
  if (index > max) {
    // The start itself is out of bounds: nothing in the range is valid.
    *length = 0;
    return false;
  }
  // Maximum number of elements available starting at {index}.
  T avail = max - index;
  bool oob = *length > avail;
  if (oob) *length = avail;
  return !oob;
}

// X must be a power of 2. Returns the number of trailing zeros.
template <typename T,
typename = typename std::enable_if<std::is_integral<T>::value>::type>
Expand Down
9 changes: 5 additions & 4 deletions src/wasm/module-instantiate.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1481,11 +1481,12 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
// TODO(wasm): Move this functionality into wasm-objects, since it is used
// for both instantiation and in the implementation of the table.init
// instruction.
if (!IsInBounds(dst, count, table_instance.table_size)) return false;
if (!IsInBounds(src, count, elem_segment.entries.size())) return false;
bool ok = ClampToBounds<size_t>(dst, &count, table_instance.table_size);
// Use & instead of && so the clamp is not short-circuited.
ok &= ClampToBounds<size_t>(src, &count, elem_segment.entries.size());

const WasmModule* module = instance->module();
for (uint32_t i = 0; i < count; ++i) {
for (size_t i = 0; i < count; ++i) {
uint32_t func_index = elem_segment.entries[src + i];
int entry_index = static_cast<int>(dst + i);

Expand Down Expand Up @@ -1547,7 +1548,7 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
instance, func_index);
}
}
return true;
return ok;
}

void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
Expand Down
27 changes: 17 additions & 10 deletions src/wasm/wasm-objects.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1442,9 +1442,9 @@ Address WasmInstanceObject::GetCallTarget(uint32_t func_index) {

namespace {
void CopyTableEntriesImpl(Handle<WasmInstanceObject> instance, uint32_t dst,
uint32_t src, uint32_t count) {
uint32_t src, uint32_t count, bool copy_backward) {
DCHECK(IsInBounds(dst, count, instance->indirect_function_table_size()));
if (src < dst) {
if (copy_backward) {
for (uint32_t i = count; i > 0; i--) {
auto to_entry = IndirectFunctionTableEntry(instance, dst + i - 1);
auto from_entry = IndirectFunctionTableEntry(instance, src + i - 1);
Expand All @@ -1471,14 +1471,21 @@ bool WasmInstanceObject::CopyTableEntries(Isolate* isolate,
CHECK_EQ(0, table_src_index);
CHECK_EQ(0, table_dst_index);
auto max = instance->indirect_function_table_size();
if (!IsInBounds(dst, count, max)) return false;
if (!IsInBounds(src, count, max)) return false;
if (dst == src) return true; // no-op
bool copy_backward = src < dst && dst - src < count;
bool ok = ClampToBounds(dst, &count, max);
// Use & instead of && so the clamp is not short-circuited.
ok &= ClampToBounds(src, &count, max);

// If performing a partial copy when copying backward, then the first access
// will be out-of-bounds, so no entries should be copied.
if (copy_backward && !ok) return ok;

if (dst == src || count == 0) return ok; // no-op

if (!instance->has_table_object()) {
// No table object, only need to update this instance.
CopyTableEntriesImpl(instance, dst, src, count);
return true;
CopyTableEntriesImpl(instance, dst, src, count, copy_backward);
return ok;
}

Handle<WasmTableObject> table =
Expand All @@ -1491,12 +1498,12 @@ bool WasmInstanceObject::CopyTableEntries(Isolate* isolate,
WasmInstanceObject::cast(
dispatch_tables->get(i + kDispatchTableInstanceOffset)),
isolate);
CopyTableEntriesImpl(target_instance, dst, src, count);
CopyTableEntriesImpl(target_instance, dst, src, count, copy_backward);
}

// Copy the function entries.
Handle<FixedArray> functions(table->elements(), isolate);
if (src < dst) {
if (copy_backward) {
for (uint32_t i = count; i > 0; i--) {
functions->set(dst + i - 1, functions->get(src + i - 1));
}
Expand All @@ -1505,7 +1512,7 @@ bool WasmInstanceObject::CopyTableEntries(Isolate* isolate,
functions->set(dst + i, functions->get(src + i));
}
}
return true;
return ok;
}

// static
Expand Down
Loading

0 comments on commit 5f4f57e

Please sign in to comment.