Add support for overlapping containers
Adds support for overlapping containers to __sanitizer_copy_contiguous_container_annotations: when the destination range overlaps the source, annotations are copied in reverse order.
Adds a test case for that.
AdvenamTacet committed Aug 28, 2024
1 parent 1c89e0f commit 93ab376
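
For context, a minimal usage sketch of the interface this commit extends, with overlapping source and destination storage. It is illustrative rather than taken from the commit: it assumes 8-byte shadow granularity, a granule-aligned new[] allocation, the __sanitizer_copy_contiguous_container_annotations declaration already available from <sanitizer/common_interface_defs.h>, and a clang++ -fsanitize=address build.

// Illustrative only -- not part of this commit.
#include <sanitizer/asan_interface.h>
#include <sanitizer/common_interface_defs.h>
#include <cassert>
#include <cstring>

int main() {
  char *buf = new char[64];
  // Old container storage [buf, buf+40), first 24 bytes in use; the unused
  // capacity is poisoned, as a container annotation would leave it.
  char *old_beg = buf, *old_end = buf + 40;
  __asan_poison_memory_region(old_beg + 24, 16);

  // New storage overlaps the old one (shifted forward by one granule).
  char *new_beg = buf + 8, *new_end = buf + 48;

  // Move the annotations first, then the live bytes: after the call the
  // destination's in-use prefix [new_beg, new_beg+24) is addressable and the
  // source prefix [old_beg, old_beg+24) still is, so the memmove is clean.
  __sanitizer_copy_contiguous_container_annotations(old_beg, old_end,
                                                    new_beg, new_end);
  std::memmove(new_beg, old_beg, 24);

  assert(!__asan_address_is_poisoned(new_beg + 23)); // in-use part
  assert(__asan_address_is_poisoned(new_beg + 24));  // unused capacity

  __asan_unpoison_memory_region(buf, 64);
  delete[] buf;
}

With this commit, such overlapping ranges take the reverse-order and shadow-memmove paths added in the diff below; previously only disjoint ranges were supported.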
Showing 2 changed files with 173 additions and 16 deletions.
109 changes: 98 additions & 11 deletions compiler-rt/lib/asan/asan_poisoning.cpp
@@ -639,6 +639,42 @@ static void SlowCopyContainerAnnotations(uptr old_storage_beg,
}
}

// This function is basically the same as SlowCopyContainerAnnotations,
// but it iterates over the elements in reverse order.
static void SlowRCopyContainerAnnotations(uptr old_storage_beg,
uptr old_storage_end,
uptr new_storage_beg,
uptr new_storage_end) {
constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
uptr new_internal_beg = RoundDownTo(new_storage_beg, granularity);
uptr new_internal_end = RoundDownTo(new_storage_end, granularity);
uptr old_ptr = old_storage_end;
uptr new_ptr = new_storage_end;

while (new_ptr > new_storage_beg) {
uptr granule_begin = RoundDownTo(new_ptr - 1, granularity);
uptr unpoisoned_bytes = 0;

for (; new_ptr != granule_begin && new_ptr != new_storage_beg;
--new_ptr, --old_ptr) {
if (unpoisoned_bytes == 0 && !AddressIsPoisoned(old_ptr - 1)) {
unpoisoned_bytes = new_ptr - granule_begin;
}
}

if (new_ptr >= new_internal_end && !AddressIsPoisoned(new_storage_end)) {
continue;
}

if (granule_begin == new_ptr || unpoisoned_bytes != 0) {
AnnotateContainerGranuleAccessibleBytes(granule_begin, unpoisoned_bytes);
} else if (!AddressIsPoisoned(new_storage_beg)) {
AnnotateContainerGranuleAccessibleBytes(granule_begin,
new_storage_beg - granule_begin);
}
}
}

// This function copies ASan memory annotations (poisoned/unpoisoned states)
// from one buffer to another.
// Its main purpose is to help with relocating trivially relocatable objects,
@@ -678,9 +714,61 @@ void __sanitizer_copy_contiguous_container_annotations(
&stack);
}

if (old_storage_beg == old_storage_end)
if (old_storage_beg == old_storage_end || old_storage_beg == new_storage_beg)
return;
// The only edge cases involve edge granules when the container starts or
// ends within a granule. We already know that the container's start and end
// points lie in different granules.
uptr old_external_end = RoundUpTo(old_storage_end, granularity);
if (old_storage_beg < new_storage_beg &&
new_storage_beg <= old_external_end) {
// In this case, we have to copy elements in reverse order, because the
// destination buffer starts in the middle of the source buffer (or shares
// its first granule with it).
// It may still be possible to optimize this, but the reverse order has to be kept.
if (old_storage_beg % granularity != new_storage_beg % granularity ||
WithinOneGranule(new_storage_beg, new_storage_end)) {
SlowRCopyContainerAnnotations(old_storage_beg, old_storage_end,
new_storage_beg, new_storage_end);
return;
}

uptr new_internal_end = RoundDownTo(new_storage_end, granularity);
if (new_internal_end != new_storage_end &&
AddressIsPoisoned(new_storage_end)) {
// Last granule
uptr old_internal_end = RoundDownTo(old_storage_end, granularity);
if (AddressIsPoisoned(old_storage_end)) {
CopyGranuleAnnotation(new_internal_end, old_internal_end);
} else {
AnnotateContainerGranuleAccessibleBytes(
new_internal_end, old_storage_end - old_internal_end);
}
}

uptr new_internal_beg = RoundUpTo(new_storage_beg, granularity);
if (new_internal_end > new_internal_beg) {
uptr old_internal_beg = RoundUpTo(old_storage_beg, granularity);
__builtin_memmove((u8 *)MemToShadow(new_internal_beg),
(u8 *)MemToShadow(old_internal_beg),
(new_internal_end - new_internal_beg) / granularity);
}

if (new_internal_beg != new_storage_beg) {
// First granule
uptr new_external_beg = RoundDownTo(new_storage_beg, granularity);
uptr old_external_beg = RoundDownTo(old_storage_beg, granularity);
if (!AddressIsPoisoned(old_storage_beg)) {
CopyGranuleAnnotation(new_external_beg, old_external_beg);
} else if (!AddressIsPoisoned(new_storage_beg)) {
AnnotateContainerGranuleAccessibleBytes(
new_external_beg, new_storage_beg - new_external_beg);
}
}
return;
}

// Simple copy of annotations of all internal granules.
if (old_storage_beg % granularity != new_storage_beg % granularity ||
WithinOneGranule(new_storage_beg, new_storage_end)) {
SlowCopyContainerAnnotations(old_storage_beg, old_storage_end,
@@ -689,16 +777,6 @@ void __sanitizer_copy_contiguous_container_annotations(
}

uptr new_internal_beg = RoundUpTo(new_storage_beg, granularity);
uptr new_internal_end = RoundDownTo(new_storage_end, granularity);
if (new_internal_end > new_internal_beg) {
uptr old_internal_beg = RoundUpTo(old_storage_beg, granularity);
__builtin_memcpy((u8 *)MemToShadow(new_internal_beg),
(u8 *)MemToShadow(old_internal_beg),
(new_internal_end - new_internal_beg) / granularity);
}
// The only remaining cases involve edge granules when the container starts or
// ends within a granule. We already know that the container's start and end
// points lie in different granules.
if (new_internal_beg != new_storage_beg) {
// First granule
uptr new_external_beg = RoundDownTo(new_storage_beg, granularity);
Expand All @@ -710,6 +788,15 @@ void __sanitizer_copy_contiguous_container_annotations(
new_external_beg, new_storage_beg - new_external_beg);
}
}

uptr new_internal_end = RoundDownTo(new_storage_end, granularity);
if (new_internal_end > new_internal_beg) {
uptr old_internal_beg = RoundUpTo(old_storage_beg, granularity);
__builtin_memmove((u8 *)MemToShadow(new_internal_beg),
(u8 *)MemToShadow(old_internal_beg),
(new_internal_end - new_internal_beg) / granularity);
}

if (new_internal_end != new_storage_end &&
AddressIsPoisoned(new_storage_end)) {
// Last granule
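An aside on the granule math driving the branch above: the shadow memmove fast path is only taken when both containers start at the same offset within an 8-byte granule and the destination spans more than one granule; everything else goes through the byte-by-byte Slow(R)CopyContainerAnnotations loops. A toy model of that decision, with RoundDownTo and WithinOneGranule reconstructed from their apparent semantics (their exact definitions are outside the shown hunks):

#include <cassert>
#include <cstdint>

constexpr uintptr_t kGranularity = 8; // ASAN_SHADOW_GRANULARITY on typical targets

// Assumes b is a power of two, as the shadow granularity always is.
constexpr uintptr_t RoundDownTo(uintptr_t x, uintptr_t b) { return x & ~(b - 1); }

// Assumed semantics: [beg, end) fits entirely inside one shadow granule.
constexpr bool WithinOneGranule(uintptr_t beg, uintptr_t end) {
  return RoundDownTo(beg, kGranularity) == RoundDownTo(end - 1, kGranularity);
}

// Mirrors the condition guarding the shadow-memmove fast path above.
constexpr bool CanUseShadowMemmove(uintptr_t old_beg, uintptr_t new_beg,
                                   uintptr_t new_end) {
  return old_beg % kGranularity == new_beg % kGranularity &&
         !WithinOneGranule(new_beg, new_end);
}

int main() {
  assert(CanUseShadowMemmove(0x1000, 0x2008, 0x2030));  // same in-granule offset
  assert(!CanUseShadowMemmove(0x1001, 0x2008, 0x2030)); // offsets differ -> slow path
  assert(!CanUseShadowMemmove(0x1002, 0x2002, 0x2006)); // one granule -> slow path
}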
80 changes: 75 additions & 5 deletions compiler-rt/test/asan/TestCases/move_container_annotations.cpp
@@ -111,7 +111,7 @@ void TestNonOverlappingContainers(size_t capacity, size_t off_old,
assert(!__asan_address_is_poisoned(cur));
}
}
// In every granule, poisoned memory should come after the last expected unpoisoned byte.

char *next;
for (cur = new_beg; cur + kGranularity <= new_end; cur = next) {
next = RoundUp(cur + 1);
Expand All @@ -124,15 +124,14 @@ void TestNonOverlappingContainers(size_t capacity, size_t off_old,
}
}
// [cur; new_end) is not checked yet.
// If new_buffer were not poisoned, it cannot be poisoned and we can ignore
// a separate check.
// If new_buffer were not poisoned, it cannot be poisoned.
// If new_buffer were poisoned, it should be the same as earlier.
if (cur < new_end && poison_new) {
if (cur < new_end) {
size_t unpoisoned = count_unpoisoned(poison_states, new_end - cur);
if (unpoisoned > 0) {
assert(!__asan_address_is_poisoned(cur + unpoisoned - 1));
}
if (cur + unpoisoned < new_end) {
if (cur + unpoisoned < new_end && poison_new) {
assert(__asan_address_is_poisoned(cur + unpoisoned));
}
}
@@ -148,13 +147,84 @@ void TestNonOverlappingContainers(size_t capacity, size_t off_old,
delete[] new_buffer;
}

void TestOverlappingContainers(size_t capacity, size_t off_old, size_t off_new,
int poison_buffers) {
size_t buffer_size = capacity + off_old + off_new + kGranularity * 3;
char *buffer = new char[buffer_size];
char *buffer_end = buffer + buffer_size;
bool poison_whole = poison_buffers % 2 == 1;
bool poison_new = poison_buffers / 2 == 1;
char *old_beg = buffer + kGranularity + off_old;
char *new_beg = buffer + kGranularity + off_new;
char *old_end = old_beg + capacity;
char *new_end = new_beg + capacity;

for (int i = 0; i < 35; i++) {
if (poison_whole)
__asan_poison_memory_region(buffer, buffer_size);
if (poison_new)
__asan_poison_memory_region(new_beg, new_end - new_beg);

RandomPoison(old_beg, old_end);
std::deque<int> poison_states = GetPoisonedState(old_beg, old_end);
__sanitizer_copy_contiguous_container_annotations(old_beg, old_end, new_beg,
new_end);
// This variable is used only when the buffer ends in the middle of a granule.
bool can_modify_last_granule = __asan_address_is_poisoned(new_end);

// If the whole buffer was poisoned, the expected state of memory before the
// first container is undetermined.
// If the buffer was not poisoned, that memory should still be unpoisoned.
char *cur;
if (!poison_whole) {
for (cur = buffer; cur < old_beg && cur < new_beg; ++cur) {
assert(!__asan_address_is_poisoned(cur));
}
}

// Memory after the end of both containers should be the same as at the beginning.
for (cur = (old_end > new_end) ? old_end : new_end; cur < buffer_end;
++cur) {
assert(__asan_address_is_poisoned(cur) == poison_whole);
}

char *next;
for (cur = new_beg; cur + kGranularity <= new_end; cur = next) {
next = RoundUp(cur + 1);
size_t unpoisoned = count_unpoisoned(poison_states, next - cur);
if (unpoisoned > 0) {
assert(!__asan_address_is_poisoned(cur + unpoisoned - 1));
}
if (cur + unpoisoned < next) {
assert(__asan_address_is_poisoned(cur + unpoisoned));
}
}
// [cur; new_end) has not been checked yet; it is non-empty only if the container
// ends in the middle of a granule.
// It can be poisoned only if the non-container bytes in that granule were poisoned;
// otherwise, it should be unpoisoned.
if (cur < new_end) {
size_t unpoisoned = count_unpoisoned(poison_states, new_end - cur);
if (unpoisoned > 0) {
assert(!__asan_address_is_poisoned(cur + unpoisoned - 1));
}
if (cur + unpoisoned < new_end && can_modify_last_granule) {
assert(__asan_address_is_poisoned(cur + unpoisoned));
}
}
}

__asan_unpoison_memory_region(buffer, buffer_size);
delete[] buffer;
}

int main(int argc, char **argv) {
int n = argc == 1 ? 64 : atoi(argv[1]);
for (size_t j = 0; j < kGranularity + 2; j++) {
for (size_t k = 0; k < kGranularity + 2; k++) {
for (int i = 0; i <= n; i++) {
for (int poison = 0; poison < 4; ++poison) {
TestNonOverlappingContainers(i, j, k, poison);
TestOverlappingContainers(i, j, k, poison);
}
}
}
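
One corner the new test covers via can_modify_last_granule: if the destination container ends mid-granule and the byte just past new_end is addressable, the copy cannot poison the destination's trailing bytes without also poisoning memory outside the container, so that granule is left untouched. A standalone sketch of that behavior (illustrative offsets; assumes 8-byte granules, a granule-aligned new[] allocation, and a clang++ -fsanitize=address build; not part of this commit):

#include <sanitizer/asan_interface.h>
#include <sanitizer/common_interface_defs.h>
#include <cassert>

int main() {
  char *buf = new char[48];
  // Old container [buf, buf+24): 8 bytes in use, 16 bytes of poisoned capacity.
  char *old_beg = buf, *old_end = buf + 24;
  __asan_poison_memory_region(buf + 8, 16);

  // Overlapping destination [buf+18, buf+42): starts and ends mid-granule, so
  // the slow reverse-order path (SlowRCopyContainerAnnotations) is taken.
  char *new_beg = buf + 18, *new_end = buf + 42;
  __sanitizer_copy_contiguous_container_annotations(old_beg, old_end,
                                                    new_beg, new_end);

  assert(!__asan_address_is_poisoned(buf + 20)); // maps to old byte 2 (in use)
  assert(__asan_address_is_poisoned(buf + 30));  // maps to old byte 12 (capacity)
  // Last granule [buf+40, buf+48): buf+42..47 lie outside the container and were
  // addressable, so the container bytes buf+40..41 stay addressable too, even
  // though the corresponding old bytes were poisoned.
  assert(!__asan_address_is_poisoned(buf + 41));
  assert(!__asan_address_is_poisoned(buf + 42));

  __asan_unpoison_memory_region(buf, 48);
  delete[] buf;
}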
