Skip to content

Commit

Permalink
libsanitizer merge from upstream r173241
Browse files Browse the repository at this point in the history
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@195404 138bc75d-0d04-0410-961f-82ee72b054a4
  • Loading branch information
kcc committed Jan 23, 2013
1 parent bc5663d commit 4a2c1ff
Show file tree
Hide file tree
Showing 58 changed files with 1,564 additions and 1,060 deletions.
4 changes: 4 additions & 0 deletions gcc/ChangeLog
Original file line number Diff line number Diff line change
@@ -1,3 +1,7 @@
2013-01-23 Kostya Serebryany <kcc@google.com>

* config/darwin.h: remove dependency on CoreFoundation (asan on Mac OS).

2013-01-23 Jakub Jelinek <jakub@redhat.com>

PR target/49069
Expand Down
2 changes: 1 addition & 1 deletion gcc/config/darwin.h
Original file line number Diff line number Diff line change
Expand Up @@ -178,7 +178,7 @@ extern GTY(()) int darwin_ms_struct;
%{L*} %(link_libgcc) %o %{fprofile-arcs|fprofile-generate*|coverage:-lgcov} \
%{fopenmp|ftree-parallelize-loops=*: \
%{static|static-libgcc|static-libstdc++|static-libgfortran: libgomp.a%s; : -lgomp } } \
%{fsanitize=address: -framework CoreFoundation -lasan } \
%{fsanitize=address: -lasan } \
%{fgnu-tm: \
%{static|static-libgcc|static-libstdc++|static-libgfortran: libitm.a%s; : -litm } } \
%{!nostdlib:%{!nodefaultlibs:\
Expand Down
6 changes: 6 additions & 0 deletions libsanitizer/ChangeLog
Original file line number Diff line number Diff line change
@@ -1,3 +1,9 @@
2013-01-23 Kostya Serebryany <kcc@google.com>

PR sanitizer/55989
* All source files: Merge from upstream r173241.
* merge.sh: Support merging .inc files.

2013-01-16 Jakub Jelinek <jakub@redhat.com>

* sanitizer_common/Makefile.am (AM_CXXFLAGS): Remove
Expand Down
2 changes: 1 addition & 1 deletion libsanitizer/MERGE
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
171973
173241

The first line of this file holds the svn revision number of the
last merge done from the master library sources.
18 changes: 9 additions & 9 deletions libsanitizer/asan/asan_allocator.cc
Original file line number Diff line number Diff line change
Expand Up @@ -27,14 +27,14 @@
#if ASAN_ALLOCATOR_VERSION == 1
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_lock.h"
#include "asan_mapping.h"
#include "asan_stats.h"
#include "asan_report.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
#include "sanitizer/asan_interface.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_mutex.h"

namespace __asan {

Expand Down Expand Up @@ -227,7 +227,7 @@ class MallocInfo {
AsanChunk *m = 0;
AsanChunk **fl = &free_lists_[size_class];
{
ScopedLock lock(&mu_);
BlockingMutexLock lock(&mu_);
for (uptr i = 0; i < n_chunks; i++) {
if (!(*fl)) {
*fl = GetNewChunks(size_class);
Expand All @@ -245,7 +245,7 @@ class MallocInfo {
void SwallowThreadLocalMallocStorage(AsanThreadLocalMallocStorage *x,
bool eat_free_lists) {
CHECK(flags()->quarantine_size > 0);
ScopedLock lock(&mu_);
BlockingMutexLock lock(&mu_);
AsanChunkFifoList *q = &x->quarantine_;
if (q->size() > 0) {
quarantine_.PushList(q);
Expand All @@ -269,18 +269,18 @@ class MallocInfo {
}

void BypassThreadLocalQuarantine(AsanChunk *chunk) {
ScopedLock lock(&mu_);
BlockingMutexLock lock(&mu_);
quarantine_.Push(chunk);
}

AsanChunk *FindChunkByAddr(uptr addr) {
ScopedLock lock(&mu_);
BlockingMutexLock lock(&mu_);
return FindChunkByAddrUnlocked(addr);
}

uptr AllocationSize(uptr ptr) {
if (!ptr) return 0;
ScopedLock lock(&mu_);
BlockingMutexLock lock(&mu_);

// Make sure this is our chunk and |ptr| actually points to the beginning
// of the allocated memory.
Expand All @@ -303,7 +303,7 @@ class MallocInfo {
}

void PrintStatus() {
ScopedLock lock(&mu_);
BlockingMutexLock lock(&mu_);
uptr malloced = 0;

Printf(" MallocInfo: in quarantine: %zu malloced: %zu; ",
Expand All @@ -321,7 +321,7 @@ class MallocInfo {
}

PageGroup *FindPageGroup(uptr addr) {
ScopedLock lock(&mu_);
BlockingMutexLock lock(&mu_);
return FindPageGroupUnlocked(addr);
}

Expand Down Expand Up @@ -479,7 +479,7 @@ class MallocInfo {

AsanChunk *free_lists_[kNumberOfSizeClasses];
AsanChunkFifoList quarantine_;
AsanLock mu_;
BlockingMutex mu_;

PageGroup *page_groups_[kMaxAvailableRam / kMinMmapSize];
atomic_uint32_t n_page_groups_;
Expand Down
20 changes: 15 additions & 5 deletions libsanitizer/asan/asan_allocator.h
Original file line number Diff line number Diff line change
Expand Up @@ -20,8 +20,14 @@
// We are in the process of transitioning from the old allocator (version 1)
// to a new one (version 2). The change is quite intrusive so both allocators
// will co-exist in the source base for a while. The actual allocator is chosen
// at build time by redefining this macrozz.
#define ASAN_ALLOCATOR_VERSION 1
// at build time by redefining this macro.
#ifndef ASAN_ALLOCATOR_VERSION
# if ASAN_LINUX && !ASAN_ANDROID
# define ASAN_ALLOCATOR_VERSION 2
# else
# define ASAN_ALLOCATOR_VERSION 1
# endif
#endif // ASAN_ALLOCATOR_VERSION

namespace __asan {

Expand Down Expand Up @@ -96,17 +102,21 @@ class AsanChunkFifoList: public IntrusiveList<AsanChunk> {

struct AsanThreadLocalMallocStorage {
explicit AsanThreadLocalMallocStorage(LinkerInitialized x)
: quarantine_(x) { }
#if ASAN_ALLOCATOR_VERSION == 1
: quarantine_(x)
#endif
{ }
AsanThreadLocalMallocStorage() {
CHECK(REAL(memset));
REAL(memset)(this, 0, sizeof(AsanThreadLocalMallocStorage));
}

AsanChunkFifoList quarantine_;
#if ASAN_ALLOCATOR_VERSION == 1
AsanChunkFifoList quarantine_;
AsanChunk *free_lists_[kNumberOfSizeClasses];
#else
uptr allocator2_cache[1024]; // Opaque.
uptr quarantine_cache[16];
uptr allocator2_cache[96 * (512 * 8 + 16)]; // Opaque.
#endif
void CommitBack();
};
Expand Down
130 changes: 62 additions & 68 deletions libsanitizer/asan/asan_allocator2.cc
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_quarantine.h"

namespace __asan {

Expand Down Expand Up @@ -90,15 +91,6 @@ static const uptr kMaxThreadLocalQuarantine =

static const uptr kReturnOnZeroMalloc = 2048; // Zero page is protected.

static int inited = 0;

static void Init() {
if (inited) return;
__asan_init();
inited = true; // this must happen before any threads are created.
allocator.Init();
}

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
Expand Down Expand Up @@ -244,31 +236,26 @@ void AsanChunkView::GetFreeStack(StackTrace *stack) {
chunk_->FreeStackSize());
}

class Quarantine: public AsanChunkFifoList {
public:
void SwallowThreadLocalQuarantine(AsanThreadLocalMallocStorage *ms) {
AsanChunkFifoList *q = &ms->quarantine_;
if (!q->size()) return;
SpinMutexLock l(&mutex_);
PushList(q);
PopAndDeallocateLoop(ms);
}
struct QuarantineCallback;
typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;
static AsanQuarantine quarantine(LINKER_INITIALIZED);
static QuarantineCache fallback_quarantine_cache(LINKER_INITIALIZED);
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;

void BypassThreadLocalQuarantine(AsanChunk *m) {
SpinMutexLock l(&mutex_);
Push(m);
}
// Returns the per-thread quarantine cache, which lives inside the opaque
// ms->quarantine_cache byte buffer of AsanThreadLocalMallocStorage.
// The CHECK_LE verifies that the opaque buffer is large enough to hold a
// QuarantineCache before we reinterpret it as one.
QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}

private:
void PopAndDeallocateLoop(AsanThreadLocalMallocStorage *ms) {
while (size() > (uptr)flags()->quarantine_size) {
PopAndDeallocate(ms);
}
struct QuarantineCallback {
explicit QuarantineCallback(AllocatorCache *cache)
: cache_(cache) {
}
void PopAndDeallocate(AsanThreadLocalMallocStorage *ms) {
CHECK_GT(size(), 0);
AsanChunk *m = Pop();
CHECK(m);

void Recycle(AsanChunk *m) {
CHECK(m->chunk_state == CHUNK_QUARANTINE);
m->chunk_state = CHUNK_AVAILABLE;
CHECK_NE(m->alloc_tid, kInvalidTid);
Expand All @@ -288,34 +275,27 @@ class Quarantine: public AsanChunkFifoList {
thread_stats.real_frees++;
thread_stats.really_freed += m->UsedSize();

allocator.Deallocate(GetAllocatorCache(ms), p);
allocator.Deallocate(cache_, p);
}
SpinMutex mutex_;
};

static Quarantine quarantine;
void *Allocate(uptr size) {
return allocator.Allocate(cache_, size, 1, false);
}

void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
CHECK(q->size() > 0);
size_ += q->size();
append_back(q);
q->clear();
}
void Deallocate(void *p) {
allocator.Deallocate(cache_, p);
}

void AsanChunkFifoList::Push(AsanChunk *n) {
push_back(n);
size_ += n->UsedSize();
}
AllocatorCache *cache_;
};

// Interesting performance observation: this function takes up to 15% of overall
// allocator time. That's because *first_ has been evicted from cache long time
// ago. Not sure if we can or want to do anything with this.
AsanChunk *AsanChunkFifoList::Pop() {
CHECK(first_);
AsanChunk *res = front();
size_ -= res->UsedSize();
pop_front();
return res;
// Lazily initializes the allocator and the quarantine exactly once.
// NOTE(review): the guard is a plain int, not an atomic — per the inline
// comment below, initialization is assumed to happen before any threads
// are created, so no synchronization is used here.
static void Init() {
  static int inited = 0;
  if (inited) return;
  __asan_init();
  inited = true; // this must happen before any threads are created.
  allocator.Init();
  // Quarantine capacity comes from the runtime flag; kMaxThreadLocalQuarantine
  // presumably bounds each thread's local cache size — confirm against
  // Quarantine::Init in sanitizer_quarantine.h.
  quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}

static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
Expand Down Expand Up @@ -355,9 +335,18 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
}

AsanThread *t = asanThreadRegistry().GetCurrent();
AllocatorCache *cache = t ? GetAllocatorCache(&t->malloc_storage()) : 0;
void *allocated = allocator.Allocate(cache, needed_size, 8, false);
void *allocated;
if (t) {
AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
allocated = allocator.Allocate(cache, needed_size, 8, false);
} else {
SpinMutexLock l(&fallback_mutex);
AllocatorCache *cache = &fallback_allocator_cache;
allocated = allocator.Allocate(cache, needed_size, 8, false);
}
uptr alloc_beg = reinterpret_cast<uptr>(allocated);
// Clear the first allocated word (an old kMemalignMagic may still be there).
reinterpret_cast<uptr *>(alloc_beg)[0] = 0;
uptr alloc_end = alloc_beg + needed_size;
uptr beg_plus_redzone = alloc_beg + rz_size;
uptr user_beg = beg_plus_redzone;
Expand Down Expand Up @@ -432,7 +421,7 @@ static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {

// Flip the chunk_state atomically to avoid race on double-free.
u8 old_chunk_state = atomic_exchange((atomic_uint8_t*)m, CHUNK_QUARANTINE,
memory_order_acq_rel);
memory_order_relaxed);

if (old_chunk_state == CHUNK_QUARANTINE)
ReportDoubleFree((uptr)ptr, stack);
Expand Down Expand Up @@ -466,13 +455,15 @@ static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {

// Push into quarantine.
if (t) {
AsanChunkFifoList &q = t->malloc_storage().quarantine_;
q.Push(m);

if (q.size() > kMaxThreadLocalQuarantine)
quarantine.SwallowThreadLocalQuarantine(&t->malloc_storage());
AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
AllocatorCache *ac = GetAllocatorCache(ms);
quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac),
m, m->UsedSize());
} else {
quarantine.BypassThreadLocalQuarantine(m);
SpinMutexLock l(&fallback_mutex);
AllocatorCache *ac = &fallback_allocator_cache;
quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac),
m, m->UsedSize());
}

ASAN_FREE_HOOK(ptr);
Expand Down Expand Up @@ -584,7 +575,8 @@ AsanChunkView FindHeapChunkByAddress(uptr addr) {
}

void AsanThreadLocalMallocStorage::CommitBack() {
quarantine.SwallowThreadLocalQuarantine(this);
AllocatorCache *ac = GetAllocatorCache(this);
quarantine.Drain(GetQuarantineCache(this), QuarantineCallback(ac));
allocator.SwallowCache(GetAllocatorCache(this));
}

Expand Down Expand Up @@ -681,16 +673,18 @@ uptr __asan_get_estimated_allocated_size(uptr size) {
}

bool __asan_get_ownership(const void *p) {
return AllocationSize(reinterpret_cast<uptr>(p)) > 0;
uptr ptr = reinterpret_cast<uptr>(p);
return (ptr == kReturnOnZeroMalloc) || (AllocationSize(ptr) > 0);
}

uptr __asan_get_allocated_size(const void *p) {
if (p == 0) return 0;
uptr allocated_size = AllocationSize(reinterpret_cast<uptr>(p));
uptr ptr = reinterpret_cast<uptr>(p);
uptr allocated_size = AllocationSize(ptr);
// Die if p is not malloced or if it is already freed.
if (allocated_size == 0) {
if (allocated_size == 0 && ptr != kReturnOnZeroMalloc) {
GET_STACK_TRACE_FATAL_HERE;
ReportAsanGetAllocatedSizeNotOwned(reinterpret_cast<uptr>(p), &stack);
ReportAsanGetAllocatedSizeNotOwned(ptr, &stack);
}
return allocated_size;
}
Expand Down
Loading

0 comments on commit 4a2c1ff

Please sign in to comment.