Sources/SentryCrash/Recording/SentryCrashBinaryImageCache.c (263 changes: 149 additions & 114 deletions)
@@ -1,5 +1,6 @@
#include "SentryCrashBinaryImageCache.h"
#include "SentryCrashDynamicLinker.h"
#include <dispatch/dispatch.h>
#include <mach-o/dyld.h>
#include <mach-o/dyld_images.h>
#include <pthread.h>
@@ -60,94 +61,121 @@ sentry_resetFuncForAddRemoveImage(void)
# define _will_add_image()
#endif // defined(SENTRY_TEST) || defined(SENTRY_TEST_CI) || defined(DEBUG)

typedef struct SentryCrashBinaryImageNode {
#include <stdatomic.h>

#define MAX_DYLD_IMAGES 4096

typedef struct {
_Atomic(uint32_t) ready; // 0 = not published, 1 = published
SentryCrashBinaryImage image;
bool available;
struct SentryCrashBinaryImageNode *next;
} SentryCrashBinaryImageNode;
} PublishedBinaryImage;

static SentryCrashBinaryImageNode rootNode = { 0 };
static SentryCrashBinaryImageNode *tailNode = NULL;
static pthread_mutex_t binaryImagesMutex = PTHREAD_MUTEX_INITIALIZER;
static PublishedBinaryImage g_images[MAX_DYLD_IMAGES];
static _Atomic(uint32_t) g_next_index = 0;
static _Atomic(uint32_t) g_overflowed = 0;
static _Atomic(uint32_t) g_started = 0;

static sentrycrashbic_cacheChangeCallback imageAddedCallback = NULL;
static sentrycrashbic_cacheChangeCallback imageRemovedCallback = NULL;
static _Atomic(sentrycrashbic_cacheChangeCallback) g_addedCallback = NULL;
static _Atomic(sentrycrashbic_cacheChangeCallback) g_removedCallback = NULL;

static void
binaryImageAdded(const struct mach_header *header, intptr_t slide)
add_dyld_image(const struct mach_header *mh)
{
pthread_mutex_lock(&binaryImagesMutex);
if (tailNode == NULL) {
pthread_mutex_unlock(&binaryImagesMutex);
// Don't add images if the cache is not started
if (!atomic_load_explicit(&g_started, memory_order_acquire)) {
return;
}
pthread_mutex_unlock(&binaryImagesMutex);

// Check dladdr first, before reserving a slot in the array.
// If we increment g_next_index before this check and dladdr fails,
// we'd create a "hole" with ready=0 that stops iteration.
Dl_info info;
if (!dladdr(header, &info) || info.dli_fname == NULL) {
if (!dladdr(mh, &info) || info.dli_fname == NULL) {
return;
}

SentryCrashBinaryImage binaryImage = { 0 };
if (!sentrycrashdl_getBinaryImageForHeader(
(const void *)header, info.dli_fname, &binaryImage, false)) {
// Test hook: called just before adding the image
_will_add_image();

uint32_t idx = atomic_fetch_add_explicit(&g_next_index, 1, memory_order_relaxed);

if (idx >= MAX_DYLD_IMAGES) {
atomic_store_explicit(&g_overflowed, 1, memory_order_relaxed);
return;
}

SentryCrashBinaryImageNode *newNode = malloc(sizeof(SentryCrashBinaryImageNode));
newNode->available = true;
newNode->image = binaryImage;
newNode->next = NULL;
_will_add_image();
pthread_mutex_lock(&binaryImagesMutex);
// Recheck tailNode as it could be null when
// stopped from another thread.
if (tailNode != NULL) {
tailNode->next = newNode;
tailNode = tailNode->next;
} else {
free(newNode);
newNode = NULL;
}
pthread_mutex_unlock(&binaryImagesMutex);
if (newNode && imageAddedCallback) {
imageAddedCallback(&newNode->image);
PublishedBinaryImage *entry = &g_images[idx];
sentrycrashdl_getBinaryImageForHeader(mh, info.dli_fname, &entry->image, false);

// Read callback BEFORE publishing to avoid race with registerAddedCallback.
// If callback is NULL here, the registering thread will see ready=1 and call it.
// If callback is non-NULL here, we call it and the registering thread will either
// not have started iterating yet, or will skip this image since it wasn't ready
// when it read g_next_index.
sentrycrashbic_cacheChangeCallback callback
= atomic_load_explicit(&g_addedCallback, memory_order_acquire);

// ---- Publish ----
atomic_store_explicit(&entry->ready, 1, memory_order_release);

if (callback != NULL) {
callback(&entry->image);
}
}

static void
binaryImageRemoved(const struct mach_header *header, intptr_t slide)
dyld_add_image_cb(const struct mach_header *mh, intptr_t slide)
{
SentryCrashBinaryImageNode *nextNode = &rootNode;
add_dyld_image(mh);
}

while (nextNode != NULL) {
if (nextNode->image.address == (uint64_t)header) {
nextNode->available = false;
if (imageRemovedCallback) {
imageRemovedCallback(&nextNode->image);
static void
dyld_remove_image_cb(const struct mach_header *mh, intptr_t slide)
{
sentrycrashbic_cacheChangeCallback callback
= atomic_load_explicit(&g_removedCallback, memory_order_acquire);

// Find the image in our cache by matching the header address
uint32_t count = atomic_load_explicit(&g_next_index, memory_order_acquire);
if (count > MAX_DYLD_IMAGES)
count = MAX_DYLD_IMAGES;

for (uint32_t i = 0; i < count; i++) {
PublishedBinaryImage *src = &g_images[i];
if (!atomic_load_explicit(&src->ready, memory_order_acquire)) {
continue;
}
if (src->image.address == (uintptr_t)mh) {
atomic_store_explicit(&src->ready, 0, memory_order_release);
if (callback) {
callback(&src->image);
return;
}
break;
}
nextNode = nextNode->next;
}
}

static void
dyld_tracker_start(void)
{
sentry_dyld_register_func_for_add_image(dyld_add_image_cb);
sentry_dyld_register_func_for_remove_image(dyld_remove_image_cb);
}

void
sentrycrashbic_iterateOverImages(sentrycrashbic_imageIteratorCallback callback, void *context)
{
/**
We can't use locks here because this is meant to be used during crashes,
where we can't use async unsafe functions. In order to avoid potential problems,
we choose an approach that doesn't remove nodes from the list.
*/
SentryCrashBinaryImageNode *nextNode = &rootNode;

// If tailNode is null it means the cache was stopped, therefore we end the iteration.
// This will minimize any race condition effect without the need for locks.
while (nextNode != NULL && tailNode != NULL) {
if (nextNode->available) {
callback(&nextNode->image, context);
uint32_t count = atomic_load_explicit(&g_next_index, memory_order_acquire);

if (count > MAX_DYLD_IMAGES)
count = MAX_DYLD_IMAGES;

for (uint32_t i = 0; i < count; i++) {
PublishedBinaryImage *src = &g_images[i];

if (atomic_load_explicit(&src->ready, memory_order_acquire)) {
callback(&src->image, context);
}
nextNode = nextNode->next;
}
}

@@ -159,7 +187,7 @@ sentrycrashbic_iterateOverImages(sentrycrashbic_imageIteratorCallback callback,
* @return true if dyld is not found in the loaded images and should be added to the cache,
* false if dyld is already present in the loaded images.
*/
bool
static bool
sentrycrashbic_shouldAddDyld(void)
{
// dyld is different from libdyld.dylib; the latter contains the public API
@@ -170,93 +198,100 @@ sentrycrashbic_shouldAddDyld(void)

// Since Apple no longer includes dyld in the images listed by `_dyld_image_count` and related
// functions, we manually add it to our cache.
SentryCrashBinaryImageNode *
sentrycrashbic_getDyldNode(void)
// Note: This bypasses add_dyld_image() because dladdr() does not resolve dyld, so we need
// to use sentrycrashdl_getBinaryImageForHeader() directly with a hardcoded filename.
static void
sentrycrashbic_addDyldNode(void)
{
const struct mach_header *header = sentryDyldHeader;

SentryCrashBinaryImage binaryImage = { 0 };
if (!sentrycrashdl_getBinaryImageForHeader((const void *)header, "dyld", &binaryImage, false)) {
return NULL;
uint32_t idx = atomic_fetch_add_explicit(&g_next_index, 1, memory_order_relaxed);
if (idx >= MAX_DYLD_IMAGES) {
atomic_store_explicit(&g_overflowed, 1, memory_order_relaxed);
return;
}

SentryCrashBinaryImageNode *newNode = malloc(sizeof(SentryCrashBinaryImageNode));
newNode->available = true;
newNode->image = binaryImage;
newNode->next = NULL;
PublishedBinaryImage *entry = &g_images[idx];
if (!sentrycrashdl_getBinaryImageForHeader(
(const void *)header, "dyld", &entry->image, false)) {
// Decrement because we couldn't add the image
atomic_fetch_sub_explicit(&g_next_index, 1, memory_order_relaxed);
return;
}

return newNode;
atomic_store_explicit(&entry->ready, 1, memory_order_release);
}

void
sentrycrashbic_startCache(void)
static void
sentrycrashbic_startCacheImpl(void)
{
pthread_mutex_lock(&binaryImagesMutex);
if (tailNode != NULL) {
// Already initialized
pthread_mutex_unlock(&binaryImagesMutex);
// Check if already started
uint32_t expected = 0;
if (!atomic_compare_exchange_strong_explicit(
&g_started, &expected, 1, memory_order_acq_rel, memory_order_relaxed)) {
return;
}

if (sentrycrashbic_shouldAddDyld()) {
sentrycrashdl_initialize();
SentryCrashBinaryImageNode *dyldNode = sentrycrashbic_getDyldNode();
tailNode = dyldNode;
rootNode.next = dyldNode;
} else {
tailNode = &rootNode;
rootNode.next = NULL;
sentrycrashbic_addDyldNode();
}
pthread_mutex_unlock(&binaryImagesMutex);

// During a call to _dyld_register_func_for_add_image() the callback func is called for every
// existing image
sentry_dyld_register_func_for_add_image(&binaryImageAdded);
sentry_dyld_register_func_for_remove_image(&binaryImageRemoved);
dyld_tracker_start();
}

void
sentrycrashbic_stopCache(void)
sentrycrashbic_startCache(void)
{
pthread_mutex_lock(&binaryImagesMutex);
if (tailNode == NULL) {
pthread_mutex_unlock(&binaryImagesMutex);
return;
}

SentryCrashBinaryImageNode *node = rootNode.next;
rootNode.next = NULL;
tailNode = NULL;

while (node != NULL) {
SentryCrashBinaryImageNode *nextNode = node->next;
free(node);
node = nextNode;
}

pthread_mutex_unlock(&binaryImagesMutex);
// During a call to _dyld_register_func_for_add_image() the callback function is called for
// every existing image.
// This must be done on a background thread to avoid blocking app launch due to the extensive
// use of locks in the image added callback. The main culprit is the calls to `dladdr`. The
// downside of doing this async is that if there is a crash very shortly after app launch, we
// might not have recorded all the load addresses of images yet. We think this is an acceptable
// tradeoff, since it's always possible to crash early in app launch before Sentry can capture
// the crash.
#if defined(SENTRY_TEST) || defined(SENTRY_TEST_CI)
sentrycrashbic_startCacheImpl();
#else
dispatch_async(
dispatch_get_global_queue(QOS_CLASS_DEFAULT, 0), ^{ sentrycrashbic_startCacheImpl(); });
#endif
}

static void
initialReportToCallback(SentryCrashBinaryImage *image, void *context)
void
sentrycrashbic_stopCache(void)
{
sentrycrashbic_cacheChangeCallback callback = (sentrycrashbic_cacheChangeCallback)context;
callback(image);
// The binary image cache does not allocate memory; it lives in a fixed-size, statically
// allocated array, so there is nothing to free here. Stopping is therefore mostly useful for
// the tests that rely on it. We should clean this up and make startCache use dispatch_once.
atomic_store_explicit(&g_started, 0, memory_order_release);
atomic_store_explicit(&g_next_index, 0, memory_order_release);
}

void
sentrycrashbic_registerAddedCallback(sentrycrashbic_cacheChangeCallback callback)
{
imageAddedCallback = callback;
if (callback) {
pthread_mutex_lock(&binaryImagesMutex);
sentrycrashbic_iterateOverImages(&initialReportToCallback, callback);
pthread_mutex_unlock(&binaryImagesMutex);
atomic_store_explicit(&g_addedCallback, callback, memory_order_release);

if (callback != NULL) {
// Call for all existing images already in the cache
uint32_t count = atomic_load_explicit(&g_next_index, memory_order_acquire);
if (count > MAX_DYLD_IMAGES)
count = MAX_DYLD_IMAGES;

for (uint32_t i = 0; i < count; i++) {
PublishedBinaryImage *src = &g_images[i];
if (!atomic_load_explicit(&src->ready, memory_order_acquire)) {
break;
}
callback(&src->image);
}
}
}

void
sentrycrashbic_registerRemovedCallback(sentrycrashbic_cacheChangeCallback callback)
{
imageRemovedCallback = callback;
atomic_store_explicit(&g_removedCallback, callback, memory_order_release);
}
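
The core of the new design is a fixed-size array whose slots are reserved with an atomic fetch-add, filled in, and only then made visible to readers through a release store to a per-entry ready flag; readers pair that with acquire loads, so they never observe a half-written image and never need a lock. The following is a minimal, self-contained sketch of that publish/iterate pattern, assuming nothing beyond C11 <stdatomic.h>; the names (Entry, publish, iterate) are illustrative and not part of the SDK.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define MAX_ENTRIES 8

typedef struct {
    _Atomic(uint32_t) ready; /* 0 = not published, 1 = published */
    char name[32];
} Entry;

static Entry g_entries[MAX_ENTRIES];
static _Atomic(uint32_t) g_next = 0;

/* Writer: reserve a slot first, fill it, then publish with a release store. */
static bool
publish(const char *name)
{
    uint32_t idx = atomic_fetch_add_explicit(&g_next, 1, memory_order_relaxed);
    if (idx >= MAX_ENTRIES) {
        return false; /* overflow: the slot is never marked ready */
    }
    strncpy(g_entries[idx].name, name, sizeof(g_entries[idx].name) - 1);
    atomic_store_explicit(&g_entries[idx].ready, 1, memory_order_release);
    return true;
}

/* Reader: acquire loads only, no locks, no allocation. */
static void
iterate(void (*callback)(const char *))
{
    uint32_t count = atomic_load_explicit(&g_next, memory_order_acquire);
    if (count > MAX_ENTRIES) {
        count = MAX_ENTRIES; /* g_next can exceed the capacity after an overflow */
    }
    for (uint32_t i = 0; i < count; i++) {
        if (atomic_load_explicit(&g_entries[i].ready, memory_order_acquire)) {
            callback(g_entries[i].name);
        }
    }
}

static void
print_name(const char *name)
{
    printf("%s\n", name);
}

int
main(void)
{
    publish("libsystem_c.dylib");
    publish("Foundation");
    iterate(print_name);
    return 0;
}

The release/acquire pairing on the ready flag is what keeps the reader side safe to run on the crash path: an entry is either fully visible or skipped, and holes left by failed or removed entries are simply never ready.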
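
From the consumer side, and assuming the public header declares the functions and callback types exactly as they are used in this diff (sentrycrashbic_cacheChangeCallback receiving a SentryCrashBinaryImage *, sentrycrashbic_imageIteratorCallback receiving an image pointer plus a context pointer, and an address field on SentryCrashBinaryImage), a hypothetical caller might look like this; onImageAdded and countImage are illustrative helpers, not SDK API.

#include "SentryCrashBinaryImageCache.h"
#include <stdio.h>

/* Hypothetical callback: invoked for images already cached and for images loaded later. */
static void
onImageAdded(SentryCrashBinaryImage *image)
{
    printf("image added at 0x%llx\n", (unsigned long long)image->address);
}

/* Hypothetical iterator callback: counts published images through the context pointer. */
static void
countImage(SentryCrashBinaryImage *image, void *context)
{
    (void)image;
    *(int *)context += 1;
}

void
exampleUsage(void)
{
    sentrycrashbic_startCache(); /* dyld callbacks are now registered on a background queue */
    sentrycrashbic_registerAddedCallback(onImageAdded);

    int imageCount = 0;
    sentrycrashbic_iterateOverImages(countImage, &imageCount); /* lock-free; also usable on the crash path */
    printf("%d images cached\n", imageCount);

    sentrycrashbic_stopCache();
}

Because startCache now dispatches registration to a background queue, images loaded in the very first moments after launch may not be cached yet; as the comment in registerAddedCallback describes, registering an added callback replays the images already published before delivering new ones.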