|
| 1 | +// Licensed to the .NET Foundation under one or more agreements. |
| 2 | +// The .NET Foundation licenses this file to you under the MIT license. |
| 3 | + |
| 4 | +#include <stdbool.h> |
| 5 | +#include <stdint.h> |
| 6 | +#include <external/zlib/zutil.h> |
| 7 | + |
| 8 | +/* A custom allocator for zlib that provides some defense-in-depth over standard malloc / free. |
| 9 | + * (non-Windows version) |
| 10 | + * |
| 11 | + * 1. When zlib allocates fixed-length data structures for containing stream metadata, we zero |
| 12 | + * the memory before using it, preventing use of uninitialized memory within these structures. |
| 13 | + * Ideally we would do this for dynamically-sized buffers as well, but there is a measurable |
| 14 | + * perf impact to doing this. Zeroing fixed structures seems like a good trade-off here, since |
| 15 | + * these data structures contain most of the metadata used for managing the variable-length |
| 16 | + * dynamically allocated buffers. |
| 17 | + * |
| 18 | + * 2. We put a cookie both before and after any allocated memory, which allows us to detect local |
| 19 | + * buffer overruns on the call to free(). The cookie values are tied to the addresses where |
| 20 | + * the data is located in memory. |
| 21 | + * |
| 22 | + * 3. We trash the aforementioned cookie on free(), which allows us to detect double-free. |
| 23 | + * |
| 24 | + * If any of these checks fails, the application raises SIGABRT. |
| 25 | + */ |
| 26 | + |
/* Returns true when the defense-in-depth allocator mitigations have been
 * disabled via the DOTNET_SYSTEM_IO_COMPRESSION_DISABLEZLIBMITIGATIONS
 * environment variable (value "1" or "true"). The answer is computed once
 * and cached in a process-wide tri-state; all other values (or a missing
 * variable) leave the mitigation enabled. */
static bool IsMitigationDisabled(void)
{
    // No leading underscore: identifiers beginning with '_' + uppercase are
    // reserved for the implementation.
    enum MitigationEnablementTristate
    {
        MITIGATION_NOT_YET_QUERIED = 0,
        MITIGATION_DISABLED = 1,
        MITIGATION_ENABLED = 2 // really, anything other than 0 or 1
    };
    static int s_fMitigationEnablementState = MITIGATION_NOT_YET_QUERIED;

    // If already initialized, return immediately.
    // We don't need a volatile read here since the publish is performed with release semantics.
    if (s_fMitigationEnablementState != MITIGATION_NOT_YET_QUERIED)
    {
        return (s_fMitigationEnablementState == MITIGATION_DISABLED);
    }

    // Initialize the tri-state now.
    // It's ok for multiple threads to do this simultaneously. Only one thread will win.
    // Valid env var values to disable mitigation: "true" and "1"
    // All other env var values (or error) leaves mitigation enabled.

    char* pchEnvVar = getenv("DOTNET_SYSTEM_IO_COMPRESSION_DISABLEZLIBMITIGATIONS");
    bool fMitigationDisabled = (pchEnvVar && (strcmp(pchEnvVar, "1") == 0 || strcmp(pchEnvVar, "true") == 0));

    // Publish with a full barrier. If another thread beat us to the publish,
    // honor the value it installed (the CAS returns the prior value), instead
    // of recursing to re-read the static as the previous implementation did.
    int prior = __sync_val_compare_and_swap(
        /* destination: */ &s_fMitigationEnablementState,
        /* comparand: */ MITIGATION_NOT_YET_QUERIED,
        /* exchange: */ fMitigationDisabled ? MITIGATION_DISABLED : MITIGATION_ENABLED);
    if (prior != MITIGATION_NOT_YET_QUERIED)
    {
        return (prior == MITIGATION_DISABLED);
    }
    return fMitigationDisabled;
}
| 60 | + |
#ifndef MEMORY_ALLOCATION_ALIGNMENT
// malloc() returns an address suitably aligned for any built-in data type.
// Historically, this has been twice the arch's natural word size.
#ifdef HOST_64BIT
#define MEMORY_ALLOCATION_ALIGNMENT 16
#else
#define MEMORY_ALLOCATION_ALIGNMENT 8
#endif
#endif

// Guard record written immediately before and after every allocation handed
// out by zcalloc. Address holds the pointer returned to the caller and Size
// holds the caller-requested byte count; zcfree re-checks both to detect
// buffer overruns and double-free (see file header, items 2 and 3).
typedef struct _DOTNET_ALLOC_COOKIE
{
    void* Address;
    size_t Size;
} DOTNET_ALLOC_COOKIE;
| 76 | + |
// Overflow-checked addition: stores a + b into *sum and returns true, or
// stores 0 and returns false when the sum would wrap around SIZE_MAX.
static bool SafeAdd(size_t a, size_t b, size_t* sum)
{
    // a + b overflows exactly when b exceeds the headroom above a.
    if (b > SIZE_MAX - a)
    {
        *sum = 0;
        return false;
    }

    *sum = a + b;
    return true;
}
| 82 | + |
// Overflow-checked multiplication: stores a * b into *product and returns
// true, or stores 0 and returns false when the product would exceed SIZE_MAX.
static bool SafeMult(size_t a, size_t b, size_t* product)
{
    // a == 0 must be special-cased: the overflow test below divides by a, and
    // division by zero is undefined behavior. 0 * b can never overflow.
    if (a == 0) { *product = 0; return true; }

    if (SIZE_MAX / a >= b) { *product = a * b; return true; }
    else { *product = 0; return false; }
}
| 88 | + |
| 89 | +static DOTNET_ALLOC_COOKIE ReadAllocCookieUnaligned(const void* pSrc) |
| 90 | +{ |
| 91 | + DOTNET_ALLOC_COOKIE vCookie; |
| 92 | + memcpy(&vCookie, pSrc, sizeof(DOTNET_ALLOC_COOKIE)); |
| 93 | + return vCookie; |
| 94 | +} |
| 95 | + |
| 96 | +static void WriteAllocCookieUnaligned(void* pDest, DOTNET_ALLOC_COOKIE vCookie) |
| 97 | +{ |
| 98 | + memcpy(pDest, &vCookie, sizeof(DOTNET_ALLOC_COOKIE)); |
| 99 | +} |
| 100 | + |
// Historically, the memory allocator always returns addresses aligned to some
// particular boundary. We'll make that same guarantee here just in case somebody
// depends on it.
// Header cookie size rounded up to the next multiple of the alignment boundary
// so the pointer returned to the caller stays suitably aligned. (The mask trick
// requires MEMORY_ALLOCATION_ALIGNMENT to be a power of two, which it is.)
const size_t DOTNET_ALLOC_HEADER_COOKIE_SIZE_WITH_PADDING = (sizeof(DOTNET_ALLOC_COOKIE) + MEMORY_ALLOCATION_ALIGNMENT - 1) & ~((size_t)MEMORY_ALLOCATION_ALIGNMENT - 1);
// The trailer cookie sits immediately after the caller's data and may be
// unaligned, so it needs no padding.
const size_t DOTNET_ALLOC_TRAILER_COOKIE_SIZE = sizeof(DOTNET_ALLOC_COOKIE);
| 106 | + |
// zlib allocation hook. Allocates items * size bytes for zlib surrounded by
// guard cookies that zcfree later validates. Resulting layout:
//   [header cookie + padding][caller data: cbRequested bytes][trailer cookie]
// Returns a pointer to the caller-data region, aligned to
// MEMORY_ALLOCATION_ALIGNMENT; returns NULL on arithmetic overflow or OOM.
// (K&R-style definition to match zlib's own declaration in zutil.h.)
voidpf ZLIB_INTERNAL zcalloc(opaque, items, size)
    voidpf opaque;
    unsigned items;
    unsigned size;
{
    (void)opaque; // unreferenced formal parameter

    if (IsMitigationDisabled())
    {
        // fallback logic copied from zutil.c
        return sizeof(uInt) > 2 ? (voidpf)malloc(items * size) :
                              (voidpf)calloc(items, size);
    }

    // If initializing a fixed-size structure, zero the memory.
    // zlib allocates its fixed-length stream-state structures with items == 1;
    // variable-length buffers are left uninitialized for perf (see file header).
    bool fZeroMemory = (items == 1);

    size_t cbRequested;
    if (sizeof(items) + sizeof(size) <= sizeof(cbRequested))
    {
        // multiplication can't overflow; no need for safeint
        cbRequested = (size_t)items * (size_t)size;
    }
    else
    {
        // multiplication can overflow; go through safeint
        if (!SafeMult((size_t)items, (size_t)size, &cbRequested)) { return NULL; }
    }

    // Make sure the actual allocation has enough room for our frontside & backside cookies.
    size_t cbActualAllocationSize;
    if (!SafeAdd(cbRequested, DOTNET_ALLOC_HEADER_COOKIE_SIZE_WITH_PADDING + DOTNET_ALLOC_TRAILER_COOKIE_SIZE, &cbActualAllocationSize)) { return NULL; }

    void* pAlloced = (fZeroMemory) ? calloc(1, cbActualAllocationSize) : malloc(cbActualAllocationSize);
    if (pAlloced == NULL) { return NULL; } // OOM

    // Carve up the allocation: the header cookie sits at the very start (which
    // malloc guarantees is aligned), the caller's data follows the padded
    // header, and the trailer lands right after the caller's data (unaligned).
    DOTNET_ALLOC_COOKIE* pHeaderCookie = (DOTNET_ALLOC_COOKIE*)pAlloced;
    uint8_t* pReturnToCaller = (uint8_t*)pAlloced + DOTNET_ALLOC_HEADER_COOKIE_SIZE_WITH_PADDING;
    uint8_t* pTrailerCookie = pReturnToCaller + cbRequested;

    // Write out the same cookie for the header & the trailer, then we're done.
    // The cookie is tied to the caller-visible address and requested size so
    // zcfree can detect overruns and double-free.

    DOTNET_ALLOC_COOKIE vCookie = { 0 };
    vCookie.Address = pReturnToCaller;
    vCookie.Size = cbRequested;
    *pHeaderCookie = vCookie; // aligned
    WriteAllocCookieUnaligned(pTrailerCookie, vCookie);

    return pReturnToCaller;
}
| 157 | + |
| 158 | +static void zcfree_trash_cookie(void* pCookie) |
| 159 | +{ |
| 160 | + memset(pCookie, 0, sizeof(DOTNET_ALLOC_COOKIE)); |
| 161 | +} |
| 162 | + |
// zlib free hook. Validates the header and trailer cookies written by zcalloc,
// trashes them, and frees the underlying allocation. Calls abort() (SIGABRT)
// if either cookie check fails, which indicates a buffer overrun or a
// double-free. (K&R-style definition to match zlib's declaration in zutil.h.)
void ZLIB_INTERNAL zcfree(opaque, ptr)
    voidpf opaque;
    voidpf ptr;
{
    (void)opaque; // unreferenced formal parameter

    if (IsMitigationDisabled())
    {
        // fallback logic copied from zutil.c
        free(ptr);
        return;
    }

    if (ptr == NULL) { return; } // ok to free nullptr

    // Check cookie at beginning
    // The real allocation starts DOTNET_ALLOC_HEADER_COOKIE_SIZE_WITH_PADDING
    // bytes before the caller-visible pointer; the header cookie sits at that
    // start and is aligned, so it can be dereferenced directly.

    DOTNET_ALLOC_COOKIE* pHeaderCookie = (DOTNET_ALLOC_COOKIE*)((uint8_t*)ptr - DOTNET_ALLOC_HEADER_COOKIE_SIZE_WITH_PADDING);
    if (pHeaderCookie->Address != ptr) { goto Fail; }
    size_t cbRequested = pHeaderCookie->Size;

    // Check cookie at end
    // The trailer sits immediately after the caller's cbRequested bytes; it may
    // be unaligned, so it is read via memcpy.

    uint8_t* pTrailerCookie = (uint8_t*)ptr + cbRequested;
    DOTNET_ALLOC_COOKIE vTrailerCookie = ReadAllocCookieUnaligned(pTrailerCookie);
    if (vTrailerCookie.Address != ptr) { goto Fail; }
    if (vTrailerCookie.Size != cbRequested) { goto Fail; }

    // Checks passed - now trash the cookies and free memory
    // Trashing makes a second free() of this same pointer fail the checks above.

    zcfree_trash_cookie(pHeaderCookie);
    zcfree_trash_cookie(pTrailerCookie);

    free(pHeaderCookie); // free the original allocation, not the caller-visible pointer
    return;

Fail:
    abort(); // cookie check failed
}
0 commit comments