
Commit 395c507

[release/7.0-staging] Zlib: Add some protections to the allocator used by zlib (#89532)
* Add guards to zlib memory allocator
* Fix whitespace
* Remove heap LFH flag, simplify publishing logic
* Remove dead code from cmake file
* Normalize cmake files
* Slight zlib PAL & allocator restructuring
  - Move allocator files under Compression.Native dir
  - Update pal_zlib includes, use calloc instead of malloc
  - Remove custom typedefs from zlib unix allocator
* Add env var for Windows
* Add env var for Unix
* Fix mono compilation
  - Mono defines HOST_WIN32, not CLR_CMAKE_HOST_WIN32
  - Follow same pattern from src\native\eventpipe\CMakeLists.txt

---------

Co-authored-by: Levi Broderick <levib@microsoft.com>
1 parent b075459 commit 395c507

6 files changed (+454, -5 lines)

src/native/external/zlib-intel.cmake

Lines changed: 4 additions & 0 deletions
@@ -20,6 +20,10 @@ set(ZLIB_SOURCES_BASE
     trees.c
     x86.c
     zutil.c
+    ../../libs/System.IO.Compression.Native/zlib_allocator_win.c
 )

+# enable custom zlib allocator
+add_definitions(-DMY_ZCALLOC)
+
 addprefix(ZLIB_SOURCES "${CMAKE_CURRENT_LIST_DIR}/zlib-intel" "${ZLIB_SOURCES_BASE}")
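
Defining MY_ZCALLOC is zlib's standard hook for swapping in a custom allocator: with that macro set, zutil.c does not compile its built-in zcalloc/zcfree, and the build must link in replacements with the same signatures, which is what the newly referenced zlib_allocator_win.c (and the unix counterpart added below) supplies. As a rough sketch of the contract only, assuming the signatures declared in zutil.h (placeholder bodies, not the guarded implementation from this commit):

    #include <stdlib.h>
    #include <external/zlib/zutil.h>

    /* zlib calls these for all of its internal allocations once MY_ZCALLOC is defined. */
    voidpf ZLIB_INTERNAL zcalloc(voidpf opaque, unsigned items, unsigned size)
    {
        (void)opaque;                  /* zlib passes the z_stream's opaque field */
        return calloc(items, size);    /* the commit's version adds cookies and overflow checks */
    }

    void ZLIB_INTERNAL zcfree(voidpf opaque, voidpf ptr)
    {
        (void)opaque;
        free(ptr);                     /* the commit's version validates cookies before freeing */
    }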

src/native/external/zlib.cmake

Lines changed: 8 additions & 0 deletions
@@ -29,4 +29,12 @@ set(ZLIB_SOURCES_BASE
     zutil.h
 )

+# enable custom zlib allocator
+add_definitions(-DMY_ZCALLOC)
+if(HOST_WIN32 OR CLR_CMAKE_TARGET_WIN32)
+    set(ZLIB_SOURCES_BASE ${ZLIB_SOURCES_BASE} ../../libs/System.IO.Compression.Native/zlib_allocator_win.c)
+else()
+    set(ZLIB_SOURCES_BASE ${ZLIB_SOURCES_BASE} ../../libs/System.IO.Compression.Native/zlib_allocator_unix.c)
+endif()
+
 addprefix(ZLIB_SOURCES "${CMAKE_CURRENT_LIST_DIR}/zlib" "${ZLIB_SOURCES_BASE}")

src/native/libs/System.IO.Compression.Native/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
@@ -101,6 +101,7 @@ else ()

     if (CLR_CMAKE_HOST_ARCH_I386 OR CLR_CMAKE_HOST_ARCH_AMD64)
         include(${CLR_SRC_NATIVE_DIR}/external/zlib-intel.cmake)
+        add_definitions(-DINTERNAL_ZLIB_INTEL)
     else ()
         include(${CLR_SRC_NATIVE_DIR}/external/zlib.cmake)
     endif ()

src/native/libs/System.IO.Compression.Native/pal_zlib.c

Lines changed: 6 additions & 5 deletions
@@ -9,7 +9,11 @@
 #ifdef _WIN32
 #define c_static_assert(e) static_assert((e),"")
 #endif
-#include <external/zlib/zlib.h>
+#ifdef INTERNAL_ZLIB_INTEL
+#include <external/zlib-intel/zlib.h>
+#else
+#include <external/zlib/zlib.h>
+#endif
 #else
 #include "pal_utilities.h"
 #include <zlib.h>

@@ -39,14 +43,11 @@ Initializes the PAL_ZStream by creating and setting its underlying z_stream.
 */
 static int32_t Init(PAL_ZStream* stream)
 {
-    z_stream* zStream = (z_stream*)malloc(sizeof(z_stream));
+    z_stream* zStream = (z_stream*)calloc(1, sizeof(z_stream));
     stream->internalState = zStream;

     if (zStream != NULL)
     {
-        zStream->zalloc = Z_NULL;
-        zStream->zfree = Z_NULL;
-        zStream->opaque = Z_NULL;
         return PAL_Z_OK;
     }
     else
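
Switching Init from malloc to calloc(1, sizeof(z_stream)) zero-fills the whole z_stream, so the explicit zalloc/zfree/opaque = Z_NULL assignments become redundant (Z_NULL is 0) and no field is left uninitialized. Leaving zalloc and zfree as Z_NULL is also what routes zlib's internal allocations through the MY_ZCALLOC replacement: inflateInit/deflateInit install the default zcalloc/zcfree when the stream's hooks are null, and that default is now the guarded allocator. A hypothetical caller-side illustration (not code from this commit):

    #include <string.h>
    #include <zlib.h>

    /* A zero-initialized z_stream (zalloc/zfree/opaque all Z_NULL) makes
     * inflateInit fall back to zcalloc/zcfree, which this commit replaces. */
    static int init_inflate(z_stream* zs)
    {
        memset(zs, 0, sizeof(*zs));   /* same effect as calloc(1, sizeof(z_stream)) in Init */
        return inflateInit(zs);       /* internal state now allocated via the guarded zcalloc */
    }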
src/native/libs/System.IO.Compression.Native/zlib_allocator_unix.c

Lines changed: 201 additions & 0 deletions
@@ -0,0 +1,201 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <external/zlib/zutil.h>
+
+/* A custom allocator for zlib that provides some defense-in-depth over standard malloc / free.
+ * (non-Windows version)
+ *
+ * 1. When zlib allocates fixed-length data structures for containing stream metadata, we zero
+ *    the memory before using it, preventing use of uninitialized memory within these structures.
+ *    Ideally we would do this for dynamically-sized buffers as well, but there is a measurable
+ *    perf impact to doing this. Zeroing fixed structures seems like a good trade-off here, since
+ *    these data structures contain most of the metadata used for managing the variable-length
+ *    dynamically allocated buffers.
+ *
+ * 2. We put a cookie both before and after any allocated memory, which allows us to detect local
+ *    buffer overruns on the call to free(). The cookie values are tied to the addresses where
+ *    the data is located in memory.
+ *
+ * 3. We trash the aforementioned cookie on free(), which allows us to detect double-free.
+ *
+ * If any of these checks fails, the application raises SIGABRT.
+ */
+
+static bool IsMitigationDisabled()
+{
+    enum _MitigationEnablementTristate
+    {
+        MITIGATION_NOT_YET_QUERIED = 0,
+        MITIGATION_DISABLED = 1,
+        MITIGATION_ENABLED = 2 // really, anything other than 0 or 1
+    };
+    static int s_fMitigationEnablementState = MITIGATION_NOT_YET_QUERIED;
+
+    // If already initialized, return immediately.
+    // We don't need a volatile read here since the publish is performed with release semantics.
+    if (s_fMitigationEnablementState != MITIGATION_NOT_YET_QUERIED)
+    {
+        return (s_fMitigationEnablementState == MITIGATION_DISABLED);
+    }
+
+    // Initialize the tri-state now.
+    // It's ok for multiple threads to do this simultaneously. Only one thread will win.
+    // Valid env var values to disable mitigation: "true" and "1"
+    // All other env var values (or error) leaves mitigation enabled.
+
+    char* pchEnvVar = getenv("DOTNET_SYSTEM_IO_COMPRESSION_DISABLEZLIBMITIGATIONS");
+    bool fMitigationDisabled = (pchEnvVar && (strcmp(pchEnvVar, "1") == 0 || strcmp(pchEnvVar, "true") == 0));
+
+    // We really don't care about the return value of the ICE operation. If another thread
+    // beat us to it, so be it. The recursive call will figure it out.
+    __sync_val_compare_and_swap(
+        /* destination: */ &s_fMitigationEnablementState,
+        /* comparand: */ MITIGATION_NOT_YET_QUERIED,
+        /* exchange: */ fMitigationDisabled ? MITIGATION_DISABLED : MITIGATION_ENABLED);
+    return IsMitigationDisabled();
+}
+
+#ifndef MEMORY_ALLOCATION_ALIGNMENT
+// malloc() returns an address suitably aligned for any built-in data type.
+// Historically, this has been twice the arch's natural word size.
+#ifdef HOST_64BIT
+#define MEMORY_ALLOCATION_ALIGNMENT 16
+#else
+#define MEMORY_ALLOCATION_ALIGNMENT 8
+#endif
+#endif
+
+typedef struct _DOTNET_ALLOC_COOKIE
+{
+    void* Address;
+    size_t Size;
+} DOTNET_ALLOC_COOKIE;
+
+static bool SafeAdd(size_t a, size_t b, size_t* sum)
+{
+    if (SIZE_MAX - a >= b) { *sum = a + b; return true; }
+    else { *sum = 0; return false; }
+}
+
+static bool SafeMult(size_t a, size_t b, size_t* product)
+{
+    if (SIZE_MAX / a >= b) { *product = a * b; return true; }
+    else { *product = 0; return false; }
+}
+
+static DOTNET_ALLOC_COOKIE ReadAllocCookieUnaligned(const void* pSrc)
+{
+    DOTNET_ALLOC_COOKIE vCookie;
+    memcpy(&vCookie, pSrc, sizeof(DOTNET_ALLOC_COOKIE));
+    return vCookie;
+}
+
+static void WriteAllocCookieUnaligned(void* pDest, DOTNET_ALLOC_COOKIE vCookie)
+{
+    memcpy(pDest, &vCookie, sizeof(DOTNET_ALLOC_COOKIE));
+}
+
+// Historically, the memory allocator always returns addresses aligned to some
+// particular boundary. We'll make that same guarantee here just in case somebody
+// depends on it.
+const size_t DOTNET_ALLOC_HEADER_COOKIE_SIZE_WITH_PADDING = (sizeof(DOTNET_ALLOC_COOKIE) + MEMORY_ALLOCATION_ALIGNMENT - 1) & ~((size_t)MEMORY_ALLOCATION_ALIGNMENT - 1);
+const size_t DOTNET_ALLOC_TRAILER_COOKIE_SIZE = sizeof(DOTNET_ALLOC_COOKIE);
+
+voidpf ZLIB_INTERNAL zcalloc(opaque, items, size)
+    voidpf opaque;
+    unsigned items;
+    unsigned size;
+{
+    (void)opaque; // unreferenced formal parameter
+
+    if (IsMitigationDisabled())
+    {
+        // fallback logic copied from zutil.c
+        return sizeof(uInt) > 2 ? (voidpf)malloc(items * size) :
+                                  (voidpf)calloc(items, size);
+    }
+
+    // If initializing a fixed-size structure, zero the memory.
+    bool fZeroMemory = (items == 1);
+
+    size_t cbRequested;
+    if (sizeof(items) + sizeof(size) <= sizeof(cbRequested))
+    {
+        // multiplication can't overflow; no need for safeint
+        cbRequested = (size_t)items * (size_t)size;
+    }
+    else
+    {
+        // multiplication can overflow; go through safeint
+        if (!SafeMult((size_t)items, (size_t)size, &cbRequested)) { return NULL; }
+    }
+
+    // Make sure the actual allocation has enough room for our frontside & backside cookies.
+    size_t cbActualAllocationSize;
+    if (!SafeAdd(cbRequested, DOTNET_ALLOC_HEADER_COOKIE_SIZE_WITH_PADDING + DOTNET_ALLOC_TRAILER_COOKIE_SIZE, &cbActualAllocationSize)) { return NULL; }
+
+    void* pAlloced = (fZeroMemory) ? calloc(1, cbActualAllocationSize) : malloc(cbActualAllocationSize);
+    if (pAlloced == NULL) { return NULL; } // OOM
+
+    DOTNET_ALLOC_COOKIE* pHeaderCookie = (DOTNET_ALLOC_COOKIE*)pAlloced;
+    uint8_t* pReturnToCaller = (uint8_t*)pAlloced + DOTNET_ALLOC_HEADER_COOKIE_SIZE_WITH_PADDING;
+    uint8_t* pTrailerCookie = pReturnToCaller + cbRequested;
+
+    // Write out the same cookie for the header & the trailer, then we're done.
+
+    DOTNET_ALLOC_COOKIE vCookie = { 0 };
+    vCookie.Address = pReturnToCaller;
+    vCookie.Size = cbRequested;
+    *pHeaderCookie = vCookie; // aligned
+    WriteAllocCookieUnaligned(pTrailerCookie, vCookie);
+
+    return pReturnToCaller;
+}
+
+static void zcfree_trash_cookie(void* pCookie)
+{
+    memset(pCookie, 0, sizeof(DOTNET_ALLOC_COOKIE));
+}
+
+void ZLIB_INTERNAL zcfree(opaque, ptr)
+    voidpf opaque;
+    voidpf ptr;
+{
+    (void)opaque; // unreferenced formal parameter
+
+    if (IsMitigationDisabled())
+    {
+        // fallback logic copied from zutil.c
+        free(ptr);
+        return;
+    }
+
+    if (ptr == NULL) { return; } // ok to free nullptr
+
+    // Check cookie at beginning
+
+    DOTNET_ALLOC_COOKIE* pHeaderCookie = (DOTNET_ALLOC_COOKIE*)((uint8_t*)ptr - DOTNET_ALLOC_HEADER_COOKIE_SIZE_WITH_PADDING);
+    if (pHeaderCookie->Address != ptr) { goto Fail; }
+    size_t cbRequested = pHeaderCookie->Size;
+
+    // Check cookie at end
+
+    uint8_t* pTrailerCookie = (uint8_t*)ptr + cbRequested;
+    DOTNET_ALLOC_COOKIE vTrailerCookie = ReadAllocCookieUnaligned(pTrailerCookie);
+    if (vTrailerCookie.Address != ptr) { goto Fail; }
+    if (vTrailerCookie.Size != cbRequested) { goto Fail; }
+
+    // Checks passed - now trash the cookies and free memory
+
+    zcfree_trash_cookie(pHeaderCookie);
+    zcfree_trash_cookie(pTrailerCookie);
+
+    free(pHeaderCookie);
+    return;
+
+Fail:
+    abort(); // cookie check failed
+}
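
Putting the pieces together: the guarded zcalloc reserves room for a header cookie (padded out to MEMORY_ALLOCATION_ALIGNMENT), the caller's cbRequested bytes, and an unaligned trailer cookie. Both cookies record the pointer handed back to zlib and the requested size, zcfree re-validates both before freeing, and the cookies are zeroed afterwards so a second free of the same pointer fails the header check. A hypothetical misuse, with the mitigation enabled (the default), to show what the checks catch; the sizes and writes below are illustrative only and not part of the commit:

    /* Overrunning the returned buffer clobbers the trailer cookie's Address
     * field, so the next zcfree fails its check and abort()s the process. */
    unsigned char* p = (unsigned char*)zcalloc(NULL, 1, 32);
    memset(p + 32, 0, sizeof(void*)); /* writes past the 32 requested bytes */
    zcfree(NULL, p);                  /* trailer cookie no longer matches -> abort() */

The mitigation is on by default; per IsMitigationDisabled above, setting the DOTNET_SYSTEM_IO_COMPRESSION_DISABLEZLIBMITIGATIONS environment variable to "1" or "true" before the process starts reverts zcalloc/zcfree to the plain malloc/calloc/free fallback copied from zutil.c.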
