// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
//
// Allocator and holders for double mapped executable memory
//
#pragma once

#include "utilcode.h"
#include "ex.h"

#include <minipal.h>

#ifndef DACCESS_COMPILE

//#define LOG_EXECUTABLE_ALLOCATOR_STATISTICS

// This class is responsible for allocation of all the executable memory in the runtime.
class ExecutableAllocator
{
public:

    enum CacheableMapping
    {
        AddToCache,
        DoNotAddToCache,
    };

private:

    // RX address range block descriptor
    struct BlockRX
    {
        // Next block in a linked list
        BlockRX* next;

        // Base address of the block
        void* baseRX;

        // Size of the block
        size_t size;

        // Offset of the block in the shared memory
        size_t offset;
    };

    // RW address range block descriptor
    struct BlockRW
    {
        // Next block in a linked list
        BlockRW* next;

        // Base address of the RW mapping of the block
        void* baseRW;

        // Base address of the RX mapping of the block
        void* baseRX;

        // Size of the block
        size_t size;

        // Usage reference count of the RW block. RW blocks can be reused
        // when multiple mappings overlap in the VA space at the same time
        // (even from multiple threads)
        size_t refCount;
    };
    typedef void (*FatalErrorHandler)(UINT errorCode, LPCWSTR pszMessage);

#ifdef LOG_EXECUTABLE_ALLOCATOR_STATISTICS
    static int64_t g_mapTimeSum;
    static int64_t g_mapTimeWithLockSum;
    static int64_t g_unmapTimeSum;
    static int64_t g_unmapTimeWithLockSum;
    static int64_t g_mapFindRXTimeSum;
    static int64_t g_mapCreateTimeSum;
    static int64_t g_releaseCount;
    static int64_t g_reserveCount;
    static int64_t g_MapRW_Calls;
    static int64_t g_MapRW_CallsWithCacheMiss;
    static int64_t g_MapRW_LinkedListWalkDepth;
    static int64_t g_LinkedListTotalDepth;
#endif

    // Instance of the allocator
    static ExecutableAllocator* g_instance;

    // Callback to the runtime to report fatal errors
    static FatalErrorHandler g_fatalErrorHandler;

#if USE_LAZY_PREFERRED_RANGE
    static BYTE* g_lazyPreferredRangeStart;
    // Next address to try to allocate for code in the lazy preferred region.
    static BYTE* g_lazyPreferredRangeHint;
#endif // USE_LAZY_PREFERRED_RANGE

    // For PAL, this region represents the area that is eagerly reserved on
    // startup where executable memory and static fields are preferably kept.
    // For Windows, this is the region that we lazily reserve from.
    static BYTE* g_preferredRangeMin;
    static BYTE* g_preferredRangeMax;

    // Caches the DOTNET_EnableWXORX setting
    static bool g_isWXorXEnabled;

    // Head of the linked list of all RX blocks that were allocated by this allocator
    BlockRX* m_pFirstBlockRX = NULL;

    // Head of the linked list of free RX blocks that were allocated by this allocator and then backed out
    BlockRX* m_pFirstFreeBlockRX = NULL;

    // Head of the linked list of currently mapped RW blocks
    BlockRW* m_pFirstBlockRW = NULL;

    // Handle of the double mapped memory mapper
    void* m_doubleMemoryMapperHandle = NULL;

    // Maximum size of executable memory this allocator can allocate
    size_t m_maxExecutableCodeSize;

    // First free offset in the underlying shared memory. It is not used
    // on platforms that don't use shared memory.
    size_t m_freeOffset = 0;

    // Uncomment this to gather information to better choose caching parameters
    //#define VARIABLE_SIZED_CACHEDMAPPING_SIZE

    // The last RW mappings are cached so that they can be reused for the next mapping
    // request if it goes into the same range.
    // This is handled as a 3 element cache with an LRU replacement policy.
#ifdef VARIABLE_SIZED_CACHEDMAPPING_SIZE
    // If variable sized mappings are enabled, make the cache physically big enough to cover all interesting sizes
    static int g_cachedMappingSize;
    BlockRW* m_cachedMapping[16] = { 0 };
#else
    BlockRW* m_cachedMapping[3] = { 0 };
#endif

    // Synchronization of the public allocator methods
    CRITSEC_COOKIE m_CriticalSection;
    // Update the currently cached mapping. If the passed in block is the same as the one
    // in the cache, it is kept cached. Otherwise the currently cached one is destroyed
    // and replaced by the passed in one.
    void UpdateCachedMapping(BlockRW* pBlock);

    // Remove the cached mapping at the given index (1-based indexing)
    void RemoveCachedMapping(size_t indexToRemove);

    // Find a cached mapping that overlaps pBlock and return its 1-based index; return 0 if there is none
    size_t FindOverlappingCachedMapping(BlockRX* pBlock);

    // Find an existing RW block that maps the whole specified range of RX memory.
    // Return NULL if no such block exists.
    void* FindRWBlock(void* baseRX, size_t size, CacheableMapping cacheMapping);

    // Add an RW block to the list of existing RW blocks
    bool AddRWBlock(void* baseRW, void* baseRX, size_t size, CacheableMapping cacheMapping);

    // Remove an RW block from the list of existing RW blocks and return the base
    // address and size the underlying memory was mapped at.
    // Return false if no existing RW block contains the passed in address.
    bool RemoveRWBlock(void* pRW, void** pUnmapAddress, size_t* pUnmapSize);

    // Find a free block with the closest size >= the requested size.
    // Return NULL if no such block exists.
    BlockRX* FindBestFreeBlock(size_t size);

    // Return the memory mapping granularity.
    static size_t Granularity();

    // Allocate a block of executable memory of the specified size.
    // It doesn't acquire the actual virtual memory, just the
    // range of the underlying shared memory.
    BlockRX* AllocateBlock(size_t size, bool* pIsFreeBlock);

    // Back out the block allocated by AllocateBlock in case of an
    // error.
    void BackoutBlock(BlockRX* pBlock, bool isFreeBlock);

    // Allocate a range of offsets in the underlying shared memory
    bool AllocateOffset(size_t* pOffset, size_t size);

    // Add an RX block to the linked list of existing blocks
    void AddRXBlock(BlockRX* pBlock);

    // Return true if double mapping is enabled.
    static bool IsDoubleMappingEnabled();

    // Initialize the allocator instance
    bool Initialize();

#ifdef LOG_EXECUTABLE_ALLOCATOR_STATISTICS
    static CRITSEC_COOKIE s_LoggerCriticalSection;

    struct LogEntry
    {
        const char* source;
        const char* function;
        int line;
        int count;
    };

    static LogEntry s_usageLog[256];
    static int s_logMaxIndex;
#endif
public:

#ifdef LOG_EXECUTABLE_ALLOCATOR_STATISTICS
    static void LogUsage(const char* source, int line, const char* function);
    static void DumpHolderUsage();
#endif

    // Return the ExecutableAllocator singleton instance
    static ExecutableAllocator* Instance();

    // Initialize the static members of the ExecutableAllocator and allocate
    // and initialize an instance of it.
    static HRESULT StaticInitialize(FatalErrorHandler fatalErrorHandler);

    // Destroy the allocator
    ~ExecutableAllocator();

    // Return true if W^X is enabled
    static bool IsWXORXEnabled();

    // Use this function to initialize g_lazyPreferredRangeHint during startup.
    // base is the runtime .dll base address, size is the runtime .dll virtual size.
    static void InitLazyPreferredRange(size_t base, size_t size, int randomPageOffset);

    // Use this function to reset g_lazyPreferredRangeHint after unloading code.
    static void ResetLazyPreferredRangeHint();

    // Use this function to initialize the preferred range of executable memory
    // from PAL.
    static void InitPreferredRange();

    // Return true if p is located near clr.dll, which allows us
    // to use rel32 IP-relative addressing modes.
    static bool IsPreferredExecutableRange(void* p);

    // Reserve the specified amount of virtual address space for executable mapping.
    void* Reserve(size_t size);

    // Reserve the specified amount of virtual address space for executable mapping.
    // The reserved range must lie between loAddress and hiAddress. If it is not
    // possible to reserve memory in such a range, the method returns NULL.
    void* ReserveWithinRange(size_t size, const void* loAddress, const void* hiAddress);

    // Reserve the specified amount of virtual address space for executable mapping
    // exactly at the given address.
    void* ReserveAt(void* baseAddressRX, size_t size);

    // Commit the specified range of memory. The memory can be committed as executable (RX)
    // or non-executable (RW) based on the passed in isExecutable flag. The non-executable
    // allocations are used for data structures that need to be close to the
    // executable code for memory addressing performance reasons.
    void* Commit(void* pStart, size_t size, bool isExecutable);

    // Release the executable memory block starting at the passed in address that was allocated
    // by one of the ReserveXXX methods.
    void Release(void* pRX);

    // Map the specified block of executable memory as RW
    void* MapRW(void* pRX, size_t size, CacheableMapping cacheMapping);

    // Unmap the RW mapping at the specified address
    void UnmapRW(void* pRW);
};
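
// A minimal usage sketch (illustrative only, not part of this header): reserving and
// committing executable memory through the singleton, then writing code bytes through
// an explicit MapRW/UnmapRW pair. It assumes StaticInitialize was already called during
// startup; the sizes and the codeBytes/codeByteCount names are hypothetical.
//
//   ExecutableAllocator* pAllocator = ExecutableAllocator::Instance();
//   void* pRX = pAllocator->Reserve(0x10000);
//   pAllocator->Commit(pRX, 0x1000, true /* isExecutable */);
//
//   // With W^X enabled, write through the RW mapping, never through pRX directly.
//   // (CacheableMapping selects whether the mapping may be kept in the small mapping cache.)
//   void* pRW = pAllocator->MapRW(pRX, 0x1000, ExecutableAllocator::AddToCache);
//   memcpy(pRW, codeBytes, codeByteCount);
//   pAllocator->UnmapRW(pRW);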
#define ExecutableWriterHolder ExecutableWriterHolderNoLog

// Holder class to map read-execute memory as read-write so that it can be modified without using a read-write-execute mapping.
// At the moment the implementation is a dummy one, returning the same address for both cases and expecting it to be read-write-execute.
// The class uses move semantics to ensure proper unmapping in case of re-assignment of the holder value.
template<typename T>
class ExecutableWriterHolder
{
    T* m_addressRX;
    T* m_addressRW;

    void Move(ExecutableWriterHolder& other)
    {
        m_addressRX = other.m_addressRX;
        m_addressRW = other.m_addressRW;
        other.m_addressRX = NULL;
        other.m_addressRW = NULL;
    }

    void Unmap()
    {
#if defined(HOST_APPLE) && defined(HOST_ARM64) && !defined(DACCESS_COMPILE)
        if (m_addressRX != NULL)
        {
            PAL_JitWriteProtect(false);
        }
#else
        if (m_addressRX != m_addressRW)
        {
            ExecutableAllocator::Instance()->UnmapRW((void*)m_addressRW);
        }
#endif
    }

public:
    ExecutableWriterHolder(const ExecutableWriterHolder& other) = delete;
    ExecutableWriterHolder& operator=(const ExecutableWriterHolder& other) = delete;

    ExecutableWriterHolder(ExecutableWriterHolder&& other)
    {
        Move(other);
    }

    ExecutableWriterHolder& operator=(ExecutableWriterHolder&& other)
    {
        Unmap();
        Move(other);
        return *this;
    }

    ExecutableWriterHolder() : m_addressRX(nullptr), m_addressRW(nullptr)
    {
    }

    ExecutableWriterHolder(T* addressRX, size_t size, ExecutableAllocator::CacheableMapping cacheMapping = ExecutableAllocator::AddToCache)
    {
        m_addressRX = addressRX;
#if defined(HOST_APPLE) && defined(HOST_ARM64)
        m_addressRW = addressRX;
        PAL_JitWriteProtect(true);
#else
        m_addressRW = (T*)ExecutableAllocator::Instance()->MapRW((void*)addressRX, size, cacheMapping);
#endif
    }

    ~ExecutableWriterHolder()
    {
        Unmap();
    }

    // Get the writeable address
    inline T* GetRW() const
    {
        return m_addressRW;
    }

    void AssignExecutableWriterHolder(T* addressRX, size_t size)
    {
        *this = ExecutableWriterHolder(addressRX, size);
    }
};
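
// A minimal usage sketch (illustrative only): patching a structure that lives in
// read-execute memory. MyStub and its m_target field are hypothetical stand-ins
// for a real runtime data structure.
//
//   void SetStubTarget(MyStub* pStubRX, void* newTarget)
//   {
//       ExecutableWriterHolder<MyStub> stubWriterHolder(pStubRX, sizeof(MyStub));
//       stubWriterHolder.GetRW()->m_target = newTarget;
//   }   // the holder's destructor unmaps the RW mapping here
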
#ifdef LOG_EXECUTABLE_ALLOCATOR_STATISTICS
#undef ExecutableWriterHolder
#ifdef HOST_UNIX
#define ExecutableWriterHolder ExecutableAllocator::LogUsage(__FILE__, __LINE__, __PRETTY_FUNCTION__); ExecutableWriterHolderNoLog
#define AssignExecutableWriterHolder(addressRX, size) AssignExecutableWriterHolder(addressRX, size); ExecutableAllocator::LogUsage(__FILE__, __LINE__, __PRETTY_FUNCTION__);
#else
#define ExecutableWriterHolder ExecutableAllocator::LogUsage(__FILE__, __LINE__, __FUNCTION__); ExecutableWriterHolderNoLog
#define AssignExecutableWriterHolder(addressRX, size) AssignExecutableWriterHolder(addressRX, size); ExecutableAllocator::LogUsage(__FILE__, __LINE__, __FUNCTION__);
#endif
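// With the statistics enabled, a declaration such as (MyStub being a hypothetical type)
//   ExecutableWriterHolder<MyStub> stubWriterHolder(pStubRX, sizeof(MyStub));
// expands to a usage-logging call followed by construction of the real holder:
//   ExecutableAllocator::LogUsage(__FILE__, __LINE__, __PRETTY_FUNCTION__); ExecutableWriterHolderNoLog<MyStub> stubWriterHolder(pStubRX, sizeof(MyStub));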
#else
#define ExecutableWriterHolder ExecutableWriterHolderNoLog
#endif
#endif // !DACCESS_COMPILE