@@ -45,20 +45,20 @@ ReservedSpace::ReservedSpace() : _base(nullptr), _size(0), _noaccess_prefix(0),
     _alignment(0), _special(false), _fd_for_heap(-1), _executable(false) {
 }
 
-ReservedSpace::ReservedSpace(size_t size) : _fd_for_heap(-1) {
+ReservedSpace::ReservedSpace(size_t size, MemTag mem_tag) : _fd_for_heap(-1) {
   // Want to use large pages where possible. If the size is
   // not large page aligned the mapping will be a mix of
   // large and normal pages.
   size_t page_size = os::page_size_for_region_unaligned(size, 1);
   size_t alignment = os::vm_allocation_granularity();
-  initialize(size, alignment, page_size, nullptr, false);
+  initialize(size, alignment, page_size, nullptr, false, mem_tag);
 }
 
 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1) {
   // When a page size is given we don't want to mix large
   // and normal pages. If the size is not a multiple of the
   // page size it will be aligned up to achieve this.
-  size_t alignment = os::vm_allocation_granularity();;
+  size_t alignment = os::vm_allocation_granularity();
   if (preferred_page_size != os::vm_page_size()) {
     alignment = MAX2(preferred_page_size, alignment);
     size = align_up(size, alignment);
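A caller-side sketch of the new constructor (hypothetical usage, not part of this change; `mtGC` stands in for whatever `MemTag` value the caller owns):

```cpp
// Hypothetical caller: the NMT tag is supplied at reservation time, so
// the range is attributed correctly from the moment it exists. If 64*M
// is not large-page aligned, the mapping may mix large and normal
// pages, per the comment in the constructor above.
ReservedSpace rs(64 * M, mtGC);
```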
@@ -81,19 +81,19 @@ ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment, size_t p
 }
 
 // Helper method
-static char* attempt_map_or_reserve_memory_at(char* base, size_t size, int fd, bool executable) {
+static char* attempt_map_or_reserve_memory_at(char* base, size_t size, int fd, bool executable, MemTag mem_tag) {
   if (fd != -1) {
     return os::attempt_map_memory_to_file_at(base, size, fd);
   }
-  return os::attempt_reserve_memory_at(base, size, executable);
+  return os::attempt_reserve_memory_at(base, size, executable, mem_tag);
 }
 
 // Helper method
-static char* map_or_reserve_memory(size_t size, int fd, bool executable) {
+static char* map_or_reserve_memory(size_t size, int fd, bool executable, MemTag mem_tag) {
   if (fd != -1) {
     return os::map_memory_to_file(size, fd);
   }
-  return os::reserve_memory(size, executable);
+  return os::reserve_memory(size, executable, mem_tag);
 }
 
 // Helper method
@@ -154,7 +154,7 @@ static void log_on_large_pages_failure(char* req_addr, size_t bytes) {
 }
 
 static char* reserve_memory(char* requested_address, const size_t size,
-                            const size_t alignment, int fd, bool exec) {
+                            const size_t alignment, int fd, bool exec, MemTag mem_tag) {
   char* base;
   // If the memory was requested at a particular address, use
   // os::attempt_reserve_memory_at() to avoid mapping over something
@@ -163,12 +163,12 @@ static char* reserve_memory(char* requested_address, const size_t size,
     assert(is_aligned(requested_address, alignment),
            "Requested address " PTR_FORMAT " must be aligned to " SIZE_FORMAT,
            p2i(requested_address), alignment);
-    base = attempt_map_or_reserve_memory_at(requested_address, size, fd, exec);
+    base = attempt_map_or_reserve_memory_at(requested_address, size, fd, exec, mem_tag);
   } else {
     // Optimistically assume that the OS returns an aligned base pointer.
     // When reserving a large address range, most OSes seem to align to at
     // least 64K.
-    base = map_or_reserve_memory(size, fd, exec);
+    base = map_or_reserve_memory(size, fd, exec, mem_tag);
     // Check alignment constraints. This is only needed when there is
     // no requested address.
     if (!is_aligned(base, alignment)) {
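For context on the optimistic path above: when the base fails the alignment check, the usual remedy is to over-reserve and trim. A minimal sketch, assuming a POSIX-like OS where pieces of a reservation can be released independently (the file's real retry helpers are not shown in this hunk):

```cpp
// Sketch only: reserve with `alignment` bytes of headroom, keep an
// aligned window of `size` bytes, release the slack on both sides.
// Partial release is not legal on every OS, so the real code must be
// platform-aware.
char* raw = os::reserve_memory(size + alignment, exec, mem_tag);
if (raw != nullptr) {
  char* aligned_base = align_up(raw, alignment);
  size_t head = aligned_base - raw;  // slack before the aligned window
  size_t tail = alignment - head;    // slack after the aligned window
  if (head > 0) os::release_memory(raw, head);
  if (tail > 0) os::release_memory(aligned_base + size, tail);
}
```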
@@ -220,7 +220,8 @@ void ReservedSpace::reserve(size_t size,
                             size_t alignment,
                             size_t page_size,
                             char* requested_address,
-                            bool executable) {
+                            bool executable,
+                            MemTag mem_tag) {
   assert(is_aligned(size, alignment), "Size must be aligned to the requested alignment");
 
   // There are basically three different cases that we need to handle below:
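The matching declaration is not shown in this diff; presumably it gains the same trailing parameter, and a default such as `mtNone` would keep existing call sites compiling unchanged. Assumed shape:

```cpp
// Assumed header-side counterpart (the default value is a guess made
// for source compatibility; the actual header is not in this diff).
void reserve(size_t size,
             size_t alignment,
             size_t page_size,
             char* requested_address,
             bool executable,
             MemTag mem_tag = mtNone);
```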
@@ -235,7 +236,7 @@ void ReservedSpace::reserve(size_t size,
     // When there is a backing file directory for this space then whether
     // large pages are allocated is up to the filesystem of the backing file.
     // So UseLargePages is not taken into account for this reservation.
-    char* base = reserve_memory(requested_address, size, alignment, _fd_for_heap, executable);
+    char* base = reserve_memory(requested_address, size, alignment, _fd_for_heap, executable, mem_tag);
     if (base != nullptr) {
       initialize_members(base, size, alignment, os::vm_page_size(), true, executable);
     }
@@ -266,7 +267,7 @@ void ReservedSpace::reserve(size_t size,
   }
 
   // == Case 3 ==
-  char* base = reserve_memory(requested_address, size, alignment, -1, executable);
+  char* base = reserve_memory(requested_address, size, alignment, -1, executable, mem_tag);
   if (base != nullptr) {
     // Successful mapping.
     initialize_members(base, size, alignment, page_size, false, executable);
@@ -277,7 +278,8 @@ void ReservedSpace::initialize(size_t size,
                                size_t alignment,
                                size_t page_size,
                                char* requested_address,
-                               bool executable) {
+                               bool executable,
+                               MemTag mem_tag) {
   const size_t granularity = os::vm_allocation_granularity();
   assert((size & (granularity - 1)) == 0,
          "size not aligned to os::vm_allocation_granularity()");
@@ -298,7 +300,7 @@ void ReservedSpace::initialize(size_t size,
   alignment = MAX2(alignment, os::vm_page_size());
 
   // Reserve the memory.
-  reserve(size, alignment, page_size, requested_address, executable);
+  reserve(size, alignment, page_size, requested_address, executable, mem_tag);
 
   // Check that the requested address is used if given.
   if (failed_to_reserve_as_requested(_base, requested_address)) {
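Taken together, the hunks thread the tag through the whole reservation chain; every name below appears in this diff:

```cpp
// How mem_tag now flows, top to bottom:
//
//   ReservedSpace(size, mem_tag)
//     -> initialize(size, alignment, page_size, nullptr, false, mem_tag)
//       -> reserve(size, alignment, page_size, requested_address, executable, mem_tag)
//         -> reserve_memory(requested_address, size, alignment, fd, exec, mem_tag)
//           -> attempt_map_or_reserve_memory_at(...) or map_or_reserve_memory(...)
//             -> os::attempt_reserve_memory_at() / os::reserve_memory(),
//                where NMT records the tag at the point of reservation
```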
@@ -424,7 +426,7 @@ void ReservedHeapSpace::try_reserve_heap(size_t size,
                              p2i(requested_address),
                              size);
 
-  reserve(size, alignment, page_size, requested_address, false);
+  reserve(size, alignment, page_size, requested_address, false, mtJavaHeap);
 
   // Check alignment constraints.
   if (is_reserved() && !is_aligned(_base, _alignment)) {
@@ -610,7 +612,7 @@ void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t ali
     // Last, desperate try without any placement.
     if (_base == nullptr) {
       log_trace(gc, heap, coops)("Trying to allocate at address null heap of size " SIZE_FORMAT_X, size + noaccess_prefix);
-      initialize(size + noaccess_prefix, alignment, page_size, nullptr, false);
+      initialize(size + noaccess_prefix, alignment, page_size, nullptr, false, mtJavaHeap);
    }
  }
}
@@ -653,18 +655,14 @@ ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, size_t page_
     ShouldNotReachHere();
 #endif // _LP64
   } else {
-    initialize(size, alignment, page_size, nullptr, false);
+    initialize(size, alignment, page_size, nullptr, false, mtJavaHeap);
   }
 
   assert(markWord::encode_pointer_as_mark(_base).decode_pointer() == _base,
          "area must be distinguishable from marks for mark-sweep");
   assert(markWord::encode_pointer_as_mark(&_base[size]).decode_pointer() == &_base[size],
          "area must be distinguishable from marks for mark-sweep");
 
-  if (base() != nullptr) {
-    MemTracker::record_virtual_memory_tag((address)base(), mtJavaHeap);
-  }
-
   if (_fd_for_heap != -1) {
     ::close(_fd_for_heap);
   }
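The deleted block was the old two-step pattern: reserve first, then retroactively tell NMT the tag. Passing the tag into `initialize` drops the extra `MemTracker` call and avoids the brief window in which the range is accounted under a default tag. For contrast:

```cpp
// Old pattern (the lines removed above): reserve, then re-tag.
initialize(size, alignment, page_size, nullptr, false);
if (base() != nullptr) {
  MemTracker::record_virtual_memory_tag((address)base(), mtJavaHeap);
}

// New pattern: the tag travels with the reservation itself.
initialize(size, alignment, page_size, nullptr, false, mtJavaHeap);
```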
@@ -679,8 +677,7 @@ MemRegion ReservedHeapSpace::region() const {
 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                      size_t rs_align,
                                      size_t rs_page_size) : ReservedSpace() {
-  initialize(r_size, rs_align, rs_page_size, /*requested address*/ nullptr, /*executable*/ true);
-  MemTracker::record_virtual_memory_tag((address)base(), mtCode);
+  initialize(r_size, rs_align, rs_page_size, /*requested address*/ nullptr, /*executable*/ true, mtCode);
 }
 
 // VirtualSpace