Skip to content

Commit 7513b13

Browse files
author
Gerard Ziemski
committed
8328944: NMT reports "unknown" memory
Reviewed-by: jsjolen, coleenp
1 parent 691e692 commit 7513b13

File tree

15 files changed

+49
-69
lines changed

15 files changed

+49
-69
lines changed

src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
2+
* Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
33
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
44
*
55
* This code is free software; you can redistribute it and/or modify it
@@ -79,7 +79,7 @@ ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity)
7979
_initialized(false) {
8080

8181
// Reserve address space for backing memory
82-
_base = (uintptr_t)os::reserve_memory(max_capacity);
82+
_base = (uintptr_t)os::reserve_memory(max_capacity, false, mtJavaHeap);
8383
if (_base == 0) {
8484
// Failed
8585
ZInitialize::error("Failed to reserve address space for backing memory");

src/hotspot/os/linux/os_linux.cpp

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -4582,23 +4582,21 @@ static void workaround_expand_exec_shield_cs_limit() {
45824582
*/
45834583
char* hint = (char*)(os::Linux::initial_thread_stack_bottom() -
45844584
(StackOverflow::stack_guard_zone_size() + page_size));
4585-
char* codebuf = os::attempt_reserve_memory_at(hint, page_size);
4585+
char* codebuf = os::attempt_reserve_memory_at(hint, page_size, false, mtThread);
45864586

45874587
if (codebuf == nullptr) {
45884588
// JDK-8197429: There may be a stack gap of one megabyte between
45894589
// the limit of the stack and the nearest memory region: this is a
45904590
// Linux kernel workaround for CVE-2017-1000364. If we failed to
45914591
// map our codebuf, try again at an address one megabyte lower.
45924592
hint -= 1 * M;
4593-
codebuf = os::attempt_reserve_memory_at(hint, page_size);
4593+
codebuf = os::attempt_reserve_memory_at(hint, page_size, false, mtThread);
45944594
}
45954595

45964596
if ((codebuf == nullptr) || (!os::commit_memory(codebuf, page_size, true))) {
45974597
return; // No matter, we tried, best effort.
45984598
}
45994599

4600-
MemTracker::record_virtual_memory_tag((address)codebuf, mtInternal);
4601-
46024600
log_info(os)("[CS limit NX emulation work-around, exec code at: %p]", codebuf);
46034601

46044602
// Some code to exec: the 'ret' instruction

src/hotspot/share/cds/metaspaceShared.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -281,7 +281,7 @@ void MetaspaceShared::initialize_for_static_dump() {
281281
SharedBaseAddress = (size_t)_requested_base_address;
282282

283283
size_t symbol_rs_size = LP64_ONLY(3 * G) NOT_LP64(128 * M);
284-
_symbol_rs = ReservedSpace(symbol_rs_size);
284+
_symbol_rs = ReservedSpace(symbol_rs_size, mtClassShared);
285285
if (!_symbol_rs.is_reserved()) {
286286
log_error(cds)("Unable to reserve memory for symbols: " SIZE_FORMAT " bytes.", symbol_rs_size);
287287
MetaspaceShared::unrecoverable_writing_error();

src/hotspot/share/gc/parallel/objectStartArray.cpp

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -47,11 +47,10 @@ void ObjectStartArray::initialize(MemRegion reserved_region) {
4747

4848
// Do not use large-pages for the backing store. The one large page region
4949
// will be used for the heap proper.
50-
ReservedSpace backing_store(bytes_to_reserve);
50+
ReservedSpace backing_store(bytes_to_reserve, mtGC);
5151
if (!backing_store.is_reserved()) {
5252
vm_exit_during_initialization("Could not reserve space for ObjectStartArray");
5353
}
54-
MemTracker::record_virtual_memory_tag(backing_store.base(), mtGC);
5554

5655
// We do not commit any memory initially
5756
_virtual_space.initialize(backing_store);

src/hotspot/share/gc/serial/serialBlockOffsetTable.cpp

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -37,13 +37,11 @@ SerialBlockOffsetTable::SerialBlockOffsetTable(MemRegion reserved,
3737
size_t init_word_size):
3838
_reserved(reserved) {
3939
size_t size = compute_size(reserved.word_size());
40-
ReservedSpace rs(size);
40+
ReservedSpace rs(size, mtGC);
4141
if (!rs.is_reserved()) {
4242
vm_exit_during_initialization("Could not reserve enough space for heap offset array");
4343
}
4444

45-
MemTracker::record_virtual_memory_tag((address)rs.base(), mtGC);
46-
4745
if (!_vs.initialize(rs, 0)) {
4846
vm_exit_during_initialization("Could not reserve enough space for heap offset array");
4947
}

src/hotspot/share/memory/heap.cpp

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -227,13 +227,11 @@ bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_s
227227
const size_t committed_segments_size = align_to_page_size(_number_of_committed_segments);
228228

229229
// reserve space for _segmap
230-
ReservedSpace seg_rs(reserved_segments_size);
230+
ReservedSpace seg_rs(reserved_segments_size, mtCode);
231231
if (!_segmap.initialize(seg_rs, committed_segments_size)) {
232232
return false;
233233
}
234234

235-
MemTracker::record_virtual_memory_tag((address)_segmap.low_boundary(), mtCode);
236-
237235
assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "could not commit enough space for segment map");
238236
assert(_segmap.reserved_size() >= (size_t) _number_of_reserved_segments , "could not reserve enough space for segment map");
239237
assert(_segmap.reserved_size() >= _segmap.committed_size() , "just checking");

src/hotspot/share/memory/virtualspace.cpp

Lines changed: 21 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -45,20 +45,20 @@ ReservedSpace::ReservedSpace() : _base(nullptr), _size(0), _noaccess_prefix(0),
4545
_alignment(0), _special(false), _fd_for_heap(-1), _executable(false) {
4646
}
4747

48-
ReservedSpace::ReservedSpace(size_t size) : _fd_for_heap(-1) {
48+
ReservedSpace::ReservedSpace(size_t size, MemTag mem_tag) : _fd_for_heap(-1) {
4949
// Want to use large pages where possible. If the size is
5050
// not large page aligned the mapping will be a mix of
5151
// large and normal pages.
5252
size_t page_size = os::page_size_for_region_unaligned(size, 1);
5353
size_t alignment = os::vm_allocation_granularity();
54-
initialize(size, alignment, page_size, nullptr, false);
54+
initialize(size, alignment, page_size, nullptr, false, mem_tag);
5555
}
5656

5757
ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1) {
5858
// When a page size is given we don't want to mix large
5959
// and normal pages. If the size is not a multiple of the
6060
// page size it will be aligned up to achieve this.
61-
size_t alignment = os::vm_allocation_granularity();;
61+
size_t alignment = os::vm_allocation_granularity();
6262
if (preferred_page_size != os::vm_page_size()) {
6363
alignment = MAX2(preferred_page_size, alignment);
6464
size = align_up(size, alignment);
@@ -81,19 +81,19 @@ ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment, size_t p
8181
}
8282

8383
// Helper method
84-
static char* attempt_map_or_reserve_memory_at(char* base, size_t size, int fd, bool executable) {
84+
static char* attempt_map_or_reserve_memory_at(char* base, size_t size, int fd, bool executable, MemTag mem_tag) {
8585
if (fd != -1) {
8686
return os::attempt_map_memory_to_file_at(base, size, fd);
8787
}
88-
return os::attempt_reserve_memory_at(base, size, executable);
88+
return os::attempt_reserve_memory_at(base, size, executable, mem_tag);
8989
}
9090

9191
// Helper method
92-
static char* map_or_reserve_memory(size_t size, int fd, bool executable) {
92+
static char* map_or_reserve_memory(size_t size, int fd, bool executable, MemTag mem_tag) {
9393
if (fd != -1) {
9494
return os::map_memory_to_file(size, fd);
9595
}
96-
return os::reserve_memory(size, executable);
96+
return os::reserve_memory(size, executable, mem_tag);
9797
}
9898

9999
// Helper method
@@ -154,7 +154,7 @@ static void log_on_large_pages_failure(char* req_addr, size_t bytes) {
154154
}
155155

156156
static char* reserve_memory(char* requested_address, const size_t size,
157-
const size_t alignment, int fd, bool exec) {
157+
const size_t alignment, int fd, bool exec, MemTag mem_tag) {
158158
char* base;
159159
// If the memory was requested at a particular address, use
160160
// os::attempt_reserve_memory_at() to avoid mapping over something
@@ -163,12 +163,12 @@ static char* reserve_memory(char* requested_address, const size_t size,
163163
assert(is_aligned(requested_address, alignment),
164164
"Requested address " PTR_FORMAT " must be aligned to " SIZE_FORMAT,
165165
p2i(requested_address), alignment);
166-
base = attempt_map_or_reserve_memory_at(requested_address, size, fd, exec);
166+
base = attempt_map_or_reserve_memory_at(requested_address, size, fd, exec, mem_tag);
167167
} else {
168168
// Optimistically assume that the OS returns an aligned base pointer.
169169
// When reserving a large address range, most OSes seem to align to at
170170
// least 64K.
171-
base = map_or_reserve_memory(size, fd, exec);
171+
base = map_or_reserve_memory(size, fd, exec, mem_tag);
172172
// Check alignment constraints. This is only needed when there is
173173
// no requested address.
174174
if (!is_aligned(base, alignment)) {
@@ -220,7 +220,8 @@ void ReservedSpace::reserve(size_t size,
220220
size_t alignment,
221221
size_t page_size,
222222
char* requested_address,
223-
bool executable) {
223+
bool executable,
224+
MemTag mem_tag) {
224225
assert(is_aligned(size, alignment), "Size must be aligned to the requested alignment");
225226

226227
// There are basically three different cases that we need to handle below:
@@ -235,7 +236,7 @@ void ReservedSpace::reserve(size_t size,
235236
// When there is a backing file directory for this space then whether
236237
// large pages are allocated is up to the filesystem of the backing file.
237238
// So UseLargePages is not taken into account for this reservation.
238-
char* base = reserve_memory(requested_address, size, alignment, _fd_for_heap, executable);
239+
char* base = reserve_memory(requested_address, size, alignment, _fd_for_heap, executable, mem_tag);
239240
if (base != nullptr) {
240241
initialize_members(base, size, alignment, os::vm_page_size(), true, executable);
241242
}
@@ -266,7 +267,7 @@ void ReservedSpace::reserve(size_t size,
266267
}
267268

268269
// == Case 3 ==
269-
char* base = reserve_memory(requested_address, size, alignment, -1, executable);
270+
char* base = reserve_memory(requested_address, size, alignment, -1, executable, mem_tag);
270271
if (base != nullptr) {
271272
// Successful mapping.
272273
initialize_members(base, size, alignment, page_size, false, executable);
@@ -277,7 +278,8 @@ void ReservedSpace::initialize(size_t size,
277278
size_t alignment,
278279
size_t page_size,
279280
char* requested_address,
280-
bool executable) {
281+
bool executable,
282+
MemTag mem_tag) {
281283
const size_t granularity = os::vm_allocation_granularity();
282284
assert((size & (granularity - 1)) == 0,
283285
"size not aligned to os::vm_allocation_granularity()");
@@ -298,7 +300,7 @@ void ReservedSpace::initialize(size_t size,
298300
alignment = MAX2(alignment, os::vm_page_size());
299301

300302
// Reserve the memory.
301-
reserve(size, alignment, page_size, requested_address, executable);
303+
reserve(size, alignment, page_size, requested_address, executable, mem_tag);
302304

303305
// Check that the requested address is used if given.
304306
if (failed_to_reserve_as_requested(_base, requested_address)) {
@@ -424,7 +426,7 @@ void ReservedHeapSpace::try_reserve_heap(size_t size,
424426
p2i(requested_address),
425427
size);
426428

427-
reserve(size, alignment, page_size, requested_address, false);
429+
reserve(size, alignment, page_size, requested_address, false, mtJavaHeap);
428430

429431
// Check alignment constraints.
430432
if (is_reserved() && !is_aligned(_base, _alignment)) {
@@ -610,7 +612,7 @@ void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t ali
610612
// Last, desperate try without any placement.
611613
if (_base == nullptr) {
612614
log_trace(gc, heap, coops)("Trying to allocate at address null heap of size " SIZE_FORMAT_X, size + noaccess_prefix);
613-
initialize(size + noaccess_prefix, alignment, page_size, nullptr, false);
615+
initialize(size + noaccess_prefix, alignment, page_size, nullptr, false, mtJavaHeap);
614616
}
615617
}
616618
}
@@ -653,18 +655,14 @@ ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, size_t page_
653655
ShouldNotReachHere();
654656
#endif // _LP64
655657
} else {
656-
initialize(size, alignment, page_size, nullptr, false);
658+
initialize(size, alignment, page_size, nullptr, false, mtJavaHeap);
657659
}
658660

659661
assert(markWord::encode_pointer_as_mark(_base).decode_pointer() == _base,
660662
"area must be distinguishable from marks for mark-sweep");
661663
assert(markWord::encode_pointer_as_mark(&_base[size]).decode_pointer() == &_base[size],
662664
"area must be distinguishable from marks for mark-sweep");
663665

664-
if (base() != nullptr) {
665-
MemTracker::record_virtual_memory_tag((address)base(), mtJavaHeap);
666-
}
667-
668666
if (_fd_for_heap != -1) {
669667
::close(_fd_for_heap);
670668
}
@@ -679,8 +677,7 @@ MemRegion ReservedHeapSpace::region() const {
679677
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
680678
size_t rs_align,
681679
size_t rs_page_size) : ReservedSpace() {
682-
initialize(r_size, rs_align, rs_page_size, /*requested address*/ nullptr, /*executable*/ true);
683-
MemTracker::record_virtual_memory_tag((address)base(), mtCode);
680+
initialize(r_size, rs_align, rs_page_size, /*requested address*/ nullptr, /*executable*/ true, mtCode);
684681
}
685682

686683
// VirtualSpace

src/hotspot/share/memory/virtualspace.hpp

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,7 @@
2626
#define SHARE_MEMORY_VIRTUALSPACE_HPP
2727

2828
#include "memory/memRegion.hpp"
29+
#include "nmt/memTag.hpp"
2930
#include "utilities/globalDefinitions.hpp"
3031

3132
class outputStream;
@@ -61,16 +62,16 @@ class ReservedSpace {
6162
size_t page_size, bool special, bool executable);
6263

6364
void initialize(size_t size, size_t alignment, size_t page_size,
64-
char* requested_address, bool executable);
65+
char* requested_address, bool executable, MemTag mem_tag = mtNone);
6566

6667
void reserve(size_t size, size_t alignment, size_t page_size,
67-
char* requested_address, bool executable);
68+
char* requested_address, bool executable, MemTag mem_tag);
6869
public:
6970
// Constructor
7071
ReservedSpace();
7172
// Initialize the reserved space with the given size. Depending on the size
7273
// a suitable page size and alignment will be used.
73-
explicit ReservedSpace(size_t size);
74+
ReservedSpace(size_t size, MemTag mem_tag);
7475
// Initialize the reserved space with the given size. The preferred_page_size
7576
// is used as the minimum page size/alignment. This may waste some space if
7677
// the given size is not aligned to that value, as the reservation will be

src/hotspot/share/prims/jni.cpp

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2402,11 +2402,10 @@ static char* get_bad_address() {
24022402
static char* bad_address = nullptr;
24032403
if (bad_address == nullptr) {
24042404
size_t size = os::vm_allocation_granularity();
2405-
bad_address = os::reserve_memory(size);
2405+
bad_address = os::reserve_memory(size, false, mtInternal);
24062406
if (bad_address != nullptr) {
24072407
os::protect_memory(bad_address, size, os::MEM_PROT_READ,
24082408
/*is_committed*/false);
2409-
MemTracker::record_virtual_memory_tag((void*)bad_address, mtInternal);
24102409
}
24112410
}
24122411
return bad_address;

src/hotspot/share/prims/whitebox.cpp

Lines changed: 2 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -711,19 +711,11 @@ WB_ENTRY(void, WB_NMTFree(JNIEnv* env, jobject o, jlong mem))
711711
WB_END
712712

713713
WB_ENTRY(jlong, WB_NMTReserveMemory(JNIEnv* env, jobject o, jlong size))
714-
jlong addr = 0;
715-
716-
addr = (jlong)(uintptr_t)os::reserve_memory(size);
717-
MemTracker::record_virtual_memory_tag((address)addr, mtTest);
718-
719-
return addr;
714+
return (jlong)(uintptr_t)os::reserve_memory(size, false, mtTest);
720715
WB_END
721716

722717
WB_ENTRY(jlong, WB_NMTAttemptReserveMemoryAt(JNIEnv* env, jobject o, jlong addr, jlong size))
723-
addr = (jlong)(uintptr_t)os::attempt_reserve_memory_at((char*)(uintptr_t)addr, (size_t)size);
724-
MemTracker::record_virtual_memory_tag((address)addr, mtTest);
725-
726-
return addr;
718+
return (jlong)(uintptr_t)os::attempt_reserve_memory_at((char*)(uintptr_t)addr, (size_t)size, false, mtTest);
727719
WB_END
728720

729721
WB_ENTRY(void, WB_NMTCommitMemory(JNIEnv* env, jobject o, jlong addr, jlong size))

0 commit comments

Comments (0)