Commit 51ae88e

runtime: remove non-reserved heap logic
Currently large sysReserve calls on some OSes don't actually reserve the
memory, but just check that it can be reserved. This was important when
we called sysReserve to "reserve" many gigabytes for the heap up front,
but now that we map memory in small increments as we need it, this
complication is no longer necessary.

This has one curious side benefit: currently, on Linux, allocations
that are large enough to be rejected by mmap wind up freezing the
application for a long time before it panics. This happens because
sysReserve doesn't reserve the memory, so sysMap calls mmap_fixed,
which calls mmap, which fails because the mapping is too large.
However, mmap_fixed doesn't inspect *why* mmap fails, so it falls back
to probing every page in the desired region individually with mincore
before performing an (otherwise dangerous) MAP_FIXED mapping, which
will also fail. This takes a long time for a large region. Now this
logic is gone, so the mmap failure leads to an immediate panic.

Updates #10460.

Change-Id: I8efe88c611871cdb14f99fadd09db83e0161ca2e
Reviewed-on: https://go-review.googlesource.com/85888
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
1 parent 2b41554 commit 51ae88e

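The contract the message describes (sysReserve always makes a real PROT_NONE reservation, and sysMap then commits it) can be mimicked in user space. Below is a minimal sketch, assuming linux/amd64 and Go's syscall package; it is not runtime code, and the runtime itself commits by re-mmapping with MAP_FIXED (see mem_linux.go below) rather than calling Mprotect.

package main

import (
	"fmt"
	"syscall"
)

func main() {
	const n = 1 << 20 // reserve 1 MiB of address space

	// "sysReserve": a real PROT_NONE reservation. An oversized request
	// fails here immediately, which is the prompt panic the commit
	// message describes, instead of a slow mincore probe.
	reserved, err := syscall.Mmap(-1, 0, n, syscall.PROT_NONE,
		syscall.MAP_ANON|syscall.MAP_PRIVATE)
	if err != nil {
		panic(err)
	}

	// "sysMap": commit the first 64 KiB of the reservation for use.
	if err := syscall.Mprotect(reserved[:64<<10],
		syscall.PROT_READ|syscall.PROT_WRITE); err != nil {
		panic(err)
	}
	reserved[0] = 1 // the committed page is now usable
	fmt.Println("reserved", n, "bytes; committed 64 KiB; first byte:", reserved[0])
}
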
File tree

9 files changed: 24 additions & 145 deletions

src/runtime/export_test.go

Lines changed: 1 addition & 2 deletions
@@ -437,8 +437,7 @@ func MapNextArenaHint() (start, end uintptr) {
 	} else {
 		start, end = addr, addr+heapArenaBytes
 	}
-	var reserved bool
-	sysReserve(unsafe.Pointer(addr), physPageSize, &reserved)
+	sysReserve(unsafe.Pointer(addr), physPageSize)
 	return
 }

src/runtime/malloc.go

Lines changed: 10 additions & 17 deletions
@@ -235,18 +235,12 @@ var physPageSize uintptr
 // SysReserve reserves address space without allocating memory.
 // If the pointer passed to it is non-nil, the caller wants the
 // reservation there, but SysReserve can still choose another
-// location if that one is unavailable. On some systems and in some
-// cases SysReserve will simply check that the address space is
-// available and not actually reserve it. If SysReserve returns
-// non-nil, it sets *reserved to true if the address space is
-// reserved, false if it has merely been checked.
+// location if that one is unavailable.
 // NOTE: SysReserve returns OS-aligned memory, but the heap allocator
 // may use larger alignment, so the caller must be careful to realign the
 // memory obtained by sysAlloc.
 //
 // SysMap maps previously reserved address space for use.
-// The reserved argument is true if the address space was really
-// reserved, not merely checked.
 //
 // SysFault marks a (already sysAlloc'd) region to fault
 // if accessed. Used only for debugging the runtime.
@@ -361,8 +355,7 @@ func mallocinit() {
 	// heap reservation.
 
 	const arenaMetaSize = unsafe.Sizeof(heapArena{}) * uintptr(len(*mheap_.arenas))
-	var reserved bool
-	meta := uintptr(sysReserve(nil, arenaMetaSize, &reserved))
+	meta := uintptr(sysReserve(nil, arenaMetaSize))
 	if meta != 0 {
 		mheap_.heapArenaAlloc.init(meta, arenaMetaSize)
 	}
@@ -399,7 +392,7 @@ func mallocinit() {
 		128 << 20,
 	}
 	for _, arenaSize := range arenaSizes {
-		a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes, &reserved)
+		a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes)
 		if a != nil {
 			mheap_.arena.init(uintptr(a), size)
 			p = uintptr(a) + size // For hint below
@@ -440,7 +433,7 @@ func (h *mheap) sysAlloc(n uintptr) (v unsafe.Pointer, size uintptr) {
 			// We can't use this, so don't ask.
 			v = nil
 		} else {
-			v = sysReserve(unsafe.Pointer(p), n, &h.arena_reserved)
+			v = sysReserve(unsafe.Pointer(p), n)
 		}
 		if p == uintptr(v) {
 			// Success. Update the hint.
@@ -468,7 +461,7 @@ func (h *mheap) sysAlloc(n uintptr) (v unsafe.Pointer, size uintptr) {
 		// All of the hints failed, so we'll take any
 		// (sufficiently aligned) address the kernel will give
 		// us.
-		v, size = sysReserveAligned(nil, n, heapArenaBytes, &h.arena_reserved)
+		v, size = sysReserveAligned(nil, n, heapArenaBytes)
 		if v == nil {
 			return nil, 0
 		}
@@ -494,7 +487,7 @@ func (h *mheap) sysAlloc(n uintptr) (v unsafe.Pointer, size uintptr) {
 	}
 
 	// Back the reservation.
-	sysMap(v, size, h.arena_reserved, &memstats.heap_sys)
+	sysMap(v, size, &memstats.heap_sys)
 
 mapped:
 	// Create arena metadata.
@@ -529,13 +522,13 @@ mapped:
 // sysReserveAligned is like sysReserve, but the returned pointer is
 // aligned to align bytes. It may reserve either n or n+align bytes,
 // so it returns the size that was reserved.
-func sysReserveAligned(v unsafe.Pointer, size, align uintptr, reserved *bool) (unsafe.Pointer, uintptr) {
+func sysReserveAligned(v unsafe.Pointer, size, align uintptr) (unsafe.Pointer, uintptr) {
 	// Since the alignment is rather large in uses of this
 	// function, we're not likely to get it by chance, so we ask
 	// for a larger region and remove the parts we don't need.
 	retries := 0
 retry:
-	p := uintptr(sysReserve(v, size+align, reserved))
+	p := uintptr(sysReserve(v, size+align))
 	switch {
 	case p == 0:
 		return nil, 0
@@ -550,7 +543,7 @@ retry:
 		// so we may have to try again.
 		sysFree(unsafe.Pointer(p), size+align, nil)
 		p = round(p, align)
-		p2 := sysReserve(unsafe.Pointer(p), size, reserved)
+		p2 := sysReserve(unsafe.Pointer(p), size)
 		if p != uintptr(p2) {
 			// Must have raced. Try again.
 			sysFree(p2, size, nil)
@@ -1095,7 +1088,7 @@ func (l *linearAlloc) alloc(size, align uintptr, sysStat *uint64) unsafe.Pointer
 	l.next = p + size
 	if pEnd := round(l.next-1, physPageSize); pEnd > l.mapped {
 		// We need to map more of the reserved space.
-		sysMap(unsafe.Pointer(l.mapped), pEnd-l.mapped, true, sysStat)
+		sysMap(unsafe.Pointer(l.mapped), pEnd-l.mapped, sysStat)
 		l.mapped = pEnd
 	}
 	return unsafe.Pointer(p)

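sysReserveAligned above keeps using the over-reserve-and-trim trick: mmap only guarantees page alignment, so it reserves size+align bytes and discards the parts outside the aligned window. Below is a user-space analogue, assuming linux/amd64 and raw syscalls; unlike the runtime, which frees the whole block and re-reserves at the rounded address (and so must retry if another thread grabs the hole), this sketch trims with munmap directly. alignedReserve is a hypothetical name, not the runtime's function.

package main

import (
	"fmt"
	"syscall"
)

func round(p, align uintptr) uintptr { return (p + align - 1) &^ (align - 1) }

// alignedReserve reserves size bytes of PROT_NONE address space aligned
// to align by over-reserving size+align bytes and unmapping the head
// and tail that fall outside the aligned window.
func alignedReserve(size, align uintptr) (uintptr, error) {
	p, _, errno := syscall.Syscall6(syscall.SYS_MMAP,
		0, size+align, syscall.PROT_NONE,
		syscall.MAP_ANON|syscall.MAP_PRIVATE,
		^uintptr(0), // fd = -1 for an anonymous mapping
		0)
	if errno != 0 {
		return 0, errno
	}
	q := round(p, align)
	if q > p { // drop the unaligned head [p, q)
		syscall.Syscall(syscall.SYS_MUNMAP, p, q-p, 0)
	}
	if tail := p + align - q; tail > 0 { // drop the tail past q+size
		syscall.Syscall(syscall.SYS_MUNMAP, q+size, tail, 0)
	}
	return q, nil
}

func main() {
	p, err := alignedReserve(4<<20, 4<<20) // 4 MiB, 4 MiB-aligned
	fmt.Printf("reserved at %#x, err=%v, aligned=%v\n", p, err, p%(4<<20) == 0)
}
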
src/runtime/mem_bsd.go

Lines changed: 2 additions & 34 deletions
@@ -7,7 +7,6 @@
 package runtime
 
 import (
-	"runtime/internal/sys"
 	"unsafe"
 )
 
@@ -42,51 +41,20 @@ func sysFault(v unsafe.Pointer, n uintptr) {
 	mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE|_MAP_FIXED, -1, 0)
 }
 
-func sysReserve(v unsafe.Pointer, n uintptr, reserved *bool) unsafe.Pointer {
-	// On 64-bit, people with ulimit -v set complain if we reserve too
-	// much address space. Instead, assume that the reservation is okay
-	// and check the assumption in SysMap.
-	if sys.PtrSize == 8 && uint64(n) > 1<<32 || sys.GoosNacl != 0 {
-		*reserved = false
-		return v
-	}
-
+func sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer {
 	p, err := mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
 	if err != 0 {
 		return nil
 	}
-	*reserved = true
 	return p
 }
 
 const _sunosEAGAIN = 11
 const _ENOMEM = 12
 
-func sysMap(v unsafe.Pointer, n uintptr, reserved bool, sysStat *uint64) {
+func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) {
 	mSysStatInc(sysStat, n)
 
-	// On 64-bit, we don't actually have v reserved, so tread carefully.
-	if !reserved {
-		flags := int32(_MAP_ANON | _MAP_PRIVATE)
-		if GOOS == "dragonfly" {
-			// TODO(jsing): For some reason DragonFly seems to return
-			// memory at a different address than we requested, even when
-			// there should be no reason for it to do so. This can be
-			// avoided by using MAP_FIXED, but I'm not sure we should need
-			// to do this - we do not on other platforms.
-			flags |= _MAP_FIXED
-		}
-		p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, flags, -1, 0)
-		if err == _ENOMEM || (GOOS == "solaris" && err == _sunosEAGAIN) {
-			throw("runtime: out of memory")
-		}
-		if p != v || err != 0 {
-			print("runtime: address space conflict: map(", v, ") = ", p, "(err ", err, ")\n")
-			throw("runtime: address space conflict")
-		}
-		return
-	}
-
 	p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
 	if err == _ENOMEM || (GOOS == "solaris" && err == _sunosEAGAIN) {
 		throw("runtime: out of memory")

src/runtime/mem_darwin.go

Lines changed: 2 additions & 3 deletions
@@ -38,8 +38,7 @@ func sysFault(v unsafe.Pointer, n uintptr) {
 	mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE|_MAP_FIXED, -1, 0)
 }
 
-func sysReserve(v unsafe.Pointer, n uintptr, reserved *bool) unsafe.Pointer {
-	*reserved = true
+func sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer {
 	p, err := mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
 	if err != 0 {
 		return nil
@@ -51,7 +50,7 @@ const (
 	_ENOMEM = 12
 )
 
-func sysMap(v unsafe.Pointer, n uintptr, reserved bool, sysStat *uint64) {
+func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) {
 	mSysStatInc(sysStat, n)
 	p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
 	if err == _ENOMEM {

src/runtime/mem_linux.go

Lines changed: 2 additions & 73 deletions
@@ -14,46 +14,6 @@ const (
 	_EINVAL = 22
 )
 
-// NOTE: vec must be just 1 byte long here.
-// Mincore returns ENOMEM if any of the pages are unmapped,
-// but we want to know that all of the pages are unmapped.
-// To make these the same, we can only ask about one page
-// at a time. See golang.org/issue/7476.
-var addrspace_vec [1]byte
-
-func addrspace_free(v unsafe.Pointer, n uintptr) bool {
-	for off := uintptr(0); off < n; off += physPageSize {
-		// Use a length of 1 byte, which the kernel will round
-		// up to one physical page regardless of the true
-		// physical page size.
-		errval := mincore(unsafe.Pointer(uintptr(v)+off), 1, &addrspace_vec[0])
-		if errval == -_EINVAL {
-			// Address is not a multiple of the physical
-			// page size. Shouldn't happen, but just ignore it.
-			continue
-		}
-		// ENOMEM means unmapped, which is what we want.
-		// Anything else we assume means the pages are mapped.
-		if errval != -_ENOMEM {
-			return false
-		}
-	}
-	return true
-}
-
-func mmap_fixed(v unsafe.Pointer, n uintptr, prot, flags, fd int32, offset uint32) (unsafe.Pointer, int) {
-	p, err := mmap(v, n, prot, flags, fd, offset)
-	// On some systems, mmap ignores v without
-	// MAP_FIXED, so retry if the address space is free.
-	if p != v && addrspace_free(v, n) {
-		if err == 0 {
-			munmap(p, n)
-		}
-		p, err = mmap(v, n, prot, flags|_MAP_FIXED, fd, offset)
-	}
-	return p, err
-}
-
 // Don't split the stack as this method may be invoked without a valid G, which
 // prevents us from allocating more stack.
 //go:nosplit
@@ -180,48 +140,17 @@ func sysFault(v unsafe.Pointer, n uintptr) {
 	mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE|_MAP_FIXED, -1, 0)
 }
 
-func sysReserve(v unsafe.Pointer, n uintptr, reserved *bool) unsafe.Pointer {
-	// On 64-bit, people with ulimit -v set complain if we reserve too
-	// much address space. Instead, assume that the reservation is okay
-	// if we can reserve at least 64K and check the assumption in SysMap.
-	// Only user-mode Linux (UML) rejects these requests.
-	if sys.PtrSize == 8 && uint64(n) > 1<<32 {
-		p, err := mmap_fixed(v, 64<<10, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
-		if p != v || err != 0 {
-			if err == 0 {
-				munmap(p, 64<<10)
-			}
-			return nil
-		}
-		munmap(p, 64<<10)
-		*reserved = false
-		return v
-	}
-
+func sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer {
 	p, err := mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
 	if err != 0 {
 		return nil
 	}
-	*reserved = true
 	return p
 }
 
-func sysMap(v unsafe.Pointer, n uintptr, reserved bool, sysStat *uint64) {
+func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) {
 	mSysStatInc(sysStat, n)
 
-	// On 64-bit, we don't actually have v reserved, so tread carefully.
-	if !reserved {
-		p, err := mmap_fixed(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
-		if err == _ENOMEM {
-			throw("runtime: out of memory")
-		}
-		if p != v || err != 0 {
-			print("runtime: address space conflict: map(", v, ") = ", p, " (err ", err, ")\n")
-			throw("runtime: address space conflict")
-		}
-		return
-	}
-
 	p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
 	if err == _ENOMEM {
 		throw("runtime: out of memory")

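The deleted addrspace_free above is the probing loop the commit message blames for the long freeze: mincore accepts only one page per query for this purpose (golang.org/issue/7476), so checking a 512 GiB region 4 KiB at a time costs roughly 134 million syscalls. Below is a standalone sketch of that probe, assuming Linux and Go's syscall package; pageMapped is a hypothetical name, not the runtime's.

package main

import (
	"fmt"
	"os"
	"syscall"
	"unsafe"
)

// pageMapped asks mincore about a single byte; the kernel rounds that
// up to one page. ENOMEM means the page is unmapped; any other result
// is treated as "mapped". addr must be page-aligned.
func pageMapped(addr uintptr) bool {
	var vec [1]byte
	_, _, errno := syscall.Syscall(syscall.SYS_MINCORE,
		addr, 1, uintptr(unsafe.Pointer(&vec[0])))
	return errno != syscall.ENOMEM
}

func main() {
	var x int
	page := uintptr(os.Getpagesize())
	p := uintptr(unsafe.Pointer(&x)) &^ (page - 1) // page holding a live variable
	fmt.Println("stack page mapped:", pageMapped(p))
	fmt.Println("second page mapped:", pageMapped(page)) // almost surely not
}
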
src/runtime/mem_plan9.go

Lines changed: 2 additions & 3 deletions
@@ -160,7 +160,7 @@ func sysUnused(v unsafe.Pointer, n uintptr) {
 func sysUsed(v unsafe.Pointer, n uintptr) {
 }
 
-func sysMap(v unsafe.Pointer, n uintptr, reserved bool, sysStat *uint64) {
+func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) {
 	// sysReserve has already allocated all heap memory,
 	// but has not adjusted stats.
 	mSysStatInc(sysStat, n)
@@ -169,8 +169,7 @@ func sysMap(v unsafe.Pointer, n uintptr, reserved bool, sysStat *uint64) {
 func sysFault(v unsafe.Pointer, n uintptr) {
 }
 
-func sysReserve(v unsafe.Pointer, n uintptr, reserved *bool) unsafe.Pointer {
-	*reserved = true
+func sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer {
 	lock(&memlock)
 	p := memAlloc(n)
 	memCheck()

src/runtime/mem_windows.go

Lines changed: 2 additions & 3 deletions
@@ -98,8 +98,7 @@ func sysFault(v unsafe.Pointer, n uintptr) {
 	sysUnused(v, n)
 }
 
-func sysReserve(v unsafe.Pointer, n uintptr, reserved *bool) unsafe.Pointer {
-	*reserved = true
+func sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer {
 	// v is just a hint.
 	// First try at v.
 	// This will fail if any of [v, v+n) is already reserved.
@@ -112,7 +111,7 @@ func sysReserve(v unsafe.Pointer, n uintptr, reserved *bool) unsafe.Pointer {
 	return unsafe.Pointer(stdcall4(_VirtualAlloc, 0, n, _MEM_RESERVE, _PAGE_READWRITE))
 }
 
-func sysMap(v unsafe.Pointer, n uintptr, reserved bool, sysStat *uint64) {
+func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) {
 	mSysStatInc(sysStat, n)
 	p := stdcall4(_VirtualAlloc, uintptr(v), n, _MEM_COMMIT, _PAGE_READWRITE)
 	if p != uintptr(v) {

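On Windows the reserve/commit split is native: sysReserve uses VirtualAlloc with MEM_RESERVE and sysMap commits with MEM_COMMIT, which is exactly the model the other platforms now follow. Below is a user-space sketch, assuming Windows and Go's syscall package; the constant values are the documented Win32 ones, and the 1 MiB / 64 KiB sizes are arbitrary choices for illustration.

// A user-space sketch of the two-step VirtualAlloc protocol.
//go:build windows

package main

import (
	"fmt"
	"syscall"
)

const (
	_MEM_COMMIT     = 0x1000
	_MEM_RESERVE    = 0x2000
	_PAGE_READWRITE = 0x0004
)

func main() {
	virtualAlloc := syscall.NewLazyDLL("kernel32.dll").NewProc("VirtualAlloc")

	// "sysReserve": claim 1 MiB of address space; no pages usable yet.
	r, _, callErr := virtualAlloc.Call(0, 1<<20, _MEM_RESERVE, _PAGE_READWRITE)
	if r == 0 {
		panic(callErr)
	}
	// "sysMap": commit the first 64 KiB within the reservation.
	c, _, callErr := virtualAlloc.Call(r, 64<<10, _MEM_COMMIT, _PAGE_READWRITE)
	if c == 0 {
		panic(callErr)
	}
	fmt.Printf("reserved 1 MiB at %#x, committed 64 KiB at %#x\n", r, c)
}
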
src/runtime/mheap.go

Lines changed: 1 addition & 10 deletions
@@ -96,15 +96,6 @@ type mheap struct {
 	nlargefree  uint64                  // number of frees for large objects (>maxsmallsize)
 	nsmallfree  [_NumSizeClasses]uint64 // number of frees for small objects (<=maxsmallsize)
 
-	// arena_reserved indicates that the memory [arena_alloc,
-	// arena_end) is reserved (e.g., mapped PROT_NONE). If this is
-	// false, we have to be careful not to clobber existing
-	// mappings here. If this is true, then we own the mapping
-	// here and *must* clobber it to use it.
-	//
-	// TODO(austin): Remove.
-	arena_reserved bool
-
 	// arenas is the heap arena index. arenas[va/heapArenaBytes]
 	// points to the metadata for the heap arena containing va.
 	//
@@ -135,7 +126,7 @@ type mheap struct {
 	// (the actual arenas). This is only used on 32-bit.
 	arena linearAlloc
 
-	_ uint32 // ensure 64-bit alignment of central
+	//_ uint32 // ensure 64-bit alignment of central
 
 	// central free lists for small size classes.
 	// the padding makes sure that the MCentrals are

src/runtime/os_linux.go

Lines changed: 2 additions & 0 deletions
@@ -194,6 +194,8 @@ const (
 
 var procAuxv = []byte("/proc/self/auxv\x00")
 
+var addrspace_vec [1]byte
+
 func mincore(addr unsafe.Pointer, n uintptr, dst *byte) int32
 
 func sysargs(argc int32, argv **byte) {
