-
Notifications
You must be signed in to change notification settings - Fork 77
/
Copy pathminor_gc.c
737 lines (678 loc) · 26 KB
/
minor_gc.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
/**************************************************************************/
/* */
/* OCaml */
/* */
/* Damien Doligez, projet Para, INRIA Rocquencourt */
/* */
/* Copyright 1996 Institut National de Recherche en Informatique et */
/* en Automatique. */
/* */
/* All rights reserved. This file is distributed under the terms of */
/* the GNU Lesser General Public License version 2.1, with the */
/* special exception on linking described in the file LICENSE. */
/* */
/**************************************************************************/
#define CAML_INTERNALS
#include <string.h>
#include "caml/custom.h"
#include "caml/config.h"
#include "caml/fail.h"
#include "caml/finalise.h"
#include "caml/gc.h"
#include "caml/gc_ctrl.h"
#include "caml/major_gc.h"
#include "caml/memory.h"
#include "caml/minor_gc.h"
#include "caml/misc.h"
#include "caml/mlvalues.h"
#include "caml/roots.h"
#include "caml/signals.h"
#include "caml/weak.h"
#include "caml/memprof.h"
#include "caml/eventlog.h"
/* Pointers into the minor heap.
[Caml_state->young_base]
The [malloc] block that contains the heap.
[Caml_state->young_start] ... [Caml_state->young_end]
The whole range of the minor heap: all young blocks are inside
this interval.
[Caml_state->young_alloc_start]...[Caml_state->young_alloc_end]
The allocation arena: newly-allocated blocks are carved from
this interval, starting at [Caml_state->young_alloc_end].
[Caml_state->young_alloc_mid] is the mid-point of this interval.
[Caml_state->young_ptr], [Caml_state->young_trigger],
[Caml_state->young_limit]
These pointers are all inside the allocation arena.
- [Caml_state->young_ptr] is where the next allocation will take place.
- [Caml_state->young_trigger] is how far we can allocate before
triggering [caml_gc_dispatch]. Currently, it is either
[Caml_state->young_alloc_start] or the mid-point of the allocation
arena.
- [Caml_state->young_limit] is the pointer that is compared to
[Caml_state->young_ptr] for allocation. It is either:
+ [Caml_state->young_alloc_end] if a signal handler or
finaliser or memprof callback is pending, or if a major
or minor collection has been requested, or an
asynchronous callback has just raised an exception,
+ [caml_memprof_young_trigger] if a memprof sample is planned,
+ or [Caml_state->young_trigger].
*/
struct generic_table CAML_TABLE_STRUCT(char);
void caml_alloc_minor_tables (void)
{
Caml_state->ref_table =
caml_stat_alloc_noexc(sizeof(struct caml_ref_table));
if (Caml_state->ref_table == NULL)
caml_fatal_error ("cannot initialize minor heap");
memset(Caml_state->ref_table, 0, sizeof(struct caml_ref_table));
Caml_state->ephe_ref_table =
caml_stat_alloc_noexc(sizeof(struct caml_ephe_ref_table));
if (Caml_state->ephe_ref_table == NULL)
caml_fatal_error ("cannot initialize minor heap");
memset(Caml_state->ephe_ref_table, 0, sizeof(struct caml_ephe_ref_table));
Caml_state->custom_table =
caml_stat_alloc_noexc(sizeof(struct caml_custom_table));
if (Caml_state->custom_table == NULL)
caml_fatal_error ("cannot initialize minor heap");
memset(Caml_state->custom_table, 0, sizeof(struct caml_custom_table));
}
/* [sz] and [rsv] are numbers of entries */
static void alloc_generic_table (struct generic_table *tbl, asize_t sz,
asize_t rsv, asize_t element_size)
{
void *new_table;
tbl->size = sz;
tbl->reserve = rsv;
new_table = (void *) caml_stat_alloc_noexc((tbl->size + tbl->reserve) *
element_size);
if (new_table == NULL) caml_fatal_error ("not enough memory");
if (tbl->base != NULL) caml_stat_free (tbl->base);
tbl->base = new_table;
tbl->ptr = tbl->base;
tbl->threshold = tbl->base + tbl->size * element_size;
tbl->limit = tbl->threshold;
tbl->end = tbl->base + (tbl->size + tbl->reserve) * element_size;
}
void caml_alloc_table (struct caml_ref_table *tbl, asize_t sz, asize_t rsv)
{
alloc_generic_table ((struct generic_table *) tbl, sz, rsv, sizeof (value *));
}
void caml_alloc_ephe_table (struct caml_ephe_ref_table *tbl, asize_t sz,
asize_t rsv)
{
alloc_generic_table ((struct generic_table *) tbl, sz, rsv,
sizeof (struct caml_ephe_ref_elt));
}
void caml_alloc_custom_table (struct caml_custom_table *tbl, asize_t sz,
asize_t rsv)
{
alloc_generic_table ((struct generic_table *) tbl, sz, rsv,
sizeof (struct caml_custom_elt));
}
static void reset_table (struct generic_table *tbl)
{
tbl->size = 0;
tbl->reserve = 0;
if (tbl->base != NULL) caml_stat_free (tbl->base);
tbl->base = tbl->ptr = tbl->threshold = tbl->limit = tbl->end = NULL;
}
static void clear_table (struct generic_table *tbl,
asize_t element_size,
const char* name)
{
asize_t maxsz = Caml_state->minor_heap_wsz;
if (tbl->size <= maxsz) {
tbl->ptr = tbl->base;
tbl->limit = tbl->threshold;
} else {
caml_gc_message (0x08, "Shrinking %s to %ldk bytes\n",
name,
(long)((maxsz * element_size) / 1024));
alloc_generic_table(tbl, Caml_state->minor_heap_wsz, 256, element_size);
}
}
void caml_set_minor_heap_size (asize_t bsz)
{
char *new_heap;
void *new_heap_base;
CAMLassert (bsz >= Bsize_wsize(Minor_heap_min));
CAMLassert (bsz <= Bsize_wsize(Minor_heap_max));
CAMLassert (bsz % Page_size == 0);
CAMLassert (bsz % sizeof (value) == 0);
if (Caml_state->young_ptr != Caml_state->young_alloc_end){
CAML_EV_COUNTER (EV_C_FORCE_MINOR_SET_MINOR_HEAP_SIZE, 1);
Caml_state->requested_minor_gc = 0;
Caml_state->young_trigger = Caml_state->young_alloc_mid;
caml_update_young_limit();
caml_empty_minor_heap ();
}
CAMLassert (Caml_state->young_ptr == Caml_state->young_alloc_end);
new_heap = caml_stat_alloc_aligned_noexc(bsz, 0, &new_heap_base);
if (new_heap == NULL) caml_fatal_out_of_memory();
if (caml_page_table_add(In_young, new_heap, new_heap + bsz) != 0)
caml_fatal_out_of_memory();
if (Caml_state->young_start != NULL){
caml_page_table_remove(In_young, Caml_state->young_start,
Caml_state->young_end);
caml_stat_free (Caml_state->young_base);
}
Caml_state->young_base = new_heap_base;
Caml_state->young_start = (value *) new_heap;
Caml_state->young_end = (value *) (new_heap + bsz);
Caml_state->young_alloc_start = Caml_state->young_start;
Caml_state->young_alloc_mid =
Caml_state->young_alloc_start + Wsize_bsize (bsz) / 2;
Caml_state->young_alloc_end = Caml_state->young_end;
/* caml_update_young_limit called by caml_memprof_renew_minor_sample */
Caml_state->young_trigger = Caml_state->young_alloc_start;
Caml_state->young_ptr = Caml_state->young_alloc_end;
Caml_state->minor_heap_wsz = Wsize_bsize (bsz);
caml_memprof_renew_minor_sample();
reset_table ((struct generic_table *) Caml_state->ref_table);
reset_table ((struct generic_table *) Caml_state->ephe_ref_table);
reset_table ((struct generic_table *) Caml_state->custom_table);
}
static value oldify_todo_list = 0;
/* Note that the tests on the tag depend on the fact that Infix_tag,
Forward_tag, and No_scan_tag are contiguous. */
void caml_oldify_one (value v, value *p)
{
value result;
header_t hd;
mlsize_t sz, i;
tag_t tag;
tail_call:
if (Is_block (v) && Is_young (v)){
CAMLassert ((value *) Hp_val (v) >= Caml_state->young_ptr);
hd = Hd_val (v);
if (hd == 0){ /* If already forwarded */
*p = Field (v, 0); /* then forward pointer is first field. */
}else{
CAMLassert_young_header(hd);
tag = Tag_hd (hd);
if (tag < Infix_tag){
value field0;
sz = Wosize_hd (hd);
mlsize_t scannable_sz = Scannable_wosize_hd(hd);
result = caml_alloc_shr_for_minor_gc (sz, tag, hd);
*p = result;
/* Copy the non-scannable suffix of fields */
for (i = scannable_sz; i < sz; i++) {
Field(result, i) = Field(v, i);
}
field0 = Field (v, 0);
Hd_val (v) = 0; /* Set forward flag */
Field (v, 0) = result; /* and forward pointer. */
if (scannable_sz == 0) {
return;
} else if (scannable_sz > 1){
Field (result, 0) = field0;
Field (result, 1) = oldify_todo_list; /* Add this block */
oldify_todo_list = v; /* to the "to do" list. */
}else{
CAMLassert (scannable_sz == 1);
p = &Field (result, 0);
v = field0;
goto tail_call;
}
}else if (tag >= No_scan_tag){
sz = Wosize_hd (hd);
result = caml_alloc_shr_for_minor_gc (sz, tag, hd);
for (i = 0; i < sz; i++) Field (result, i) = Field (v, i);
Hd_val (v) = 0; /* Set forward flag */
Field (v, 0) = result; /* and forward pointer. */
*p = result;
}else if (tag == Infix_tag){
mlsize_t offset = Infix_offset_hd (hd);
caml_oldify_one (v - offset, p); /* Cannot recurse deeper than 1. */
*p += offset;
}else{
value f = Forward_val (v);
tag_t ft = 0;
int vv = 1;
CAMLassert (tag == Forward_tag);
if (Is_block (f)){
if (Is_young (f)){
vv = 1;
ft = Tag_val (Hd_val (f) == 0 ? Field (f, 0) : f);
}else{
vv = Is_in_value_area(f);
if (vv){
ft = Tag_val (f);
}
}
}
if (!vv || ft == Forward_tag || ft == Lazy_tag || ft == Forcing_tag
#ifdef FLAT_FLOAT_ARRAY
|| ft == Double_tag
#endif
){
/* Do not short-circuit the pointer. Copy as a normal block. */
CAMLassert (Wosize_hd (hd) == 1);
result = caml_alloc_shr_for_minor_gc (1, Forward_tag, hd);
*p = result;
Hd_val (v) = 0; /* Set (GC) forward flag */
Field (v, 0) = result; /* and forward pointer. */
p = &Field (result, 0);
v = f;
goto tail_call;
}else{
v = f; /* Follow the forwarding */
goto tail_call; /* then oldify. */
}
}
}
}else{
*p = v;
}
}
/* Test if the ephemeron is alive, everything outside minor heap is alive */
Caml_inline int ephe_check_alive_data(struct caml_ephe_ref_elt *re){
mlsize_t i;
value child;
for (i = CAML_EPHE_FIRST_KEY; i < Wosize_val(re->ephe); i++){
child = Field (re->ephe, i);
if(child != caml_ephe_none
&& Is_block (child) && Is_young (child)) {
if(Tag_val(child) == Infix_tag) child -= Infix_offset_val(child);
if(Hd_val (child) != 0) return 0; /* Value not copied to major heap */
}
}
return 1;
}
/* Finish the work that was put off by [caml_oldify_one].
Note that [caml_oldify_one] itself is called by oldify_mopup, so we
have to be careful to remove the first entry from the list before
oldifying its fields. */
void caml_oldify_mopup (void)
{
value v, new_v, f;
mlsize_t i;
struct caml_ephe_ref_elt *re;
int redo;
again:
redo = 0;
while (oldify_todo_list != 0){
v = oldify_todo_list; /* Get the head. */
CAMLassert (Hd_val (v) == 0); /* It must be forwarded. */
new_v = Field (v, 0); /* Follow forward pointer. */
oldify_todo_list = Field (new_v, 1); /* Remove from list. */
mlsize_t scannable_wosize = Scannable_wosize_val(new_v);
/* [v] was only added to the [todo_list] if its [scannable_wosize > 1].
- It needs to be greater than 0 because we oldify the first field.
- It needs to be greater than 1 so the below loop runs at least once,
overwriting Field(new_v, 1) which [oldify_one] used as temporary
storage of the next value of [todo_list].
*/
CAMLassert (scannable_wosize > 1);
f = Field (new_v, 0);
if (Is_block (f) && Is_young (f)){
caml_oldify_one (f, &Field (new_v, 0));
}
i = 1;
if(Tag_val(new_v) == Closure_tag) {
mlsize_t non_scannable = Start_env_closinfo(Closinfo_val(v));
for (; i < non_scannable; i++) {
Field(new_v, i) = Field(v, i);
}
}
for (; i < scannable_wosize; i++){
f = Field (v, i);
if (Is_block (f) && Is_young (f)){
caml_oldify_one (f, &Field (new_v, i));
}else{
Field (new_v, i) = f;
}
}
}
// The non-scannable suffix is already copied in [oldify_one].
/* Oldify the data in the minor heap of alive ephemeron
During minor collection keys outside the minor heap are considered alive */
for (re = Caml_state->ephe_ref_table->base;
re < Caml_state->ephe_ref_table->ptr; re++){
/* look only at ephemeron with data in the minor heap */
if (re->offset == 1){
value *data = &Field(re->ephe,1), v = *data;
if (v != caml_ephe_none && Is_block (v) && Is_young (v)){
mlsize_t offs = Tag_val(v) == Infix_tag ? Infix_offset_val(v) : 0;
v -= offs;
if (Hd_val (v) == 0){ /* Value copied to major heap */
*data = Field (v, 0) + offs;
} else {
if (ephe_check_alive_data(re)){
caml_oldify_one(*data,data);
redo = 1; /* oldify_todo_list can still be 0 */
}
}
}
}
}
if (redo) goto again;
}
#ifdef DEBUG
static void verify_minor_heap(void)
{
header_t* p;
struct caml_local_arena* arena = Caml_state->local_arenas ?
&Caml_state->local_arenas->arenas[Caml_state->local_arenas->count-1] : NULL;
for (p = (header_t*)Caml_state->young_ptr;
p < (header_t*)Caml_state->young_alloc_end;
p += Whsize_hp(p)) {
header_t hd = *p;
CAMLassert_young_header(hd);
if (Tag_hd(hd) < No_scan_tag) {
intnat i = 0;
if (Tag_hd(hd) == Closure_tag)
i = Start_env_closinfo(Closinfo_val(Val_hp(p)));
for (; i < Scannable_wosize_hd(hd); i++) {
value v = Field(Val_hp(p), i);
if (Is_block(v)) {
if (Is_young(v)) CAMLassert ((value)Caml_state->young_ptr < v);
if (arena) {
CAMLassert(!(arena->base <= (char*)v &&
(char*)v < arena->base + arena->length));
}
}
}
}
}
if (arena) {
value** r;
for (r = Caml_state->ref_table->base;
r < Caml_state->ref_table->ptr; r++) {
CAMLassert(!(arena->base <= (char*)*r &&
(char*)*r < arena->base + arena->length));
if (Is_block(**r)) {
CAMLassert(!(arena->base <= (char*)**r &&
(char*)**r < arena->base + arena->length));
}
}
}
}
#endif
/* Make sure the minor heap is empty by performing a minor collection
if needed.
*/
void caml_empty_minor_heap (void)
{
value **r;
struct caml_custom_elt *elt;
uintnat prev_alloc_words;
struct caml_ephe_ref_elt *re;
if (Caml_state->young_ptr != Caml_state->young_alloc_end){
CAMLassert_young_header(*(header_t*)Caml_state->young_ptr);
#ifdef DEBUG
verify_minor_heap();
#endif
if (caml_minor_gc_begin_hook != NULL) (*caml_minor_gc_begin_hook) ();
prev_alloc_words = caml_allocated_words;
if (Caml_state->in_minor_collection)
caml_fatal_error("Minor GC triggered recursively");
Caml_state->in_minor_collection = 1;
caml_gc_message (0x02, "<");
CAML_EV_BEGIN(EV_MINOR_LOCAL_ROOTS);
caml_oldify_local_roots();
CAML_EV_END(EV_MINOR_LOCAL_ROOTS);
CAML_EV_BEGIN(EV_MINOR_REF_TABLES);
for (r = Caml_state->ref_table->base;
r < Caml_state->ref_table->ptr; r++) {
caml_oldify_one (**r, *r);
}
CAML_EV_END(EV_MINOR_REF_TABLES);
CAML_EV_BEGIN(EV_MINOR_COPY);
caml_oldify_mopup ();
CAML_EV_END(EV_MINOR_COPY);
/* Update the ephemerons */
for (re = Caml_state->ephe_ref_table->base;
re < Caml_state->ephe_ref_table->ptr; re++){
if(re->offset < Wosize_val(re->ephe)){
/* If it is not the case, the ephemeron has been truncated */
value *key = &Field(re->ephe,re->offset), v = *key;
if (v != caml_ephe_none && Is_block (v) && Is_young (v)){
mlsize_t offs = Tag_val (v) == Infix_tag ? Infix_offset_val (v) : 0;
v -= offs;
if (Hd_val (v) == 0){ /* Value copied to major heap */
*key = Field (v, 0) + offs;
}else{ /* Value not copied so it's dead */
CAMLassert(!ephe_check_alive_data(re));
*key = caml_ephe_none;
Field(re->ephe,1) = caml_ephe_none;
}
}
}
}
/* Update the OCaml finalise_last values */
CAML_EV_BEGIN(EV_MINOR_UPDATE_WEAK);
caml_final_update_minor_roots();
/* Trigger memprofs callbacks for blocks in the minor heap. */
caml_memprof_minor_update();
/* Run custom block finalisation of dead minor values */
for (elt = Caml_state->custom_table->base;
elt < Caml_state->custom_table->ptr; elt++){
value v = elt->block;
if (Hd_val (v) == 0){
/* Block was copied to the major heap: adjust GC speed numbers. */
caml_adjust_gc_speed(elt->mem, elt->max);
}else{
/* Block will be freed: call finalization function, if any. */
void (*final_fun)(value) = Custom_ops_val(v)->finalize;
if (final_fun != NULL) final_fun(v);
}
}
CAML_EV_END(EV_MINOR_UPDATE_WEAK);
CAML_EV_BEGIN(EV_MINOR_FINALIZED);
Caml_state->stat_minor_words +=
Caml_state->young_alloc_end - Caml_state->young_ptr;
caml_gc_clock +=
(double) (Caml_state->young_alloc_end - Caml_state->young_ptr)
/ Caml_state->minor_heap_wsz;
Caml_state->young_ptr = Caml_state->young_alloc_end;
clear_table ((struct generic_table *) Caml_state->ref_table,
sizeof(value *),
"ref_table");
clear_table ((struct generic_table *) Caml_state->ephe_ref_table,
sizeof(struct caml_ephe_ref_elt),
"ephe_ref_table");
clear_table ((struct generic_table *) Caml_state->custom_table,
sizeof(struct caml_custom_elt),
"custom_table");
Caml_state->extra_heap_resources_minor = 0;
caml_gc_message (0x02, ">");
Caml_state->in_minor_collection = 0;
caml_final_empty_young ();
CAML_EV_END(EV_MINOR_FINALIZED);
Caml_state->stat_promoted_words += caml_allocated_words - prev_alloc_words;
CAML_EV_COUNTER (EV_C_MINOR_PROMOTED,
caml_allocated_words - prev_alloc_words);
++ Caml_state->stat_minor_collections;
caml_memprof_renew_minor_sample();
if (caml_minor_gc_end_hook != NULL) (*caml_minor_gc_end_hook) ();
}else{
/* The minor heap is empty nothing to do. */
caml_final_empty_young ();
}
#ifdef DEBUG
{
value *p;
for (p = Caml_state->young_alloc_start; p < Caml_state->young_alloc_end;
++p) {
*p = Debug_free_minor;
}
}
#endif
}
#ifdef CAML_INSTR
extern uintnat caml_instr_alloc_jump;
#endif /*CAML_INSTR*/
/* Do a minor collection or a slice of major collection, etc.
Leave enough room in the minor heap to allocate at least one object.
Guaranteed not to call any OCaml callback.
*/
void caml_gc_dispatch (void)
{
CAML_EVENTLOG_DO({
CAML_EV_COUNTER(EV_C_ALLOC_JUMP, caml_instr_alloc_jump);
caml_instr_alloc_jump = 0;
});
if (Caml_state->young_trigger == Caml_state->young_alloc_start){
/* The minor heap is full, we must do a minor collection. */
Caml_state->requested_minor_gc = 1;
}else{
/* The minor heap is half-full, do a major GC slice. */
Caml_state->requested_major_slice = 1;
}
if (caml_gc_phase == Phase_idle){
/* The major GC needs an empty minor heap in order to start a new cycle.
If a major slice was requested, we need to do a minor collection
before we can do the major slice that starts a new major GC cycle.
If a minor collection was requested, we take the opportunity to start
a new major GC cycle.
In either case, we have to do a minor cycle followed by a major slice.
*/
Caml_state->requested_minor_gc = 1;
Caml_state->requested_major_slice = 1;
}
if (Caml_state->requested_minor_gc) {
/* reset the pointers first because the end hooks might allocate */
CAML_EV_BEGIN(EV_MINOR);
Caml_state->requested_minor_gc = 0;
Caml_state->young_trigger = Caml_state->young_alloc_mid;
caml_update_young_limit();
caml_empty_minor_heap ();
CAML_EV_END(EV_MINOR);
}
if (Caml_state->requested_major_slice) {
Caml_state->requested_major_slice = 0;
Caml_state->young_trigger = Caml_state->young_alloc_start;
caml_update_young_limit();
CAML_EV_BEGIN(EV_MAJOR);
caml_major_collection_slice (-1);
CAML_EV_END(EV_MAJOR);
}
}
/* Called by young allocations when [Caml_state->young_ptr] reaches
[Caml_state->young_limit]. We may have to either call memprof or
the gc. */
void caml_alloc_small_dispatch (intnat wosize, int flags,
int nallocs, unsigned char* encoded_alloc_lens)
{
intnat whsize = Whsize_wosize (wosize);
/* First, we un-do the allocation performed in [Alloc_small] */
Caml_state->young_ptr += whsize;
while(1) {
/* We might be here because of an async callback / urgent GC
request. Take the opportunity to do what has been requested. */
if (flags & CAML_FROM_CAML)
/* In the case of allocations performed from OCaml, execute
asynchronous callbacks. */
caml_raise_async_if_exception(caml_do_pending_actions_exn (), "minor GC");
else {
caml_check_urgent_gc (Val_unit);
/* In the case of long-running C code that regularly polls with
caml_process_pending_actions, force a query of all callbacks
at every minor collection or major slice. */
caml_something_to_do = 1;
}
/* Now, there might be enough room in the minor heap to do our
allocation. */
if (Caml_state->young_ptr - whsize >= Caml_state->young_trigger)
break;
/* If not, then empty the minor heap, and check again for async
callbacks. */
CAML_EV_COUNTER (EV_C_FORCE_MINOR_ALLOC_SMALL, 1);
caml_gc_dispatch ();
}
/* Re-do the allocation: we now have enough space in the minor heap. */
Caml_state->young_ptr -= whsize;
/* Check if the allocated block has been sampled by memprof. */
if(Caml_state->young_ptr < caml_memprof_young_trigger){
if(flags & CAML_DO_TRACK) {
caml_memprof_track_young(wosize, flags & CAML_FROM_CAML,
nallocs, encoded_alloc_lens);
/* Until the allocation actually takes place, the heap is in an invalid
state (see comments in [caml_memprof_track_young]). Hence, very little
heap operations are allowed before the actual allocation.
Moreover, [Caml_state->young_ptr] should not be modified before the
allocation, because its value has been used as the pointer to
the sampled block.
*/
} else caml_memprof_renew_minor_sample();
}
}
/* Exported for backward compatibility with Lablgtk: do a minor
collection to ensure that the minor heap is empty.
*/
CAMLexport void caml_minor_collection (void)
{
Caml_state->requested_minor_gc = 1;
caml_gc_dispatch ();
}
CAMLexport value caml_check_urgent_gc (value extra_root)
{
if (Caml_state->requested_major_slice || Caml_state->requested_minor_gc){
CAMLparam1 (extra_root);
caml_gc_dispatch();
CAMLdrop;
}
return extra_root;
}
static void realloc_generic_table
(struct generic_table *tbl, asize_t element_size,
ev_gc_counter ev_counter_name,
char *msg_threshold, char *msg_growing, char *msg_error)
{
CAMLassert (tbl->ptr == tbl->limit);
CAMLassert (tbl->limit <= tbl->end);
CAMLassert (tbl->limit >= tbl->threshold);
if (tbl->base == NULL){
alloc_generic_table (tbl, Caml_state->minor_heap_wsz / 8, 256,
element_size);
}else if (tbl->limit == tbl->threshold){
CAML_EV_COUNTER (ev_counter_name, 1);
caml_gc_message (0x08, msg_threshold, 0);
tbl->limit = tbl->end;
caml_request_minor_gc ();
}else{
asize_t sz;
asize_t cur_ptr = tbl->ptr - tbl->base;
CAMLassert (Caml_state->requested_minor_gc);
tbl->size *= 2;
sz = (tbl->size + tbl->reserve) * element_size;
caml_gc_message (0x08, msg_growing, (intnat) sz/1024);
tbl->base = caml_stat_resize_noexc (tbl->base, sz);
if (tbl->base == NULL){
caml_fatal_error ("%s", msg_error);
}
tbl->end = tbl->base + (tbl->size + tbl->reserve) * element_size;
tbl->threshold = tbl->base + tbl->size * element_size;
tbl->ptr = tbl->base + cur_ptr;
tbl->limit = tbl->end;
}
}
void caml_realloc_ref_table (struct caml_ref_table *tbl)
{
realloc_generic_table
((struct generic_table *) tbl, sizeof (value *),
EV_C_REQUEST_MINOR_REALLOC_REF_TABLE,
"ref_table threshold crossed\n",
"Growing ref_table to %" ARCH_INTNAT_PRINTF_FORMAT "dk bytes\n",
"ref_table overflow");
}
void caml_realloc_ephe_ref_table (struct caml_ephe_ref_table *tbl)
{
realloc_generic_table
((struct generic_table *) tbl, sizeof (struct caml_ephe_ref_elt),
EV_C_REQUEST_MINOR_REALLOC_EPHE_REF_TABLE,
"ephe_ref_table threshold crossed\n",
"Growing ephe_ref_table to %" ARCH_INTNAT_PRINTF_FORMAT "dk bytes\n",
"ephe_ref_table overflow");
}
void caml_realloc_custom_table (struct caml_custom_table *tbl)
{
realloc_generic_table
((struct generic_table *) tbl, sizeof (struct caml_custom_elt),
EV_C_REQUEST_MINOR_REALLOC_CUSTOM_TABLE,
"custom_table threshold crossed\n",
"Growing custom_table to %" ARCH_INTNAT_PRINTF_FORMAT "dk bytes\n",
"custom_table overflow");
}