@@ -11431,6 +11431,7 @@ void gc_heap::init_heap_segment (heap_segment* seg, gc_heap* hp
heap_segment_plan_allocated (seg) = heap_segment_mem (seg);
heap_segment_allocated (seg) = heap_segment_mem (seg);
heap_segment_saved_allocated (seg) = heap_segment_mem (seg);
+ heap_segment_decommit_target (seg) = heap_segment_reserved (seg);
#ifdef BACKGROUND_GC
heap_segment_background_allocated (seg) = 0;
heap_segment_saved_bg_allocated (seg) = 0;
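Note: each new segment starts with its decommit target at heap_segment_reserved, i.e. "nothing scheduled for decommit"; decommit_ephemeral_segment_pages (further down) lowers the target on gen0/gen1 tail regions when a generation looks over-committed.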
@@ -11546,6 +11547,7 @@ void gc_heap::decommit_heap_segment_pages (heap_segment* seg,
{
if (use_large_pages_p)
return;
+
uint8_t* page_start = align_on_page (heap_segment_allocated(seg));
assert (heap_segment_committed (seg) >= page_start);
@@ -11561,12 +11563,6 @@ void gc_heap::decommit_heap_segment_pages (heap_segment* seg,
size_t gc_heap::decommit_heap_segment_pages_worker (heap_segment* seg,
uint8_t* new_committed)
{
- #ifdef USE_REGIONS
- if (!dt_high_memory_load_p())
- {
- return 0;
- }
- #endif
assert (!use_large_pages_p);
uint8_t* page_start = align_on_page (new_committed);
ptrdiff_t size = heap_segment_committed (seg) - page_start;
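Note: for regions, the worker previously refused to decommit unless memory load was high; with this change it always honors the request, and the policy of whether and how much to decommit moves to its callers (decommit_ephemeral_segment_pages and decommit_step below).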
@@ -12351,8 +12347,7 @@ void gc_heap::distribute_free_regions()
heap_budget_in_region_units[i][large_free_region] = 0;
for (int gen = soh_gen0; gen < total_generation_count; gen++)
{
- ptrdiff_t budget_gen = hp->estimate_gen_growth (gen);
- assert (budget_gen >= 0);
+ ptrdiff_t budget_gen = max (hp->estimate_gen_growth (gen), 0);
int kind = gen >= loh_generation;
size_t budget_gen_in_region_units = (budget_gen + (region_size[kind] - 1)) / region_size[kind];
dprintf (REGIONS_LOG, ("h%2d gen %d has an estimated growth of %Id bytes (%Id regions)", i, gen, budget_gen, budget_gen_in_region_units));
@@ -12499,7 +12494,6 @@ void gc_heap::distribute_free_regions()
}

#ifdef MULTIPLE_HEAPS
- gradual_decommit_in_progress_p = FALSE;
for (int kind = basic_free_region; kind < count_free_region_kinds; kind++)
{
if (global_regions_to_decommit[kind].get_num_free_regions() != 0)
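Note: gradual_decommit_in_progress_p is no longer cleared here, presumably because decommit_ephemeral_segment_pages (below) can now also set the flag, and unconditionally resetting it in distribute_free_regions would cancel that pending gradual decommit.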
@@ -22142,7 +22136,7 @@ void gc_heap::garbage_collect (int n)
}

descr_generations ("BEGIN");
- #ifdef TRACE_GC
+ #if defined(TRACE_GC) && defined(USE_REGIONS)
if (heap_number == 0)
{
#ifdef MULTIPLE_HEAPS
@@ -22166,7 +22160,7 @@ void gc_heap::garbage_collect (int n)
}
}
}
- #endif // TRACE_GC
+ #endif // TRACE_GC && USE_REGIONS

#ifdef VERIFY_HEAP
if ((GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) &&
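Note: the trace block bracketed by these two hunks apparently relies on region-only state, so its guard now requires both TRACE_GC and USE_REGIONS, and the closing comment is updated to match.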
@@ -30221,7 +30215,7 @@ heap_segment* gc_heap::find_first_valid_region (heap_segment* region, bool compa
set_region_plan_gen_num (current_region, plan_gen_num);
}

- if (gen_num != 0)
+ if (gen_num >= soh_gen2)
{
dprintf (REGIONS_LOG, (" gen%d decommit end of region %Ix(%Ix)",
gen_num, current_region, heap_segment_mem (current_region)));
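Note: eager decommit at the end of a region now applies only to gen2 and higher; gen0/gen1 tail regions are instead trimmed gradually through the decommit_ephemeral_segment_pages / decommit_step path below.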
@@ -39562,7 +39556,7 @@ ptrdiff_t gc_heap::estimate_gen_growth (int gen_number)
gen_number, heap_number, budget_gen, new_allocation_gen, free_list_space_gen));
#endif //USE_REGIONS

- return max(0, budget_gen);
+ return budget_gen;
}

void gc_heap::decommit_ephemeral_segment_pages()
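Note: estimate_gen_growth now returns the raw, possibly negative estimate; a negative result means free list space already exceeds the expected allocation, i.e. the generation could give memory back. Callers that only care about growth clamp with max (see distribute_free_regions above and the segment path below), while decommit_ephemeral_segment_pages uses the negative part as the amount eligible for decommit.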
@@ -39573,14 +39567,71 @@ void gc_heap::decommit_ephemeral_segment_pages()
}

#if defined(MULTIPLE_HEAPS) && defined(USE_REGIONS)
- // for regions, this is done at the regions level
- return;
+ for (int gen_number = soh_gen0; gen_number <= soh_gen1; gen_number++)
+ {
+ generation *gen = generation_of (gen_number);
+ heap_segment* tail_region = generation_tail_region (gen);
+ uint8_t* previous_decommit_target = heap_segment_decommit_target (tail_region);
+
+ // reset the decommit targets to make sure we don't decommit inadvertently
+ for (heap_segment* region = generation_start_segment_rw (gen); region != nullptr; region = heap_segment_next (region))
+ {
+ heap_segment_decommit_target (region) = heap_segment_reserved (region);
+ }
+
+ ptrdiff_t budget_gen = estimate_gen_growth (gen_number) + loh_size_threshold;
+
+ if (budget_gen >= 0)
+ {
+ // we need more than the regions we have - nothing to decommit
+ continue;
+ }
+
+ // we may have too much committed - let's see if we can decommit in the tail region
+ ptrdiff_t tail_region_size = heap_segment_reserved (tail_region) - heap_segment_mem (tail_region);
+ ptrdiff_t unneeded_tail_size = min (-budget_gen, tail_region_size);
+ uint8_t *decommit_target = heap_segment_reserved (tail_region) - unneeded_tail_size;
+ decommit_target = max (decommit_target, heap_segment_allocated (tail_region));
+
+ if (decommit_target < previous_decommit_target)
+ {
+ // we used to have a higher target - do exponential smoothing by computing
+ // essentially decommit_target = 1/3*decommit_target + 2/3*previous_decommit_target
+ // computation below is slightly different to avoid overflow
+ ptrdiff_t target_decrease = previous_decommit_target - decommit_target;
+ decommit_target += target_decrease * 2 / 3;
+ }
+
+ //#define STRESS_DECOMMIT 1
+ #ifdef STRESS_DECOMMIT
+ // our decommit logic should work for a random decommit target within tail_region - make sure it does
+ decommit_target = heap_segment_mem (tail_region) + gc_rand::get_rand (heap_segment_reserved (tail_region) - heap_segment_mem (tail_region));
+ #endif //STRESS_DECOMMIT
+
+ heap_segment_decommit_target (tail_region) = decommit_target;
+
+ if (decommit_target < heap_segment_committed (tail_region))
+ {
+ gradual_decommit_in_progress_p = TRUE;
+
+ dprintf (1, ("h%2d gen %d reduce_commit by %IdkB",
+ heap_number,
+ gen_number,
+ (heap_segment_committed (tail_region) - decommit_target)/1024));
+ }
+ dprintf(3, ("h%2d gen %d allocated: %IdkB committed: %IdkB target: %IdkB",
+ heap_number,
+ gen_number,
+ (heap_segment_allocated (tail_region) - heap_segment_mem (tail_region))/1024,
+ (heap_segment_committed (tail_region) - heap_segment_mem (tail_region))/1024,
+ (decommit_target - heap_segment_mem (tail_region))/1024));
+ }
#else //MULTIPLE_HEAPS && USE_REGIONS

dynamic_data* dd0 = dynamic_data_of (0);

ptrdiff_t desired_allocation = dd_new_allocation (dd0) +
- estimate_gen_growth (soh_gen1) +
+ max (estimate_gen_growth (soh_gen1), 0) +
loh_size_threshold;

size_t slack_space =
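Note: the smoothing step above damps oscillation - when the newly computed target is below the previous one, the target only moves a third of the way down per GC. A minimal standalone sketch of just that computation (names and values are illustrative, not the GC's own):

    #include <cstdint>
    #include <cstdio>

    // decommit_target = 1/3*new + 2/3*previous, but written against the (small)
    // decrease rather than the absolute addresses, which could overflow if scaled.
    static uintptr_t smooth_decommit_target (uintptr_t new_target, uintptr_t previous_target)
    {
        if (new_target < previous_target)
        {
            uintptr_t target_decrease = previous_target - new_target;
            new_target += target_decrease * 2 / 3;
        }
        return new_target;
    }

    int main ()
    {
        // previous target 90 (think MB), new estimate would allow 30: move only to 70 this GC
        printf ("%ju\n", (uintmax_t)smooth_decommit_target (30, 90)); // prints 70
        return 0;
    }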
@@ -39687,7 +39738,11 @@ bool gc_heap::decommit_step ()
}
}
}
- #else //USE_REGIONS
+ if (use_large_pages_p)
+ {
+ return (decommit_size != 0);
+ }
+ #endif //USE_REGIONS
#ifdef MULTIPLE_HEAPS
// should never get here for large pages because decommit_ephemeral_segment_pages
// will not do anything if use_large_pages_p is true
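Note: changing #else to #endif lets the per-heap stepping below run for regions as well; the new early return keeps large-page configurations out of it, since committed large-page memory is never decommitted.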
@@ -39699,46 +39754,93 @@ bool gc_heap::decommit_step ()
decommit_size += hp->decommit_ephemeral_segment_pages_step ();
}
#endif //MULTIPLE_HEAPS
- #endif //USE_REGIONS
return (decommit_size != 0);
}

#ifdef MULTIPLE_HEAPS
// return the decommitted size
- #ifndef USE_REGIONS
size_t gc_heap::decommit_ephemeral_segment_pages_step ()
{
- // we rely on desired allocation not being changed outside of GC
- assert (ephemeral_heap_segment->saved_desired_allocation == dd_desired_allocation (dynamic_data_of (0)));
-
- uint8_t* decommit_target = heap_segment_decommit_target (ephemeral_heap_segment);
- size_t EXTRA_SPACE = 2 * OS_PAGE_SIZE;
- decommit_target += EXTRA_SPACE;
- uint8_t* committed = heap_segment_committed (ephemeral_heap_segment);
- if (decommit_target < committed)
+ size_t size = 0;
+ #ifdef USE_REGIONS
+ for (int gen_number = soh_gen0; gen_number <= soh_gen1; gen_number++)
+ {
+ generation* gen = generation_of (gen_number);
+ heap_segment* seg = generation_tail_region (gen);
+ #else // USE_REGIONS
{
- // we rely on other threads not messing with committed if we are about to trim it down
- assert (ephemeral_heap_segment->saved_committed == heap_segment_committed (ephemeral_heap_segment));
+ heap_segment* seg = ephemeral_heap_segment;
+ // we rely on desired allocation not being changed outside of GC
+ assert (seg->saved_desired_allocation == dd_desired_allocation (dynamic_data_of (0)));
+ #endif // USE_REGIONS

- // how much would we need to decommit to get to decommit_target in one step?
- size_t full_decommit_size = (committed - decommit_target);
+ uint8_t* decommit_target = heap_segment_decommit_target (seg);
+ size_t EXTRA_SPACE = 2 * OS_PAGE_SIZE;
+ decommit_target += EXTRA_SPACE;
+ #ifdef STRESS_DECOMMIT
+ // our decommit logic should work for a random decommit target within tail_region - make sure it does
+ // tail region now may be different from what decommit_ephemeral_segment_pages saw
+ decommit_target = heap_segment_mem (seg) + gc_rand::get_rand (heap_segment_reserved (seg) - heap_segment_mem (seg));
+ #endif //STRESS_DECOMMIT
+ uint8_t* committed = heap_segment_committed (seg);
+ uint8_t* allocated = (seg == ephemeral_heap_segment) ? alloc_allocated : heap_segment_allocated (seg);
+ if ((allocated <= decommit_target) && (decommit_target < committed))
+ {
+ #ifdef USE_REGIONS
+ if (gen_number == soh_gen0)
+ {
+ // for gen 0, sync with the allocator by taking the more space lock
+ // and re-read the variables
+ //
+ // we call try_enter_spin_lock here instead of enter_spin_lock because
+ // calling enter_spin_lock from this thread can deadlock at the start
+ // of a GC - if gc_started is already true, we call wait_for_gc_done(),
+ // but we are on GC thread 0, so GC cannot make progress
+ if (!try_enter_spin_lock (&more_space_lock_soh))
+ {
+ continue;
+ }
+ add_saved_spinlock_info (false, me_acquire, mt_decommit_step);
+ seg = generation_tail_region (gen);
+ #ifndef STRESS_DECOMMIT
+ decommit_target = heap_segment_decommit_target (seg);
+ decommit_target += EXTRA_SPACE;
+ #endif
+ committed = heap_segment_committed (seg);
+ allocated = (seg == ephemeral_heap_segment) ? alloc_allocated : heap_segment_allocated (seg);
+ }
+ if ((allocated <= decommit_target) && (decommit_target < committed))
+ #else // USE_REGIONS
+ // we rely on other threads not messing with committed if we are about to trim it down
+ assert (seg->saved_committed == heap_segment_committed (seg));
+ #endif // USE_REGIONS
+ {
+ // how much would we need to decommit to get to decommit_target in one step?
+ size_t full_decommit_size = (committed - decommit_target);

- // don't do more than max_decommit_step_size per step
- size_t decommit_size = min (max_decommit_step_size, full_decommit_size);
+ // don't do more than max_decommit_step_size per step
+ size_t decommit_size = min (max_decommit_step_size, full_decommit_size);

- // figure out where the new committed should be
- uint8_t* new_committed = (committed - decommit_size);
- size_t size = decommit_heap_segment_pages_worker (ephemeral_heap_segment, new_committed);
+ // figure out where the new committed should be
+ uint8_t* new_committed = (committed - decommit_size);
+ size += decommit_heap_segment_pages_worker (seg, new_committed);

#ifdef _DEBUG
- ephemeral_heap_segment->saved_committed = committed - size;
+ seg->saved_committed = committed - size;
#endif // _DEBUG
-
- return size;
+ }
+ #ifdef USE_REGIONS
+ if (gen_number == soh_gen0)
+ {
+ // for gen 0, we took the more space lock - leave it again
+ add_saved_spinlock_info (false, me_release, mt_decommit_step);
+ leave_spin_lock (&more_space_lock_soh);
+ }
+ #endif // USE_REGIONS
+ }
}
- return 0;
+ return size;
}
- #endif //!USE_REGIONS
#endif //MULTIPLE_HEAPS

//This is meant to be called by decide_on_compacting.
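Note: the gen0 path above uses try_enter_spin_lock rather than enter_spin_lock - this runs on GC thread 0, so blocking here while a GC is starting could deadlock, and skipping a step is harmless because it will be retried. A minimal sketch of that back-off pattern with standard C++ primitives (names hypothetical, not the GC's own):

    #include <cstdio>
    #include <mutex>

    std::mutex more_space_lock; // stand-in for the GC's more_space_lock_soh

    // One gradual-decommit step for gen0 (hypothetical skeleton): back off
    // instead of blocking, because waiting here while a GC is starting
    // could deadlock this thread against the GC itself.
    static size_t decommit_gen0_step ()
    {
        std::unique_lock<std::mutex> lock (more_space_lock, std::try_to_lock);
        if (!lock.owns_lock ())
            return 0; // the allocator or a starting GC holds the lock - retry next tick

        // ... under the lock: re-read the tail region's committed/allocated
        // bounds, then decommit at most one step's worth of pages ...
        return 0;
    }

    int main ()
    {
        printf ("decommitted %zu bytes\n", decommit_gen0_step ());
        return 0;
    }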