@@ -1238,7 +1238,10 @@ static NOINLINE jl_taggedvalue_t *gc_add_page(jl_gc_pool_t *p) JL_NOTSAFEPOINT
     // Do not pass in `ptls` as argument. This slows down the fast path
     // in pool_alloc significantly
     jl_ptls_t ptls = jl_current_task->ptls;
-    jl_gc_pagemeta_t *pg = jl_gc_alloc_page();
+    jl_gc_pagemeta_t *pg = pop_page_metadata_back(&ptls->page_metadata_lazily_freed);
+    if (pg == NULL) {
+        pg = jl_gc_alloc_page();
+    }
     pg->osize = p->osize;
     pg->thread_n = ptls->tid;
     set_page_metadata(pg);
@@ -1295,20 +1298,8 @@ STATIC_INLINE jl_value_t *jl_gc_pool_alloc_inner(jl_ptls_t ptls, int pool_offset
             assert(pg->osize == p->osize);
             pg->nfree = 0;
             pg->has_young = 1;
-            pg = pop_page_metadata_back(&ptls->page_metadata_lazily_freed);
-            if (pg != NULL) {
-                v = gc_reset_page(ptls, p, pg);
-                pg->osize = p->osize;
-                push_page_metadata_back(&ptls->page_metadata_allocd, pg);
-            }
-            else {
-                v = NULL;
-            }
-        }
-        // Not an else!!
-        if (v == NULL) {
-            v = gc_add_page(p);
         }
+        v = gc_add_page(p);
         next = (jl_taggedvalue_t*)((char*)v + osize);
     }
     p->newpages = next;
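Taken together, the two hunks above move the reuse of lazily freed pages out of the pool-allocation fast path: gc_add_page now tries the thread-local page_metadata_lazily_freed list first and only calls jl_gc_alloc_page when that list is empty, so jl_gc_pool_alloc_inner can simply call gc_add_page. A minimal sketch of that reuse-before-allocate pattern follows; it is an illustration only, and node_t, pop_page, alloc_fresh_page, and add_page are hypothetical stand-ins rather than the actual definitions of pop_page_metadata_back, jl_gc_alloc_page, or gc_add_page from gc.c.

#include <stdlib.h>

/* Hypothetical stand-in for a page-metadata record kept on a
 * singly linked, per-thread list of lazily freed pages. */
typedef struct node {
    struct node *next;
    int osize;
} node_t;

/* Pop one entry off the list, or return NULL if the list is empty. */
node_t *pop_page(node_t **list)
{
    node_t *pg = *list;
    if (pg != NULL)
        *list = pg->next;
    return pg;
}

/* Stand-in for mapping a brand-new page (the slow path). */
node_t *alloc_fresh_page(void)
{
    return (node_t *)calloc(1, sizeof(node_t));
}

/* The pattern from the diff: prefer a page the thread already owns
 * and has lazily freed; only fall back to a fresh allocation. */
node_t *add_page(node_t **lazily_freed, int osize)
{
    node_t *pg = pop_page(lazily_freed);
    if (pg == NULL)
        pg = alloc_fresh_page();
    pg->osize = osize;
    return pg;
}

The point of the change, as the freed_lazily path in the sweep hunks below suggests, is that a page emptied during sweeping stays with its thread and becomes the first candidate the next time the pool needs a page, instead of going back through the page allocator.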
@@ -1353,15 +1344,19 @@ static jl_taggedvalue_t **gc_sweep_page(jl_gc_pool_t *p, jl_gc_pagemeta_t **allo
     char *data = pg->data;
     jl_taggedvalue_t *v = (jl_taggedvalue_t*)(data + GC_PAGE_OFFSET);
     char *lim = data + GC_PAGE_SZ - osize;
+    char *lim_newpages = data + GC_PAGE_SZ;
+    if (gc_page_data((char*)p->newpages - 1) == data) {
+        lim_newpages = (char*)p->newpages;
+    }
     size_t old_nfree = pg->nfree;
     size_t nfree;
 
-    int reuse_page = 1;
+    int re_use_page = 1;
     int freed_lazily = 0;
     int freedall = 1;
     int pg_skpd = 1;
     if (!pg->has_marked) {
-        reuse_page = 0;
+        re_use_page = 0;
 #ifdef _P64 // TODO: re-enable on `_P32`?
         // lazy version: (empty) if the whole page was already unused, free it (return it to the pool)
         // eager version: (freedall) free page as soon as possible
@@ -1402,7 +1397,9 @@ static jl_taggedvalue_t **gc_sweep_page(jl_gc_pool_t *p, jl_gc_pagemeta_t **allo
     jl_taggedvalue_t **pfl_begin = NULL;
     while ((char*)v <= lim) {
         int bits = v->bits.gc;
-        if (!gc_marked(bits)) {
+        // if an object is past `lim_newpages` then we can guarantee it's garbage
+        if (!gc_marked(bits) || (char*)v >= lim_newpages) {
+            v->bits.gc = GC_CLEAN;
             *pfl = v;
             pfl = &v->next;
             pfl_begin = (pfl_begin != NULL) ? pfl_begin : pfl;
@@ -1439,7 +1436,7 @@ static jl_taggedvalue_t **gc_sweep_page(jl_gc_pool_t *p, jl_gc_pagemeta_t **allo
     nfree = pg->nfree;
 
 done:
-    if (reuse_page) {
+    if (re_use_page) {
         push_page_metadata_back(allocd, pg);
     }
     else if (freed_lazily) {
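The lim_newpages bound added to gc_sweep_page above can be read as follows: if the page being swept is the one the pool's bump pointer p->newpages currently sits in, every slot at or past that pointer was never handed out, so it is garbage regardless of its mark bits. A stripped-down sweep loop illustrating that boundary is sketched below; PAGE_SZ, page_data_of, cell_t, and sweep_page are hypothetical stand-ins for GC_PAGE_SZ, gc_page_data, jl_taggedvalue_t, and gc_sweep_page, and the real function does far more bookkeeping (nfree accounting, page skipping, lazy freeing).

#include <stddef.h>
#include <stdint.h>

#define PAGE_SZ 16384  /* hypothetical page size standing in for GC_PAGE_SZ */

/* Hypothetical object cell: a mark flag plus a freelist link. */
typedef struct cell {
    struct cell *next;
    int marked;
} cell_t;

/* Stand-in for gc_page_data(): round a pointer down to its page base. */
char *page_data_of(char *p)
{
    return (char *)((uintptr_t)p & ~(uintptr_t)(PAGE_SZ - 1));
}

/* Sweep one page's cells onto a freelist. `newpages` is the pool's
 * current bump pointer; anything at or past it on this page was never
 * allocated and is therefore garbage no matter what its mark flag says. */
void sweep_page(char *data, size_t osize, char *newpages, cell_t **freelist)
{
    char *lim = data + PAGE_SZ - osize;
    char *lim_newpages = data + PAGE_SZ;
    if (page_data_of(newpages - 1) == data)
        lim_newpages = newpages;
    /* simplified: starts at the page base and pushes onto the freelist
     * head; the real loop starts at data + GC_PAGE_OFFSET and threads
     * cells in address order */
    for (char *p = data; p <= lim; p += osize) {
        cell_t *v = (cell_t *)p;
        if (!v->marked || p >= lim_newpages) {
            v->marked = 0;       /* analogous to v->bits.gc = GC_CLEAN */
            v->next = *freelist;
            *freelist = v;
        }
    }
}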