/*
 * Per-thread malloc accounting state.  When the toolchain provides
 * thread-local storage, sweep threads keep a local malloc counter and a
 * pointer to the page they are currently pre-sweeping, so they never
 * contend on the global atomic counters directly.
 */
#ifdef RB_THREAD_LOCAL_SPECIFIER
#define USE_MALLOC_INCREASE_LOCAL 1
static RB_THREAD_LOCAL_SPECIFIER int malloc_increase_local;
/* Page currently being pre-swept by this thread; non-NULL only inside
 * gc_pre_sweep_page().  malloc_increase_commit() checks it to defer
 * malloc_increase decrements into page->pre_freed_malloc_bytes instead
 * of touching the global counters from a sweep thread. */
static RB_THREAD_LOCAL_SPECIFIER struct heap_page *current_sweep_thread_page;
#else
#define USE_MALLOC_INCREASE_LOCAL 0
/* No thread-local storage available: fall back to a plain static so the
 * unconditional references in gc_pre_sweep_page() and
 * malloc_increase_commit() still compile.  NOTE(review): a shared static
 * is only safe if sweeping is single-threaded in this configuration —
 * confirm before relying on parallel sweep without TLS. */
static struct heap_page *current_sweep_thread_page;
#endif
@@ -831,6 +832,7 @@ struct heap_page {
831832 unsigned short pre_deferred_free_slots ;
832833 unsigned short pre_final_slots ;
833834 unsigned short pre_zombie_slots ;
/* New field: malloc bytes whose accounting was deferred while this page
 * was being pre-swept on a sweep thread (see malloc_increase_commit).
 * gc_sweep_step() later subtracts it from the global malloc_increase
 * counters so sweep threads never touch those atomics directly. */
835+ size_t pre_freed_malloc_bytes ;
834836 struct {
835837 unsigned int has_remembered_objects : 1 ;
836838 unsigned int has_uncollectible_wb_unprotected_objects : 1 ;
@@ -1389,6 +1391,8 @@ static int garbage_collect(rb_objspace_t *, unsigned int reason);
13891391
13901392static int gc_start (rb_objspace_t * objspace , unsigned int reason );
13911393static void gc_rest (rb_objspace_t * objspace );
/* Forward declarations for the deferred malloc accounting used during
 * sweeping; both are defined later in this file. */
1394+ static inline void atomic_sub_nounderflow (size_t * var , size_t sub );
1395+ static size_t malloc_increase_local_flush (rb_objspace_t * objspace );
13921396
13931397enum gc_enter_event {
13941398 gc_enter_event_start ,
@@ -4593,6 +4597,8 @@ gc_pre_sweep_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *pa
45934597 GC_ASSERT (page -> heap == heap );
45944598 page -> pre_deferred_free_slots = 0 ;
45954599 page -> pre_zombie_slots = 0 ;
/* Reset this page's deferred-malloc counter, then publish the page in the
 * thread-local pointer: from here until it is cleared below, any
 * malloc_increase_commit() shrink on this thread is accumulated into
 * page->pre_freed_malloc_bytes rather than the global atomics. */
4600+ page -> pre_freed_malloc_bytes = 0 ;
4601+ current_sweep_thread_page = page ;
45964602
45974603 int page_rvalue_count = page -> total_slots * slot_bits ;
45984604 int out_of_range_bits = (NUM_IN_PAGE (p ) + page_rvalue_count ) % BITS_BITLENGTH ;
@@ -4653,6 +4659,9 @@ gc_pre_sweep_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *pa
46534659 }
46544660#endif
46554661
/* Flush the thread-local malloc counter while the page pointer is still
 * set, then close the deferral window.  NOTE(review): any early-return
 * path between the assignment above and this point (not visible in this
 * hunk) would leave current_sweep_thread_page stale — confirm none exist. */
4662+ malloc_increase_local_flush (objspace );
4663+ current_sweep_thread_page = NULL ;
4664+
46564665 psweep_debug (1 , "[sweep] gc_pre_sweep_page(heap:%p page:%p) done, deferred free:%d\n" , heap , page , page -> pre_deferred_free_slots );
46574666}
46584667
@@ -4712,6 +4721,7 @@ clear_pre_sweep_fields(struct heap_page *page)
47124721 page -> pre_empty_slots = 0 ;
47134722 page -> pre_final_slots = 0 ;
47144723 page -> pre_zombie_slots = 0 ;
/* Reset the deferred-malloc counter together with the other pre-sweep
 * fields; callers must consume pre_freed_malloc_bytes before this runs
 * (gc_sweep_step does so just prior to calling this function). */
4724+ page -> pre_freed_malloc_bytes = 0 ;
47154725}
47164726
47174727// add beginning of b to end of a
@@ -5346,6 +5356,12 @@ gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap)
53465356 asan_lock_deferred_freelist (sweep_page );
53475357 asan_lock_freelist (sweep_page );
53485358
/* Apply the malloc bytes whose accounting was deferred while this page
 * was pre-swept on a sweep thread (see malloc_increase_commit).  This
 * must happen before clear_pre_sweep_fields() zeroes the counter. */
5359+ if (sweep_page -> pre_freed_malloc_bytes > 0 ) {
5360+ atomic_sub_nounderflow (& malloc_increase , sweep_page -> pre_freed_malloc_bytes );
5361+ #if RGENGC_ESTIMATE_OLDMALLOC
5362+ atomic_sub_nounderflow (& objspace -> malloc_counters .oldmalloc_increase , sweep_page -> pre_freed_malloc_bytes );
5363+ #endif
5364+ }
53495365 clear_pre_sweep_fields (sweep_page );
53505366 }
53515367
/* Commit a malloc size change to the global accounting.
 * Growth (new_size > old_size) adds the delta atomically and returns the
 * updated total; shrinkage either decrements the globals or — when called
 * on a sweep thread that is currently pre-sweeping a page — defers the
 * decrement into that page's pre_freed_malloc_bytes, to be applied later
 * by gc_sweep_step(). */
@@ -9760,6 +9776,7 @@ static size_t
97609776malloc_increase_commit (rb_objspace_t * objspace , size_t new_size , size_t old_size )
97619777{
97629778 if (new_size > old_size ) {
/* Growth is only expected from mutator threads: sweep threads should
 * only ever shrink (free) here. */
9779+ GC_ASSERT (!is_sweep_thread_p ());
97639780 size_t delta = new_size - old_size ;
97649781 size_t old_val = rbimpl_atomic_size_fetch_add (& malloc_increase , delta , RBIMPL_ATOMIC_RELAXED );
97659782#if RGENGC_ESTIMATE_OLDMALLOC
@@ -9768,10 +9785,16 @@ malloc_increase_commit(rb_objspace_t *objspace, size_t new_size, size_t old_size
97689785 return old_val + delta ;
97699786 }
97709787 else {
9771- atomic_sub_nounderflow (& malloc_increase , old_size - new_size );
/* On a sweep thread mid-pre-sweep, stash the freed bytes on the page:
 * both malloc_increase and (when enabled) oldmalloc_increase are
 * decremented later in gc_sweep_step() from this per-page counter. */
9788+ size_t delta = old_size - new_size ;
9789+ if (current_sweep_thread_page ) {
9790+ current_sweep_thread_page -> pre_freed_malloc_bytes += delta ;
9791+ }
9792+ else {
9793+ atomic_sub_nounderflow (& malloc_increase , delta );
97729794#if RGENGC_ESTIMATE_OLDMALLOC
9773- atomic_sub_nounderflow (& objspace -> malloc_counters .oldmalloc_increase , old_size - new_size );
9795+ atomic_sub_nounderflow (& objspace -> malloc_counters .oldmalloc_increase , delta );
97749796#endif
9797+ }
97759798 return 0 ;
97769799 }
97779800}
0 commit comments