@@ -44,7 +44,7 @@ typedef struct mi_arena_s {
   mi_lock_t abandoned_visit_lock;      // lock is only used when abandoned segments are being visited
   _Atomic(size_t) search_idx;          // optimization to start the search for free blocks
   _Atomic(mi_msecs_t) purge_expire;    // expiration time when blocks should be purged from `blocks_purge`.
-
+
   mi_bitmap_field_t* blocks_dirty;     // are the blocks potentially non-zero?
   mi_bitmap_field_t* blocks_committed; // are the blocks committed? (can be NULL for memory that cannot be decommitted)
   mi_bitmap_field_t* blocks_purge;     // blocks that can be (reset) decommitted. (can be NULL for memory that cannot be (reset) decommitted)
@@ -192,14 +192,9 @@ void* _mi_arena_meta_zalloc(size_t size, mi_memid_t* memid) {
   if (p != NULL) return p;

   // or fall back to the OS
-  p = _mi_os_alloc(size, memid);
+  p = _mi_os_zalloc(size, memid);
   if (p == NULL) return NULL;

-  // zero the OS memory if needed
-  if (!memid->initially_zero) {
-    _mi_memzero_aligned(p, size);
-    memid->initially_zero = true;
-  }
   return p;
 }

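Note on the hunk above: the manual zeroing is folded into the OS layer, so the call site shrinks to a single `_mi_os_zalloc`. A minimal sketch of what such a helper plausibly looks like, assuming it simply hoists the removed call-site logic (the actual mimalloc implementation may differ):

// Hedged sketch of a zero-initializing OS allocation wrapper; NOT necessarily
// the real _mi_os_zalloc. It zeroes only when the OS did not already hand
// back zero-initialized pages (tracked via memid->initially_zero).
static void* os_zalloc_sketch(size_t size, mi_memid_t* memid) {
  void* p = _mi_os_alloc(size, memid);
  if (p == NULL) return NULL;
  if (!memid->initially_zero) {
    _mi_memzero_aligned(p, size);
    memid->initially_zero = true;
  }
  return p;
}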
@@ -270,12 +265,12 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar
   else if (commit) {
     // commit requested, but the range may not be committed as a whole: ensure it is committed now
     memid->initially_committed = true;
+    const size_t commit_size = mi_arena_block_size(needed_bcount);
     bool any_uncommitted;
     size_t already_committed = 0;
     _mi_bitmap_claim_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index, &any_uncommitted, &already_committed);
     if (any_uncommitted) {
       mi_assert_internal(already_committed < needed_bcount);
-      const size_t commit_size = mi_arena_block_size(needed_bcount);
       const size_t stat_commit_size = commit_size - mi_arena_block_size(already_committed);
       bool commit_zero = false;
       if (!_mi_os_commit_ex(p, commit_size, &commit_zero, stat_commit_size)) {
@@ -285,6 +280,10 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar
         if (commit_zero) { memid->initially_zero = true; }
       }
     }
+    else {
+      // all are already committed: signal that we are reusing memory in case it was purged before
+      _mi_os_reuse(p, commit_size);
+    }
   }
   else {
     // no need to commit, but check if already fully committed
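Two coupled changes in this function: `commit_size` is hoisted above the `if (any_uncommitted)` branch so it is in scope for the new `else` branch, and that branch calls `_mi_os_reuse` when the whole range was already committed, telling the OS layer that previously purged memory is about to be touched again. As a rough illustration only: on a POSIX system such a reuse hook could be as small as an `madvise` hint. The name `os_reuse_sketch` and the choice of `MADV_WILLNEED` are assumptions here, not mimalloc's actual `_mi_os_reuse`:

#include <stddef.h>
#include <sys/mman.h>

// Hypothetical sketch of an OS 'reuse' notification (NOT the real _mi_os_reuse):
// hint to the kernel that a possibly-purged range will be touched again soon.
static void os_reuse_sketch(void* p, size_t size) {
#if defined(MADV_WILLNEED)
  madvise(p, size, MADV_WILLNEED);  // best-effort hint; failures can be ignored
#endif
}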
@@ -369,7 +368,7 @@ static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, siz
 static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t* arena_id)
 {
   if (_mi_preloading()) return false;  // use OS only while pre loading
-
+
   const size_t arena_count = mi_atomic_load_acquire(&mi_arena_count);
   if (arena_count > (MI_MAX_ARENAS - 4)) return false;

@@ -411,7 +410,7 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset

   // try to allocate in an arena if the alignment is small enough and the object is not too small (as for heap meta data)
   if (!mi_option_is_enabled(mi_option_disallow_arena_alloc)) {  // is arena allocation allowed?
-    if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN && align_offset == 0)
+    if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN && align_offset == 0)
     {
       void* p = mi_arena_try_alloc(numa_node, size, alignment, commit, allow_large, req_arena_id, memid);
       if (p != NULL) return p;
@@ -491,7 +490,7 @@ static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks)
     // we need to ensure we do not try to reset (as that may be invalid for uncommitted memory).
     mi_assert_internal(already_committed < blocks);
     mi_assert_internal(mi_option_is_enabled(mi_option_purge_decommits));
-    needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */, mi_arena_block_size(already_committed));
+    needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */, mi_arena_block_size(already_committed));
   }

   // clear the purged blocks
@@ -560,7 +559,7 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force)
 {
   // check pre-conditions
   if (arena->memid.is_pinned) return false;
-
+
   // expired yet?
   mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire);
   if (!force && (expire == 0 || expire > now)) return false;
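The context lines show the gating pattern used for delayed purging: an atomic expiration timestamp is checked before any work, so frees trigger at most one purge per delay window. A self-contained sketch of that gate, using hypothetical names (`purge_expire`, `purge_schedule`, `purge_due`) rather than mimalloc's internals:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef int64_t msecs_t;
static _Atomic(msecs_t) purge_expire;  // 0 means no purge is scheduled

// schedule a purge 'delay' ms from now, unless one is already pending
static void purge_schedule(msecs_t now, msecs_t delay) {
  msecs_t expected = 0;
  atomic_compare_exchange_strong(&purge_expire, &expected, now + delay);
}

// should a purge run at time 'now'? (force overrides the delay)
static bool purge_due(msecs_t now, bool force) {
  msecs_t expire = atomic_load_explicit(&purge_expire, memory_order_relaxed);
  if (!force && (expire == 0 || expire > now)) return false;  // not scheduled, or not expired yet
  return true;
}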
@@ -615,7 +614,7 @@ static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force)
   return any_purged;
 }

-static void mi_arenas_try_purge( bool force, bool visit_all )
+static void mi_arenas_try_purge( bool force, bool visit_all )
 {
   if (_mi_preloading() || mi_arena_purge_delay() <= 0) return;  // nothing will be scheduled

@@ -632,7 +631,7 @@ static void mi_arenas_try_purge( bool force, bool visit_all )
   mi_atomic_guard(&purge_guard)
   {
     // increase global expire: at most one purge per delay cycle
-    mi_atomic_storei64_release(&mi_arenas_purge_expire, now + mi_arena_purge_delay());
+    mi_atomic_storei64_release(&mi_arenas_purge_expire, now + mi_arena_purge_delay());
     size_t max_purge_count = (visit_all ? max_arena : 2);
     bool all_visited = true;
     for (size_t i = 0; i < max_arena; i++) {
@@ -951,7 +950,7 @@ void mi_debug_show_arenas(void) mi_attr_noexcept {
   for (size_t i = 0; i < max_arenas; i++) {
     mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]);
     if (arena == NULL) break;
-    _mi_message("arena %zu: %zu blocks of size %zuMiB (in %zu fields) %s\n", i, arena->block_count, MI_ARENA_BLOCK_SIZE / MI_MiB, arena->field_count, (arena->memid.is_pinned ? ", pinned" : ""));
+    _mi_message("arena %zu: %zu blocks of size %zuMiB (in %zu fields) %s\n", i, arena->block_count, (size_t)(MI_ARENA_BLOCK_SIZE / MI_MiB), arena->field_count, (arena->memid.is_pinned ? ", pinned" : ""));
     if (show_inuse) {
       inuse_total += mi_debug_show_bitmap("  ", "inuse blocks", arena->block_count, arena->blocks_inuse, arena->field_count);
     }
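The `(size_t)` cast added here is not cosmetic: `MI_ARENA_BLOCK_SIZE / MI_MiB` need not have type `size_t` on every platform, and passing a mismatched integer type through varargs for a `%zu` conversion is undefined behavior in C. A tiny standalone illustration:

#include <stdio.h>

int main(void) {
  unsigned int mib = 32;          // imagine a macro expanding to an int-sized constant
  // printf("%zu\n", mib);        // wrong: %zu expects size_t; undefined behavior
                                  // where size_t and unsigned int differ in width
  printf("%zu\n", (size_t)mib);   // correct: the cast makes the argument match the format
  return 0;
}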
@@ -1011,17 +1010,17 @@ int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t
   if (pages == 0) return 0;

   // pages per numa node
-  size_t numa_count = (numa_nodes > 0 ? numa_nodes : _mi_os_numa_node_count());
-  if (numa_count <= 0) numa_count = 1;
+  int numa_count = (numa_nodes > 0 && numa_nodes <= INT_MAX ? (int)numa_nodes : _mi_os_numa_node_count());
+  if (numa_count == 0) numa_count = 1;
   const size_t pages_per = pages / numa_count;
   const size_t pages_mod = pages % numa_count;
   const size_t timeout_per = (timeout_msecs == 0 ? 0 : (timeout_msecs / numa_count) + 50);

   // reserve evenly among numa nodes
-  for (size_t numa_node = 0; numa_node < numa_count && pages > 0; numa_node++) {
+  for (int numa_node = 0; numa_node < numa_count && pages > 0; numa_node++) {
     size_t node_pages = pages_per;  // can be 0
-    if (numa_node < pages_mod) node_pages++;
-    int err = mi_reserve_huge_os_pages_at(node_pages, (int)numa_node, timeout_per);
+    if ((size_t)numa_node < pages_mod) node_pages++;
+    int err = mi_reserve_huge_os_pages_at(node_pages, numa_node, timeout_per);
     if (err) return err;
     if (pages < node_pages) {
       pages = 0;
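Besides the signedness fixes (an `int` count with an explicit `INT_MAX` guard on the user-supplied `numa_nodes`, and matching casts in the loop), the distribution logic is unchanged: each node gets `pages / numa_count` pages and the first `pages % numa_count` nodes get one extra. A standalone worked example of that split:

#include <stdio.h>

// Worked example: 10 pages over 4 nodes
// => pages_per = 2, pages_mod = 2 => nodes 0 and 1 get 3 pages; nodes 2 and 3 get 2.
int main(void) {
  size_t pages = 10;
  int numa_count = 4;
  const size_t pages_per = pages / numa_count;
  const size_t pages_mod = pages % numa_count;
  for (int node = 0; node < numa_count; node++) {
    size_t node_pages = pages_per + ((size_t)node < pages_mod ? 1 : 0);
    printf("node %d: %zu pages\n", node, node_pages);
  }
  return 0;
}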