private static bool mi_page_is_valid_init(mi_page_t *page)
{
    mi_assert_internal((MI_DEBUG > 1) && (MI_DEBUG >= 3));
    mi_assert_internal((MI_DEBUG > 1) && (page->xblock_size > 0));
    mi_assert_internal((MI_DEBUG > 1) && (page->used <= page->capacity));
    mi_assert_internal((MI_DEBUG > 1) && (page->capacity <= page->reserved));

    nuint bsize = mi_page_block_size(page);
    mi_segment_t *segment = _mi_page_segment(page);
    byte *start = _mi_page_start(segment, page, out _);

    mi_assert_internal((MI_DEBUG > 1) && (start == _mi_segment_page_start(segment, page, bsize, out _, out _)));
    // mi_assert_internal((MI_DEBUG > 1) && ((start + page->capacity * page->block_size) == page->top));

    mi_assert_internal((MI_DEBUG > 1) && mi_page_list_is_valid(page, page->free));
    mi_assert_internal((MI_DEBUG > 1) && mi_page_list_is_valid(page, page->local_free));

    mi_block_t *tfree = mi_page_thread_free(page);
    mi_assert_internal((MI_DEBUG > 1) && mi_page_list_is_valid(page, tfree));

    // nuint tfree_count = mi_page_list_count(page, tfree);
    // mi_assert_internal((MI_DEBUG > 1) && (tfree_count <= page->thread_freed + 1));

    nuint free_count = mi_page_list_count(page, page->free) + mi_page_list_count(page, page->local_free);
    mi_assert_internal((MI_DEBUG > 1) && (page->used + free_count == page->capacity));

    return(true);
}
private static partial bool _mi_free_delayed_block(mi_block_t *block)
{
    // get segment and page
    mi_segment_t *segment = _mi_ptr_segment(block);
    mi_assert_internal((MI_DEBUG > 1) && (_mi_ptr_cookie(segment) == segment->cookie));
    mi_assert_internal((MI_DEBUG > 1) && (_mi_thread_id() == segment->thread_id));

    mi_page_t *page = _mi_segment_page_of(segment, block);

    // Clear the no-delayed flag so delayed freeing is used again for this page.
    // This must be done before collecting the free lists on this page -- otherwise
    // some blocks may end up in the page `thread_free` list with no blocks in the
    // heap `thread_delayed_free` list, which may cause the page to never be freed!
    // (it would only be freed if we happen to scan it in `mi_page_queue_find_free_ex`)
    _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, override_never: false);

    // collect all other non-local frees to ensure an up-to-date `used` count
    _mi_page_free_collect(page, false);

    // and free the block (possibly freeing the page as well since `used` is updated)
    _mi_free_block(page, true, block);

    return(true);
}
private static partial bool _mi_page_is_valid(mi_page_t *page)
{
    mi_assert_internal((MI_DEBUG > 1) && (MI_DEBUG >= 3));
    mi_assert_internal((MI_DEBUG > 1) && mi_page_is_valid_init(page));

    if (MI_SECURE != 0)
    {
        mi_assert_internal((MI_DEBUG > 1) && (page->keys.e0 != 0));
    }

    if (mi_page_heap(page) != null)
    {
        mi_segment_t *segment = _mi_page_segment(page);
        mi_assert_internal((MI_DEBUG > 1) && ((segment->thread_id == mi_page_heap(page)->thread_id) || (segment->thread_id == 0)));

        if (segment->page_kind != MI_PAGE_HUGE)
        {
            mi_page_queue_t *pq = mi_page_queue_of(page);

            mi_assert_internal((MI_DEBUG > 1) && mi_page_queue_contains(pq, page));
            mi_assert_internal((MI_DEBUG > 1) && ((pq->block_size == mi_page_block_size(page)) || (mi_page_block_size(page) > MI_LARGE_OBJ_SIZE_MAX) || mi_page_is_in_full(page)));
            mi_assert_internal((MI_DEBUG > 1) && mi_heap_contains_queue(mi_page_heap(page), pq));
        }
    }

    return(true);
}
private static nuint _mi_usable_size([NativeTypeName("const void*")] void *p, [NativeTypeName("const char*")] string msg)
{
    mi_segment_t *segment = mi_checked_ptr_segment(p, msg);

    if (segment == null)
    {
        return(0);
    }

    mi_page_t *page = _mi_segment_page_of(segment, p);
    mi_block_t *block = (mi_block_t *)p;

    if (mi_unlikely(mi_page_has_aligned(page)))
    {
        block = _mi_page_ptr_unalign(segment, page, p);
        nuint size = mi_page_usable_size_of(page, block);

        nint adjust = (nint)((nuint)p - (nuint)block);
        mi_assert_internal((MI_DEBUG > 1) && (adjust >= 0) && ((nuint)adjust <= size));

        return(size - (nuint)adjust);
    }
    else
    {
        return(mi_page_usable_size_of(page, block));
    }
}
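// Worked example for the aligned-block branch in `_mi_usable_size` above (the numbers are
// illustrative, not taken from the port): suppose the block containing `p` starts at 0x100F0
// and `mi_page_usable_size_of` reports 80 usable bytes for it. For an interior pointer
// p = 0x10100 the adjustment is 0x10100 - 0x100F0 = 16, so the caller is told it owns
// 80 - 16 = 64 bytes, i.e. only the bytes from `p` to the end of the block.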
// Free a block
public static partial void mi_free(void *p)
{
    mi_segment_t *segment = mi_checked_ptr_segment(p, "mi_free");

    if (mi_unlikely(segment == null))
    {
        return;
    }

    nuint tid = _mi_thread_id();
    mi_page_t *page = _mi_segment_page_of(segment, p);
    mi_block_t *block = (mi_block_t *)p;

    if (MI_STAT > 1)
    {
        mi_heap_t *heap = (mi_heap_t *)mi_heap_get_default();
        nuint bsize = mi_page_usable_block_size(page);

        mi_stat_decrease(ref heap->tld->stats.malloc, bsize);

        if (bsize <= MI_LARGE_OBJ_SIZE_MAX)
        {
            // huge page stats are accounted for in `_mi_page_retire`
            mi_stat_decrease(ref (&heap->tld->stats.normal.e0)[_mi_bin(bsize)], 1);
        }
    }

    if (mi_likely((tid == segment->thread_id) && (page->flags.full_aligned == 0)))
    {
        // the thread id matches and it is not a full page, nor has aligned blocks
        // local, and not full or aligned

        if (mi_unlikely(mi_check_is_double_free(page, block)))
        {
            return;
        }

        mi_check_padding(page, block);

        if (MI_DEBUG != 0)
        {
            _ = memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
        }

        mi_block_set_next(page, block, page->local_free);
        page->local_free = block;

        if (mi_unlikely(--page->used == 0))
        {
            // using this expression generates better code than: page->used--; if (mi_page_all_free(page))
            _mi_page_retire(page);
        }
    }
    else
    {
        // non-local, aligned blocks, or a full page; use the more generic path
        // note: recalc page in generic to improve code generation
        mi_free_generic(segment, tid == segment->thread_id, p);
    }
}
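// Illustrative usage sketch (not part of the port): a caller allocates a block and returns it
// with `mi_free`. This assumes the standard mimalloc entry point `mi_malloc` exposed elsewhere
// in this port; when the freeing thread owns the page and the page is neither full nor holds
// aligned blocks, the fast local-free path above is taken, otherwise `mi_free_generic` runs.
private static void mi_free_usage_sketch()
{
    // hypothetical call site; 64 is just an example size
    void *p = mi_malloc(64);

    if (p != null)
    {
        // ... use the 64 bytes ...
        mi_free(p);
    }
}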
// Get the segment data belonging to a pointer.
// This is just a single `and` in assembly, but in debug mode (and in secure mode) it
// additionally checks whether this was a valid pointer.
private static mi_segment_t *mi_checked_ptr_segment([NativeTypeName("const void*")] void *p, [NativeTypeName("const char*")] string msg)
{
    if ((MI_DEBUG > 0) && mi_unlikely(((nuint)p & (MI_INTPTR_SIZE - 1)) != 0))
    {
        _mi_error_message(EINVAL, "{0}: invalid (unaligned) pointer: {1:X}\n", msg, (nuint)p);
        return(null);
    }

    mi_segment_t *segment = _mi_ptr_segment(p);

    if (mi_unlikely(segment == null))
    {
        // this also checks for (p == null)
        return(null);
    }

    if ((MI_DEBUG > 0) && mi_unlikely(!mi_is_in_heap_region(p)))
    {
        _mi_warning_message("{0}: pointer might not point to a valid heap region: {1:X}\n(this may still be a valid very large allocation (over 64MiB))\n", msg, (nuint)p);

        if (mi_likely(_mi_ptr_cookie(segment) == segment->cookie))
        {
            _mi_warning_message("(yes, the previous pointer {0:X} was valid after all)\n", (nuint)p);
        }
    }

    if (((MI_DEBUG > 0) || (MI_SECURE >= 4)) && mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie))
    {
        _mi_error_message(EINVAL, "pointer does not point to a valid heap space: {0:X}\n", (nuint)p);
    }

    return(segment);
}
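// Minimal sketch (assumption, not part of this file) of the "single `and`" mentioned above:
// in upstream mimalloc, `_mi_ptr_segment` rounds the pointer down to its segment boundary,
// relying on segments being aligned to the power-of-two MI_SEGMENT_SIZE. The names below
// (mi_ptr_segment_sketch, MI_SEGMENT_SIZE) are assumed from upstream; the real lookup in
// this port lives in `_mi_ptr_segment` and additionally handles a null pointer.
private static mi_segment_t *mi_ptr_segment_sketch(void *p)
{
    // mask off the low bits: one `and` yields the start of the enclosing segment
    return (mi_segment_t *)((nuint)p & ~((nuint)MI_SEGMENT_SIZE - 1));
}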
private static void mi_free_generic([NativeTypeName("const mi_segment_t*")] mi_segment_t *segment, bool local, void *p)
{
    mi_page_t *page = _mi_segment_page_of(segment, p);
    mi_block_t *block = mi_page_has_aligned(page) ? _mi_page_ptr_unalign(segment, page, p) : (mi_block_t *)p;
    _mi_free_block(page, local, block);
}
// Adjust a block that was allocated aligned, to the actual start of the block in the page.
private static partial mi_block_t *_mi_page_ptr_unalign(mi_segment_t *segment, mi_page_t *page, void *p)
{
    mi_assert_internal((MI_DEBUG > 1) && (page != null) && (p != null));

    nuint diff = (nuint)p - (nuint)_mi_page_start(segment, page, out _);
    nuint adjust = diff % mi_page_block_size(page);

    return((mi_block_t *)((nuint)p - adjust));
}
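// Worked example for `_mi_page_ptr_unalign` above (illustrative numbers, not from the port):
// assume the page starts at 0x10000 and its block size is 80. For an interior pointer
// p = 0x10130, diff = 0x130 = 304 and adjust = 304 % 80 = 64, so the returned block start
// is 0x10130 - 64 = 0x100F0, i.e. the fourth block (0x10000 + 3 * 80).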
private static bool mi_heap_page_is_valid(mi_heap_t *heap, mi_page_queue_t *pq, mi_page_t *page, void *arg1, void *arg2)
{
    mi_assert_internal((MI_DEBUG > 1) && (MI_DEBUG >= 2));
    mi_assert_internal((MI_DEBUG > 1) && (mi_page_heap(page) == heap));

    mi_segment_t *segment = _mi_page_segment(page);
    mi_assert_internal((MI_DEBUG > 1) && (segment->thread_id == heap->thread_id));

    mi_assert_expensive((MI_DEBUG > 2) && _mi_page_is_valid(page));

    return(true);
}
private static bool mi_heap_page_check_owned(mi_heap_t *heap, mi_page_queue_t *pq, mi_page_t *page, void *p, void *vfound)
{
    bool *found = (bool *)vfound;

    mi_segment_t *segment = _mi_page_segment(page);
    void *start = _mi_page_start(segment, page, out _);
    void *end = (byte *)start + (page->capacity * mi_page_block_size(page));

    *found = (p >= start) && (p < end);

    // continue if not found
    return(!*found);
}
/* -----------------------------------------------------------
 * Analysis
 * ----------------------------------------------------------- */

// static since it is not thread safe to access heaps from other threads.
private static mi_heap_t *mi_heap_of_block([NativeTypeName("const void*")] void *p)
{
    if (p == null)
    {
        return(null);
    }

    mi_segment_t *segment = _mi_ptr_segment(p);
    bool valid = _mi_ptr_cookie(segment) == segment->cookie;

    mi_assert_internal((MI_DEBUG > 1) && valid);

    if (mi_unlikely(!valid))
    {
        return(null);
    }

    return(mi_page_heap(_mi_segment_page_of(segment, p)));
}
private static void _mi_free_block_mt(mi_page_t *page, mi_block_t *block)
{
#pragma warning disable CS0420
    // The padding check may access the non-thread-owned page for the key values.
    // that is safe as these are constant and the page won't be freed (as the block is not freed yet).
    mi_check_padding(page, block);

    // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection
    mi_padding_shrink(page, block, SizeOf<mi_block_t>());

    if (MI_DEBUG != 0)
    {
        _ = memset(block, MI_DEBUG_FREED, mi_usable_size(block));
    }

    // huge page segments are always abandoned and can be freed immediately
    mi_segment_t *segment = _mi_page_segment(page);

    if (segment->page_kind == MI_PAGE_HUGE)
    {
        _mi_segment_huge_page_free(segment, page, block);
        return;
    }

    // Try to put the block on either the page-local thread free list, or the heap delayed free list.
    nuint tfreex;
    bool use_delayed;
    nuint tfree = mi_atomic_load_relaxed(ref page->xthread_free);

    do
    {
        use_delayed = mi_tf_delayed(tfree) == MI_USE_DELAYED_FREE;

        if (mi_unlikely(use_delayed))
        {
            // unlikely: this only happens on the first concurrent free in a page that is in the full list
            tfreex = mi_tf_set_delayed(tfree, MI_DELAYED_FREEING);
        }
        else
        {
            // usual: directly add to page thread_free list
            mi_block_set_next(page, block, mi_tf_block(tfree));
            tfreex = mi_tf_set_block(tfree, block);
        }
    }
    while (!mi_atomic_cas_weak_release(ref page->xthread_free, ref tfree, tfreex));

    if (mi_unlikely(use_delayed))
    {
        // racy read on `heap`, but ok because MI_DELAYED_FREEING is set (see `mi_heap_delete` and `mi_heap_collect_abandon`)
        mi_heap_t *heap = (mi_heap_t *)mi_atomic_load_acquire(ref page->xheap);
        mi_assert_internal((MI_DEBUG > 1) && (heap != null));

        if (heap != null)
        {
            // add to the delayed free list of this heap
            // (do this atomically as the lock only protects heap memory validity)
            nuint dfree = (nuint)mi_atomic_load_ptr_relaxed<mi_block_t>(ref heap->thread_delayed_free);

            do
            {
                mi_block_set_nextx(heap, block, (mi_block_t *)dfree, &heap->keys.e0);
            }
            while (!mi_atomic_cas_ptr_weak_release(ref heap->thread_delayed_free, ref dfree, block));
        }

        // and reset the MI_DELAYED_FREEING flag
        tfree = mi_atomic_load_relaxed(ref page->xthread_free);

        do
        {
            tfreex = tfree;
            mi_assert_internal((MI_DEBUG > 1) && (mi_tf_delayed(tfree) == MI_DELAYED_FREEING));
            tfreex = mi_tf_set_delayed(tfree, MI_NO_DELAYED_FREE);
        }
        while (!mi_atomic_cas_weak_release(ref page->xthread_free, ref tfree, tfreex));
    }
#pragma warning restore CS0420
}
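// Illustrative sketch (not part of the port): the CAS loops in `_mi_free_block_mt` above all
// follow the same lock-free push idiom -- read the list head, link the new node in front of
// the value that was read, and retry until the compare-and-swap succeeds. A simplified
// strong-CAS version using plain Interlocked is shown below; `mi_sketch_node_t` and
// `mi_sketch_push` are hypothetical names, and the port's `mi_atomic_cas_weak_release`
// helper instead reloads the observed head through its `ref` parameter on failure.
private struct mi_sketch_node_t
{
    public mi_sketch_node_t *next;
}

private static void mi_sketch_push(ref nint head, mi_sketch_node_t *node)
{
    nint observed = System.Threading.Volatile.Read(ref head);
    nint expected;

    do
    {
        expected = observed;

        // link the node in front of the head we last observed, then try to publish it;
        // if another thread changed the head in the meantime, retry with the value we saw
        node->next = (mi_sketch_node_t *)expected;
        observed = System.Threading.Interlocked.CompareExchange(ref head, (nint)node, expected);
    }
    while (observed != expected);
}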