Example No. 1
        private static mi_page_queue_t *mi_heap_page_queue_of(mi_heap_t *heap, [NativeTypeName("const mi_page_t*")] mi_page_t *page)
        {
            byte bin = mi_page_is_in_full(page) ? MI_BIN_FULL : _mi_bin(page->xblock_size);

            mi_assert_internal((MI_DEBUG > 1) && (bin <= MI_BIN_FULL));

            mi_page_queue_t *pq = &heap->pages.e0 + bin;

            mi_assert_internal((MI_DEBUG > 1) && (mi_page_is_in_full(page) || page->xblock_size == pq->block_size));

            return(pq);
        }
Example No. 2
        private static void mi_page_queue_enqueue_from(mi_page_queue_t *to, mi_page_queue_t *from, mi_page_t *page)
        {
            mi_assert_internal((MI_DEBUG > 1) && (page != null));
            mi_assert_expensive((MI_DEBUG > 2) && mi_page_queue_contains(from, page));
            mi_assert_expensive((MI_DEBUG > 2) && !mi_page_queue_contains(to, page));
            mi_assert_internal((MI_DEBUG > 1) && (((page->xblock_size == to->block_size) && page->xblock_size == from->block_size) || ((page->xblock_size == to->block_size) && mi_page_queue_is_full(from)) || ((page->xblock_size == from->block_size) && mi_page_queue_is_full(to)) || ((page->xblock_size > MI_LARGE_OBJ_SIZE_MAX) && mi_page_queue_is_huge(to)) || ((page->xblock_size > MI_LARGE_OBJ_SIZE_MAX) && mi_page_queue_is_full(to))));

            mi_heap_t *heap = mi_page_heap(page);

            if (page->prev != null)
            {
                page->prev->next = page->next;
            }

            if (page->next != null)
            {
                page->next->prev = page->prev;
            }

            if (page == from->last)
            {
                from->last = page->prev;
            }

            if (page == from->first)
            {
                from->first = page->next;
                mi_assert_internal((MI_DEBUG > 1) && mi_heap_contains_queue(heap, from));

                // update first
                mi_heap_queue_first_update(heap, from);
            }

            page->prev = to->last;
            page->next = null;

            if (to->last != null)
            {
                mi_assert_internal((MI_DEBUG > 1) && (heap == mi_page_heap(to->last)));

                to->last->next = page;
                to->last       = page;
            }
            else
            {
                to->first = page;
                to->last  = page;

                mi_heap_queue_first_update(heap, to);
            }

            mi_page_set_in_full(page, mi_page_queue_is_full(to));
        }
Example No. 3
        private static mi_page_queue_t *mi_page_queue_of([NativeTypeName("const mi_page_t*")] mi_page_t *page)
        {
            byte       bin  = mi_page_is_in_full(page) ? MI_BIN_FULL : _mi_bin(page->xblock_size);
            mi_heap_t *heap = mi_page_heap(page);

            mi_assert_internal((MI_DEBUG > 1) && heap != null && bin <= MI_BIN_FULL);

            mi_page_queue_t *pq = &heap->pages.e0 + bin;

            mi_assert_internal((MI_DEBUG > 1) && (bin >= MI_BIN_HUGE || page->xblock_size == pq->block_size));
            mi_assert_expensive((MI_DEBUG > 2) && mi_page_queue_contains(pq, page));

            return(pq);
        }
Example No. 4
        // Retire a page with no more used blocks
        // Important to not retire too quickly though, as new
        // allocations might be coming.
        // Note: called from `mi_free` and benchmarks often
        // trigger this due to freeing everything and then
        // allocating again, so be careful when changing this.
        private static partial void _mi_page_retire(mi_page_t *page)
        {
            mi_assert_internal((MI_DEBUG > 1) && (page != null));
            mi_assert_expensive((MI_DEBUG > 2) && _mi_page_is_valid(page));
            mi_assert_internal((MI_DEBUG > 1) && mi_page_all_free(page));

            mi_page_set_has_aligned(page, false);

            // don't retire too often..
            // (or we end up retiring and re-allocating most of the time)
            // NOTE: refine this more: we should not retire if this
            // is the only page left with free blocks. It is not clear
            // how to check this efficiently though...
            // for now, we don't retire if it is the only page left of this size class.
            mi_page_queue_t *pq = mi_page_queue_of(page);

            if (mi_likely((page->xblock_size <= MI_MAX_RETIRE_SIZE) && !mi_page_is_in_full(page)))
            {
                if (pq->last == page && pq->first == page)
                {
                    // the only page in the queue?
                    mi_stat_counter_increase(ref _mi_stats_main.page_no_retire, 1);

                    page->retire_expire = (page->xblock_size <= MI_SMALL_OBJ_SIZE_MAX) ? MI_RETIRE_CYCLES : (byte)(MI_RETIRE_CYCLES / 4);

                    mi_heap_t *heap = mi_page_heap(page);
                    mi_assert_internal((MI_DEBUG > 1) && (pq >= &heap->pages.e0));

                    nuint index = (nuint)(pq - &heap->pages.e0);
                    mi_assert_internal((MI_DEBUG > 1) && index < MI_BIN_FULL && index < MI_BIN_HUGE);

                    if (index < heap->page_retired_min)
                    {
                        heap->page_retired_min = index;
                    }

                    if (index > heap->page_retired_max)
                    {
                        heap->page_retired_max = index;
                    }

                    mi_assert_internal((MI_DEBUG > 1) && mi_page_all_free(page));

                    // don't free after all
                    return;
                }
            }

            _mi_page_free(page, pq, false);
        }
Example No. 5
        // ------------------------------------------------------
        // Check for double free in secure and debug mode
        // This is somewhat expensive, so it is only enabled for secure mode 4
        // ------------------------------------------------------

        // linear check if the free list contains a specific element
        private static bool mi_list_contains([NativeTypeName("const mi_page_t*")] mi_page_t *page, [NativeTypeName("const mi_block_t*")] mi_block_t *list, [NativeTypeName("const mi_block_t*")] mi_block_t *elem)
        {
            mi_assert_internal((MI_DEBUG > 1) && (MI_ENCODE_FREELIST != 0) && ((MI_SECURE >= 4) || (MI_DEBUG != 0)));

            while (list != null)
            {
                if (elem == list)
                {
                    return(true);
                }
                list = mi_block_next(page, list);
            }

            return(false);
        }
Example No. 6
        private static bool mi_check_is_double_freex([NativeTypeName("const mi_page_t*")] mi_page_t *page, [NativeTypeName("const mi_block_t*")] mi_block_t *block)
        {
            mi_assert_internal((MI_DEBUG > 1) && (MI_ENCODE_FREELIST != 0) && ((MI_SECURE >= 4) || (MI_DEBUG != 0)));

            // The decoded value is in the same page (or null).
            // Walk the free lists to verify positively if it is already freed

            if (mi_list_contains(page, page->free, block) || mi_list_contains(page, page->local_free, block) || mi_list_contains(page, mi_page_thread_free(page), block))
            {
                _mi_error_message(EAGAIN, "double free detected of block {0:X} with size {1}\n", (nuint)block, mi_page_block_size(page));
                return(true);
            }

            return(false);
        }
Example No. 7
        private static nuint mi_page_list_count(mi_page_t *page, mi_block_t *head)
        {
            mi_assert_internal((MI_DEBUG > 1) && (MI_DEBUG >= 3));

            nuint count = 0;

            while (head != null)
            {
                mi_assert_internal((MI_DEBUG > 1) && (page == _mi_ptr_page(head)));
                count++;
                head = mi_block_next(page, head);
            }

            return(count);
        }
Example No. 8
        private static mi_page_t *mi_page_fresh(mi_heap_t *heap, mi_page_queue_t *pq)
        {
            mi_assert_internal((MI_DEBUG > 1) && mi_heap_contains_queue(heap, pq));
            mi_page_t *page = mi_page_fresh_alloc(heap, pq, pq->block_size);

            if (page == null)
            {
                return(null);
            }

            mi_assert_internal((MI_DEBUG > 1) && (pq->block_size == mi_page_block_size(page)));
            mi_assert_internal((MI_DEBUG > 1) && (pq == mi_page_queue(heap, mi_page_block_size(page))));

            return(page);
        }
Example No. 9
        private static void mi_page_to_full(mi_page_t *page, mi_page_queue_t *pq)
        {
            mi_assert_internal((MI_DEBUG > 1) && (pq == mi_page_queue_of(page)));
            mi_assert_internal((MI_DEBUG > 1) && (!mi_page_immediate_available(page)));
            mi_assert_internal((MI_DEBUG > 1) && (!mi_page_is_in_full(page)));

            if (mi_page_is_in_full(page))
            {
                return;
            }

            mi_page_queue_enqueue_from(&mi_page_heap(page)->pages.e0 + MI_BIN_FULL, pq, page);

            // try to collect right away in case another thread freed just before MI_USE_DELAYED_FREE was set
            _mi_page_free_collect(page, false);
        }
Example No. 10
        private static partial void _mi_page_reclaim(mi_heap_t *heap, mi_page_t *page)
        {
            mi_assert_expensive((MI_DEBUG > 2) && mi_page_is_valid_init(page));
            mi_assert_internal((MI_DEBUG > 1) && (mi_page_heap(page) == heap));
            mi_assert_internal((MI_DEBUG > 1) && (mi_page_thread_free_flag(page) != MI_NEVER_DELAYED_FREE));
            mi_assert_internal((MI_DEBUG > 1) && (_mi_page_segment(page)->page_kind != MI_PAGE_HUGE));
            mi_assert_internal((MI_DEBUG > 1) && (!page->is_reset));

            // TODO: push on full queue immediately if it is full?

            mi_page_queue_t *pq = mi_page_queue(heap, mi_page_block_size(page));

            mi_page_queue_push(heap, pq, page);

            mi_assert_expensive((MI_DEBUG > 2) && _mi_page_is_valid(page));
        }
Example No. 11
        private static nuint mi_page_usable_size_of([NativeTypeName("const mi_page_t*")] mi_page_t *page, [NativeTypeName("const mi_block_t*")] mi_block_t *block)
        {
            if ((MI_PADDING > 0) && (MI_ENCODE_FREELIST != 0))
            {
                bool ok = mi_page_decode_padding(page, block, out nuint delta, out nuint bsize);

                mi_assert_internal((MI_DEBUG > 1) && ok);
                mi_assert_internal((MI_DEBUG > 1) && (delta <= bsize));

                return(ok ? (bsize - delta) : 0);
            }
            else
            {
                return(mi_page_usable_block_size(page));
            }
        }
Example No. 12
        // free retired pages: we don't need to look at the entire queues
        // since we only retire pages that are at the head position in a queue.
        private static partial void _mi_heap_collect_retired(mi_heap_t *heap, bool force)
        {
            nuint min = MI_BIN_FULL;
            nuint max = 0;

            for (nuint bin = heap->page_retired_min; bin <= heap->page_retired_max; bin++)
            {
                mi_page_queue_t *pq   = &heap->pages.e0 + bin;
                mi_page_t *      page = pq->first;

                if ((page != null) && (page->retire_expire != 0))
                {
                    if (mi_page_all_free(page))
                    {
                        page->retire_expire--;

                        if (force || (page->retire_expire == 0))
                        {
                            _mi_page_free(pq->first, pq, force);
                        }
                        else
                        {
                            // keep retired, update min/max

                            if (bin < min)
                            {
                                min = bin;
                            }

                            if (bin > max)
                            {
                                max = bin;
                            }
                        }
                    }
                    else
                    {
                        page->retire_expire = 0;
                    }
                }
            }

            heap->page_retired_min = min;
            heap->page_retired_max = max;
        }
Example No. 13
        private static bool mi_check_is_double_free([NativeTypeName("const mi_page_t*")] mi_page_t *page, [NativeTypeName("const mi_block_t*")] mi_block_t *block)
        {
            if ((MI_ENCODE_FREELIST != 0) && ((MI_SECURE >= 4) || (MI_DEBUG != 0)))
            {
                // pretend it is freed, and get the decoded first field
                mi_block_t *n = mi_block_nextx(page, block, &page->keys.e0);

                // quick check: aligned pointer && in same page or null
                if ((((nuint)n & (MI_INTPTR_SIZE - 1)) == 0) && ((n == null) || mi_is_in_same_page(block, n)))
                {
                    // Suspicious: the decoded value in the block is in the same page (or null) -- maybe a double free?
                    // (continue in separate function to improve code generation)
                    return(mi_check_is_double_freex(page, block));
                }
            }

            return(false);
        }
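
        // Illustrative sketch, not part of the port: the quick alignment test used above.
        // A valid block pointer must be aligned to MI_INTPTR_SIZE, so its low address bits
        // must all be zero; the helper name and the example addresses in the comments are
        // hypothetical, chosen only to show the bit test.
        private static bool is_pointer_word_aligned_sketch(nuint address)
        {
            // pointer size in bytes (8 on a 64-bit target), so the mask is 0x7
            nuint mask = (nuint)IntPtr.Size - 1;

            // e.g. 0x7FF01000 & 0x7 == 0 (plausibly a block pointer), 0x7FF01003 & 0x7 != 0 (not one)
            return((address & mask) == 0);
        }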
Example No. 14
        private static partial void _mi_page_free_collect(mi_page_t *page, bool force)
        {
            mi_assert_internal((MI_DEBUG > 1) && (page != null));

            // collect the thread free list
            if (force || (mi_page_thread_free(page) != null))
            {
                // quick test to avoid an atomic operation
                _mi_page_thread_free_collect(page);
            }

            // and the local free list
            if (page->local_free != null)
            {
                if (mi_likely(page->free == null))
                {
                    // usual case
                    page->free       = page->local_free;
                    page->local_free = null;
                    page->is_zero    = false;
                }
                else if (force)
                {
                    // append -- only on shutdown (force) as this is a linear operation

                    mi_block_t *tail = page->local_free;
                    mi_block_t *next;

                    while ((next = mi_block_next(page, tail)) != null)
                    {
                        tail = next;
                    }

                    mi_block_set_next(page, tail, page->free);
                    page->free = page->local_free;

                    page->local_free = null;
                    page->is_zero    = false;
                }
            }

            mi_assert_internal((MI_DEBUG > 1) && (!force || (page->local_free == null)));
        }
Example No. 15
        private static bool mi_page_list_is_valid(mi_page_t *page, mi_block_t *p)
        {
            mi_assert_internal((MI_DEBUG > 1) && (MI_DEBUG >= 3));
            byte *page_area = _mi_page_start(_mi_page_segment(page), page, out nuint psize);

            mi_block_t *start = (mi_block_t *)page_area;
            mi_block_t *end   = (mi_block_t *)(page_area + psize);

            while (p != null)
            {
                if (p < start || p >= end)
                {
                    return(false);
                }
                p = mi_block_next(page, p);
            }

            return(true);
        }
Example No. 16
        /* -----------------------------------------------------------
        *  Helpers
        *  ----------------------------------------------------------- */

        // Visit all pages in a heap; returns `false` if break was called.
        private static bool mi_heap_visit_pages(mi_heap_t *heap, [NativeTypeName("heap_page_visitor_fun*")] heap_page_visitor_fun fn, void *arg1, void *arg2)
        {
            if ((heap == null) || (heap->page_count == 0))
            {
                return(false);
            }

            nuint total = 0;

            // visit all pages
            if (MI_DEBUG > 1)
            {
                total = heap->page_count;
            }

            nuint count = 0;

            for (nuint i = 0; i <= MI_BIN_FULL; i++)
            {
                mi_page_queue_t *pq   = &heap->pages.e0 + i;
                mi_page_t *      page = pq->first;

                while (page != null)
                {
                    // save next in case the page gets removed from the queue
                    mi_page_t *next = page->next;

                    mi_assert_internal((MI_DEBUG > 1) && (mi_page_heap(page) == heap));

                    count++;

                    if (!fn(heap, pq, page, arg1, arg2))
                    {
                        return(false);
                    }

                    page = next; // and continue
                }
            }

            mi_assert_internal((MI_DEBUG > 1) && (count == total));
            return(true);
        }
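
        // Illustrative sketch, not part of the port: a callback of the shape consumed by
        // mi_heap_visit_pages above. The parameter list is assumed from the call site
        // `fn(heap, pq, page, arg1, arg2)`; `mi_count_pages_sketch` itself is hypothetical.
        private static bool mi_count_pages_sketch(mi_heap_t *heap, mi_page_queue_t *pq, mi_page_t *page, void *arg1, void *arg2)
        {
            // arg1 is assumed to point at a caller-owned running counter
            *(nuint *)arg1 += 1;

            // return true to keep visiting; returning false stops the walk early
            return(true);
        }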
Example No. 17
        // Free a page with no more free blocks
        private static partial void _mi_page_free(mi_page_t *page, mi_page_queue_t *pq, bool force)
        {
            mi_assert_internal((MI_DEBUG > 1) && (page != null));
            mi_assert_expensive((MI_DEBUG > 2) && _mi_page_is_valid(page));
            mi_assert_internal((MI_DEBUG > 1) && (pq == mi_page_queue_of(page)));
            mi_assert_internal((MI_DEBUG > 1) && mi_page_all_free(page));
            mi_assert_internal((MI_DEBUG > 1) && (mi_page_thread_free_flag(page) != MI_DELAYED_FREEING));

            // no more aligned blocks in here
            mi_page_set_has_aligned(page, false);

            // remove from the page list
            // (no need to do _mi_heap_delayed_free first as all blocks are already free)
            mi_segments_tld_t *segments_tld = &mi_page_heap(page)->tld->segments;

            mi_page_queue_remove(pq, page);

            // and free it
            mi_page_set_heap(page, null);
            _mi_segment_page_free(page, force, segments_tld);
        }
Example No. 18
        private static void mi_page_queue_remove(mi_page_queue_t *queue, mi_page_t *page)
        {
            mi_assert_internal((MI_DEBUG > 1) && (page != null));
            mi_assert_expensive((MI_DEBUG > 2) && mi_page_queue_contains(queue, page));
            mi_assert_internal((MI_DEBUG > 1) && ((page->xblock_size == queue->block_size) || ((page->xblock_size > MI_LARGE_OBJ_SIZE_MAX) && mi_page_queue_is_huge(queue)) || (mi_page_is_in_full(page) && mi_page_queue_is_full(queue))));

            mi_heap_t *heap = mi_page_heap(page);

            if (page->prev != null)
            {
                page->prev->next = page->next;
            }

            if (page->next != null)
            {
                page->next->prev = page->prev;
            }

            if (page == queue->last)
            {
                queue->last = page->prev;
            }

            if (page == queue->first)
            {
                queue->first = page->next;
                mi_assert_internal((MI_DEBUG > 1) && mi_heap_contains_queue(heap, queue));

                // update first
                mi_heap_queue_first_update(heap, queue);
            }

            heap->page_count--;

            page->next = null;
            page->prev = null;

            // mi_atomic_store_ptr_release(ref page->heap, null);
            mi_page_set_in_full(page, false);
        }
Example No. 19
        private static bool mi_page_queue_contains(mi_page_queue_t *queue, [NativeTypeName("const mi_page_t*")] mi_page_t *page)
        {
            mi_assert_internal((MI_DEBUG > 1) && (MI_DEBUG > 1));
            mi_assert_internal((MI_DEBUG > 1) && (page != null));

            mi_page_t *list = queue->first;

            while (list != null)
            {
                mi_assert_internal((MI_DEBUG > 1) && ((list->next == null) || (list->next->prev == list)));
                mi_assert_internal((MI_DEBUG > 1) && ((list->prev == null) || (list->prev->next == list)));

                if (list == page)
                {
                    break;
                }

                list = list->next;
            }

            return(list == page);
        }
Example No. 20
        // regular free
        private static void _mi_free_block(mi_page_t *page, bool local, mi_block_t *block)
        {
            // and push it on the free list
            if (mi_likely(local))
            {
                // owning thread can free a block directly

                if (mi_unlikely(mi_check_is_double_free(page, block)))
                {
                    return;
                }

                mi_check_padding(page, block);

                if (MI_DEBUG != 0)
                {
                    _ = memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
                }

                mi_block_set_next(page, block, page->local_free);

                page->local_free = block;
                page->used--;

                if (mi_unlikely(mi_page_all_free(page)))
                {
                    _mi_page_retire(page);
                }
                else if (mi_unlikely(mi_page_is_in_full(page)))
                {
                    _mi_page_unfull(page);
                }
            }
            else
            {
                _mi_free_block_mt(page, block);
            }
        }
Example No. 21
        /* -----------------------------------------------------------
        *  Unfull, abandon, free and retire
        *  ----------------------------------------------------------- */

        // Move a page from the full list back to a regular list
        private static partial void _mi_page_unfull(mi_page_t *page)
        {
            mi_assert_internal((MI_DEBUG > 1) && (page != null));
            mi_assert_expensive((MI_DEBUG > 2) && _mi_page_is_valid(page));
            mi_assert_internal((MI_DEBUG > 1) && mi_page_is_in_full(page));

            if (!mi_page_is_in_full(page))
            {
                return;
            }

            mi_heap_t *      heap   = mi_page_heap(page);
            mi_page_queue_t *pqfull = &heap->pages.e0 + MI_BIN_FULL;

            // to get the right queue
            mi_page_set_in_full(page, false);

            mi_page_queue_t *pq = mi_heap_page_queue_of(heap, page);

            mi_page_set_in_full(page, true);

            mi_page_queue_enqueue_from(pq, pqfull, page);
        }
Example No. 22
        private static partial void _mi_page_use_delayed_free(mi_page_t *page, mi_delayed_t delay, bool override_never)
        {
#pragma warning disable CS0420
            nuint        tfreex;
            mi_delayed_t old_delay;
            nuint        tfree;

            do
            {
                // note: must acquire as we can break/repeat this loop and not do a CAS;
                tfree = mi_atomic_load_acquire(ref page->xthread_free);

                tfreex    = mi_tf_set_delayed(tfree, delay);
                old_delay = mi_tf_delayed(tfree);

                if (mi_unlikely(old_delay == MI_DELAYED_FREEING))
                {
                    // delay until outstanding MI_DELAYED_FREEING are done.
                    mi_atomic_yield();

                    // will cause CAS to busy fail
                    // tfree = mi_tf_set_delayed(tfree, MI_NO_DELAYED_FREE);
                }
                else if (delay == old_delay)
                {
                    // avoid atomic operation if already equal
                    break;
                }
                else if (!override_never && (old_delay == MI_NEVER_DELAYED_FREE))
                {
                    // leave never-delayed flag set
                    break;
                }
            }while ((old_delay == MI_DELAYED_FREEING) || !mi_atomic_cas_weak_release(ref page->xthread_free, ref tfree, tfreex));
#pragma warning restore CS0420
        }
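
        // Illustrative sketch, not part of the port: the compare-and-swap retry shape used by
        // _mi_page_use_delayed_free above, reduced to plain System.Threading primitives.
        // `s_packedWord` and `set_low_bits_sketch` are hypothetical stand-ins for
        // page->xthread_free and mi_tf_set_delayed; only the loop structure is the point.
        private static long s_packedWord;

        private static void set_low_bits_sketch(long newLowBits)
        {
            long observed, desired;

            do
            {
                // re-read the current packed value on every attempt
                observed = Volatile.Read(ref s_packedWord);

                // rewrite only the low two bits, keeping the rest of the word intact
                desired = (observed & ~0x3L) | (newLowBits & 0x3L);
            }
            while (Interlocked.CompareExchange(ref s_packedWord, desired, observed) != observed);
        }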
Example No. 23
        private static partial void _mi_block_zero_init(mi_page_t *page, void *p, nuint size)
        {
            // note: we need to initialize the whole usable block size to zero, not just the requested size,
            // or the recalloc/rezalloc functions cannot safely expand in place (see issue #63)

            mi_assert_internal((MI_DEBUG > 1) && (p != null));
            mi_assert_internal((MI_DEBUG > 1) && (mi_usable_size(p) >= size)); // size can be zero
            mi_assert_internal((MI_DEBUG > 1) && (_mi_ptr_page(p) == page));

            if (page->is_zero && (size > SizeOf <mi_block_t>()))
            {
                // already zero initialized memory

                // clear the free list pointer
                ((mi_block_t *)p)->next = 0;

                mi_assert_expensive((MI_DEBUG > 2) && mi_mem_is_zero(p, mi_usable_size(p)));
            }
            else
            {
                // otherwise memset
                _ = memset(p, 0, mi_usable_size(p));
            }
        }
Example No. 24
        private static void mi_page_free_list_extend_secure([NativeTypeName("mi_heap_t* const")] mi_heap_t *heap, [NativeTypeName("mi_page_t* const")] mi_page_t *page, [NativeTypeName("const size_t")] nuint bsize, [NativeTypeName("const size_t")] nuint extend, [NativeTypeName("mi_stats_t* const")] in mi_stats_t stats)
Example No. 25
        // ------------------------------------------------------
        // Aligned Allocation
        // ------------------------------------------------------

        private static void *mi_heap_malloc_zero_aligned_at([NativeTypeName("mi_heap_t* const")] mi_heap_t *heap, [NativeTypeName("const size_t")] nuint size, [NativeTypeName("const size_t")] nuint alignment, [NativeTypeName("const size_t")] nuint offset, [NativeTypeName("const bool")] bool zero)
        {
            void *p;

            // note: we don't require `size > offset`, we just guarantee that
            // the address at offset is aligned regardless of the allocated size.
            mi_assert((MI_DEBUG != 0) && (alignment > 0));

            if (mi_unlikely(size > (nuint)PTRDIFF_MAX))
            {
                // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
                return(null);
            }

            if (mi_unlikely((alignment == 0) || !_mi_is_power_of_two(alignment)))
            {
                // require power-of-two (see <https://en.cppreference.com/w/c/memory/aligned_alloc>)
                return(null);
            }

            // for any x, `(x & align_mask) == (x % alignment)`
            nuint align_mask = alignment - 1;

            // try if there is a small block available with just the right alignment
            nuint padsize = size + MI_PADDING_SIZE;

            if (mi_likely(padsize <= MI_SMALL_SIZE_MAX))
            {
                mi_page_t *page       = _mi_heap_get_free_small_page(heap, padsize);
                bool       is_aligned = (((nuint)page->free + offset) & align_mask) == 0;

                if (mi_likely(page->free != null && is_aligned))
                {
                    if (MI_STAT > 1)
                    {
                        mi_stat_increase(ref heap->tld->stats.malloc, size);
                    }

                    // TODO: inline _mi_page_malloc
                    p = _mi_page_malloc(heap, page, padsize);

                    mi_assert_internal((MI_DEBUG > 1) && (p != null));
                    mi_assert_internal((MI_DEBUG > 1) && (((nuint)p + offset) % alignment == 0));

                    if (zero)
                    {
                        _mi_block_zero_init(page, p, size);
                    }
                    return(p);
                }
            }

            // use regular allocation if it is guaranteed to fit the alignment constraints
            if ((offset == 0) && (alignment <= padsize) && (padsize <= MI_MEDIUM_OBJ_SIZE_MAX) && ((padsize & align_mask) == 0))
            {
                p = _mi_heap_malloc_zero(heap, size, zero);
                mi_assert_internal((MI_DEBUG > 1) && ((p == null) || (((nuint)p % alignment) == 0)));
                return(p);
            }

            // otherwise over-allocate
            p = _mi_heap_malloc_zero(heap, size + alignment - 1, zero);

            if (p == null)
            {
                return(null);
            }

            // .. and align within the allocation
            nuint adjust = alignment - (((nuint)p + offset) & align_mask);

            mi_assert_internal((MI_DEBUG > 1) && (adjust <= alignment));

            void *aligned_p = adjust == alignment ? p : (void *)((nuint)p + adjust);

            if (aligned_p != p)
            {
                mi_page_set_has_aligned(_mi_ptr_page(p), true);
            }

            mi_assert_internal((MI_DEBUG > 1) && (((nuint)aligned_p + offset) % alignment == 0));
            mi_assert_internal((MI_DEBUG > 1) && (p == _mi_page_ptr_unalign(_mi_ptr_segment(aligned_p), _mi_ptr_page(aligned_p), aligned_p)));

            return(aligned_p);
        }
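
        // Illustrative sketch, not part of the port: the over-allocate-and-adjust arithmetic
        // used above, walked through with made-up concrete numbers. `rawAddress`, `alignment`,
        // and `offset` are hypothetical values chosen only to demonstrate the formula.
        private static nuint aligned_address_sketch()
        {
            nuint alignment  = 64;        // must be a power of two
            nuint offset     = 16;        // the address at `offset` must end up aligned
            nuint rawAddress = 0x1008;    // pretend result of the over-sized allocation

            // for any x, `(x & alignMask) == (x % alignment)` because alignment is a power of two
            nuint alignMask = alignment - 1;
            nuint adjust    = alignment - ((rawAddress + offset) & alignMask);

            // when rawAddress + offset is already aligned, adjust == alignment and no shift is applied
            nuint aligned = (adjust == alignment) ? rawAddress : (rawAddress + adjust);

            // here: (0x1008 + 0x10) & 0x3F == 0x18, so adjust == 0x28 and aligned == 0x1030;
            // check: (0x1030 + 0x10) % 64 == 0 as required
            return(aligned);
        }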
Example No. 26
        // The current small page array is for efficiency: for each
        // small size (up to 256) it points directly to the page for that
        // size, without having to compute the bin. This means that when the
        // current free page queue is updated for a small bin, we need to update a
        // range of entries in `pages_free_direct`.
        private static void mi_heap_queue_first_update(mi_heap_t *heap, [NativeTypeName("const mi_page_queue_t*")] mi_page_queue_t *pq)
        {
            mi_assert_internal((MI_DEBUG > 1) && mi_heap_contains_queue(heap, pq));
            nuint size = pq->block_size;

            if (size > MI_SMALL_SIZE_MAX)
            {
                return;
            }

            mi_page_t *page = pq->first;

            if (pq->first == null)
            {
                page = (mi_page_t *)_mi_page_empty;
            }

            // find index in the right direct page array
            nuint idx = _mi_wsize_from_size(size);

            nuint       start;
            mi_page_t **pages_free = &heap->pages_free_direct.e0;

            if (pages_free[idx] == page)
            {
                // already set
                return;
            }

            // find start slot
            if (idx <= 1)
            {
                start = 0;
            }
            else
            {
                // find previous size; due to minimal alignment, up to 3 previous bins may need to be skipped
                byte bin = _mi_bin(size);

                mi_page_queue_t *prev = pq - 1;

                while ((bin == _mi_bin(prev->block_size)) && (prev > &heap->pages.e0))
                {
                    prev--;
                }

                start = 1 + _mi_wsize_from_size(prev->block_size);

                if (start > idx)
                {
                    start = idx;
                }
            }

            // set size range to the right page
            mi_assert((MI_DEBUG != 0) && (start <= idx));

            for (nuint sz = start; sz <= idx; sz++)
            {
                pages_free[sz] = page;
            }
        }
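
        // Illustrative sketch, not part of the port: the kind of size-to-word-count mapping that
        // _mi_wsize_from_size is used for above -- rounding a byte size up to a count of
        // pointer-sized words, which fixes the range of pages_free_direct slots to update.
        // The exact definition in the port may differ; this helper is an assumption.
        private static nuint wsize_from_size_sketch(nuint size)
        {
            // pointer size in bytes: 8 on a 64-bit target, 4 on a 32-bit target
            nuint wordSize = (nuint)IntPtr.Size;

            // e.g. on 64-bit: sizes 1..8 map to 1 word, 9..16 to 2 words, 17..24 to 3 words, ...
            return((size + wordSize - 1) / wordSize);
        }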
Example No. 27
        private static void mi_page_queue_push(mi_heap_t *heap, mi_page_queue_t *queue, mi_page_t *page)
        {
            mi_assert_internal((MI_DEBUG > 1) && (mi_page_heap(page) == heap));
            mi_assert_internal((MI_DEBUG > 1) && (!mi_page_queue_contains(queue, page)));
            mi_assert_internal((MI_DEBUG > 1) && (_mi_page_segment(page)->page_kind != MI_PAGE_HUGE));
            mi_assert_internal((MI_DEBUG > 1) && ((page->xblock_size == queue->block_size) || ((page->xblock_size > MI_LARGE_OBJ_SIZE_MAX) && mi_page_queue_is_huge(queue)) || (mi_page_is_in_full(page) && mi_page_queue_is_full(queue))));

            mi_page_set_in_full(page, mi_page_queue_is_full(queue));
            // mi_atomic_store_ptr_release(ref page->heap, heap);

            page->next = queue->first;
            page->prev = null;

            if (queue->first != null)
            {
                mi_assert_internal((MI_DEBUG > 1) && (queue->first->prev == null));
                queue->first->prev = page;
                queue->first       = page;
            }
            else
            {
                queue->first = queue->last = page;
            }

            // update direct
            mi_heap_queue_first_update(heap, queue);

            heap->page_count++;
        }
Example No. 28
        private static byte *mi_page_area([NativeTypeName("const mi_page_t*")] mi_page_t *page)
        {
            mi_assert_internal((MI_DEBUG > 1) && (MI_DEBUG >= 3));

            return(_mi_page_start(_mi_page_segment(page), page, out _));
        }
Example No. 29
        private static partial void mi_page_init(mi_heap_t *heap, mi_page_t *page, [NativeTypeName("size_t")] nuint block_size, mi_tld_t *tld);
Example No. 30
        private static partial void mi_page_extend_free(mi_heap_t *heap, mi_page_t *page, mi_tld_t *tld);