        // Push a page to the front of a queue, marking it as full when the queue is the full queue.
        private static void mi_page_queue_push(mi_heap_t *heap, mi_page_queue_t *queue, mi_page_t *page)
        {
            mi_assert_internal((MI_DEBUG > 1) && (mi_page_heap(page) == heap));
            mi_assert_internal((MI_DEBUG > 1) && (!mi_page_queue_contains(queue, page)));
            mi_assert_internal((MI_DEBUG > 1) && (_mi_page_segment(page)->page_kind != MI_PAGE_HUGE));
            mi_assert_internal((MI_DEBUG > 1) && ((page->xblock_size == queue->block_size) || ((page->xblock_size > MI_LARGE_OBJ_SIZE_MAX) && mi_page_queue_is_huge(queue)) || (mi_page_is_in_full(page) && mi_page_queue_is_full(queue))));

            mi_page_set_in_full(page, mi_page_queue_is_full(queue));
            // mi_atomic_store_ptr_release(ref page->heap, heap);

            page->next = queue->first;
            page->prev = null;

            if (queue->first != null)
            {
                mi_assert_internal((MI_DEBUG > 1) && (queue->first->prev == null));
                queue->first->prev = page;
                queue->first       = page;
            }
            else
            {
                queue->first = queue->last = page;
            }

            // update direct
            mi_heap_queue_first_update(heap, queue);

            heap->page_count++;
        }
        // allocate a fresh page from a segment
        private static mi_page_t *mi_page_fresh_alloc(mi_heap_t *heap, mi_page_queue_t *pq, [NativeTypeName("size_t")] nuint block_size)
        {
            mi_assert_internal((MI_DEBUG > 1) && ((pq == null) || mi_heap_contains_queue(heap, pq)));
            mi_assert_internal((MI_DEBUG > 1) && ((pq == null) || (block_size == pq->block_size)));

            mi_page_t *page = _mi_segment_page_alloc(heap, block_size, &heap->tld->segments, &heap->tld->os);

            if (page == null)
            {
                // this may be out-of-memory, or an abandoned page was reclaimed (and in our queue)
                return(null);
            }

            // a fresh page was found, initialize it
            mi_assert_internal((MI_DEBUG > 1) && (pq == null || _mi_page_segment(page)->page_kind != MI_PAGE_HUGE));

            mi_page_init(heap, page, block_size, heap->tld);
            _mi_stat_increase(ref heap->tld->stats.pages, 1);

            if (pq != null)
            {
                // huge pages use pq==null
                mi_page_queue_push(heap, pq, page);
            }

            mi_assert_expensive((MI_DEBUG > 2) && _mi_page_is_valid(page));
            return(page);
        }
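        // Validate a page's links, queue membership, and owning heap (only active when MI_DEBUG >= 3).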
        private static partial bool _mi_page_is_valid(mi_page_t *page)
        {
            mi_assert_internal((MI_DEBUG > 1) && (MI_DEBUG >= 3));
            mi_assert_internal((MI_DEBUG > 1) && mi_page_is_valid_init(page));

            if (MI_SECURE != 0)
            {
                mi_assert_internal((MI_DEBUG > 1) && (page->keys.e0 != 0));
            }

            if (mi_page_heap(page) != null)
            {
                mi_segment_t *segment = _mi_page_segment(page);
                mi_assert_internal((MI_DEBUG > 1) && ((segment->thread_id == mi_page_heap(page)->thread_id) || (segment->thread_id == 0)));

                if (segment->page_kind != MI_PAGE_HUGE)
                {
                    mi_page_queue_t *pq = mi_page_queue_of(page);
                    mi_assert_internal((MI_DEBUG > 1) && mi_page_queue_contains(pq, page));

                    mi_assert_internal((MI_DEBUG > 1) && ((pq->block_size == mi_page_block_size(page)) || (mi_page_block_size(page) > MI_LARGE_OBJ_SIZE_MAX) || mi_page_is_in_full(page)));
                    mi_assert_internal((MI_DEBUG > 1) && mi_heap_contains_queue(mi_page_heap(page), pq));
                }
            }

            return(true);
        }
        // Abandon a page with used blocks at the end of a thread.
        // Note: only call if it is ensured that no references exist from
        // the `page->heap->thread_delayed_free` into this page.
        // Currently only called through `mi_heap_collect_ex` which ensures this.
        private static partial void _mi_page_abandon(mi_page_t *page, mi_page_queue_t *pq)
        {
            mi_assert_internal((MI_DEBUG > 1) && (page != null));
            mi_assert_expensive((MI_DEBUG > 2) && _mi_page_is_valid(page));
            mi_assert_internal((MI_DEBUG > 1) && (pq == mi_page_queue_of(page)));
            mi_assert_internal((MI_DEBUG > 1) && (mi_page_heap(page) != null));

            mi_heap_t *pheap = mi_page_heap(page);

            // remove from our page list
            mi_segments_tld_t *segments_tld = &pheap->tld->segments;

            mi_page_queue_remove(pq, page);

            // page is no longer associated with our heap
            mi_assert_internal((MI_DEBUG > 1) && (mi_page_thread_free_flag(page) == MI_NEVER_DELAYED_FREE));
            mi_page_set_heap(page, null);

            if (MI_DEBUG > 1)
            {
                // check there are no references left..
                for (mi_block_t *block = (mi_block_t *)pheap->thread_delayed_free; block != null; block = mi_block_nextx(pheap, block, &pheap->keys.e0))
                {
                    mi_assert_internal((MI_DEBUG > 1) && (_mi_ptr_page(block) != page));
                }
            }

            // and abandon it
            mi_assert_internal((MI_DEBUG > 1) && (mi_page_heap(page) == null));
            _mi_segment_page_abandon(page, segments_tld);
        }
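        // Visitor callback: mark a page so its blocks are never pushed on the thread-delayed free list.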
        private static bool mi_heap_page_never_delayed_free(mi_heap_t *heap, mi_page_queue_t *pq, mi_page_t *page, void *arg1, void *arg2)
        {
            _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);

            // don't break
            return(true);
        }
        // Only called from `mi_heap_absorb`.
        private static partial nuint _mi_page_queue_append(mi_heap_t *heap, mi_page_queue_t *pq, mi_page_queue_t *append)
        {
#pragma warning disable CS0420
            mi_assert_internal((MI_DEBUG > 1) && mi_heap_contains_queue(heap, pq));
            mi_assert_internal((MI_DEBUG > 1) && (pq->block_size == append->block_size));

            if (append->first == null)
            {
                return(0);
            }

            // set append pages to new heap and count
            nuint count = 0;

            for (mi_page_t *page = append->first; page != null; page = page->next)
            {
                // inline `mi_page_set_heap` to avoid wrong assertion during absorption;
                // in this case it is ok to be delayed freeing since both "to" and "from" heap are still alive.
                mi_atomic_store_release(ref page->xheap, (nuint)heap);

                // set the flag to delayed free (not overriding NEVER_DELAYED_FREE) which has as a
                // side effect that it spins until any DELAYED_FREEING is finished. This ensures
                // that after appending only the new heap will be used for delayed free operations.
                _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, false);

                count++;
            }

            if (pq->last == null)
            {
                // take over afresh
                mi_assert_internal((MI_DEBUG > 1) && (pq->first == null));

                pq->first = append->first;
                pq->last  = append->last;

                mi_heap_queue_first_update(heap, pq);
            }
            else
            {
                // append to end

                mi_assert_internal((MI_DEBUG > 1) && (pq->last != null));
                mi_assert_internal((MI_DEBUG > 1) && (append->first != null));

                pq->last->next      = append->first;
                append->first->prev = pq->last;

                pq->last = append->last;
            }

            return(count);

#pragma warning restore CS0420
        }
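        // Visitor callback: debug check that a page belongs to `heap` and is in a valid state.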
        private static bool mi_heap_page_is_valid(mi_heap_t *heap, mi_page_queue_t *pq, mi_page_t *page, void *arg1, void *arg2)
        {
            mi_assert_internal((MI_DEBUG > 1) && (MI_DEBUG >= 2));
            mi_assert_internal((MI_DEBUG > 1) && (mi_page_heap(page) == heap));

            mi_segment_t *segment = _mi_page_segment(page);

            mi_assert_internal((MI_DEBUG > 1) && (segment->thread_id == heap->thread_id));

            mi_assert_expensive((MI_DEBUG > 2) && _mi_page_is_valid(page));
            return(true);
        }
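        // Move `page` from the `from` queue to the tail of the `to` queue, updating the direct page map where needed.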
        private static void mi_page_queue_enqueue_from(mi_page_queue_t *to, mi_page_queue_t *from, mi_page_t *page)
        {
            mi_assert_internal((MI_DEBUG > 1) && (page != null));
            mi_assert_expensive((MI_DEBUG > 2) && mi_page_queue_contains(from, page));
            mi_assert_expensive((MI_DEBUG > 2) && !mi_page_queue_contains(to, page));
            mi_assert_internal((MI_DEBUG > 1) && (((page->xblock_size == to->block_size) && page->xblock_size == from->block_size) || ((page->xblock_size == to->block_size) && mi_page_queue_is_full(from)) || ((page->xblock_size == from->block_size) && mi_page_queue_is_full(to)) || ((page->xblock_size > MI_LARGE_OBJ_SIZE_MAX) && mi_page_queue_is_huge(to)) || ((page->xblock_size > MI_LARGE_OBJ_SIZE_MAX) && mi_page_queue_is_full(to))));

            mi_heap_t *heap = mi_page_heap(page);

            if (page->prev != null)
            {
                page->prev->next = page->next;
            }

            if (page->next != null)
            {
                page->next->prev = page->prev;
            }

            if (page == from->last)
            {
                from->last = page->prev;
            }

            if (page == from->first)
            {
                from->first = page->next;
                mi_assert_internal((MI_DEBUG > 1) && mi_heap_contains_queue(heap, from));

                // update first
                mi_heap_queue_first_update(heap, from);
            }

            page->prev = to->last;
            page->next = null;

            if (to->last != null)
            {
                mi_assert_internal((MI_DEBUG > 1) && (heap == mi_page_heap(to->last)));

                to->last->next = page;
                to->last       = page;
            }
            else
            {
                to->first = page;
                to->last  = page;

                mi_heap_queue_first_update(heap, to);
            }

            mi_page_set_in_full(page, mi_page_queue_is_full(to));
        }
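        // Return the queue in `heap` that corresponds to `page` (the full queue if the page is marked full).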
        private static mi_page_queue_t *mi_heap_page_queue_of(mi_heap_t *heap, [NativeTypeName("const mi_page_t*")] mi_page_t *page)
        {
            byte bin = mi_page_is_in_full(page) ? MI_BIN_FULL : _mi_bin(page->xblock_size);

            mi_assert_internal((MI_DEBUG > 1) && (bin <= MI_BIN_FULL));

            mi_page_queue_t *pq = &heap->pages.e0 + bin;

            mi_assert_internal((MI_DEBUG > 1) && (mi_page_is_in_full(page) || page->xblock_size == pq->block_size));

            return(pq);
        }
        /* -----------------------------------------------------------
        *  Heap destroy
        *  ----------------------------------------------------------- */

        private static bool _mi_heap_page_destroy(mi_heap_t *heap, mi_page_queue_t *pq, mi_page_t *page, void *arg1, void *arg2)
        {
            // ensure no more thread_delayed_free will be added
            _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);

            // stats
            nuint bsize = mi_page_block_size(page);

            if (bsize > MI_LARGE_OBJ_SIZE_MAX)
            {
                if (bsize > MI_HUGE_OBJ_SIZE_MAX)
                {
                    _mi_stat_decrease(ref heap->tld->stats.giant, bsize);
                }
                else
                {
                    _mi_stat_decrease(ref heap->tld->stats.huge, bsize);
                }
            }

            if (MI_STAT > 1)
            {
                // update used count
                _mi_page_free_collect(page, false);

                nuint inuse = page->used;

                if (bsize <= MI_LARGE_OBJ_SIZE_MAX)
                {
                    mi_stat_decrease(ref (&heap->tld->stats.normal.e0)[_mi_bin(bsize)], inuse);
                }

                // todo: off for aligned blocks...
                mi_stat_decrease(ref heap->tld->stats.malloc, bsize * inuse);
            }

            // pretend it is all free now
            mi_assert_internal((MI_DEBUG > 1) && (mi_page_thread_free(page) == null));

            page->used = 0;

            // and free the page
            // mi_page_free(page,false);

            page->next = null;
            page->prev = null;

            _mi_segment_page_free(page, force: false, &heap->tld->segments);

            // keep going
            return(true);
        }
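        // Visitor callback: set `*vfound` to true if pointer `p` lies inside the block area of `page`.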
        private static bool mi_heap_page_check_owned(mi_heap_t *heap, mi_page_queue_t *pq, mi_page_t *page, void *p, void *vfound)
        {
            bool *        found   = (bool *)vfound;
            mi_segment_t *segment = _mi_page_segment(page);

            void *start = _mi_page_start(segment, page, out _);
            void *end   = (byte *)start + (page->capacity * mi_page_block_size(page));

            *found = (p >= start) && (p < end);

            // continue if not found
            return(!*found);
        }
        // Retire a page with no more used blocks
        // Important not to retire too quickly though, as new
        // allocations might be coming.
        // Note: called from `mi_free` and benchmarks often
        // trigger this due to freeing everything and then
        // allocating again, so be careful when changing this.
        private static partial void _mi_page_retire(mi_page_t *page)
        {
            mi_assert_internal((MI_DEBUG > 1) && (page != null));
            mi_assert_expensive((MI_DEBUG > 2) && _mi_page_is_valid(page));
            mi_assert_internal((MI_DEBUG > 1) && mi_page_all_free(page));

            mi_page_set_has_aligned(page, false);

            // don't retire too often..
            // (or we end up retiring and re-allocating most of the time)
            // NOTE: refine this more: we should not retire if this
            // is the only page left with free blocks. It is not clear
            // how to check this efficiently though...
            // for now, we don't retire if it is the only page left of this size class.
            mi_page_queue_t *pq = mi_page_queue_of(page);

            if (mi_likely((page->xblock_size <= MI_MAX_RETIRE_SIZE) && !mi_page_is_in_full(page)))
            {
                if (pq->last == page && pq->first == page)
                {
                    // the only page in the queue?
                    mi_stat_counter_increase(ref _mi_stats_main.page_no_retire, 1);

                    page->retire_expire = (page->xblock_size <= MI_SMALL_OBJ_SIZE_MAX) ? MI_RETIRE_CYCLES : (byte)(MI_RETIRE_CYCLES / 4);

                    mi_heap_t *heap = mi_page_heap(page);
                    mi_assert_internal((MI_DEBUG > 1) && (pq >= &heap->pages.e0));

                    nuint index = (nuint)(pq - &heap->pages.e0);
                    mi_assert_internal((MI_DEBUG > 1) && index < MI_BIN_FULL && index < MI_BIN_HUGE);

                    if (index < heap->page_retired_min)
                    {
                        heap->page_retired_min = index;
                    }

                    if (index > heap->page_retired_max)
                    {
                        heap->page_retired_max = index;
                    }

                    mi_assert_internal((MI_DEBUG > 1) && mi_page_all_free(page));

                    // don't free after all
                    return;
                }
            }

            _mi_page_free(page, pq, false);
        }
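        // Return the queue of the page's own heap that should contain `page`.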
        private static mi_page_queue_t *mi_page_queue_of([NativeTypeName("const mi_page_t*")] mi_page_t *page)
        {
            byte       bin  = mi_page_is_in_full(page) ? MI_BIN_FULL : _mi_bin(page->xblock_size);
            mi_heap_t *heap = mi_page_heap(page);

            mi_assert_internal((MI_DEBUG > 1) && heap != null && bin <= MI_BIN_FULL);

            mi_page_queue_t *pq = &heap->pages.e0 + bin;

            mi_assert_internal((MI_DEBUG > 1) && (bin >= MI_BIN_HUGE || page->xblock_size == pq->block_size));
            mi_assert_expensive((MI_DEBUG > 2) && mi_page_queue_contains(pq, page));

            return(pq);
        }
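        // Allocate a fresh page with the queue's block size and push it on the queue.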
        private static mi_page_t *mi_page_fresh(mi_heap_t *heap, mi_page_queue_t *pq)
        {
            mi_assert_internal((MI_DEBUG > 1) && mi_heap_contains_queue(heap, pq));
            mi_page_t *page = mi_page_fresh_alloc(heap, pq, pq->block_size);

            if (page == null)
            {
                return(null);
            }

            mi_assert_internal((MI_DEBUG > 1) && (pq->block_size == mi_page_block_size(page)));
            mi_assert_internal((MI_DEBUG > 1) && (pq == mi_page_queue(heap, mi_page_block_size(page))));

            return(page);
        }
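        // Move a page with no immediately available blocks to the full queue.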
        private static void mi_page_to_full(mi_page_t *page, mi_page_queue_t *pq)
        {
            mi_assert_internal((MI_DEBUG > 1) && (pq == mi_page_queue_of(page)));
            mi_assert_internal((MI_DEBUG > 1) && (!mi_page_immediate_available(page)));
            mi_assert_internal((MI_DEBUG > 1) && (!mi_page_is_in_full(page)));

            if (mi_page_is_in_full(page))
            {
                return;
            }

            mi_page_queue_enqueue_from(&mi_page_heap(page)->pages.e0 + MI_BIN_FULL, pq, page);

            // try to collect right away in case another thread freed just before MI_USE_DELAYED_FREE was set
            _mi_page_free_collect(page, false);
        }
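        // Reclaim a (non-huge) page into `heap` by pushing it on the queue for its block size.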
        private static partial void _mi_page_reclaim(mi_heap_t *heap, mi_page_t *page)
        {
            mi_assert_expensive((MI_DEBUG > 2) && mi_page_is_valid_init(page));
            mi_assert_internal((MI_DEBUG > 1) && (mi_page_heap(page) == heap));
            mi_assert_internal((MI_DEBUG > 1) && (mi_page_thread_free_flag(page) != MI_NEVER_DELAYED_FREE));
            mi_assert_internal((MI_DEBUG > 1) && (_mi_page_segment(page)->page_kind != MI_PAGE_HUGE));
            mi_assert_internal((MI_DEBUG > 1) && (!page->is_reset));

            // TODO: push on full queue immediately if it is full?

            mi_page_queue_t *pq = mi_page_queue(heap, mi_page_block_size(page));

            mi_page_queue_push(heap, pq, page);

            mi_assert_expensive((MI_DEBUG > 2) && _mi_page_is_valid(page));
        }
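        // Visitor callback: wrap a page as an `mi_heap_area_ex_t` and forward it to the user-supplied area visitor.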
        private static bool mi_heap_visit_areas_page(mi_heap_t *heap, mi_page_queue_t *pq, mi_page_t *page, void *vfun, void *arg)
        {
            GCHandle handle            = GCHandle.FromIntPtr((IntPtr)vfun);
            mi_heap_area_visit_fun fun = (mi_heap_area_visit_fun)handle.Target !;

            mi_heap_area_ex_t xarea;
            nuint             bsize = mi_page_block_size(page);

            xarea.page            = page;
            xarea.area.reserved   = page->reserved * bsize;
            xarea.area.committed  = page->capacity * bsize;
            xarea.area.blocks     = _mi_page_start(_mi_page_segment(page), page, out _);
            xarea.area.used       = page->used;
            xarea.area.block_size = bsize;

            return(fun(heap, &xarea, arg));
        }
        // free retired pages: we don't need to look at the entire queues
        // since we only retire pages that are at the head position in a queue.
        private static partial void _mi_heap_collect_retired(mi_heap_t *heap, bool force)
        {
            nuint min = MI_BIN_FULL;
            nuint max = 0;

            for (nuint bin = heap->page_retired_min; bin <= heap->page_retired_max; bin++)
            {
                mi_page_queue_t *pq   = &heap->pages.e0 + bin;
                mi_page_t *      page = pq->first;

                if ((page != null) && (page->retire_expire != 0))
                {
                    if (mi_page_all_free(page))
                    {
                        page->retire_expire--;

                        if (force || (page->retire_expire == 0))
                        {
                            _mi_page_free(pq->first, pq, force);
                        }
                        else
                        {
                            // keep retired, update min/max

                            if (bin < min)
                            {
                                min = bin;
                            }

                            if (bin > max)
                            {
                                max = bin;
                            }
                        }
                    }
                    else
                    {
                        page->retire_expire = 0;
                    }
                }
            }

            heap->page_retired_min = min;
            heap->page_retired_max = max;
        }
        /* -----------------------------------------------------------
        *  Helpers
        *  ----------------------------------------------------------- */

        // Visit all pages in a heap; returns `false` if break was called.
        private static bool mi_heap_visit_pages(mi_heap_t *heap, [NativeTypeName("heap_page_visitor_fun*")] heap_page_visitor_fun fn, void *arg1, void *arg2)
        {
            if ((heap == null) || (heap->page_count == 0))
            {
                return(false);
            }

            nuint total = 0;

            // visit all pages
            if (MI_DEBUG > 1)
            {
                total = heap->page_count;
            }

            nuint count = 0;

            for (nuint i = 0; i <= MI_BIN_FULL; i++)
            {
                mi_page_queue_t *pq   = &heap->pages.e0 + i;
                mi_page_t *      page = pq->first;

                while (page != null)
                {
                    // save next in case the page gets removed from the queue
                    mi_page_t *next = page->next;

                    mi_assert_internal((MI_DEBUG > 1) && (mi_page_heap(page) == heap));

                    count++;

                    if (!fn(heap, pq, page, arg1, arg2))
                    {
                        return(false);
                    }

                    page = next; // and continue
                }
            }

            mi_assert_internal((MI_DEBUG > 1) && (count == total));
            return(true);
        }
        /* -----------------------------------------------------------
        *  Safe Heap delete
        *  ----------------------------------------------------------- */

        // Transfer the pages from one heap to the other
        private static void mi_heap_absorb(mi_heap_t *heap, mi_heap_t *from)
        {
            mi_assert_internal((MI_DEBUG > 1) && (heap != null));

            if ((from == null) || (from->page_count == 0))
            {
                return;
            }

            // reduce the size of the delayed frees
            _mi_heap_delayed_free(from);

            // transfer all pages by appending the queues; this will set a new heap field
            // so threads may do delayed frees in either heap for a while.
            // note: appending waits for each page to not be in the `MI_DELAYED_FREEING` state
            // so after this only the new heap will get delayed frees

            for (nuint i = 0; i <= MI_BIN_FULL; i++)
            {
                mi_page_queue_t *pq     = &heap->pages.e0 + i;
                mi_page_queue_t *append = &from->pages.e0 + i;

                nuint pcount = _mi_page_queue_append(heap, pq, append);

                heap->page_count += pcount;
                from->page_count -= pcount;
            }

            mi_assert_internal((MI_DEBUG > 1) && (from->page_count == 0));

            // and do outstanding delayed frees in the `from` heap
            // note: be careful here as the `heap` field in all those pages no longer points to `from`;
            // this turns out to be ok as `_mi_heap_delayed_free` only visits the list and calls the
            // regular `_mi_free_delayed_block`, which is safe.
            _mi_heap_delayed_free(from);

            mi_assert_internal((MI_DEBUG > 1) && (from->thread_delayed_free == 0));

            // and reset the `from` heap
            mi_heap_reset_pages(from);
        }
        // Free a page with no more free blocks
        private static partial void _mi_page_free(mi_page_t *page, mi_page_queue_t *pq, bool force)
        {
            mi_assert_internal((MI_DEBUG > 1) && (page != null));
            mi_assert_expensive((MI_DEBUG > 2) && _mi_page_is_valid(page));
            mi_assert_internal((MI_DEBUG > 1) && (pq == mi_page_queue_of(page)));
            mi_assert_internal((MI_DEBUG > 1) && mi_page_all_free(page));
            mi_assert_internal((MI_DEBUG > 1) && (mi_page_thread_free_flag(page) != MI_DELAYED_FREEING));

            // no more aligned blocks in here
            mi_page_set_has_aligned(page, false);

            // remove from the page list
            // (no need to do _mi_heap_delayed_free first as all blocks are already free)
            mi_segments_tld_t *segments_tld = &mi_page_heap(page)->tld->segments;

            mi_page_queue_remove(pq, page);

            // and free it
            mi_page_set_heap(page, null);
            _mi_segment_page_free(page, force, segments_tld);
        }
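        // Unlink `page` from `queue`, update the direct page map if it was the first page, and clear its full flag.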
        private static void mi_page_queue_remove(mi_page_queue_t *queue, mi_page_t *page)
        {
            mi_assert_internal((MI_DEBUG > 1) && (page != null));
            mi_assert_expensive((MI_DEBUG > 2) && mi_page_queue_contains(queue, page));
            mi_assert_internal((MI_DEBUG > 1) && ((page->xblock_size == queue->block_size) || ((page->xblock_size > MI_LARGE_OBJ_SIZE_MAX) && mi_page_queue_is_huge(queue)) || (mi_page_is_in_full(page) && mi_page_queue_is_full(queue))));

            mi_heap_t *heap = mi_page_heap(page);

            if (page->prev != null)
            {
                page->prev->next = page->next;
            }

            if (page->next != null)
            {
                page->next->prev = page->prev;
            }

            if (page == queue->last)
            {
                queue->last = page->prev;
            }

            if (page == queue->first)
            {
                queue->first = page->next;
                mi_assert_internal((MI_DEBUG > 1) && mi_heap_contains_queue(heap, queue));

                // update first
                mi_heap_queue_first_update(heap, queue);
            }

            heap->page_count--;

            page->next = null;
            page->prev = null;

            // mi_atomic_store_ptr_release(ref page->heap, null);
            mi_page_set_in_full(page, false);
        }
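        // Debug helper: linearly scan `queue` to check whether it links `page`, validating prev/next links along the way.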
        private static bool mi_page_queue_contains(mi_page_queue_t *queue, [NativeTypeName("const mi_page_t*")] mi_page_t *page)
        {
            mi_assert_internal((MI_DEBUG > 1) && (MI_DEBUG > 1));
            mi_assert_internal((MI_DEBUG > 1) && (page != null));

            mi_page_t *list = queue->first;

            while (list != null)
            {
                mi_assert_internal((MI_DEBUG > 1) && ((list->next == null) || (list->next->prev == list)));
                mi_assert_internal((MI_DEBUG > 1) && ((list->prev == null) || (list->prev->next == list)));

                if (list == page)
                {
                    break;
                }

                list = list->next;
            }

            return(list == page);
        }
        /* -----------------------------------------------------------
        *  "Collect" pages by migrating `local_free` and `thread_free`
        *  lists and freeing empty pages. This is done when a thread
        *  stops (and in that case abandons pages if there are still
        *  blocks alive)
        *  ----------------------------------------------------------- */

        private static bool mi_heap_page_collect(mi_heap_t *heap, mi_page_queue_t *pq, mi_page_t *page, void *arg_collect, void *arg2)
        {
            mi_assert_internal((MI_DEBUG > 1) && mi_heap_page_is_valid(heap, pq, page, null, null));

            mi_collect_t collect = *(mi_collect_t *)arg_collect;

            _mi_page_free_collect(page, collect >= MI_FORCE);

            if (mi_page_all_free(page))
            {
                // no more used blocks, free the page.
                // note: this will free retired pages as well.
                _mi_page_free(page, pq, collect >= MI_FORCE);
            }
            else if (collect == MI_ABANDON)
            {
                // still used blocks but the thread is done; abandon the page
                _mi_page_abandon(page, pq);
            }

            // don't break
            return(true);
        }
        /* -----------------------------------------------------------
        *  Unfull, abandon, free and retire
        *  ----------------------------------------------------------- */

        // Move a page from the full list back to a regular list
        private static partial void _mi_page_unfull(mi_page_t *page)
        {
            mi_assert_internal((MI_DEBUG > 1) && (page != null));
            mi_assert_expensive((MI_DEBUG > 2) && _mi_page_is_valid(page));
            mi_assert_internal((MI_DEBUG > 1) && mi_page_is_in_full(page));

            if (!mi_page_is_in_full(page))
            {
                return;
            }

            mi_heap_t *      heap   = mi_page_heap(page);
            mi_page_queue_t *pqfull = &heap->pages.e0 + MI_BIN_FULL;

            // to get the right queue
            mi_page_set_in_full(page, false);

            mi_page_queue_t *pq = mi_heap_page_queue_of(heap, page);

            mi_page_set_in_full(page, true);

            mi_page_queue_enqueue_from(pq, pqfull, page);
        }
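        // Check that `pq` points into the page-queue array of `heap`.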
        private static bool mi_heap_contains_queue([NativeTypeName("const mi_heap_t*")] mi_heap_t *heap, [NativeTypeName("const mi_page_queue_t*")] mi_page_queue_t *pq)
        {
            mi_assert_internal((MI_DEBUG > 1) && (MI_DEBUG > 1));
            return((pq >= &heap->pages.e0) && (pq <= (&heap->pages.e0 + MI_BIN_FULL)));
        }
        // The current small page array is for efficiency and for each
        // small size (up to 256) it points directly to the page for that
        // size without having to compute the bin. This means when the
        // current free page queue is updated for a small bin, we need to update a
        // range of entries in `heap->pages_free_direct`.
        private static void mi_heap_queue_first_update(mi_heap_t *heap, [NativeTypeName("const mi_page_queue_t*")] mi_page_queue_t *pq)
        {
            mi_assert_internal((MI_DEBUG > 1) && mi_heap_contains_queue(heap, pq));
            nuint size = pq->block_size;

            if (size > MI_SMALL_SIZE_MAX)
            {
                return;
            }

            mi_page_t *page = pq->first;

            if (pq->first == null)
            {
                page = (mi_page_t *)_mi_page_empty;
            }

            // find index in the right direct page array
            nuint idx = _mi_wsize_from_size(size);

            nuint       start;
            mi_page_t **pages_free = &heap->pages_free_direct.e0;

            if (pages_free[idx] == page)
            {
                // already set
                return;
            }

            // find start slot
            if (idx <= 1)
            {
                start = 0;
            }
            else
            {
                // find previous size; due to minimal alignment up to 3 previous bins may need to be skipped
                byte bin = _mi_bin(size);

                mi_page_queue_t *prev = pq - 1;

                while ((bin == _mi_bin(prev->block_size)) && (prev > &heap->pages.e0))
                {
                    prev--;
                }

                start = 1 + _mi_wsize_from_size(prev->block_size);

                if (start > idx)
                {
                    start = idx;
                }
            }

            // set size range to the right page
            mi_assert((MI_DEBUG != 0) && (start <= idx));

            for (nuint sz = start; sz <= idx; sz++)
            {
                pages_free[sz] = page;
            }
        }
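        // The full queue is identified by a sentinel block size of MI_LARGE_OBJ_SIZE_MAX + 2 * sizeof(nuint).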
        private static bool mi_page_queue_is_full([NativeTypeName("const mi_page_queue_t*")] mi_page_queue_t *pq) => pq->block_size == (MI_LARGE_OBJ_SIZE_MAX + (2 * SizeOf<nuint>()));
        private static bool mi_page_queue_is_empty(mi_page_queue_t *queue) => queue->first == null;
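        // A "special" queue (the huge or full queue) uses a sentinel block size larger than MI_LARGE_OBJ_SIZE_MAX.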
        private static bool mi_page_queue_is_special([NativeTypeName("const mi_page_queue_t*")] mi_page_queue_t *pq) => pq->block_size > MI_LARGE_OBJ_SIZE_MAX;