Example No. 1
        // Only called from `mi_heap_absorb`.
        private static partial nuint _mi_page_queue_append(mi_heap_t *heap, mi_page_queue_t *pq, mi_page_queue_t *append)
        {
#pragma warning disable CS0420
            mi_assert_internal((MI_DEBUG > 1) && mi_heap_contains_queue(heap, pq));
            mi_assert_internal((MI_DEBUG > 1) && (pq->block_size == append->block_size));

            if (append->first == null)
            {
                return(0);
            }

            // set append pages to new heap and count
            nuint count = 0;

            for (mi_page_t *page = append->first; page != null; page = page->next)
            {
                // inline `mi_page_set_heap` to avoid a wrong assertion during absorption;
                // in this case it is ok for delayed freeing to occur since both the "to" and "from" heaps are still alive.
                mi_atomic_store_release(ref page->xheap, (nuint)heap);

                // set the flag to delayed free (not overriding NEVER_DELAYED_FREE) which has as a
                // side effect that it spins until any DELAYED_FREEING is finished. This ensures
                // that after appending only the new heap will be used for delayed free operations.
                _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, false);

                count++;
            }

            if (pq->last == null)
            {
                // take over afresh
                mi_assert_internal((MI_DEBUG > 1) && (pq->first == null));

                pq->first = append->first;
                pq->last  = append->last;

                mi_heap_queue_first_update(heap, pq);
            }
            else
            {
                // append to end

                mi_assert_internal((MI_DEBUG > 1) && (pq->last != null));
                mi_assert_internal((MI_DEBUG > 1) && (append->first != null));

                pq->last->next      = append->first;
                append->first->prev = pq->last;

                pq->last = append->last;
            }

            return(count);

#pragma warning restore CS0420
        }
Example No. 2
        // Reallocate but free `p` on errors
        private static void *mi_heap_reallocf(mi_heap_t *heap, void *p, [NativeTypeName("size_t")] nuint newsize)
        {
            void *newp = mi_heap_realloc(heap, p, newsize);

            if ((newp == null) && (p != null))
            {
                mi_free(p);
            }

            return(newp);
        }
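
A minimal usage sketch of the free-on-error contract above (the helper and its name are hypothetical; only `mi_heap_reallocf` comes from this listing): after a failed grow the old block has already been released, so the caller must not free or reuse it.

        private static void *mi_grow_buffer_sketch(mi_heap_t *heap, void *buffer, [NativeTypeName("size_t")] nuint newSize)
        {
            // on failure `buffer` has already been freed by mi_heap_reallocf and `grown` is null;
            // either way only `grown` may be used from here on
            void *grown = mi_heap_reallocf(heap, buffer, newSize);

            return(grown);
        }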
Example No. 3
        private static bool mi_heap_page_is_valid(mi_heap_t *heap, mi_page_queue_t *pq, mi_page_t *page, void *arg1, void *arg2)
        {
            mi_assert_internal((MI_DEBUG > 1) && (MI_DEBUG >= 2));
            mi_assert_internal((MI_DEBUG > 1) && (mi_page_heap(page) == heap));

            mi_segment_t *segment = _mi_page_segment(page);

            mi_assert_internal((MI_DEBUG > 1) && (segment->thread_id == heap->thread_id));

            mi_assert_expensive((MI_DEBUG > 2) && _mi_page_is_valid(page));
            return(true);
        }
Example No. 4
        private static mi_page_queue_t *mi_heap_page_queue_of(mi_heap_t *heap, [NativeTypeName("const mi_page_t*")] mi_page_t *page)
        {
            byte bin = mi_page_is_in_full(page) ? MI_BIN_FULL : _mi_bin(page->xblock_size);

            mi_assert_internal((MI_DEBUG > 1) && (bin <= MI_BIN_FULL));

            mi_page_queue_t *pq = &heap->pages.e0 + bin;

            mi_assert_internal((MI_DEBUG > 1) && (mi_page_is_in_full(page) || page->xblock_size == pq->block_size));

            return(pq);
        }
Example No. 5
        private static void mi_page_queue_enqueue_from(mi_page_queue_t *to, mi_page_queue_t *from, mi_page_t *page)
        {
            mi_assert_internal((MI_DEBUG > 1) && (page != null));
            mi_assert_expensive((MI_DEBUG > 2) && mi_page_queue_contains(from, page));
            mi_assert_expensive((MI_DEBUG > 2) && !mi_page_queue_contains(to, page));
            mi_assert_internal((MI_DEBUG > 1) && (((page->xblock_size == to->block_size) && (page->xblock_size == from->block_size)) ||
                                                  ((page->xblock_size == to->block_size) && mi_page_queue_is_full(from)) ||
                                                  ((page->xblock_size == from->block_size) && mi_page_queue_is_full(to)) ||
                                                  ((page->xblock_size > MI_LARGE_OBJ_SIZE_MAX) && mi_page_queue_is_huge(to)) ||
                                                  ((page->xblock_size > MI_LARGE_OBJ_SIZE_MAX) && mi_page_queue_is_full(to))));

            mi_heap_t *heap = mi_page_heap(page);

            if (page->prev != null)
            {
                page->prev->next = page->next;
            }

            if (page->next != null)
            {
                page->next->prev = page->prev;
            }

            if (page == from->last)
            {
                from->last = page->prev;
            }

            if (page == from->first)
            {
                from->first = page->next;
                mi_assert_internal((MI_DEBUG > 1) && mi_heap_contains_queue(heap, from));

                // update first
                mi_heap_queue_first_update(heap, from);
            }

            page->prev = to->last;
            page->next = null;

            if (to->last != null)
            {
                mi_assert_internal((MI_DEBUG > 1) && (heap == mi_page_heap(to->last)));

                to->last->next = page;
                to->last       = page;
            }
            else
            {
                to->first = page;
                to->last  = page;

                mi_heap_queue_first_update(heap, to);
            }

            mi_page_set_in_full(page, mi_page_queue_is_full(to));
        }
Example No. 6
        private static void *mi_heap_realloc_zero_aligned_at(mi_heap_t *heap, void *p, [NativeTypeName("size_t")] nuint newsize, [NativeTypeName("size_t")] nuint alignment, [NativeTypeName("size_t")] nuint offset, bool zero)
        {
            mi_assert((MI_DEBUG != 0) && (alignment > 0));

            if (alignment <= SizeOf <nuint>())
            {
                return(_mi_heap_realloc_zero(heap, p, newsize, zero));
            }

            if (p == null)
            {
                return(mi_heap_malloc_zero_aligned_at(heap, newsize, alignment, offset, zero));
            }

            nuint size = mi_usable_size(p);

            if ((newsize <= size) && (newsize >= (size - (size / 2))) && ((((nuint)p + offset) % alignment) == 0))
            {
                // reallocation still fits, is aligned and not more than 50% waste
                return(p);
            }
            else
            {
                void *newp = mi_heap_malloc_aligned_at(heap, newsize, alignment, offset);

                if (newp != null)
                {
                    if (zero && newsize > size)
                    {
                        mi_page_t *page = _mi_ptr_page(newp);

                        if (page->is_zero)
                        {
                            // already zero initialized
                            mi_assert_expensive((MI_DEBUG > 2) && mi_mem_is_zero(newp, newsize));
                        }
                        else
                        {
                            // also set last word in the previous allocation to zero to ensure any padding is zero-initialized
                            nuint start = size >= SizeOf <nuint>() ? size - SizeOf <nuint>() : 0;
                            _ = memset((byte *)newp + start, 0, newsize - start);
                        }
                    }

                    _ = memcpy(newp, p, (newsize > size) ? size : newsize);

                    // only free if successful
                    mi_free(p);
                }

                return(newp);
            }
        }
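
The in-place branch above keeps the existing block only when the new size still fits, wastes at most half of the usable size, and the requested alignment already holds; for instance a 128-byte block is kept for any aligned request between 64 and 128 bytes. A standalone sketch of that same test (hypothetical helper, not part of the port) spells out the three conditions:

        private static bool mi_fits_in_place_sketch(void *p, nuint usableSize, nuint newsize, nuint alignment, nuint offset)
        {
            // the new size must not exceed the block...
            bool fits = newsize <= usableSize;

            // ...must not leave more than 50% of the block unused...
            bool notTooSmall = newsize >= (usableSize - (usableSize / 2));

            // ...and the block must already satisfy the requested alignment at `offset`
            bool aligned = (((nuint)p + offset) % alignment) == 0;

            return(fits && notTooSmall && aligned);
        }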
Example No. 7
        private static partial void *_mi_heap_malloc_zero(mi_heap_t *heap, nuint size, bool zero)
        {
            void *p = mi_heap_malloc(heap, size);

            if (zero && p != null)
            {
                // todo: can we avoid getting the page again?
                _mi_block_zero_init(_mi_ptr_page(p), p, size);
            }

            return(p);
        }
Example No. 8
        /* -----------------------------------------------------------
        *  Heap destroy
        *  ----------------------------------------------------------- */

        private static bool _mi_heap_page_destroy(mi_heap_t *heap, mi_page_queue_t *pq, mi_page_t *page, void *arg1, void *arg2)
        {
            // ensure no more thread_delayed_free will be added
            _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);

            // stats
            nuint bsize = mi_page_block_size(page);

            if (bsize > MI_LARGE_OBJ_SIZE_MAX)
            {
                if (bsize > MI_HUGE_OBJ_SIZE_MAX)
                {
                    _mi_stat_decrease(ref heap->tld->stats.giant, bsize);
                }
                else
                {
                    _mi_stat_decrease(ref heap->tld->stats.huge, bsize);
                }
            }

            if (MI_STAT > 1)
            {
                // update used count
                _mi_page_free_collect(page, false);

                nuint inuse = page->used;

                if (bsize <= MI_LARGE_OBJ_SIZE_MAX)
                {
                    mi_stat_decrease(ref (&heap->tld->stats.normal.e0)[_mi_bin(bsize)], inuse);
                }

                // todo: off for aligned blocks...
                mi_stat_decrease(ref heap->tld->stats.malloc, bsize * inuse);
            }

            // pretend it is all free now
            mi_assert_internal((MI_DEBUG > 1) && (mi_page_thread_free(page) == null));

            page->used = 0;

            // and free the page
            // mi_page_free(page,false);

            page->next = null;
            page->prev = null;

            _mi_segment_page_free(page, force: false, &heap->tld->segments);

            // keep going
            return(true);
        }
Example No. 9
        public static partial IntPtr mi_heap_get_backing()
        {
            mi_heap_t *heap = (mi_heap_t *)mi_heap_get_default();

            mi_assert_internal((MI_DEBUG > 1) && (heap != null));

            mi_heap_t *bheap = heap->tld->heap_backing;

            mi_assert_internal((MI_DEBUG > 1) && (bheap != null));
            mi_assert_internal((MI_DEBUG > 1) && (bheap->thread_id == _mi_thread_id()));

            return((IntPtr)bheap);
        }
Example No. 10
        private static bool mi_heap_page_check_owned(mi_heap_t *heap, mi_page_queue_t *pq, mi_page_t *page, void *p, void *vfound)
        {
            bool *        found   = (bool *)vfound;
            mi_segment_t *segment = _mi_page_segment(page);

            void *start = _mi_page_start(segment, page, out _);
            void *end   = (byte *)start + (page->capacity * mi_page_block_size(page));

            *found = (p >= start) && (p < end);

            // continue if not found
            return(!*found);
        }
Example No. 11
        private static sbyte *mi_heap_realpath(mi_heap_t *heap, [NativeTypeName("const char*")] sbyte *fname, [NativeTypeName("char*")] sbyte *resolved_name)
        {
            if (IsWindows)
            {
                sbyte *buf = stackalloc sbyte[PATH_MAX];
                uint   res = GetFullPathName(fname, PATH_MAX, (resolved_name == null) ? buf : resolved_name, null);

                if (res == 0)
                {
                    last_errno = (int)GetLastError();
                    return(null);
                }
                else if (res > PATH_MAX)
                {
                    last_errno = EINVAL;
                    return(null);
                }
                else if (resolved_name != null)
                {
                    return(resolved_name);
                }
                else
                {
                    return(mi_heap_strndup(heap, buf, PATH_MAX));
                }
            }
            else
            {
                if (resolved_name != null)
                {
                    return(realpath(fname, resolved_name));
                }
                else
                {
                    nuint  n   = mi_path_max();
                    sbyte *buf = (sbyte *)mi_malloc(n + 1);

                    if (buf == null)
                    {
                        return(null);
                    }

                    sbyte *rname  = realpath(fname, buf);
                    sbyte *result = mi_heap_strndup(heap, rname, n);

                    mi_free(buf);
                    return(result);
                }
            }
        }
Example No. 12
        private static void *mi_heap_realloc_zero_aligned(mi_heap_t *heap, void *p, [NativeTypeName("size_t")] nuint newsize, [NativeTypeName("size_t")] nuint alignment, bool zero)
        {
            mi_assert((MI_DEBUG != 0) && (alignment > 0));

            if (alignment <= SizeOf <nuint>())
            {
                return(_mi_heap_realloc_zero(heap, p, newsize, zero));
            }

            // use offset of previous allocation (p can be null)
            nuint offset = (nuint)p % alignment;

            return(mi_heap_realloc_zero_aligned_at(heap, p, newsize, alignment, offset, zero));
        }
Example No. 13
        // Retire a page with no more used blocks.
        // It is important not to retire too quickly though, as new
        // allocations might be coming in.
        // Note: called from `mi_free`, and benchmarks often
        // trigger this due to freeing everything and then
        // allocating again, so be careful when changing this.
        private static partial void _mi_page_retire(mi_page_t *page)
        {
            mi_assert_internal((MI_DEBUG > 1) && (page != null));
            mi_assert_expensive((MI_DEBUG > 2) && _mi_page_is_valid(page));
            mi_assert_internal((MI_DEBUG > 1) && mi_page_all_free(page));

            mi_page_set_has_aligned(page, false);

            // don't retire too often..
            // (or we end up retiring and re-allocating most of the time)
            // NOTE: refine this more: we should not retire if this
            // is the only page left with free blocks. It is not clear
            // how to check this efficiently though...
            // for now, we don't retire if it is the only page left of this size class.
            mi_page_queue_t *pq = mi_page_queue_of(page);

            if (mi_likely((page->xblock_size <= MI_MAX_RETIRE_SIZE) && !mi_page_is_in_full(page)))
            {
                if (pq->last == page && pq->first == page)
                {
                    // the only page in the queue?
                    mi_stat_counter_increase(ref _mi_stats_main.page_no_retire, 1);

                    page->retire_expire = (page->xblock_size <= MI_SMALL_OBJ_SIZE_MAX) ? MI_RETIRE_CYCLES : (byte)(MI_RETIRE_CYCLES / 4);

                    mi_heap_t *heap = mi_page_heap(page);
                    mi_assert_internal((MI_DEBUG > 1) && (pq >= &heap->pages.e0));

                    nuint index = (nuint)(pq - &heap->pages.e0);
                    mi_assert_internal((MI_DEBUG > 1) && index < MI_BIN_FULL && index < MI_BIN_HUGE);

                    if (index < heap->page_retired_min)
                    {
                        heap->page_retired_min = index;
                    }

                    if (index > heap->page_retired_max)
                    {
                        heap->page_retired_max = index;
                    }

                    mi_assert_internal((MI_DEBUG > 1) && mi_page_all_free(page));

                    // don't free after all
                    return;
                }
            }

            _mi_page_free(page, pq, false);
        }
Example No. 14
        private static mi_page_queue_t *mi_page_queue_of([NativeTypeName("const mi_page_t*")] mi_page_t *page)
        {
            byte       bin  = mi_page_is_in_full(page) ? MI_BIN_FULL : _mi_bin(page->xblock_size);
            mi_heap_t *heap = mi_page_heap(page);

            mi_assert_internal((MI_DEBUG > 1) && heap != null && bin <= MI_BIN_FULL);

            mi_page_queue_t *pq = &heap->pages.e0 + bin;

            mi_assert_internal((MI_DEBUG > 1) && (bin >= MI_BIN_HUGE || page->xblock_size == pq->block_size));
            mi_assert_expensive((MI_DEBUG > 2) && mi_page_queue_contains(pq, page));

            return(pq);
        }
Example No. 15
        private static void mi_heap_collect_ex(mi_heap_t *heap, mi_collect_t collect)
        {
#pragma warning disable CS0420
            if (heap == null)
            {
                return;
            }

            _mi_deferred_free(heap, collect >= MI_FORCE);

            // note: never reclaim on collect but leave it to threads that need storage to reclaim
            if (((MI_DEBUG == 0) ? (collect == MI_FORCE) : (collect >= MI_FORCE)) && _mi_is_main_thread() && mi_heap_is_backing(heap) && !heap->no_reclaim)
            {
                // the main thread is abandoned (end-of-program), try to reclaim all abandoned segments.
                // if all memory is freed by now, all segments should be freed.
                _mi_abandoned_reclaim_all(heap, &heap->tld->segments);
            }

            // if abandoning, mark all pages to no longer add to delayed_free
            if (collect == MI_ABANDON)
            {
                _ = mi_heap_visit_pages(heap, mi_heap_page_never_delayed_free, null, null);
            }

            // free thread delayed blocks.
            // (if abandoning, after this there are no more thread-delayed references into the pages.)
            _mi_heap_delayed_free(heap);

            // collect retired pages
            _mi_heap_collect_retired(heap, collect >= MI_FORCE);

            // collect all pages owned by this thread
            _ = mi_heap_visit_pages(heap, mi_heap_page_collect, &collect, null);

            mi_assert_internal((MI_DEBUG > 1) && ((collect != MI_ABANDON) || (mi_atomic_load_ptr_acquire <mi_block_t>(ref heap->thread_delayed_free) == null)));

            // collect segment caches
            if (collect >= MI_FORCE)
            {
                _mi_segment_thread_collect(&heap->tld->segments);
            }

            // collect regions on program-exit (or shared library unload)
            if ((collect >= MI_FORCE) && _mi_is_main_thread() && mi_heap_is_backing(heap))
            {
                _mi_mem_collect(&heap->tld->os);
            }
#pragma warning restore CS0420
        }
Example No. 16
        private static mi_page_t *mi_page_fresh(mi_heap_t *heap, mi_page_queue_t *pq)
        {
            mi_assert_internal((MI_DEBUG > 1) && mi_heap_contains_queue(heap, pq));
            mi_page_t *page = mi_page_fresh_alloc(heap, pq, pq->block_size);

            if (page == null)
            {
                return(null);
            }

            mi_assert_internal((MI_DEBUG > 1) && (pq->block_size == mi_page_block_size(page)));
            mi_assert_internal((MI_DEBUG > 1) && (pq == mi_page_queue(heap, mi_page_block_size(page))));

            return(page);
        }
Example No. 17
        private static bool mi_heap_check_owned(mi_heap_t *heap, [NativeTypeName("const void*")] void *p)
        {
            mi_assert((MI_DEBUG != 0) && (heap != null));

            if (((nuint)p & (MI_INTPTR_SIZE - 1)) != 0)
            {
                // only aligned pointers
                return(false);
            }

            bool found = false;

            _ = mi_heap_visit_pages(heap, mi_heap_page_check_owned, (void *)p, &found);
            return(found);
        }
Example No. 18
        public static partial IntPtr mi_heap_new()
        {
            mi_heap_t *bheap = (mi_heap_t *)mi_heap_get_backing();

            // todo: OS allocate in secure mode?
            mi_heap_t *heap = mi_heap_malloc_tp <mi_heap_t>((IntPtr)bheap);

            if (heap == null)
            {
                return(IntPtr.Zero);
            }

            init_mi_heap(heap, bheap->tld, bheap);
            return((IntPtr)heap);
        }
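
Taken together with Examples No. 24 and 25, a typical lifetime looks like the sketch below (hypothetical caller; it only uses entry points shown in this listing): create a heap, allocate from it, then either delete it (remaining pages migrate to the backing heap) or destroy it (all blocks are freed at once).

        private static void mi_heap_lifetime_sketch()
        {
            mi_heap_t *heap = (mi_heap_t *)mi_heap_new();

            if (heap == null)
            {
                return;
            }

            void *block = mi_heap_malloc(heap, (nuint)64);
            mi_free(block);

            // mi_heap_delete keeps any still-allocated blocks alive by absorbing the pages into
            // the backing heap; mi_heap_destroy would free them all regardless of outstanding pointers
            mi_heap_delete(heap);
        }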
Example No. 19
        private static partial void _mi_page_reclaim(mi_heap_t *heap, mi_page_t *page)
        {
            mi_assert_expensive((MI_DEBUG > 2) && mi_page_is_valid_init(page));
            mi_assert_internal((MI_DEBUG > 1) && (mi_page_heap(page) == heap));
            mi_assert_internal((MI_DEBUG > 1) && (mi_page_thread_free_flag(page) != MI_NEVER_DELAYED_FREE));
            mi_assert_internal((MI_DEBUG > 1) && (_mi_page_segment(page)->page_kind != MI_PAGE_HUGE));
            mi_assert_internal((MI_DEBUG > 1) && (!page->is_reset));

            // TODO: push on full queue immediately if it is full?

            mi_page_queue_t *pq = mi_page_queue(heap, mi_page_block_size(page));

            mi_page_queue_push(heap, pq, page);

            mi_assert_expensive((MI_DEBUG > 2) && _mi_page_is_valid(page));
        }
Example No. 20
        private static sbyte *mi_heap_strdup(mi_heap_t *heap, [NativeTypeName("const char*")] sbyte *s)
        {
            if (s == null)
            {
                return(null);
            }
            nuint n = strlen(s);

            sbyte *t = (sbyte *)mi_heap_malloc(heap, n + 1);

            if (t != null)
            {
                _ = memcpy(t, s, n + 1);
            }
            return(t);
        }
Example No. 21
        // called from `mi_heap_destroy` and `mi_heap_delete` to free the internal heap resources.
        private static void mi_heap_free(mi_heap_t *heap)
        {
            mi_assert((MI_DEBUG != 0) && (heap != null));

            if (mi_heap_is_backing(heap))
            {
                // don't free the backing heap
                return;
            }

            // reset default
            if (mi_heap_is_default(heap))
            {
                _mi_heap_set_default_direct(heap->tld->heap_backing);
            }

            // remove ourselves from the thread local heaps list
            // linear search but we expect the number of heaps to be relatively small

            mi_heap_t *prev = null;
            mi_heap_t *curr = heap->tld->heaps;

            while (curr != heap && curr != null)
            {
                prev = curr;
                curr = curr->next;
            }

            mi_assert_internal((MI_DEBUG > 1) && (curr == heap));

            if (curr == heap)
            {
                if (prev != null)
                {
                    prev->next = heap->next;
                }
                else
                {
                    heap->tld->heaps = heap->next;
                }
            }

            mi_assert_internal((MI_DEBUG > 1) && (heap->tld->heaps != null));

            // and free the used memory
            mi_free(heap);
        }
Example No. 22
        private static bool mi_heap_visit_areas_page(mi_heap_t *heap, mi_page_queue_t *pq, mi_page_t *page, void *vfun, void *arg)
        {
            GCHandle handle            = GCHandle.FromIntPtr((IntPtr)vfun);
            mi_heap_area_visit_fun fun = (mi_heap_area_visit_fun)handle.Target !;

            mi_heap_area_ex_t xarea;
            nuint             bsize = mi_page_block_size(page);

            xarea.page            = page;
            xarea.area.reserved   = page->reserved * bsize;
            xarea.area.committed  = page->capacity * bsize;
            xarea.area.blocks     = _mi_page_start(_mi_page_segment(page), page, out _);
            xarea.area.used       = page->used;
            xarea.area.block_size = bsize;

            return(fun(heap, &xarea, arg));
        }
Example No. 23
        // free retired pages: we don't need to look at the entire queues
        // since we only retire pages that are at the head position in a queue.
        private static partial void _mi_heap_collect_retired(mi_heap_t *heap, bool force)
        {
            nuint min = MI_BIN_FULL;
            nuint max = 0;

            for (nuint bin = heap->page_retired_min; bin <= heap->page_retired_max; bin++)
            {
                mi_page_queue_t *pq   = &heap->pages.e0 + bin;
                mi_page_t *      page = pq->first;

                if ((page != null) && (page->retire_expire != 0))
                {
                    if (mi_page_all_free(page))
                    {
                        page->retire_expire--;

                        if (force || (page->retire_expire == 0))
                        {
                            _mi_page_free(pq->first, pq, force);
                        }
                        else
                        {
                            // keep retired, update min/max

                            if (bin < min)
                            {
                                min = bin;
                            }

                            if (bin > max)
                            {
                                max = bin;
                            }
                        }
                    }
                    else
                    {
                        page->retire_expire = 0;
                    }
                }
            }

            heap->page_retired_min = min;
            heap->page_retired_max = max;
        }
Example No. 24
        private static void mi_heap_destroy(mi_heap_t *heap)
        {
            mi_assert((MI_DEBUG != 0) && (heap != null));
            mi_assert((MI_DEBUG != 0) && heap->no_reclaim);
            mi_assert_expensive((MI_DEBUG > 2) && mi_heap_is_valid(heap));

            if (!heap->no_reclaim)
            {
                // don't free in case it may contain reclaimed pages
                mi_heap_delete(heap);
            }
            else
            {
                // free all pages
                _mi_heap_destroy_pages(heap);
                mi_heap_free(heap);
            }
        }
Example No. 25
        // Safe delete a heap without freeing any still allocated blocks in that heap.
        private static void mi_heap_delete(mi_heap_t *heap)
        {
            mi_assert((MI_DEBUG != 0) && (heap != null));
            mi_assert_expensive((MI_DEBUG > 2) && mi_heap_is_valid(heap));

            if (!mi_heap_is_backing(heap))
            {
                // transfer still used pages to the backing heap
                mi_heap_absorb(heap->tld->heap_backing, heap);
            }
            else
            {
                // the backing heap abandons its pages
                _mi_heap_collect_abandon(heap);
            }

            mi_assert_internal((MI_DEBUG > 1) && (heap->page_count == 0));
            mi_heap_free(heap);
        }
Example No. 26
        // Visit all heap pages as areas
        private static bool mi_heap_visit_areas([NativeTypeName("const mi_heap_t*")] mi_heap_t *heap, [NativeTypeName("mi_heap_area_visit_fun*")] mi_heap_area_visit_fun visitor, void *arg)
        {
            if (visitor == null)
            {
                return(false);
            }

            // note: function pointer to void* :-{
            GCHandle handle = GCHandle.Alloc(visitor);

            try
            {
                return(mi_heap_visit_pages(heap, mi_heap_visit_areas_page, (void *)GCHandle.ToIntPtr(handle), arg));
            }
            finally
            {
                handle.Free();
            }
        }
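
A usage sketch for the visitor machinery above (the two helpers are hypothetical; the `mi_heap_area_visit_fun` shape is inferred from the call site in Example No. 22): sum the committed bytes of every area in a heap.

        private static bool mi_count_committed_sketch(mi_heap_t *heap, mi_heap_area_ex_t *xarea, void *arg)
        {
            // `arg` points at the running total owned by the caller below
            *(nuint *)arg += xarea->area.committed;

            // returning true continues the visit; false would stop it early
            return(true);
        }

        private static nuint mi_heap_committed_bytes_sketch(mi_heap_t *heap)
        {
            nuint total = 0;

            _ = mi_heap_visit_areas(heap, mi_count_committed_sketch, &total);
            return(total);
        }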
Example No. 27
        /* -----------------------------------------------------------
        *  Helpers
        *  ----------------------------------------------------------- */

        // Visit all pages in a heap; returns `false` if break was called.
        private static bool mi_heap_visit_pages(mi_heap_t *heap, [NativeTypeName("heap_page_visitor_fun*")] heap_page_visitor_fun fn, void *arg1, void *arg2)
        {
            if ((heap == null) || (heap->page_count == 0))
            {
                return(false);
            }

            nuint total = 0;

            // visit all pages
            if (MI_DEBUG > 1)
            {
                total = heap->page_count;
            }

            nuint count = 0;

            for (nuint i = 0; i <= MI_BIN_FULL; i++)
            {
                mi_page_queue_t *pq   = &heap->pages.e0 + i;
                mi_page_t *      page = pq->first;

                while (page != null)
                {
                    // save next in case the page gets removed from the queue
                    mi_page_t *next = page->next;

                    mi_assert_internal((MI_DEBUG > 1) && (mi_page_heap(page) == heap));

                    count++;

                    if (!fn(heap, pq, page, arg1, arg2))
                    {
                        return(false);
                    }

                    page = next; // and continue
                }
            }

            mi_assert_internal((MI_DEBUG > 1) && (count == total));
            return(true);
        }
Example No. 28
        // Visit all blocks in a heap
        private static bool mi_heap_visit_blocks([NativeTypeName("const mi_heap_t*")] mi_heap_t *heap, bool visit_blocks, [NativeTypeName("mi_block_visit_fun*")] mi_block_visit_fun visitor, void *arg)
        {
            // note: function pointer to void* :-{
            GCHandle handle = GCHandle.Alloc(visitor);

            try
            {
                mi_visit_blocks_args_t args = new mi_visit_blocks_args_t {
                    visit_blocks = visit_blocks,
                    visitor      = (void *)GCHandle.ToIntPtr(handle),
                    arg          = arg,
                };

                return(mi_heap_visit_areas(heap, mi_heap_area_visitor, &args));
            }
            finally
            {
                handle.Free();
            }
        }
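
A matching sketch at block granularity (hypothetical helpers; the `mi_block_visit_fun` shape, including the `mi_heap_area_t *` area parameter, is inferred from the invocation in Example No. 30): count the live blocks in a heap.

        private static bool mi_count_blocks_sketch(IntPtr heap, mi_heap_area_t *area, void *block, nuint block_size, void *arg)
        {
            if (block != null)
            {
                // `block == null` marks the per-area callback (see Example No. 30); only real blocks are counted
                *(nuint *)arg += 1;
            }

            return(true);
        }

        private static nuint mi_heap_block_count_sketch(mi_heap_t *heap)
        {
            nuint count = 0;

            _ = mi_heap_visit_blocks(heap, visit_blocks: true, mi_count_blocks_sketch, &count);
            return(count);
        }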
Example No. 29
        /* -----------------------------------------------------------
        *  Safe Heap delete
        *  ----------------------------------------------------------- */

        // Transfer the pages from one heap to the other
        private static void mi_heap_absorb(mi_heap_t *heap, mi_heap_t *from)
        {
            mi_assert_internal((MI_DEBUG > 1) && (heap != null));

            if ((from == null) || (from->page_count == 0))
            {
                return;
            }

            // reduce the size of the delayed frees
            _mi_heap_delayed_free(from);

            // transfer all pages by appending the queues; this will set a new heap field
            // so threads may do delayed frees in either heap for a while.
            // note: appending waits for each page to not be in the `MI_DELAYED_FREEING` state
            // so after this only the new heap will get delayed frees

            for (nuint i = 0; i <= MI_BIN_FULL; i++)
            {
                mi_page_queue_t *pq     = &heap->pages.e0 + i;
                mi_page_queue_t *append = &from->pages.e0 + i;

                nuint pcount = _mi_page_queue_append(heap, pq, append);

                heap->page_count += pcount;
                from->page_count -= pcount;
            }

            mi_assert_internal((MI_DEBUG > 1) && (from->page_count == 0));

            // and do outstanding delayed frees in the `from` heap
            // note: be careful here as the `heap` field in all those pages no longer points to `from`;
            // this turns out to be ok as `_mi_heap_delayed_free` only visits the list and calls the
            // regular `_mi_free_delayed_block`, which is safe.
            _mi_heap_delayed_free(from);

            mi_assert_internal((MI_DEBUG > 1) && (from->thread_delayed_free == 0));

            // and reset the `from` heap
            mi_heap_reset_pages(from);
        }
Example No. 30
        private static bool mi_heap_area_visitor([NativeTypeName("const mi_heap_t*")] mi_heap_t *heap, [NativeTypeName("const mi_heap_area_ex_t*")] mi_heap_area_ex_t *xarea, void *arg)
        {
            mi_visit_blocks_args_t *args = (mi_visit_blocks_args_t *)arg;

            GCHandle           handle  = GCHandle.FromIntPtr((IntPtr)args->visitor);
            mi_block_visit_fun visitor = (mi_block_visit_fun)handle.Target !;

            if (!visitor((IntPtr)heap, &xarea->area, null, xarea->area.block_size, args->arg))
            {
                return(false);
            }

            if (args->visit_blocks)
            {
                return(mi_heap_area_visit_blocks(xarea, visitor, args->arg));
            }
            else
            {
                return(true);
            }
        }