Example 1
        private static void mi_heap_collect_ex(mi_heap_t *heap, mi_collect_t collect)
        {
#pragma warning disable CS0420
            if (heap == null)
            {
                return;
            }

            _mi_deferred_free(heap, collect >= MI_FORCE);

            // note: never reclaim on collect but leave it to threads that need storage to reclaim
            if (((MI_DEBUG == 0) ? (collect == MI_FORCE) : (collect >= MI_FORCE)) && _mi_is_main_thread() && mi_heap_is_backing(heap) && !heap->no_reclaim)
            {
                // the main thread is abandoned (end-of-program), try to reclaim all abandoned segments.
                // if all memory is freed by now, all segments should be freed.
                _mi_abandoned_reclaim_all(heap, &heap->tld->segments);
            }

            // if abandoning, mark all pages to no longer add to delayed_free
            if (collect == MI_ABANDON)
            {
                _ = mi_heap_visit_pages(heap, mi_heap_page_never_delayed_free, null, null);
            }

            // free thread delayed blocks.
            // (if abandoning, after this there are no more thread-delayed references into the pages.)
            _mi_heap_delayed_free(heap);

            // collect retired pages
            _mi_heap_collect_retired(heap, collect >= MI_FORCE);

            // collect all pages owned by this thread
            _ = mi_heap_visit_pages(heap, mi_heap_page_collect, &collect, null);

            mi_assert_internal((MI_DEBUG > 1) && ((collect != MI_ABANDON) || (mi_atomic_load_ptr_acquire<mi_block_t>(ref heap->thread_delayed_free) == null)));

            // collect segment caches
            if (collect >= MI_FORCE)
            {
                _mi_segment_thread_collect(&heap->tld->segments);
            }

            // collect regions on program-exit (or shared library unload)
            if ((collect >= MI_FORCE) && _mi_is_main_thread() && mi_heap_is_backing(heap))
            {
                _mi_mem_collect(&heap->tld->os);
            }
#pragma warning restore CS0420
        }
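
For context, upstream mimalloc reaches this routine through two thin public wrappers. The following is a minimal C# sketch, not code from the port: it assumes the port keeps the upstream names mi_heap_collect and mi_collect, the MI_NORMAL/MI_FORCE members of mi_collect_t, and a mi_heap_get_default() accessor for the calling thread's default heap.

        // Sketch of the public entry points, assuming upstream mimalloc naming.
        public static void mi_heap_collect(mi_heap_t *heap, bool force)
        {
            // a forced collect also frees retired pages and the segment caches
            mi_heap_collect_ex(heap, force ? MI_FORCE : MI_NORMAL);
        }

        public static void mi_collect(bool force)
        {
            // collect the default heap of the calling thread
            mi_heap_collect(mi_heap_get_default(), force);
        }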
Example 2
        /* -----------------------------------------------------------
        *  "Collect" pages by migrating `local_free` and `thread_free`
        *  lists and freeing empty pages. This is done when a thread
        *  stops (and in that case abandons pages if there are still
        *  blocks alive)
        *  ----------------------------------------------------------- */

        private static bool mi_heap_page_collect(mi_heap_t *heap, mi_page_queue_t *pq, mi_page_t *page, void *arg_collect, void *arg2)
        {
            mi_assert_internal((MI_DEBUG > 1) && mi_heap_page_is_valid(heap, pq, page, null, null));

            mi_collect_t collect = *(mi_collect_t *)arg_collect;

            _mi_page_free_collect(page, collect >= MI_FORCE);

            if (mi_page_all_free(page))
            {
                // no more used blocks, free the page.
                // note: this will free retired pages as well.
                _mi_page_free(page, pq, collect >= MI_FORCE);
            }
            else if (collect == MI_ABANDON)
            {
                // still used blocks but the thread is done; abandon the page
                _mi_page_abandon(page, pq);
            }

            // keep visiting: mi_heap_visit_pages stops as soon as a visitor returns false
            return true;
        }
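
As a usage note, the void *arg_collect and void *arg2 parameters exist because every page visitor shares one signature: the caller passes the address of a local through the void * slot and the visitor casts it back, exactly as Example 1 does with &collect. The sketch below illustrates the same convention with a hypothetical counting visitor; mi_heap_page_count_sketch and mi_heap_count_pages_sketch are illustrative names, not part of the port.

        // Hypothetical visitor, shown only to illustrate the shared signature and
        // the void* argument convention used by mi_heap_visit_pages.
        private static bool mi_heap_page_count_sketch(mi_heap_t *heap, mi_page_queue_t *pq, mi_page_t *page, void *arg_count, void *arg2)
        {
            // the caller passed the address of a local counter through the void* slot
            nuint *count = (nuint *)arg_count;
            *count += 1;

            // returning true tells mi_heap_visit_pages to keep iterating over all pages
            return true;
        }

        private static nuint mi_heap_count_pages_sketch(mi_heap_t *heap)
        {
            nuint count = 0;
            _ = mi_heap_visit_pages(heap, mi_heap_page_count_sketch, &count, null);
            return count;
        }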