Example #1
        private static bool mi_heap_area_visitor([NativeTypeName("const mi_heap_t*")] mi_heap_t *heap, [NativeTypeName("const mi_heap_area_ex_t*")] mi_heap_area_ex_t *xarea, void *arg)
        {
            mi_visit_blocks_args_t *args = (mi_visit_blocks_args_t *)arg;

            GCHandle           handle  = GCHandle.FromIntPtr((IntPtr)args->visitor);
            mi_block_visit_fun visitor = (mi_block_visit_fun)handle.Target!;

            if (!visitor((IntPtr)heap, &xarea->area, null, xarea->area.block_size, args->arg))
            {
                return(false);
            }

            if (args->visit_blocks)
            {
                return(mi_heap_area_visit_blocks(xarea, visitor, args->arg));
            }
            else
            {
                return(true);
            }
        }
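The mi_visit_blocks_args_t struct and the mi_block_visit_fun delegate are declared elsewhere in the bindings and are not shown in this excerpt. The following is a rough reconstruction inferred from the call sites above; member order, accessibility, and attributes are guesses, not the actual declarations.

        // Sketch only: reconstructed from usage in mi_heap_area_visitor / mi_heap_visit_blocks.
        public unsafe delegate bool mi_block_visit_fun(IntPtr heap, mi_heap_area_t *area, void *block, nuint block_size, void *arg);

        internal unsafe struct mi_visit_blocks_args_t
        {
            public bool visit_blocks;   // call the visitor for every block, not just once per area
            public void *visitor;       // GCHandle to the managed mi_block_visit_fun, stored as a void*
            public void *arg;           // caller-supplied state, forwarded to the visitor unchanged
        }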
Example #2
        public static partial bool mi_heap_visit_blocks(IntPtr heap, bool visit_blocks, mi_block_visit_fun visitor, void *arg) => mi_heap_visit_blocks((mi_heap_t *)heap, visit_blocks, visitor, arg);
Example #3
        // Visit all blocks in a heap
        private static bool mi_heap_visit_blocks([NativeTypeName("const mi_heap_t*")] mi_heap_t *heap, bool visit_blocks, [NativeTypeName("mi_block_visit_fun*")] mi_block_visit_fun visitor, void *arg)
        {
            // note: function pointer to void* :-{
            GCHandle handle = GCHandle.Alloc(visitor);

            try
            {
                mi_visit_blocks_args_t args = new mi_visit_blocks_args_t {
                    visit_blocks = visit_blocks,
                    visitor      = (void *)GCHandle.ToIntPtr(handle),
                    arg          = arg,
                };

                return(mi_heap_visit_areas(heap, mi_heap_area_visitor, &args));
            }
            finally
            {
                handle.Free();
            }
        }
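For context, a hypothetical usage sketch (not part of the bindings): it visits every live block in a given heap through the public IntPtr overload and tallies a count and total size, using the delegate shape sketched earlier. The helper name and the way the heap handle is obtained are illustrative only.

        // Hypothetical helper; assumes the caller already has an IntPtr heap handle.
        private static unsafe void CountLiveBlocks(IntPtr heap)
        {
            nuint totalBlocks = 0;
            nuint totalBytes  = 0;

            bool Visit(IntPtr visitedHeap, mi_heap_area_t *area, void *block, nuint blockSize, void *arg)
            {
                // block is null for the once-per-area callback; count only real blocks
                if (block != null)
                {
                    totalBlocks++;
                    totalBytes += blockSize;
                }

                // returning false would stop the walk early
                return true;
            }

            _ = mi_heap_visit_blocks(heap, /* visit_blocks: */ true, Visit, null);
            Console.WriteLine($"{totalBlocks} blocks, {totalBytes} bytes");
        }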
Example #4
        /* -----------------------------------------------------------
        *  Visit all heap blocks and areas
        *  Todo: enable visiting abandoned pages, and
        *       enable visiting all blocks of all heaps across threads
        *  ----------------------------------------------------------- */

        private static bool mi_heap_area_visit_blocks([NativeTypeName("const mi_heap_area_ex_t*")] mi_heap_area_ex_t *xarea, [NativeTypeName("mi_block_visit_fun*")] mi_block_visit_fun visitor, void *arg)
        {
            mi_assert((MI_DEBUG != 0) && (xarea != null));

            if (xarea == null)
            {
                return(true);
            }

            mi_heap_area_t *area = &xarea->area;
            mi_page_t *     page = xarea->page;

            mi_assert((MI_DEBUG != 0) && (page != null));

            if (page == null)
            {
                return(true);
            }

            _mi_page_free_collect(page, true);
            mi_assert_internal((MI_DEBUG > 1) && (page->local_free == null));

            if (page->used == 0)
            {
                return(true);
            }

            nuint bsize  = mi_page_block_size(page);
            byte *pstart = _mi_page_start(_mi_page_segment(page), page, out nuint psize);

            if (page->capacity == 1)
            {
                // optimize page with one block
                mi_assert_internal((MI_DEBUG > 1) && page->used == 1 && page->free == null);
                return(visitor((IntPtr)mi_page_heap(page), area, pstart, bsize, arg));
            }

            // create a bitmap of free blocks.

            nuint *free_map = stackalloc nuint[(int)(MI_MAX_BLOCKS / SizeOf<nuint>())];

            _ = memset(free_map, 0, MI_MAX_BLOCKS / SizeOf<nuint>());

            nuint free_count = 0;

            for (mi_block_t *block = page->free; block != null; block = mi_block_next(page, block))
            {
                free_count++;
                mi_assert_internal((MI_DEBUG > 1) && ((byte *)block >= pstart) && ((byte *)block < (pstart + psize)));

                nuint offset = (nuint)block - (nuint)pstart;
                mi_assert_internal((MI_DEBUG > 1) && (offset % bsize == 0));

                // Todo: avoid division?
                nuint blockidx = offset / bsize;

                mi_assert_internal((MI_DEBUG > 1) && (blockidx < MI_MAX_BLOCKS));

                nuint bitidx = blockidx / SizeOf<nuint>();
                nuint bit    = blockidx - (bitidx * SizeOf<nuint>());

                free_map[bitidx] |= (nuint)1 << (int)bit;
            }

            mi_assert_internal((MI_DEBUG > 1) && (page->capacity == (free_count + page->used)));

            // walk through all blocks skipping the free ones
            nuint used_count = 0;

            for (nuint i = 0; i < page->capacity; i++)
            {
                nuint bitidx = i / SizeOf<nuint>();
                nuint bit    = i - (bitidx * SizeOf<nuint>());
                nuint m      = free_map[bitidx];

                if ((bit == 0) && (m == UINTPTR_MAX))
                {
                    // skip a run of free blocks
                    i += SizeOf<nuint>() - 1;
                }
                else if ((m & ((nuint)1 << (int)bit)) == 0)
                {
                    used_count++;
                    byte *block = pstart + (i * bsize);

                    if (!visitor((IntPtr)mi_page_heap(page), area, block, bsize, arg))
                    {
                        return(false);
                    }
                }
            }

            mi_assert_internal((MI_DEBUG > 1) && (page->used == used_count));
            return(true);
        }
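The free map packs one flag per block: blockidx / SizeOf<nuint>() selects the word and the remainder selects the bit within it. Note that the divisor is SizeOf<nuint>() (bytes per word) rather than the word's bit width, so on a 64-bit target only the low 8 bits of each word are used; the walk that skips free blocks uses the same divisor, so the packing stays consistent. Below is a small standalone sketch of that index arithmetic with illustrative values (the helper name and the numbers are made up):

        // Standalone sketch of the free-map packing used above; SizeOf<nuint>() is
        // approximated by IntPtr.Size (8 on a 64-bit target).
        private static void FreeMapIndexingSketch()
        {
            nuint blockidx = 21;                    // hypothetical index of a free block
            nuint wordSize = (nuint)IntPtr.Size;    // stand-in for SizeOf<nuint>()

            nuint bitidx = blockidx / wordSize;                 // word holding the flag: 21 / 8 == 2
            nuint bit    = blockidx - (bitidx * wordSize);      // position in that word: 21 - 16 == 5

            Span<nuint> free_map = stackalloc nuint[4];
            free_map[(int)bitidx] |= (nuint)1 << (int)bit;      // mark block 21 as free

            bool isFree = (free_map[(int)bitidx] & ((nuint)1 << (int)bit)) != 0;
            Console.WriteLine($"block {blockidx}: word {bitidx}, bit {bit}, free = {isFree}");
        }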
Example #5
        public static partial bool mi_heap_visit_blocks([NativeTypeName("const mi_heap_t*")] IntPtr heap, bool visit_blocks, mi_block_visit_fun visitor, void *arg);