Example No. 1
        private static void mi_free_generic([NativeTypeName("const mi_segment_t*")] mi_segment_t *segment, bool local, void *p)
        {
            mi_page_t * page  = _mi_segment_page_of(segment, p);
            mi_block_t *block = mi_page_has_aligned(page) ? _mi_page_ptr_unalign(segment, page, p) : (mi_block_t *)p;

            _mi_free_block(page, local, block);
        }
Example No. 2
        private static bool mi_page_is_valid_init(mi_page_t *page)
        {
            mi_assert_internal((MI_DEBUG > 1) && (MI_DEBUG >= 3));
            mi_assert_internal((MI_DEBUG > 1) && (page->xblock_size > 0));
            mi_assert_internal((MI_DEBUG > 1) && (page->used <= page->capacity));
            mi_assert_internal((MI_DEBUG > 1) && (page->capacity <= page->reserved));

            nuint bsize = mi_page_block_size(page);

            mi_segment_t *segment = _mi_page_segment(page);
            byte *        start   = _mi_page_start(segment, page, out _);

            mi_assert_internal((MI_DEBUG > 1) && (start == _mi_segment_page_start(segment, page, bsize, out _, out _)));
            // mi_assert_internal((MI_DEBUG > 1) && ((start + page->capacity * page->block_size) == page->top));

            mi_assert_internal((MI_DEBUG > 1) && mi_page_list_is_valid(page, page->free));
            mi_assert_internal((MI_DEBUG > 1) && mi_page_list_is_valid(page, page->local_free));

            mi_block_t *tfree = mi_page_thread_free(page);

            mi_assert_internal((MI_DEBUG > 1) && mi_page_list_is_valid(page, tfree));

            // nuint tfree_count = mi_page_list_count(page, tfree);
            // mi_assert_internal((MI_DEBUG > 1) && (tfree_count <= page->thread_freed + 1));

            nuint free_count = mi_page_list_count(page, page->free) + mi_page_list_count(page, page->local_free);

            mi_assert_internal((MI_DEBUG > 1) && (page->used + free_count == page->capacity));

            return(true);
        }
Example No. 3
        // Abandon a page with used blocks at the end of a thread.
        // Note: only call if it is ensured that no references exist from
        // the `page->heap->thread_delayed_free` into this page.
        // Currently only called through `mi_heap_collect_ex` which ensures this.
        private static partial void _mi_page_abandon(mi_page_t *page, mi_page_queue_t *pq)
        {
            mi_assert_internal((MI_DEBUG > 1) && (page != null));
            mi_assert_expensive((MI_DEBUG > 2) && _mi_page_is_valid(page));
            mi_assert_internal((MI_DEBUG > 1) && (pq == mi_page_queue_of(page)));
            mi_assert_internal((MI_DEBUG > 1) && (mi_page_heap(page) != null));

            mi_heap_t *pheap = mi_page_heap(page);

            // remove from our page list
            mi_segments_tld_t *segments_tld = &pheap->tld->segments;

            mi_page_queue_remove(pq, page);

            // page is no longer associated with our heap
            mi_assert_internal((MI_DEBUG > 1) && (mi_page_thread_free_flag(page) == MI_NEVER_DELAYED_FREE));
            mi_page_set_heap(page, null);

            if (MI_DEBUG > 1)
            {
                // check there are no references left..
                for (mi_block_t *block = (mi_block_t *)pheap->thread_delayed_free; block != null; block = mi_block_nextx(pheap, block, &pheap->keys.e0))
                {
                    mi_assert_internal((MI_DEBUG > 1) && (_mi_ptr_page(block) != page));
                }
            }

            // and abandon it
            mi_assert_internal((MI_DEBUG > 1) && (mi_page_heap(page) == null));
            _mi_segment_page_abandon(page, segments_tld);
        }
Example No. 4
        // Free a block
        public static partial void mi_free(void *p)
        {
            mi_segment_t *segment = mi_checked_ptr_segment(p, "mi_free");

            if (mi_unlikely(segment == null))
            {
                return;
            }

            nuint       tid   = _mi_thread_id();
            mi_page_t * page  = _mi_segment_page_of(segment, p);
            mi_block_t *block = (mi_block_t *)p;

            if (MI_STAT > 1)
            {
                mi_heap_t *heap  = (mi_heap_t *)mi_heap_get_default();
                nuint      bsize = mi_page_usable_block_size(page);

                mi_stat_decrease(ref heap->tld->stats.malloc, bsize);

                if (bsize <= MI_LARGE_OBJ_SIZE_MAX)
                {
                    // huge page stats are accounted for in `_mi_page_retire`
                    mi_stat_decrease(ref (&heap->tld->stats.normal.e0)[_mi_bin(bsize)], 1);
                }
            }

            if (mi_likely((tid == segment->thread_id) && (page->flags.full_aligned == 0)))
            {
                // local free: the thread id matches and the page is neither full nor has aligned blocks

                if (mi_unlikely(mi_check_is_double_free(page, block)))
                {
                    return;
                }

                mi_check_padding(page, block);

                if (MI_DEBUG != 0)
                {
                    _ = memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
                }

                mi_block_set_next(page, block, page->local_free);
                page->local_free = block;

                if (mi_unlikely(--page->used == 0))
                {
                    // using this expression generates better code than: page->used--; if (mi_page_all_free(page))
                    _mi_page_retire(page);
                }
            }
            else
            {
                // non-local, aligned blocks, or a full page; use the more generic path
                // note: recalc page in generic to improve code generation
                mi_free_generic(segment, tid == segment->thread_id, p);
            }
        }
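For orientation, a call site simply pairs this with the allocator's entry point. Below is a minimal usage sketch, assuming the port also exposes mi_malloc(nuint) returning void *, mirroring the C API (it is not shown in these examples):

        public static void mi_free_usage_sketch()
        {
            // assumed API: mi_malloc mirrors C's mi_malloc(size_t) and returns null on failure
            void *p = mi_malloc(64);

            if (p == null)
            {
                return;
            }

            // ... use the 64 bytes ...

            // On the owning thread, while the page is neither full nor has aligned blocks, this takes
            // the fast path above: push onto page->local_free and retire the page if it becomes empty.
            // Otherwise it falls through to mi_free_generic (Example No. 1).
            mi_free(p);
        }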
Example No. 5
        private static partial bool _mi_free_delayed_block(mi_block_t *block)
        {
            // get segment and page
            mi_segment_t *segment = _mi_ptr_segment(block);

            mi_assert_internal((MI_DEBUG > 1) && (_mi_ptr_cookie(segment) == segment->cookie));
            mi_assert_internal((MI_DEBUG > 1) && (_mi_thread_id() == segment->thread_id));

            mi_page_t *page = _mi_segment_page_of(segment, block);

            // Clear the no-delayed flag so delayed freeing is used again for this page.
            // This must be done before collecting the free lists on this page -- otherwise
            // some blocks may end up in the page `thread_free` list with no blocks in the
            // heap `thread_delayed_free` list which may cause the page to be never freed!
            // (it would only be freed if we happen to scan it in `mi_page_queue_find_free_ex`)
            _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, override_never: false);

            // collect all other non-local frees to ensure up-to-date `used` count
            _mi_page_free_collect(page, false);

            // and free the block (possibly freeing the page as well since used is updated)
            _mi_free_block(page, true, block);

            return(true);
        }
Example No. 6
        private static nuint _mi_usable_size([NativeTypeName("const void*")] void *p, [NativeTypeName("const char*")] string msg)
        {
            mi_segment_t *segment = mi_checked_ptr_segment(p, msg);

            if (segment == null)
            {
                return(0);
            }

            mi_page_t * page  = _mi_segment_page_of(segment, p);
            mi_block_t *block = (mi_block_t *)p;

            if (mi_unlikely(mi_page_has_aligned(page)))
            {
                block = _mi_page_ptr_unalign(segment, page, p);
                nuint size = mi_page_usable_size_of(page, block);

                nint adjust = (nint)((nuint)p - (nuint)block);

                mi_assert_internal((MI_DEBUG > 1) && (adjust >= 0) && ((nuint)adjust <= size));
                return(size - (nuint)adjust);
            }
            else
            {
                return(mi_page_usable_size_of(page, block));
            }
        }
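As a quick worked example of the aligned branch: if the block starts at 0x1000 with a usable size of 64 bytes and the caller's pointer p is 0x1010, then adjust is 16 and the size reported from p is 64 - 16 = 48 bytes.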
Example No. 7
        /* -----------------------------------------------------------
        *  Do any delayed frees
        *  (put there by other threads if they deallocated in a full page)
        *  ----------------------------------------------------------- */
        private static partial void _mi_heap_delayed_free(mi_heap_t *heap)
        {
#pragma warning disable CS0420
            // take over the list (note: no atomic exchange since it is often null)
            nuint block = (nuint)mi_atomic_load_ptr_relaxed <mi_block_t>(ref heap->thread_delayed_free);

            while ((block != 0) && !mi_atomic_cas_ptr_weak_acq_rel <mi_block_t>(ref heap->thread_delayed_free, ref block, null))
            {
                /* nothing */
            }

            // and free them all
            while (block != 0)
            {
                mi_block_t *next = mi_block_nextx(heap, (mi_block_t *)block, &heap->keys.e0);

                // use internal free instead of regular one to keep stats etc correct
                if (!_mi_free_delayed_block((mi_block_t *)block))
                {
                    // we might already start delayed freeing while another thread has not yet
                    // reset the delayed_freeing flag; in that case delay it further by reinserting.
                    nuint dfree = (nuint)mi_atomic_load_ptr_relaxed <mi_block_t>(ref heap->thread_delayed_free);

                    do
                    {
                        mi_block_set_nextx(heap, (mi_block_t *)block, (mi_block_t *)dfree, &heap->keys.e0);
                    }while (!mi_atomic_cas_ptr_weak_release(ref heap->thread_delayed_free, ref dfree, (mi_block_t *)block));
                }

                block = (nuint)next;
            }
#pragma warning restore CS0420
        }
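The take-over at the top of _mi_heap_delayed_free (read the shared list head, CAS it to null, then walk the detached list privately) is a standard lock-free hand-off. Below is a self-contained managed sketch of the same pattern; the types and names are illustrative and are not part of the port.

        internal sealed class DelayedNode
        {
            public DelayedNode Next;
        }

        internal static class DelayedListSketch
        {
            private static DelayedNode _head;

            // Producers push with a CAS loop, like the reinsertion into `thread_delayed_free` above.
            public static void Push(DelayedNode node)
            {
                DelayedNode old;

                do
                {
                    old = System.Threading.Volatile.Read(ref _head);
                    node.Next = old;
                }
                while (System.Threading.Interlocked.CompareExchange(ref _head, node, old) != old);
            }

            // The consumer detaches the whole list with one CAS, then walks it without contention.
            public static void DrainAll(System.Action<DelayedNode> process)
            {
                DelayedNode block = System.Threading.Volatile.Read(ref _head);

                // only attempt the CAS when the head is non-null, since the list is usually empty
                while ((block != null) && (System.Threading.Interlocked.CompareExchange(ref _head, null, block) != block))
                {
                    block = System.Threading.Volatile.Read(ref _head);
                }

                while (block != null)
                {
                    DelayedNode next = block.Next;
                    process(block);
                    block = next;
                }
            }
        }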
Example No. 8
        /* -----------------------------------------------------------
        *  Page collect the `local_free` and `thread_free` lists
        *  ----------------------------------------------------------- */

        // Collect the local `thread_free` list using an atomic exchange.
        // Note: The exchange must be done atomically as this is used right after
        // moving to the full list in `mi_page_collect_ex` and we need to
        // ensure that there was no race where the page became unfull just before the move.
        private static void _mi_page_thread_free_collect(mi_page_t *page)
        {
#pragma warning disable CS0420
            mi_block_t *head;

            nuint tfreex;
            nuint tfree = mi_atomic_load_relaxed(ref page->xthread_free);

            do
            {
                head   = mi_tf_block(tfree);
                tfreex = mi_tf_set_block(tfree, null);
            }while (!mi_atomic_cas_weak_acq_rel(ref page->xthread_free, ref tfree, tfreex));

            if (head == null)
            {
                // return if the list is empty
                return;
            }

            // find the tail -- also to get a proper count (without data races)

            // cannot collect more than capacity
            uint max_count = page->capacity;

            uint count = 1;

            mi_block_t *tail = head;
            mi_block_t *next;

            while (((next = mi_block_next(page, tail)) != null) && (count <= max_count))
            {
                count++;
                tail = next;
            }

            if (count > max_count)
            {
                // if `count > max_count` there was a memory corruption (possibly infinite list due to double multi-threaded free)
                _mi_error_message(EFAULT, "corrupted thread-free list\n");

                // the thread-free items cannot be freed
                return;
            }

            // and append the current local free list
            mi_block_set_next(page, tail, page->local_free);
            page->local_free = head;

            // update counts now
            page->used -= count;
#pragma warning restore CS0420
        }
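The mi_tf_* helpers used here and in Example No. 16 treat xthread_free as a tagged pointer: in upstream mimalloc the low two bits carry the delayed-free state and the remaining bits carry the list head, which is what makes it possible to swap the head and the state together in a single CAS. A rough sketch of that encoding follows; it is illustrative only, as the port's actual helpers are not shown in these examples.

        // Illustrative encoding only: a block pointer and a 2-bit delayed-free state packed into
        // one word (valid because blocks are at least 4-byte aligned).
        private static mi_block_t *tf_block_sketch(nuint tf)
        {
            // clear the two state bits to recover the list head
            return (mi_block_t *)(tf & ~(nuint)0x03);
        }

        private static nuint tf_delayed_sketch(nuint tf)
        {
            // the two state bits (MI_USE_DELAYED_FREE, MI_DELAYED_FREEING, ...)
            return tf & 0x03;
        }

        private static nuint tf_make_sketch(mi_block_t *block, nuint delayed)
        {
            // combine pointer bits and state bits
            return (nuint)block | (delayed & 0x03);
        }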
Example No. 9
        private static nuint mi_page_list_count(mi_page_t *page, mi_block_t *head)
        {
            mi_assert_internal((MI_DEBUG > 1) && (MI_DEBUG >= 3));

            nuint count = 0;

            while (head != null)
            {
                mi_assert_internal((MI_DEBUG > 1) && (page == _mi_ptr_page(head)));
                count++;
                head = mi_block_next(page, head);
            }

            return(count);
        }
Example No. 10
        private static bool mi_check_is_double_free([NativeTypeName("const mi_page_t*")] mi_page_t *page, [NativeTypeName("const mi_block_t*")] mi_block_t *block)
        {
            if ((MI_ENCODE_FREELIST != 0) && ((MI_SECURE >= 4) || (MI_DEBUG != 0)))
            {
                // pretend it is freed, and get the decoded first field
                mi_block_t *n = mi_block_nextx(page, block, &page->keys.e0);

                // quick check: aligned pointer && in same page or null
                if ((((nuint)n & (MI_INTPTR_SIZE - 1)) == 0) && ((n == null) || mi_is_in_same_page(block, n)))
                {
                    // Suspicious: the decoded value in the block points into the same page (or is null) -- maybe a double free?
                    // (continue in separate function to improve code generation)
                    return(mi_check_is_double_freex(page, block));
                }
            }

            return(false);
        }
Example No. 11
        private static partial void _mi_page_free_collect(mi_page_t *page, bool force)
        {
            mi_assert_internal((MI_DEBUG > 1) && (page != null));

            // collect the thread free list
            if (force || (mi_page_thread_free(page) != null))
            {
                // quick test to avoid an atomic operation
                _mi_page_thread_free_collect(page);
            }

            // and the local free list
            if (page->local_free != null)
            {
                if (mi_likely(page->free == null))
                {
                    // usual case
                    page->free       = page->local_free;
                    page->local_free = null;
                    page->is_zero    = false;
                }
                else if (force)
                {
                    // append -- only on shutdown (force) as this is a linear operation

                    mi_block_t *tail = page->local_free;
                    mi_block_t *next;

                    while ((next = mi_block_next(page, tail)) != null)
                    {
                        tail = next;
                    }

                    mi_block_set_next(page, tail, page->free);
                    page->free = page->local_free;

                    page->local_free = null;
                    page->is_zero    = false;
                }
            }

            mi_assert_internal((MI_DEBUG > 1) && (!force || (page->local_free == null)));
        }
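Putting this together with Examples No. 8, 13 and 22: the allocation fast path pops only from page->free, same-thread frees push onto page->local_free, cross-thread frees land on xthread_free, and this collector migrates them back. Below is a simplified, single-threaded model of the free/local_free half of that arrangement, using managed types and illustrative names only.

        internal sealed class BlockNode
        {
            public BlockNode Next;
        }

        internal sealed class PageListModel
        {
            public BlockNode Free;        // what the allocation fast path pops from
            public BlockNode LocalFree;   // same-thread frees accumulate here
            public int Used;

            // mirrors the fast path of _mi_page_malloc (Example No. 22): pop from Free or give up
            public BlockNode PopBlock()
            {
                BlockNode block = Free;

                if (block == null)
                {
                    // a real page would fall back to the generic allocation path here
                    return null;
                }

                Free = block.Next;
                Used++;

                return block;
            }

            // mirrors the local branch of _mi_free_block (Example No. 13): push onto LocalFree, not Free
            public void PushLocalFree(BlockNode block)
            {
                block.Next = LocalFree;
                LocalFree  = block;
                Used--;
            }

            // mirrors the usual case of _mi_page_free_collect above: an O(1) hand-over when Free is empty
            public void Collect()
            {
                if (Free == null)
                {
                    Free      = LocalFree;
                    LocalFree = null;
                }
            }
        }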
Example No. 12
        private static bool mi_page_list_is_valid(mi_page_t *page, mi_block_t *p)
        {
            mi_assert_internal((MI_DEBUG > 1) && (MI_DEBUG >= 3));
            byte *page_area = _mi_page_start(_mi_page_segment(page), page, out nuint psize);

            mi_block_t *start = (mi_block_t *)page_area;
            mi_block_t *end   = (mi_block_t *)(page_area + psize);

            while (p != null)
            {
                if (p < start || p >= end)
                {
                    return(false);
                }
                p = mi_block_next(page, p);
            }

            return(true);
        }
Example No. 13
        // regular free
        private static void _mi_free_block(mi_page_t *page, bool local, mi_block_t *block)
        {
            // and push it on the free list
            if (mi_likely(local))
            {
                // owning thread can free a block directly

                if (mi_unlikely(mi_check_is_double_free(page, block)))
                {
                    return;
                }

                mi_check_padding(page, block);

                if (MI_DEBUG != 0)
                {
                    _ = memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
                }

                mi_block_set_next(page, block, page->local_free);

                page->local_free = block;
                page->used--;

                if (mi_unlikely(mi_page_all_free(page)))
                {
                    _mi_page_retire(page);
                }
                else if (mi_unlikely(mi_page_is_in_full(page)))
                {
                    _mi_page_unfull(page);
                }
            }
            else
            {
                _mi_free_block_mt(page, block);
            }
        }
Example No. 14
        /* -----------------------------------------------------------
        *  Visit all heap blocks and areas
        *  Todo: enable visiting abandoned pages, and
        *       enable visiting all blocks of all heaps across threads
        *  ----------------------------------------------------------- */

        private static bool mi_heap_area_visit_blocks([NativeTypeName("const mi_heap_area_ex_t*")] mi_heap_area_ex_t *xarea, [NativeTypeName("mi_block_visit_fun*")] mi_block_visit_fun visitor, void *arg)
        {
            mi_assert((MI_DEBUG != 0) && (xarea != null));

            if (xarea == null)
            {
                return(true);
            }

            mi_heap_area_t *area = &xarea->area;
            mi_page_t *     page = xarea->page;

            mi_assert((MI_DEBUG != 0) && (page != null));

            if (page == null)
            {
                return(true);
            }

            _mi_page_free_collect(page, true);
            mi_assert_internal((MI_DEBUG > 1) && (page->local_free == null));

            if (page->used == 0)
            {
                return(true);
            }

            nuint bsize  = mi_page_block_size(page);
            byte *pstart = _mi_page_start(_mi_page_segment(page), page, out nuint psize);

            if (page->capacity == 1)
            {
                // optimize page with one block
                mi_assert_internal((MI_DEBUG > 1) && page->used == 1 && page->free == null);
                return(visitor((IntPtr)mi_page_heap(page), area, pstart, bsize, arg));
            }

            // create a bitmap of free blocks.

            nuint *free_map = stackalloc nuint[(int)(MI_MAX_BLOCKS / SizeOf <nuint>())];

            // zero the whole map: memset takes a byte count, and the buffer holds (MI_MAX_BLOCKS / SizeOf <nuint>()) words
            _ = memset(free_map, 0, (MI_MAX_BLOCKS / SizeOf <nuint>()) * SizeOf <nuint>());

            nuint free_count = 0;

            for (mi_block_t *block = page->free; block != null; block = mi_block_next(page, block))
            {
                free_count++;
                mi_assert_internal((MI_DEBUG > 1) && ((byte *)block >= pstart) && ((byte *)block < (pstart + psize)));

                nuint offset = (nuint)block - (nuint)pstart;
                mi_assert_internal((MI_DEBUG > 1) && (offset % bsize == 0));

                // Todo: avoid division?
                nuint blockidx = offset / bsize;

                mi_assert_internal((MI_DEBUG > 1) && (blockidx < MI_MAX_BLOCKS));

                nuint bitidx = blockidx / SizeOf <nuint>();
                nuint bit    = blockidx - (bitidx * SizeOf <nuint>());

                free_map[bitidx] |= (nuint)1 << (int)bit;
            }

            mi_assert_internal((MI_DEBUG > 1) && (page->capacity == (free_count + page->used)));

            // walk through all blocks skipping the free ones
            nuint used_count = 0;

            for (nuint i = 0; i < page->capacity; i++)
            {
                nuint bitidx = i / SizeOf <nuint>();
                nuint bit    = i - (bitidx * SizeOf <nuint>());
                nuint m      = free_map[bitidx];

                if ((bit == 0) && (m == UINTPTR_MAX))
                {
                    // skip a run of free blocks
                    i += SizeOf <nuint>() - 1;
                }
                else if ((m & ((nuint)1 << (int)bit)) == 0)
                {
                    used_count++;
                    byte *block = pstart + (i * bsize);

                    if (!visitor((IntPtr)mi_page_heap(page), area, block, bsize, arg))
                    {
                        return(false);
                    }
                }
            }

            mi_assert_internal((MI_DEBUG > 1) && (page->used == used_count));
            return(true);
        }
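The free_map bookkeeping above divides by SizeOf <nuint>() rather than by the bit width of a word, mirroring the upstream C code, so each map word holds at most SizeOf <nuint>() flags. A small worked sketch of the index math, assuming a 64-bit target where SizeOf <nuint>() is 8:

        // e.g. block index 13 maps to free_map[1], bit 5 (13 / 8 == 1, 13 - 8 == 5)
        private static (nuint bitidx, nuint bit) free_map_slot_sketch(nuint blockidx, nuint wordSize)
        {
            nuint bitidx = blockidx / wordSize;              // which free_map word
            nuint bit    = blockidx - (bitidx * wordSize);   // which flag within that word

            return (bitidx, bit);
        }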
Example No. 15
        private static nuint mi_page_usable_size_of([NativeTypeName("const mi_page_t*")] mi_page_t *page, [NativeTypeName("const mi_block_t*")] mi_block_t *block)
        {
            if ((MI_PADDING > 0) && (MI_ENCODE_FREELIST != 0))
            {
                bool ok = mi_page_decode_padding(page, block, out nuint delta, out nuint bsize);

                mi_assert_internal((MI_DEBUG > 1) && ok);
                mi_assert_internal((MI_DEBUG > 1) && (delta <= bsize));

                return(ok ? (bsize - delta) : 0);
            }
            else
            {
                return(mi_page_usable_block_size(page));
            }
        }
Example No. 16
        private static void _mi_free_block_mt(mi_page_t *page, mi_block_t *block)
        {
#pragma warning disable CS0420
            // The padding check may access the non-thread-owned page for the key values;
            // that is safe as these are constant and the page won't be freed (as the block is not freed yet).
            mi_check_padding(page, block);

            // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection
            mi_padding_shrink(page, block, SizeOf <mi_block_t>());

            if (MI_DEBUG != 0)
            {
                _ = memset(block, MI_DEBUG_FREED, mi_usable_size(block));
            }

            // huge page segments are always abandoned and can be freed immediately
            mi_segment_t *segment = _mi_page_segment(page);

            if (segment->page_kind == MI_PAGE_HUGE)
            {
                _mi_segment_huge_page_free(segment, page, block);
                return;
            }

            // Try to put the block on either the page-local thread free list, or the heap delayed free list.
            nuint tfreex;

            bool  use_delayed;
            nuint tfree = mi_atomic_load_relaxed(ref page->xthread_free);

            do
            {
                use_delayed = mi_tf_delayed(tfree) == MI_USE_DELAYED_FREE;

                if (mi_unlikely(use_delayed))
                {
                    // unlikely: this only happens on the first concurrent free in a page that is in the full list
                    tfreex = mi_tf_set_delayed(tfree, MI_DELAYED_FREEING);
                }
                else
                {
                    // usual: directly add to page thread_free list
                    mi_block_set_next(page, block, mi_tf_block(tfree));
                    tfreex = mi_tf_set_block(tfree, block);
                }
            }while (!mi_atomic_cas_weak_release(ref page->xthread_free, ref tfree, tfreex));

            if (mi_unlikely(use_delayed))
            {
                // racy read on `heap`, but ok because MI_DELAYED_FREEING is set (see `mi_heap_delete` and `mi_heap_collect_abandon`)
                mi_heap_t *heap = (mi_heap_t *)mi_atomic_load_acquire(ref page->xheap);

                mi_assert_internal((MI_DEBUG > 1) && (heap != null));

                if (heap != null)
                {
                    // add to the delayed free list of this heap. (do this atomically as the lock only protects heap memory validity)
                    nuint dfree = (nuint)mi_atomic_load_ptr_relaxed <mi_block_t>(ref heap->thread_delayed_free);

                    do
                    {
                        mi_block_set_nextx(heap, block, (mi_block_t *)dfree, &heap->keys.e0);
                    }while (!mi_atomic_cas_ptr_weak_release(ref heap->thread_delayed_free, ref dfree, block));
                }

                // and reset the MI_DELAYED_FREEING flag
                tfree = mi_atomic_load_relaxed(ref page->xthread_free);

                do
                {
                    tfreex = tfree;
                    mi_assert_internal((MI_DEBUG > 1) && (mi_tf_delayed(tfree) == MI_DELAYED_FREEING));
                    tfreex = mi_tf_set_delayed(tfree, MI_NO_DELAYED_FREE);
                }while (!mi_atomic_cas_weak_release(ref page->xthread_free, ref tfree, tfreex));
            }
#pragma warning restore CS0420
        }
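In other words, the use_delayed branch above is a small handshake on the delayed-free state: the freeing thread first claims the page by moving xthread_free from MI_USE_DELAYED_FREE to MI_DELAYED_FREEING, then pushes the block onto the owning heap's thread_delayed_free list (safe while the FREEING flag is set, per the comment on the racy heap read), and finally resets the state to MI_NO_DELAYED_FREE.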
Example No. 17
        // When a non-thread-local block is freed, it becomes part of the thread delayed free
        // list that is freed later by the owning heap. If the exact usable size is too small to
        // contain the pointer for the delayed list, then shrink the padding (by decreasing delta)
        // so it will later not trigger an overflow error in `mi_free_block`.
        private static void mi_padding_shrink([NativeTypeName("const mi_page_t*")] mi_page_t *page, [NativeTypeName("const mi_block_t*")] mi_block_t *block, [NativeTypeName("const size_t")] nuint min_size)
        {
            if ((MI_PADDING > 0) && (MI_ENCODE_FREELIST != 0))
            {
                bool ok = mi_page_decode_padding(page, block, out nuint delta, out nuint bsize);
                mi_assert_internal((MI_DEBUG > 1) && ok);

                if (!ok || ((bsize - delta) >= min_size))
                {
                    // usually already enough space
                    return;
                }

                mi_assert_internal((MI_DEBUG > 1) && (bsize >= min_size));

                if (bsize < min_size)
                {
                    // should never happen
                    return;
                }

                nuint new_delta = bsize - min_size;
                mi_assert_internal((MI_DEBUG > 1) && (new_delta < bsize));

                mi_padding_t *padding = (mi_padding_t *)((byte *)block + bsize);
                padding->delta = (uint)new_delta;
            }
        }
Example No. 18
        private static void mi_check_padding([NativeTypeName("const mi_page_t*")] mi_page_t *page, [NativeTypeName("const mi_block_t*")] mi_block_t *block)
        {
            if ((MI_PADDING > 0) && (MI_ENCODE_FREELIST != 0) && !mi_verify_padding(page, block, out nuint size, out nuint wrong))
            {
                _mi_error_message(EFAULT, "buffer overflow in heap block {0:X} of size {1}: write after {2} bytes\n", (nuint)block, size, wrong);
            }
        }
Example No. 19
        private static bool mi_verify_padding([NativeTypeName("const mi_page_t*")] mi_page_t *page, [NativeTypeName("const mi_block_t*")] mi_block_t *block, [NativeTypeName("size_t*")] out nuint size, [NativeTypeName("size_t*")] out nuint wrong)
        {
            mi_assert_internal((MI_DEBUG > 1) && (MI_PADDING > 0) && (MI_ENCODE_FREELIST != 0));

            bool ok = mi_page_decode_padding(page, block, out nuint delta, out nuint bsize);

            size = wrong = bsize;

            if (!ok)
            {
                return(false);
            }

            mi_assert_internal((MI_DEBUG > 1) && (bsize >= delta));

            size = bsize - delta;
            byte *fill = (byte *)block + bsize - delta;

            // check at most the first N padding bytes
            nuint maxpad = (delta > MI_MAX_ALIGN_SIZE) ? MI_MAX_ALIGN_SIZE : delta;

            for (nuint i = 0; i < maxpad; i++)
            {
                if (fill[i] != MI_DEBUG_PADDING)
                {
                    wrong = bsize - delta + i;
                    return(false);
                }
            }

            return(true);
        }
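A short worked example of the loop above: with delta = 8, all 8 fill bytes are compared against MI_DEBUG_PADDING; with delta = 64 and MI_MAX_ALIGN_SIZE assumed to be 16 (its value in upstream mimalloc), only the first 16 fill bytes are checked, so an overflow that corrupts only the tail of the padding would go undetected.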
Example No. 20
        // ------------------------------------------------------
        // Check for double free in secure and debug mode
        // This is somewhat expensive so only enabled for secure mode 4
        // ------------------------------------------------------

        // linear check if the free list contains a specific element
        private static bool mi_list_contains([NativeTypeName("const mi_page_t*")] mi_page_t *page, [NativeTypeName("const mi_block_t*")] mi_block_t *list, [NativeTypeName("const mi_block_t*")] mi_block_t *elem)
        {
            mi_assert_internal((MI_DEBUG > 1) && (MI_ENCODE_FREELIST != 0) && ((MI_SECURE >= 4) || (MI_DEBUG != 0)));

            while (list != null)
            {
                if (elem == list)
                {
                    return(true);
                }
                list = mi_block_next(page, list);
            }

            return(false);
        }
Example No. 21
        // ---------------------------------------------------------------------------
        // Check for heap block overflow by setting up padding at the end of the block
        // ---------------------------------------------------------------------------

        private static bool mi_page_decode_padding([NativeTypeName("const mi_page_t*")] mi_page_t *page, [NativeTypeName("const mi_block_t*")] mi_block_t *block, [NativeTypeName("size_t*")] out nuint delta, [NativeTypeName("size_t*")] out nuint bsize)
        {
            mi_assert_internal((MI_DEBUG > 1) && (MI_PADDING > 0) && (MI_ENCODE_FREELIST != 0));

            bsize = mi_page_usable_block_size(page);
            mi_padding_t *padding = (mi_padding_t *)((byte *)block + bsize);

            delta = padding->delta;
            return((unchecked ((uint)mi_ptr_encode(page, block, &page->keys.e0)) == padding->canary) && (delta <= bsize));
        }
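Reading this together with the write side in _mi_page_malloc (Example No. 22): the mi_padding_t trailer (canary, delta) sits at block + bsize, delta counts the fill bytes between the end of the user data and the trailer, and the size reported back to the caller is bsize - delta. A small illustrative sketch of that arithmetic (made-up numbers, not the port's definitions):

        //   block                     block + bsize - delta                 block + bsize
        //     | user data ............. | fill bytes (MI_DEBUG_PADDING) ..... | canary | delta |
        //
        // usable size == bsize - delta; the canary is the encoded block pointer truncated to 32 bits.
        private static nuint padding_usable_size_sketch(nuint bsize, nuint delta)
        {
            // e.g. bsize == 48 and delta == 16  =>  the caller originally asked for 32 usable bytes
            return (delta <= bsize) ? (bsize - delta) : 0;
        }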
Example No. 22
        // ------------------------------------------------------
        // Allocation
        // ------------------------------------------------------

        // Fast allocation in a page: just pop from the free list.
        // Fall back to generic allocation only if the list is empty.
        private static partial void *_mi_page_malloc(mi_heap_t *heap, mi_page_t *page, nuint size)
        {
            mi_assert_internal((MI_DEBUG > 1) && ((page->xblock_size == 0) || (mi_page_block_size(page) >= size)));
            mi_block_t *block = page->free;

            if (mi_unlikely(block == null))
            {
                return(_mi_malloc_generic(heap, size));
            }

            mi_assert_internal((MI_DEBUG > 1) && block != null && _mi_ptr_page(block) == page);

            // pop from the free list
            page->free = mi_block_next(page, block);

            page->used++;
            mi_assert_internal((MI_DEBUG > 1) && ((page->free == null) || (_mi_ptr_page(page->free) == page)));

            if (MI_DEBUG > 0)
            {
                if (!page->is_zero)
                {
                    _ = memset(block, MI_DEBUG_UNINIT, size);
                }
            }
            else if (MI_SECURE != 0)
            {
                // don't leak internal data
                block->next = 0;
            }

            if (MI_STAT > 1)
            {
                nuint bsize = mi_page_usable_block_size(page);

                if (bsize <= MI_LARGE_OBJ_SIZE_MAX)
                {
                    nuint bin = _mi_bin(bsize);
                    mi_stat_increase(ref (&heap->tld->stats.normal.e0)[bin], 1);
                }
            }

            if ((MI_PADDING > 0) && (MI_ENCODE_FREELIST != 0))
            {
                mi_padding_t *padding = (mi_padding_t *)((byte *)block + mi_page_usable_block_size(page));
                nint          delta   = (nint)((nuint)padding - (nuint)block - (size - MI_PADDING_SIZE));

                mi_assert_internal((MI_DEBUG > 1) && (delta >= 0) && (mi_page_usable_block_size(page) >= (size - MI_PADDING_SIZE + (nuint)delta)));

                padding->canary = unchecked ((uint)mi_ptr_encode(page, block, &page->keys.e0));
                padding->delta  = (uint)delta;

                byte *fill = (byte *)padding - delta;

                // set at most N initial padding bytes
                nuint maxpad = ((nuint)delta > MI_MAX_ALIGN_SIZE) ? MI_MAX_ALIGN_SIZE : (nuint)delta;

                for (nuint i = 0; i < maxpad; i++)
                {
                    fill[i] = MI_DEBUG_PADDING;
                }
            }

            return(block);
        }
Example No. 23
        private static bool mi_check_is_double_freex([NativeTypeName("const mi_page_t*")] mi_page_t *page, [NativeTypeName("const mi_block_t*")] mi_block_t *block)
        {
            mi_assert_internal((MI_DEBUG > 1) && (MI_ENCODE_FREELIST != 0) && ((MI_SECURE >= 4) || (MI_DEBUG != 0)));

            // The decoded value points into the same page (or is null).
            // Walk the free lists to positively verify whether the block has already been freed.

            if (mi_list_contains(page, page->free, block) || mi_list_contains(page, page->local_free, block) || mi_list_contains(page, mi_page_thread_free(page), block))
            {
                _mi_error_message(EAGAIN, "double free detected of block {0:X} with size {1}\n", (nuint)block, mi_page_block_size(page));
                return(true);
            }

            return(false);
        }