Code example #1
        // When a non-thread-local block is freed, it becomes part of the thread delayed free
        // list that is freed later by the owning heap. If the exact usable size is too small to
        // contain the pointer for the delayed list, then shrink the padding (by decreasing delta)
        // so it will later not trigger an overflow error in `mi_free_block`.
        private static void mi_padding_shrink([NativeTypeName("const mi_page_t*")] mi_page_t *page, [NativeTypeName("const mi_block_t*")] mi_block_t *block, [NativeTypeName("const size_t")] nuint min_size)
        {
            if ((MI_PADDING > 0) && (MI_ENCODE_FREELIST != 0))
            {
                bool ok = mi_page_decode_padding(page, block, out nuint delta, out nuint bsize);
                mi_assert_internal((MI_DEBUG > 1) && ok);

                if (!ok || ((bsize - delta) >= min_size))
                {
                    // usually already enough space
                    return;
                }

                mi_assert_internal((MI_DEBUG > 1) && (bsize >= min_size));

                if (bsize < min_size)
                {
                    // should never happen
                    return;
                }

                nuint new_delta = bsize - min_size;
                mi_assert_internal((MI_DEBUG > 1) && (new_delta < bsize));

                mi_padding_t *padding = (mi_padding_t *)((byte *)block + bsize);
                padding->delta = (uint)new_delta;
            }
        }
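
The comment at the top of mi_padding_shrink describes its one expected use: the cross-thread free path, which reuses the first bytes of a freed block as a delayed-free link. The sketch below illustrates such a call site; the method name and the surrounding steps are assumptions for illustration, not code taken from the port.

        // Hypothetical caller (a sketch, not the port's code): before a non-local block is
        // linked into the owning heap's thread-delayed free list, ensure at least
        // sizeof(mi_block_t) usable bytes so the padding check in `mi_free_block` cannot fire.
        private static void mi_free_delayed_sketch(mi_page_t *page, mi_block_t *block)
        {
            mi_padding_shrink(page, block, (nuint)sizeof(mi_block_t));

            // ...the block's next pointer is then written and the block is pushed onto
            // the heap's thread-delayed free list...
        }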
Code example #2
        // ---------------------------------------------------------------------------
        // Check for heap block overflow by setting up padding at the end of the block
        // ---------------------------------------------------------------------------

        private static bool mi_page_decode_padding([NativeTypeName("const mi_page_t*")] mi_page_t *page, [NativeTypeName("const mi_block_t*")] mi_block_t *block, [NativeTypeName("size_t*")] out nuint delta, [NativeTypeName("size_t*")] out nuint bsize)
        {
            mi_assert_internal((MI_DEBUG > 1) && (MI_PADDING > 0) && (MI_ENCODE_FREELIST != 0));

            bsize = mi_page_usable_block_size(page);
            mi_padding_t *padding = (mi_padding_t *)((byte *)block + bsize);

            delta = padding->delta;
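
            // valid only if the canary matches the encoded block pointer and the delta fits within the block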
            return((unchecked ((uint)mi_ptr_encode(page, block, &page->keys.e0)) == padding->canary) && (delta <= bsize));
        }
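
Both routines above read a two-field padding record (canary, delta) stored at block + bsize. A minimal sketch of that layout follows, inferred from the uint casts in the code; the port's actual mi_padding_t declaration may differ in naming or attributes.

        // Assumed layout (a sketch): two 32-bit fields written directly after the usable
        // bytes of the block. The canary encodes the block pointer so an overflow that
        // overwrites it is detected; delta records how much of the block is unused slack.
        private struct mi_padding_sketch_t
        {
            public uint canary; // encoded block pointer, compared against mi_ptr_encode on free
            public uint delta;  // usable block size minus the exact requested size
        }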
Code example #3
        // ------------------------------------------------------
        // Allocation
        // ------------------------------------------------------

        // Fast allocation in a page: just pop from the free list.
        // Fall back to generic allocation only if the list is empty.
        private static partial void *_mi_page_malloc(mi_heap_t *heap, mi_page_t *page, nuint size)
        {
            mi_assert_internal((MI_DEBUG > 1) && ((page->xblock_size == 0) || (mi_page_block_size(page) >= size)));
            mi_block_t *block = page->free;

            if (mi_unlikely(block == null))
            {
                return(_mi_malloc_generic(heap, size));
            }

            mi_assert_internal((MI_DEBUG > 1) && (block != null) && (_mi_ptr_page(block) == page));

            // pop from the free list
            page->free = mi_block_next(page, block);

            page->used++;
            mi_assert_internal((MI_DEBUG > 1) && ((page->free == null) || (_mi_ptr_page(page->free) == page)));

            if (MI_DEBUG > 0)
            {
                if (!page->is_zero)
                {
                    _ = memset(block, MI_DEBUG_UNINIT, size);
                }
            }
            else if (MI_SECURE != 0)
            {
                // don't leak internal data
                block->next = 0;
            }

            if (MI_STAT > 1)
            {
                nuint bsize = mi_page_usable_block_size(page);

                if (bsize <= MI_LARGE_OBJ_SIZE_MAX)
                {
                    nuint bin = _mi_bin(bsize);
                    mi_stat_increase(ref (&heap->tld->stats.normal.e0)[bin], 1);
                }
            }

            if ((MI_PADDING > 0) && (MI_ENCODE_FREELIST != 0))
            {
                mi_padding_t *padding = (mi_padding_t *)((byte *)block + mi_page_usable_block_size(page));
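                // delta is the slack between the exact request (size - MI_PADDING_SIZE) and the usable block size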
                nint          delta   = (nint)((nuint)padding - (nuint)block - (size - MI_PADDING_SIZE));

                mi_assert_internal((MI_DEBUG > 1) && (delta >= 0) && (mi_page_usable_block_size(page) >= (size - MI_PADDING_SIZE + (nuint)delta)));

                padding->canary = unchecked ((uint)mi_ptr_encode(page, block, &page->keys.e0));
                padding->delta  = (uint)delta;

                byte *fill = (byte *)padding - delta;

                // set at most N initial padding bytes
                nuint maxpad = ((nuint)delta > MI_MAX_ALIGN_SIZE) ? MI_MAX_ALIGN_SIZE : (nuint)delta;

                for (nuint i = 0; i < maxpad; i++)
                {
                    fill[i] = MI_DEBUG_PADDING;
                }
            }

            return(block);
        }
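
The fill bytes written at the end of _mi_page_malloc exist so the free path can detect writes past the requested size. The sketch below shows how such a check could be performed with mi_page_decode_padding; it is an illustration under the assumptions above, not the port's actual verification routine.

        // Hypothetical overflow check on free (a sketch, not the port's code): decode the
        // padding written by _mi_page_malloc and verify that the slack bytes between the
        // exact allocation and the padding record still hold MI_DEBUG_PADDING.
        private static bool mi_verify_padding_sketch(mi_page_t *page, mi_block_t *block)
        {
            if (!mi_page_decode_padding(page, block, out nuint delta, out nuint bsize))
            {
                // corrupted canary or implausible delta: the padding record itself was overwritten
                return false;
            }

            // same fill region and cap as in _mi_page_malloc
            byte *fill   = (byte *)block + bsize - delta;
            nuint maxpad = (delta > MI_MAX_ALIGN_SIZE) ? MI_MAX_ALIGN_SIZE : delta;

            for (nuint i = 0; i < maxpad; i++)
            {
                if (fill[i] != MI_DEBUG_PADDING)
                {
                    // a write ran past the requested size into the padding area
                    return false;
                }
            }

            return true;
        }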