Example 1
        /* -----------------------------------------------------------
        *  Thread safe allocation in an arena
        *  ----------------------------------------------------------- */
        private static bool mi_arena_alloc(mi_arena_t *arena, [NativeTypeName("size_t")] nuint blocks, [NativeTypeName("mi_bitmap_index_t*")] out nuint bitmap_idx)
        {
#pragma warning disable CS0420
            nuint fcount = arena->field_count;

            // start from last search
            nuint idx = mi_atomic_load_acquire(ref arena->search_idx);

            for (nuint visited = 0; visited < fcount; visited++, idx++)
            {
                if (idx >= fcount)
                {
                    // wrap around
                    idx = 0;
                }

                // try to atomically claim a range of bits

                if (mi_bitmap_try_find_claim_field(&arena->blocks_inuse.e0, idx, blocks, out bitmap_idx))
                {
                    // start search from here next time
                    mi_atomic_store_release(ref arena->search_idx, idx);
                    return(true);
                }
            }

            bitmap_idx = 0;
            return(false);

#pragma warning restore CS0420
        }
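
The search above rotates through the arena's bitmap fields starting at the last successful position, wraps around, and relies on mi_bitmap_try_find_claim_field to atomically claim a run of `blocks` bits. Purely as an illustration, here is a minimal managed sketch of the same rotating claim loop over a plain ulong[]: the class and method names are hypothetical, it claims a single bit rather than a range, and it assumes a runtime with the unsigned Interlocked.CompareExchange overloads (.NET 5 or later).

        using System.Numerics;
        using System.Threading;

        internal static class SimpleBitmapSketch
        {
            // Try to claim one free bit across the fields, starting from the remembered
            // search index and wrapping around, mirroring the rotation in mi_arena_alloc.
            public static bool TryClaimBit(ulong[] fields, ref int searchIdx, out int fieldIdx, out int bitIdx)
            {
                int start = Volatile.Read(ref searchIdx);

                for (int visited = 0; visited < fields.Length; visited++)
                {
                    int idx = (start + visited) % fields.Length;
                    ulong current = Volatile.Read(ref fields[idx]);

                    while (current != ulong.MaxValue)
                    {
                        // lowest clear bit in this field
                        int bit = BitOperations.TrailingZeroCount(~current);
                        ulong desired = current | (1UL << bit);
                        ulong observed = Interlocked.CompareExchange(ref fields[idx], desired, current);

                        if (observed == current)
                        {
                            // claimed: remember where to start the next search
                            Volatile.Write(ref searchIdx, idx);
                            fieldIdx = idx;
                            bitIdx   = bit;
                            return true;
                        }

                        // lost the race; retry against the freshly observed value
                        current = observed;
                    }
                }

                fieldIdx = 0;
                bitIdx   = 0;
                return false;
            }
        }
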
Example 2
        /* -----------------------------------------------------------
        *  Arena free
        *  ----------------------------------------------------------- */

        private static partial void _mi_arena_free(void *p, nuint size, nuint memid, bool all_committed, ref mi_stats_t stats)
        {
#pragma warning disable CS0420
            mi_assert_internal((MI_DEBUG > 1) && (size > 0));

            if (p == null)
            {
                return;
            }

            if (size == 0)
            {
                return;
            }

            if (memid == MI_MEMID_OS)
            {
                // was a direct OS allocation, pass through
                _mi_os_free_ex(p, size, all_committed, ref stats);
            }
            else
            {
                // allocated in an arena

                mi_arena_id_indices(memid, out nuint arena_idx, out nuint bitmap_idx);
                mi_assert_internal((MI_DEBUG > 1) && (arena_idx < MI_MAX_ARENAS));

                mi_arena_t *arena = mi_atomic_load_ptr_relaxed <mi_arena_t>(ref mi_arenas[arena_idx]);
                mi_assert_internal((MI_DEBUG > 1) && (arena != null));

                if (arena == null)
                {
                    _mi_error_message(EINVAL, "trying to free from non-existent arena: {0:X}, size {1}, memid: 0x{2:X}\n", (nuint)p, size, memid);
                    return;
                }

                mi_assert_internal((MI_DEBUG > 1) && (arena->field_count > mi_bitmap_index_field(bitmap_idx)));

                if (arena->field_count <= mi_bitmap_index_field(bitmap_idx))
                {
                    _mi_error_message(EINVAL, "trying to free from non-existent arena block: {0}, size {1}, memid: 0x{2:X}\n", (nuint)p, size, memid);
                    return;
                }

                nuint blocks = mi_block_count_of_size(size);
                bool  ones   = mi_bitmap_unclaim(&arena->blocks_inuse.e0, arena->field_count, blocks, bitmap_idx);

                if (!ones)
                {
                    _mi_error_message(EAGAIN, "trying to free an already freed block: {0:X}, size {1}\n", (nuint)p, size);
                    return;
                }
            }
#pragma warning restore CS0420
        }
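
Which arena and which bitmap position a block came from is recovered from memid via mi_arena_id_indices; the real layout is whatever mi_arena_id_create produced in the port. Purely for illustration, the sketch below assumes an encoding in the spirit of upstream mimalloc: the arena index plus one in the low byte (so that the value 0 can stay reserved for MI_MEMID_OS) and the bitmap index in the remaining bits. All names here are hypothetical.

        internal static class ArenaMemIdSketch
        {
            public const nuint MI_MEMID_OS = 0;

            // pack: bitmap index in the upper bits, arena index + 1 in the low byte
            public static nuint Create(nuint arenaIndex, nuint bitmapIndex)
                => (bitmapIndex << 8) | ((arenaIndex + 1) & 0xFF);

            // unpack: the inverse used on the free path
            public static void Indices(nuint memid, out nuint arenaIndex, out nuint bitmapIndex)
            {
                arenaIndex  = (memid & 0xFF) - 1;
                bitmapIndex = memid >> 8;
            }
        }
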
Example 3
        /* -----------------------------------------------------------
        *  Arena Allocation
        *  ----------------------------------------------------------- */

        private static void *mi_arena_alloc_from(mi_arena_t *arena, [NativeTypeName("size_t")] nuint arena_index, [NativeTypeName("size_t")] nuint needed_bcount, [NativeTypeName("bool*")] ref bool commit, [NativeTypeName("bool*")] ref bool large, [NativeTypeName("bool*")] out bool is_zero, [NativeTypeName("size_t*")] out nuint memid, mi_os_tld_t *tld)
        {
            if (!mi_arena_alloc(arena, needed_bcount, out nuint bitmap_index))
            {
                memid   = 0;
                is_zero = false;
                return(null);
            }

            // claimed it! set the dirty bits (todo: no need for an atomic op here?)
            void *p = (void *)(arena->start + (mi_bitmap_index_bit(bitmap_index) * MI_ARENA_BLOCK_SIZE));

            memid   = mi_arena_id_create(arena_index, bitmap_index);
            is_zero = mi_bitmap_claim(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, out _);
            large   = arena->is_large;

            if (arena->is_committed)
            {
                // always committed
                commit = true;
            }
            else if (commit)
            {
                // arena not committed as a whole, but commit requested: ensure commit now
                _ = mi_bitmap_claim(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index, out bool any_uncommitted);

                if (any_uncommitted)
                {
                    _ = _mi_os_commit(p, needed_bcount * MI_ARENA_BLOCK_SIZE, out bool commit_zero, ref *tld->stats);

                    if (commit_zero)
                    {
                        is_zero = true;
                    }
                }
            }
            else
            {
                // no need to commit, but check if already fully committed
                commit = mi_bitmap_is_claimed(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index);
            }

            return(p);
        }
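
Once a run of bits is claimed, the returned pointer is plain arithmetic on the first claimed bit: the arena start plus bit * MI_ARENA_BLOCK_SIZE. A minimal sketch of that mapping follows; the 32 MiB block size is only an assumed placeholder, and the port defines the real MI_ARENA_BLOCK_SIZE.

        internal static class ArenaAddressingSketch
        {
            // assumed placeholder; the port defines the real MI_ARENA_BLOCK_SIZE
            private const nuint MI_ARENA_BLOCK_SIZE = 32 * 1024 * 1024;

            // bitmap bit -> starting address of the corresponding block
            public static nuint BlockAddress(nuint arenaStart, nuint bitmapBit)
                => arenaStart + (bitmapBit * MI_ARENA_BLOCK_SIZE);

            // pointer inside the arena -> index of the block it belongs to
            public static nuint BlockIndex(nuint arenaStart, nuint p)
                => (p - arenaStart) / MI_ARENA_BLOCK_SIZE;
        }
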
Example 4
        /* -----------------------------------------------------------
        *  Add an arena.
        *  ----------------------------------------------------------- */

        private static bool mi_arena_add(mi_arena_t *arena)
        {
#pragma warning disable CS0420
            mi_assert_internal((MI_DEBUG > 1) && (arena != null));
            mi_assert_internal((MI_DEBUG > 1) && (((nuint)mi_atomic_load_ptr_relaxed <byte>(ref arena->start) % MI_SEGMENT_ALIGN) == 0));
            mi_assert_internal((MI_DEBUG > 1) && (arena->block_count > 0));

            nuint i = mi_atomic_increment_acq_rel(ref mi_arena_count);

            if (i >= MI_MAX_ARENAS)
            {
                _ = mi_atomic_decrement_acq_rel(ref mi_arena_count);
                return(false);
            }

            mi_atomic_store_ptr_release <mi_arena_t>(ref mi_arenas[i], arena);
            return(true);

#pragma warning restore CS0420
        }
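
mi_arena_add publishes an arena with a lock-free append: it reserves a slot by atomically bumping mi_arena_count (the returned value is used directly as the slot index, i.e. the pre-increment count), rolls the count back if the table is full, and only then stores the pointer with release semantics so a reader that sees a non-null slot also sees an initialized arena. A minimal generic sketch of the same pattern, with hypothetical names, is shown below; note that Interlocked.Increment returns the new count, hence the subtraction.

        using System.Threading;

        internal static class AppendOnlyRegistrySketch<T> where T : class
        {
            private const int MaxEntries = 64;                   // stands in for MI_MAX_ARENAS
            private static readonly T[] s_entries = new T[MaxEntries];
            private static int s_count;

            public static bool TryAdd(T item)
            {
                // claim a slot index first (Interlocked.Increment returns the new count,
                // so subtract one to get the index of the slot just reserved)
                int i = Interlocked.Increment(ref s_count) - 1;

                if (i >= MaxEntries)
                {
                    // registry is full: roll the reservation back and fail
                    _ = Interlocked.Decrement(ref s_count);
                    return false;
                }

                // publish the item; readers that see it non-null also see it fully initialized
                Volatile.Write(ref s_entries[i], item);
                return true;
            }

            // readers scan from index 0 and stop at the first null slot, like the arena loops do
            public static T Get(int i) => Volatile.Read(ref s_entries[i]);
        }
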
Example 5
        /* -----------------------------------------------------------
        *  Reserve a huge page arena.
        *  ----------------------------------------------------------- */

        // reserve at a specific numa node
        public static partial int mi_reserve_huge_os_pages_at(nuint pages, int numa_node, nuint timeout_msecs)
        {
#pragma warning disable CS0420
            if (pages == 0)
            {
                return(0);
            }

            if (numa_node < -1)
            {
                numa_node = -1;
            }

            if (numa_node >= 0)
            {
                numa_node %= (int)_mi_os_numa_node_count();
            }

            void *p = _mi_os_alloc_huge_os_pages(pages, numa_node, (long)timeout_msecs, out nuint pages_reserved, out nuint hsize);

            if ((p == null) || (pages_reserved == 0))
            {
                _mi_warning_message("failed to reserve {0} gb huge pages\n", pages);
                return(ENOMEM);
            }

            _mi_verbose_message("numa node {0}: reserved {1} gb huge pages (of the {2} gb requested)\n", numa_node, pages_reserved, pages);

            nuint bcount = mi_block_count_of_size(hsize);
            nuint fields = _mi_divide_up(bcount, MI_BITMAP_FIELD_BITS);
            nuint asize  = SizeOf <mi_arena_t>() + (2 * fields * SizeOf <nuint>());

            // TODO: can we avoid allocating from the OS?
            mi_arena_t *arena = (mi_arena_t *)_mi_os_alloc(asize, ref _mi_stats_main);

            if (arena == null)
            {
                _mi_os_free_huge_pages(p, hsize, ref _mi_stats_main);
                return(ENOMEM);
            }

            arena->block_count = bcount;
            arena->field_count = fields;

            arena->start = (nuint)p;

            // TODO: or get the current numa node if -1? (now it allows anyone to allocate on -1)
            arena->numa_node = numa_node;

            arena->is_large     = true;
            arena->is_zero_init = true;
            arena->is_committed = true;
            arena->search_idx   = 0;

            // just after inuse bitmap
            arena->blocks_dirty = &arena->blocks_inuse.e0 + fields;

            arena->blocks_committed = null;

            // the bitmaps are already zero initialized due to os_alloc
            // just claim leftover blocks if needed

            nint post = (nint)((fields * MI_BITMAP_FIELD_BITS) - bcount);
            mi_assert_internal((MI_DEBUG > 1) && (post >= 0));

            if (post > 0)
            {
                // don't use leftover bits at the end
                nuint postidx = mi_bitmap_index_create(fields - 1, MI_BITMAP_FIELD_BITS - (nuint)post);
                _ = mi_bitmap_claim(&arena->blocks_inuse.e0, fields, (nuint)post, postidx, out _);
            }

            _ = mi_arena_add(arena);
            return(0);

#pragma warning restore CS0420
        }
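
The bitmap sizing above rounds the block count up to whole bitmap fields and then pre-claims the trailing bits that have no backing blocks, so the allocator can never hand them out. A minimal sketch of that arithmetic, with illustrative numbers and a hypothetical helper name:

        internal static class ArenaBitmapSizingSketch
        {
            // bit width of one bitmap field (64 on a 64-bit target)
            private const nuint MI_BITMAP_FIELD_BITS = 64;

            // trailing bits of the last field that do not correspond to real blocks
            // and must be pre-claimed, exactly like the 'post' computation above
            public static nuint LeftoverBits(nuint blockCount)
            {
                nuint fields = (blockCount + MI_BITMAP_FIELD_BITS - 1) / MI_BITMAP_FIELD_BITS; // _mi_divide_up
                return (fields * MI_BITMAP_FIELD_BITS) - blockCount;
            }
        }

        // e.g. LeftoverBits(100) == 28: two 64-bit fields cover 128 bits, the last 28 stay claimed.
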
Example 6
        private static partial void *_mi_arena_alloc_aligned(nuint size, nuint alignment, ref bool commit, ref bool large, out bool is_zero, out nuint memid, mi_os_tld_t *tld)
        {
            mi_assert_internal((MI_DEBUG > 1) && (tld != null));
            mi_assert_internal((MI_DEBUG > 1) && (size > 0));

            memid   = MI_MEMID_OS;
            is_zero = false;

            // try to allocate in an arena if the alignment is small enough
            // and the object is not too large or too small.

            if ((alignment <= MI_SEGMENT_ALIGN) && (size <= MI_ARENA_MAX_OBJ_SIZE) && (size >= MI_ARENA_MIN_OBJ_SIZE))
            {
                nuint bcount = mi_block_count_of_size(size);

                // current numa node
                int numa_node = _mi_os_numa_node(tld);

                mi_assert_internal((MI_DEBUG > 1) && (size <= (bcount * MI_ARENA_BLOCK_SIZE)));

                // try numa affine allocation
                for (nuint i = 0; i < MI_MAX_ARENAS; i++)
                {
                    mi_arena_t *arena = mi_atomic_load_ptr_relaxed <mi_arena_t>(ref mi_arenas[i]);

                    if (arena == null)
                    {
                        // end reached
                        break;
                    }

                    // numa local, large OS pages allowed, or arena is not large OS pages

                    if (((arena->numa_node < 0) || (arena->numa_node == numa_node)) && (large || !arena->is_large))
                    {
                        void *p = mi_arena_alloc_from(arena, i, bcount, ref commit, ref large, out is_zero, out memid, tld);
                        mi_assert_internal((MI_DEBUG > 1) && (((nuint)p % alignment) == 0));

                        if (p != null)
                        {
                            return(p);
                        }
                    }
                }

                // try from another numa node instead..

                for (nuint i = 0; i < MI_MAX_ARENAS; i++)
                {
                    mi_arena_t *arena = mi_atomic_load_ptr_relaxed <mi_arena_t>(ref mi_arenas[i]);

                    if (arena == null)
                    {
                        // end reached
                        break;
                    }

                    // not numa local, large OS pages allowed, or arena is not large OS pages
                    if ((arena->numa_node >= 0) && (arena->numa_node != numa_node) && (large || !arena->is_large))
                    {
                        void *p = mi_arena_alloc_from(arena, i, bcount, ref commit, ref large, out is_zero, out memid, tld);
                        mi_assert_internal((MI_DEBUG > 1) && (((nuint)p % alignment) == 0));

                        if (p != null)
                        {
                            return(p);
                        }
                    }
                }
            }

            // finally, fall back to the OS

            is_zero = true;
            memid   = MI_MEMID_OS;

            return(_mi_os_alloc_aligned(size, alignment, commit, ref large, tld));
        }
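
Both loops walk the arena table in order and stop at the first null entry; they differ only in the NUMA eligibility test (prefer local or unbound arenas first, then accept remote ones), and in both passes arenas backed by large OS pages are skipped unless the caller allows large pages. A minimal sketch that factors out just those two predicates (names are illustrative):

        internal static class ArenaSelectionSketch
        {
            // first pass: NUMA-local (or unbound) arenas, honoring the large-page preference
            public static bool IsEligibleLocal(int arenaNumaNode, bool arenaIsLarge, int currentNumaNode, bool largeAllowed)
                => ((arenaNumaNode < 0) || (arenaNumaNode == currentNumaNode)) && (largeAllowed || !arenaIsLarge);

            // second pass: explicitly remote arenas, same large-page rule
            public static bool IsEligibleRemote(int arenaNumaNode, bool arenaIsLarge, int currentNumaNode, bool largeAllowed)
                => ((arenaNumaNode >= 0) && (arenaNumaNode != currentNumaNode)) && (largeAllowed || !arenaIsLarge);
        }
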