Example No. 1
        private void MultiThread(
            Action<ConcurrentHashmapOfKeys, ExpandableArrayOfKeys, ulong, ulong, ulong> a,
            ConcurrentHashmapOfKeys map, ExpandableArrayOfKeys keys, int nThreads, ulong count, ulong offset)
        {
            var tasks = new Task[nThreads];

            var first = offset;

            for (var i = 0; i < tasks.Length; i++)
            {
                var first1 = first;
                tasks[i] = new Task(() => a(map, keys, first1, count, offset));
                first += count;
            }

            foreach (var task in tasks)
            {
                task.Start();
            }

            foreach (var task in tasks)
            {
                task.Wait();
            }
        }
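The per-iteration copy (var first1 = first;) matters: C# lambdas capture variables, not values, so capturing first directly would make every task read whatever first holds when the task eventually runs, i.e. the value left after the last first += count. A minimal standalone sketch of the pitfall and the fix (requires only System and System.Threading.Tasks; all names here are illustrative):

        private static void CaptureDemo()
        {
            var wrong = new Task[4];
            var right = new Task[4];
            var first = 0UL;

            for (var i = 0; i < 4; i++)
            {
                // Pitfall: all four lambdas share the same variable `first`,
                // so each of these tasks typically prints 4000.
                wrong[i] = new Task(() => Console.WriteLine("wrong: " + first));

                // Fix: capture a per-iteration copy, as MultiThread does with first1.
                var first1 = first;
                right[i] = new Task(() => Console.WriteLine("right: " + first1));

                first += 1000;
            }

            foreach (var t in wrong) { t.Start(); }
            foreach (var t in right) { t.Start(); }
            Task.WaitAll(wrong);
            Task.WaitAll(right);
        }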
Example No. 2
        private unsafe void ReadAction(ConcurrentHashmapOfKeys map, ExpandableArrayOfKeys keys, ulong first, ulong count, ulong offset)
        {
            var watch = Stopwatch.StartNew();

            for (var k = 0; k < 10000; k++)
            {
                for (var i = first; i < first + count; i++)
                {
                    ulong val = 0;
                    if (!map.TryGetValue(keys.GetAt(i - offset), ref val))
                    {
                        throw new Exception("Failed to get at " + i + ", offset from " + offset);
                    }

                    if (val != i)
                    {
                        throw new Exception("Failed to validate at " + i + ", offset from " + offset);
                    }

                    //Console.WriteLine("Added {0} at {1}", val, i);
                }
            }

            watch.Stop();
            //Console.WriteLine("Elapsed: {0}, for {1}, {2}", watch.ElapsedMilliseconds, first, count);
        }
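ReadAction repeatedly looks up every key in its assigned range and checks that the stored value equals the key's index; a miss or a mismatch aborts the run with an exception. Its signature matches the Action<ConcurrentHashmapOfKeys, ExpandableArrayOfKeys, ulong, ulong, ulong> delegate that MultiThread (Example No. 1) expects, which is how it gets fanned out across threads (see the commented-out call in Example No. 4). The Stopwatch timing is collected but only reported through the commented-out Console.WriteLine at the end.

Example No. 3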
        public DocumentDataContainer(
            DataContainerDescriptor dataContainerDescriptor,
            DocumentTypeDescriptor documentTypeDescriptor,
            IUnmanagedAllocator allocator,
            ITracer tracer)
        {
            if (tracer == null)
            {
                throw new ArgumentNullException("tracer");
            }

            if (dataContainerDescriptor == null)
            {
                throw new ArgumentNullException("dataContainerDescriptor");
            }

            if (documentTypeDescriptor == null)
            {
                throw new ArgumentNullException("documentTypeDescriptor");
            }

            if (allocator == null)
            {
                throw new ArgumentNullException("allocator");
            }

            m_logger = tracer;

            m_allocator             = allocator;
            DocDesc                 = documentTypeDescriptor;
            DataContainerDescriptor = dataContainerDescriptor;

            ColumnStores         = new ColumnDataBase[DocDesc.Fields.Length];
            DocumentKeys         = new ExpandableArrayOfKeys(m_allocator);
            FieldIdToColumnStore = new Dictionary<int, int>(ColumnStores.Length * 2);
            PrimaryKeyFieldId    = dataContainerDescriptor.RequireField(documentTypeDescriptor.DocumentType, documentTypeDescriptor.PrimaryKeyFieldName).FieldId;

            for (var i = 0; i < DocDesc.Fields.Length; i++)
            {
                var field = dataContainerDescriptor.RequireField(DocDesc.Fields[i]);
                ColumnStores[i] = CreateColumnStore(field.DbType, m_allocator, null);
                FieldIdToColumnStore.Add(field.FieldId, i);
            }

            DocumentIdToIndex    = new ConcurrentHashmapOfKeys(m_allocator);
            ValidDocumentsBitmap = new BitVector(m_allocator);
            SortIndexManager     = new SortIndexManager(this);
            StructureLock        = new ReaderWriterLockSlim(LockRecursionPolicy.SupportsRecursion);
        }
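The constructor validates every dependency with explicit guard clauses before touching any state, then builds one column store per field and fills FieldIdToColumnStore so later lookups can map a field id to its column index in constant time. (In current C#, the string literals passed to ArgumentNullException would normally be written with nameof, which survives renames.)

Example No. 4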
        public unsafe void Test()
        {
            const ulong nThreads = 4;
            const ulong offset = 0;
            const ulong count = 15000000;

            for (var loop = 0; loop < 20; loop++)
            {
                using (var keys = GenerateKeys(nThreads * count))
                {
                    Console.WriteLine("Generated " + count * nThreads);
                    using (var map = new ConcurrentHashmapOfKeys(_pool))
                    {
                        MultiThread(InsertAndReadAction, map, keys, (int) nThreads, count, offset);
                        //MultiThread(ReadAction, map, keys, (int) nThreads, count, offset);
                        //Console.WriteLine("Inserted " + count * nThreads);
                    }

                    //TestPersistence(count, keys);

                    Console.WriteLine("Completed test");
                }

                //_pool.Recycle();
                //_pool.DeallocateGarbage();
                Console.WriteLine("Deallocated garbage");
            }

            Console.WriteLine("Disposed keys");
            //Console.ReadLine();
            Console.WriteLine("Disposed map");
            //Console.ReadLine();

            _pool.DeallocateGarbage();
            Console.WriteLine("Deallocated garbage");
            Console.ReadLine();

            //_pool.Recycle();
            //Console.WriteLine("Recycled pool");
            //Console.ReadLine();
            _pool.Dispose();
            Console.WriteLine("Disposed pool");
            Console.ReadLine();
        }
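Test drives twenty rounds of key generation, multithreaded insert-and-read, and disposal against the shared _pool; the Console.ReadLine calls appear to be pause points for inspecting process memory between stages. nThreads is declared as ulong so that nThreads * count needs no casts, and is converted back to int only where an actual thread count is required.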
Example No. 5
        private unsafe void BasicMemoryAction(ConcurrentHashmapOfKeys map, ExpandableArrayOfKeys keys, ulong first, ulong count, ulong offset)
        {
            for (var k = first; k < first + count; k++)
            {
                using (var map2 = new ConcurrentHashmapOfKeys(_pool))
                {
                    map2.TryAdd(keys.GetAt(0), 0);
                }
            }

            //var b = stackalloc void*[(int)100000];
            //for (var k = 0; k < 10000000000; k++)
            //{
            //    for (var i = 0; i < 100000; i++)
            //    {
            //        b[i] = _pool.Alloc((ulong) i % 10000);
            //    }
            //    for (var i = 0; i < 100000; i++)
            //    {
            //        _pool.Free(b[i]);
            //    }
            //}
        }
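BasicMemoryAction ignores its map and offset parameters: each iteration builds a fresh ConcurrentHashmapOfKeys over the shared _pool, performs a single insert, and disposes it, so the loop stresses allocation and reclamation churn in the pool rather than hashmap throughput. The commented-out block sketches an alternative raw alloc/free stress loop against the same pool.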
Example No. 6
        /// <summary>
        /// Reconstructs all unmanaged data in the new pool.
        /// Side effect is that all fragmentation is removed.
        /// </summary>
        /// <param name="newpool">The new memory pool to use.</param>
        public void MigrateRAM(IUnmanagedAllocator newpool)
        {
            CheckState();

            var vdb = ValidDocumentsBitmap;
            var dk = DocumentKeys;
            var diti = DocumentIdToIndex;
            var colstores = ColumnStores.ToArray();

            StructureLock.EnterWriteLock();
            try
            {
                // generate a new copy of the data
                var tasks = new List<Task>();

                tasks.Add(new Task<BitVector>(() => new BitVector(ValidDocumentsBitmap, newpool)));
                tasks.Add(new Task<ExpandableArrayOfKeys>(() => new ExpandableArrayOfKeys(DocumentKeys, newpool)));

                foreach (var c in ColumnStores)
                {
                    tasks.Add(new Task<ColumnDataBase>(o => CreateColumnStore(((ColumnDataBase)o).DbType, newpool, (ColumnDataBase)o), c));
                }

                foreach (var t in tasks)
                {
                    t.Start();
                }

                Task.WaitAll(tasks.ToArray());

                var newvdb = ((Task<BitVector>) tasks[0]).Result;
                var newdk = ((Task<ExpandableArrayOfKeys>) tasks[1]).Result;
                var newditi = new ConcurrentHashmapOfKeys(DocumentIdToIndex, newdk, newpool);

                // now, since no exception was thrown, let's consume results and dispose of old structures
                try
                {
                    ValidDocumentsBitmap = newvdb;
                    DocumentKeys = newdk;
                    DocumentIdToIndex = newditi;

                    for (var i = 2; i < tasks.Count; i++)
                    {
                        ColumnStores[i-2] = ((Task<ColumnDataBase>)tasks[i]).Result;
                    }

                    vdb.Dispose();
                    dk.Dispose();
                    diti.Dispose();

                    foreach (var c in colstores)
                    {
                        c.Dispose();
                    }
                }
                catch
                {
                    m_stateBroken = true;
                    throw;
                }

                m_allocator = newpool;
            }
            finally
            {
                StructureLock.ExitWriteLock();
            }
        }
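MigrateRAM follows a copy-swap-dispose pattern: while holding the write lock it builds complete copies of every unmanaged structure in the new pool (in parallel, one task per structure), publishes the new instances only once all copies have succeeded, and then disposes the old ones; an exception during the publish phase sets m_stateBroken instead of leaving the container silently half-migrated. Below is a minimal sketch of the same pattern reduced to a single field; IPool, Buffer and Holder are hypothetical stand-ins (not part of the original API), and the code needs only System and System.Threading:

        // Hypothetical stand-ins so the sketch compiles; the original code uses
        // IUnmanagedAllocator and real unmanaged structures instead.
        public interface IPool { }
        public sealed class Buffer : IDisposable
        {
            public Buffer(Buffer source, IPool pool) { /* deep-copy source into pool */ }
            public void Dispose() { /* release the underlying memory */ }
        }

        public sealed class Holder
        {
            private readonly ReaderWriterLockSlim m_lock = new ReaderWriterLockSlim();
            private Buffer m_data;        // current copy, living in some pool
            private bool m_stateBroken;

            public Holder(Buffer data) { m_data = data; }

            public bool IsBroken { get { return m_stateBroken; } }

            public void Migrate(IPool newPool)
            {
                m_lock.EnterWriteLock();
                try
                {
                    // 1. Build a complete copy in the new pool. An exception here
                    //    leaves the original untouched and the holder still usable.
                    var copy = new Buffer(m_data, newPool);

                    var old = m_data;
                    try
                    {
                        m_data = copy;    // 2. Publish the copy.
                        old.Dispose();    // 3. Only then release the original.
                    }
                    catch
                    {
                        // Failing after the swap can leave old and new state mixed,
                        // so the holder is flagged rather than silently corrupted.
                        m_stateBroken = true;
                        throw;
                    }
                }
                finally
                {
                    m_lock.ExitWriteLock();
                }
            }
        }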