Example #1
        private void TestPersistence(ulong count, ExpandableArrayOfKeys keys)
        {
            using (var validEntries = new BitVector(_pool))
            {
                // Mark every entry valid so the entire array is persisted.
                validEntries.EnsureCapacity(count);
                validEntries.ChangeAll(true);

                // Write the keys out to disk.
                using (var stream = new FileStream(@"c:\temp\data.d", FileMode.Create, FileAccess.ReadWrite))
                using (var writer = new BinaryWriter(stream))
                {
                    keys.Write(writer, count, validEntries);
                }

                // Read them back into a fresh array.
                using (var keys2 = new ExpandableArrayOfKeys(_pool))
                using (var stream = new FileStream(@"c:\temp\data.d", FileMode.Open, FileAccess.Read))
                using (var reader = new BinaryReader(stream))
                {
                    keys2.Read(reader, count, validEntries);
                }
            }
        }
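A minimal driver for the round trip, as a sketch only: it assumes the same _pool allocator field and the GenerateKeys helper shown later in this collection.

        private void TestPersistenceRoundTrip()
        {
            // Hypothetical harness: build 1000 keys, persist them, read them back.
            using (var keys = GenerateKeys(1000))
            {
                TestPersistence(1000, keys);
            }
        }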
Example #2
        private void MultiThread(
            Action<ConcurrentHashmapOfKeys, ExpandableArrayOfKeys, ulong, ulong, ulong> a,
            ConcurrentHashmapOfKeys map, ExpandableArrayOfKeys keys, int nThreads, ulong count, ulong offset)
        {
            var tasks = new Task[nThreads];

            // Each task works on its own contiguous range of `count` entries,
            // starting at `offset`.
            var first = offset;

            for (var i = 0; i < tasks.Length; i++)
            {
                var first1 = first; // per-iteration copy, so each closure sees its own start
                tasks[i] = new Task(() => a(map, keys, first1, count, offset));
                first += count;
            }

            foreach (var task in tasks)
            {
                task.Start();
            }

            foreach (var task in tasks)
            {
                task.Wait();
            }
        }
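The worker from Example #3 below matches this delegate, so several threads can each validate their own slice of the map. A sketch, assuming the map was populated so that the key at index i - offset maps to the value i:

        private void RunParallelValidation(ConcurrentHashmapOfKeys map, ExpandableArrayOfKeys keys)
        {
            // Hypothetical call: 4 threads, 250000 lookups each, values starting at 1.
            MultiThread(ReadAction, map, keys, 4, 250000, 1);
        }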
Example #3
        private unsafe void ReadAction(ConcurrentHashmapOfKeys map, ExpandableArrayOfKeys keys, ulong first, ulong count, ulong offset)
        {
            var watch = Stopwatch.StartNew();

            // Repeatedly look up every key in this thread's range and check that
            // the stored value matches the expected index.
            for (var k = 0; k < 10000; k++)
            {
                for (var i = first; i < first + count; i++)
                {
                    ulong val = 0;
                    if (!map.TryGetValue(keys.GetAt(i - offset), ref val))
                    {
                        throw new Exception("Failed to get at " + i + ", offset from " + offset);
                    }

                    if (val != i)
                    {
                        throw new Exception("Failed to validate at " + i + ", offset from " + offset);
                    }
                }
            }

            watch.Stop();
            //Console.WriteLine("Elapsed: {0}, for {1}, {2}", watch.ElapsedMilliseconds, first, count);
        }
        public DocumentDataContainer(
            DataContainerDescriptor dataContainerDescriptor,
            DocumentTypeDescriptor documentTypeDescriptor,
            IUnmanagedAllocator allocator,
            ITracer tracer)
        {
            if (tracer == null)
            {
                throw new ArgumentNullException("tracer");
            }

            if (dataContainerDescriptor == null)
            {
                throw new ArgumentNullException("dataContainerDescriptor");
            }

            if (documentTypeDescriptor == null)
            {
                throw new ArgumentNullException("documentTypeDescriptor");
            }

            if (allocator == null)
            {
                throw new ArgumentNullException("allocator");
            }

            m_logger = tracer;

            m_allocator             = allocator;
            DocDesc                 = documentTypeDescriptor;
            DataContainerDescriptor = dataContainerDescriptor;

            ColumnStores         = new ColumnDataBase[DocDesc.Fields.Length];
            DocumentKeys         = new ExpandableArrayOfKeys(m_allocator);
            FieldIdToColumnStore = new Dictionary<int, int>(ColumnStores.Length * 2);
            PrimaryKeyFieldId    = dataContainerDescriptor.RequireField(documentTypeDescriptor.DocumentType, documentTypeDescriptor.PrimaryKeyFieldName).FieldId;

            for (var i = 0; i < DocDesc.Fields.Length; i++)
            {
                var field = dataContainerDescriptor.RequireField(DocDesc.Fields[i]);
                ColumnStores[i] = CreateColumnStore(field.DbType, m_allocator, null);
                FieldIdToColumnStore.Add(field.FieldId, i);
            }

            DocumentIdToIndex    = new ConcurrentHashmapOfKeys(m_allocator);
            ValidDocumentsBitmap = new BitVector(m_allocator);
            SortIndexManager     = new SortIndexManager(this);
            StructureLock        = new ReaderWriterLockSlim(LockRecursionPolicy.SupportsRecursion);
        }
        private unsafe void BasicMemoryAction(ConcurrentHashmapOfKeys map, ExpandableArrayOfKeys keys, ulong first, ulong count, ulong offset)
        {
            // Allocation stress test: repeatedly create and dispose a hashmap
            // backed by the shared pool, inserting a single key each time.
            for (var k = first; k < first + count; k++)
            {
                using (var map2 = new ConcurrentHashmapOfKeys(_pool))
                {
                    map2.TryAdd(keys.GetAt(0), 0);
                }
            }

            //var b = stackalloc void*[(int)100000];
            //for (var k = 0; k < 10000000000; k++)
            //{
            //    for (var i = 0; i < 100000; i++)
            //    {
            //        b[i] = _pool.Alloc((ulong) i % 10000);
            //    }
            //    for (var i = 0; i < 100000; i++)
            //    {
            //        _pool.Free(b[i]);
            //    }
            //}
        }
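This worker has the same signature as ReadAction, so the same MultiThread harness can drive it. A sketch:

        private void RunAllocationStress(ConcurrentHashmapOfKeys map, ExpandableArrayOfKeys keys)
        {
            // Hypothetical call: 8 threads, each creating and disposing 10000 hashmaps.
            MultiThread(BasicMemoryAction, map, keys, 8, 10000, 0);
        }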
        unsafe ExpandableArrayOfKeys GenerateKeys(ulong count)
        {
            var result = new ExpandableArrayOfKeys(_pool);
            result.EnsureCapacity(count);

            // Each key is length-prefixed: key[0] holds the number of payload
            // bytes, and key[1..key[0]] holds the value in little-endian order.
            var key = new byte[10];

            for (ulong i = 1; i <= count; i++)
            {
                var val = i;
                byte pos = 1;
                while (val != 0)
                {
                    key[pos++] = (byte)val;
                    val >>= 8;
                }

                key[0] = (byte)(pos - 1);

                if (!result.TrySetAt((int)i - 1, key))
                {
                    throw new Exception("Failed to set a key element at " + (i - 1));
                }
            }

            // Read every key back and verify it round-trips byte for byte.
            for (ulong i = 1; i <= count; i++)
            {
                var storedKey = result.GetAt(i - 1);

                var val = i;
                byte pos = 1;
                while (val != 0)
                {
                    key[pos++] = (byte)val;
                    val >>= 8;
                }

                key[0] = (byte)(pos - 1);

                if (storedKey[0] != key[0])
                {
                    throw new Exception("Length prefix broken at " + (i - 1));
                }

                for (var j = 0; j <= key[0]; j++)
                {
                    if (storedKey[j] != key[j])
                    {
                        throw new Exception("Data broken at " + (i - 1) + ", offset " + j);
                    }
                }
            }

            return result;
        }
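To make the layout explicit, here is a decoding sketch (not part of the original; it assumes GetAt returns a byte* pointing at the length-prefixed key, as the unsafe code above implies):

        private static unsafe ulong DecodeKey(byte* storedKey)
        {
            // Walk the payload from the most significant byte (index storedKey[0])
            // down to the least significant byte (index 1).
            ulong val = 0;
            for (int j = storedKey[0]; j >= 1; j--)
            {
                val = (val << 8) | storedKey[j];
            }
            return val;
        }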
        /// <summary>
        /// Reconstructs all unmanaged data in the new pool.
        /// Side effect is that all fragmentation is removed.
        /// </summary>
        /// <param name="newpool">The new memory pool to use.</param>
        public void MigrateRAM(IUnmanagedAllocator newpool)
        {
            CheckState();

            var vdb = ValidDocumentsBitmap;
            var dk = DocumentKeys;
            var diti = DocumentIdToIndex;
            var colstores = ColumnStores.ToArray();

            StructureLock.EnterWriteLock();
            try
            {
                // generate a new copy of the data
                var tasks = new List<Task>();

                tasks.Add(new Task<BitVector>(() => new BitVector(ValidDocumentsBitmap, newpool)));
                tasks.Add(new Task<ExpandableArrayOfKeys>(() => new ExpandableArrayOfKeys(DocumentKeys, newpool)));

                foreach (var c in ColumnStores)
                {
                    tasks.Add(new Task<ColumnDataBase>(o => CreateColumnStore(((ColumnDataBase)o).DbType, newpool, (ColumnDataBase)o), c));
                }

                foreach (var t in tasks)
                {
                    t.Start();
                }

                Task.WaitAll(tasks.ToArray());

                var newvdb = ((Task<BitVector>) tasks[0]).Result;
                var newdk = ((Task<ExpandableArrayOfKeys>) tasks[1]).Result;
                var newditi = new ConcurrentHashmapOfKeys(DocumentIdToIndex, newdk, newpool);

                // now, since no exception was thrown, let's consume results and dispose of old structures
                try
                {
                    ValidDocumentsBitmap = newvdb;
                    DocumentKeys = newdk;
                    DocumentIdToIndex = newditi;

                    for (var i = 2; i < tasks.Count; i++)
                    {
                        ColumnStores[i-2] = ((Task<ColumnDataBase>)tasks[i]).Result;
                    }

                    vdb.Dispose();
                    dk.Dispose();
                    diti.Dispose();

                    foreach (var c in colstores)
                    {
                        c.Dispose();
                    }
                }
                catch
                {
                    m_stateBroken = true;
                    throw;
                }

                m_allocator = newpool;
            }
            finally
            {
                StructureLock.ExitWriteLock();
            }
        }
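Usage is a single call; a sketch, assuming a constructed DocumentDataContainer and any fresh IUnmanagedAllocator instance. Note the order inside the method above: all copies are fully built before the old structures are swapped out and disposed, so an exception during the copy phase leaves the container untouched.

        private static void CompactContainer(DocumentDataContainer container, IUnmanagedAllocator freshPool)
        {
            // Hypothetical helper: rebuilds all unmanaged structures in freshPool,
            // removing fragmentation accumulated in the old pool.
            container.MigrateRAM(freshPool);
        }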