/// <summary>
/// Initializes the container and pre-creates one lock object per document type.
/// </summary>
/// <param name="tracer">Diagnostics tracer; must not be null.</param>
/// <param name="dataContainerDescriptor">Descriptor enumerating all document types; must not be null.</param>
/// <param name="storageRoot">Persistence root; may be null to run without persistence.</param>
/// <exception cref="ArgumentNullException">When tracer or dataContainerDescriptor is null.</exception>
public DataContainer(ITracer tracer, DataContainerDescriptor dataContainerDescriptor, string storageRoot)
{
    // nameof keeps the parameter names refactor-safe (was hard-coded string literals)
    if (tracer == null)
    {
        throw new ArgumentNullException(nameof(tracer));
    }

    if (dataContainerDescriptor == null)
    {
        throw new ArgumentNullException(nameof(dataContainerDescriptor));
    }

    // intentionally allowed to be null - in case if we don't want to use any persistence
    m_storageRoot = storageRoot;

    m_tracer = tracer;
    m_memoryPool = new DynamicMemoryPool();
    m_dataContainerDescriptor = dataContainerDescriptor;
    m_documentDataContainers = new Dictionary<int, DocumentDataContainer>(50);
    m_documentDataContainerLocks = new Dictionary<int, object>(50);

    // containers themselves are created lazily elsewhere; only the locks are eager
    foreach (var item in dataContainerDescriptor.EnumerateDocumentTypes())
    {
        m_documentDataContainerLocks.Add(item.DocumentType, new object());
    }
}
/// <summary>
/// Instantiates a strongly-typed ColumnData&lt;T&gt; for the given DbType.
/// When <paramref name="migrated"/> is supplied, its contents are copied into
/// the new store via the migration constructor; otherwise an empty store is built.
/// </summary>
private static ColumnDataBase CreateColumnStore(DbType dbType, IUnmanagedAllocator allocator, ColumnDataBase migrated)
{
    var elementType = DriverRowData.DeriveSystemType(dbType);
    var storeType = typeof(ColumnData <>).MakeGenericType(elementType);

    if (migrated == null)
    {
        // fresh, empty column store
        return (ColumnDataBase)Activator.CreateInstance(storeType, dbType, allocator);
    }

    // copy-construct from the existing store into the supplied allocator
    return (ColumnDataBase)Activator.CreateInstance(storeType, migrated, allocator);
}
/// <summary>
/// Creates an empty column store for the given DbType and pre-generates the
/// delegates used to move values between this store and driver rows.
/// </summary>
public ColumnData(DbType dbType, IUnmanagedAllocator allocator)
    : base(allocator)
{
    m_dbType = dbType;

    // value types are stored with their exact byte size; reference types as pointers
    var elementByteCount = typeof(T).IsValueType
        ? DriverRowData.GetByteCount(dbType)
        : IntPtr.Size;
    DataArray = new ExpandableArray <T>(1, elementByteCount);

    AssignFromDriverRow = GenerateAssignFromDriverRowAction();
    AssignToDriverRow = GenerateAssignToDriverRowAction();
    WriteData = GenerateWriteDataAction();
    ReadData = GenerateReadDataAction();
}
/// <summary>
/// Migration constructor: takes over the typed state of an existing store
/// (the base constructor copies NotNulls into the new allocator).
/// </summary>
public ColumnData(ColumnDataBase source, IUnmanagedAllocator allocator)
    : base(source, allocator)
{
    var other = (ColumnData <T>)source;

    m_dbType = other.DbType;
    DataArray = other.DataArray;

    // reuse the delegates already generated by the source store
    AssignFromDriverRow = other.AssignFromDriverRow;
    AssignToDriverRow = other.AssignToDriverRow;
    WriteData = other.WriteData;
    ReadData = other.ReadData;
}
/// <summary>
/// Initializes per-document-type storage: one column store per field, the key
/// array, the id-to-index hashmap, the validity bitmap and the sort index manager.
/// </summary>
/// <param name="dataContainerDescriptor">Global schema descriptor; must not be null.</param>
/// <param name="documentTypeDescriptor">Descriptor of this document type; must not be null.</param>
/// <param name="allocator">Unmanaged allocator backing all data structures; must not be null.</param>
/// <param name="tracer">Diagnostics tracer; must not be null.</param>
/// <exception cref="ArgumentNullException">When any argument is null.</exception>
public DocumentDataContainer(
    DataContainerDescriptor dataContainerDescriptor,
    DocumentTypeDescriptor documentTypeDescriptor,
    IUnmanagedAllocator allocator,
    ITracer tracer)
{
    // nameof keeps the parameter names refactor-safe (was hard-coded string literals)
    if (tracer == null)
    {
        throw new ArgumentNullException(nameof(tracer));
    }

    if (dataContainerDescriptor == null)
    {
        throw new ArgumentNullException(nameof(dataContainerDescriptor));
    }

    if (documentTypeDescriptor == null)
    {
        throw new ArgumentNullException(nameof(documentTypeDescriptor));
    }

    if (allocator == null)
    {
        throw new ArgumentNullException(nameof(allocator));
    }

    m_logger = tracer;
    m_allocator = allocator;
    DocDesc = documentTypeDescriptor;
    DataContainerDescriptor = dataContainerDescriptor;
    ColumnStores = new ColumnDataBase[DocDesc.Fields.Length];
    DocumentKeys = new ExpandableArrayOfKeys(m_allocator);
    FieldIdToColumnStore = new Dictionary<int, int>(ColumnStores.Length * 2);
    PrimaryKeyFieldId = dataContainerDescriptor.RequireField(
        documentTypeDescriptor.DocumentType, documentTypeDescriptor.PrimaryKeyFieldName).FieldId;

    // one column store per declared field, plus a fieldId -> store index lookup
    for (var i = 0; i < DocDesc.Fields.Length; i++)
    {
        var field = dataContainerDescriptor.RequireField(DocDesc.Fields[i]);
        ColumnStores[i] = CreateColumnStore(field.DbType, m_allocator, null);
        FieldIdToColumnStore.Add(field.FieldId, i);
    }

    DocumentIdToIndex = new ConcurrentHashmapOfKeys(m_allocator);
    ValidDocumentsBitmap = new BitVector(m_allocator);
    SortIndexManager = new SortIndexManager(this);
    // recursion support: MigrateRAM and readers may re-enter the lock
    StructureLock = new ReaderWriterLockSlim(LockRecursionPolicy.SupportsRecursion);
}
/// <summary>
/// Initializes per-document-type storage: one column store per field, the key
/// array, the id-to-index hashmap, the validity bitmap and the sort index manager.
/// </summary>
/// <param name="dataContainerDescriptor">Global schema descriptor; must not be null.</param>
/// <param name="documentTypeDescriptor">Descriptor of this document type; must not be null.</param>
/// <param name="allocator">Unmanaged allocator backing all data structures; must not be null.</param>
/// <param name="tracer">Diagnostics tracer; must not be null.</param>
/// <exception cref="ArgumentNullException">When any argument is null.</exception>
public DocumentDataContainer(
    DataContainerDescriptor dataContainerDescriptor,
    DocumentTypeDescriptor documentTypeDescriptor,
    IUnmanagedAllocator allocator,
    ITracer tracer)
{
    // nameof keeps the parameter names refactor-safe (was hard-coded string literals)
    if (tracer == null)
    {
        throw new ArgumentNullException(nameof(tracer));
    }

    if (dataContainerDescriptor == null)
    {
        throw new ArgumentNullException(nameof(dataContainerDescriptor));
    }

    if (documentTypeDescriptor == null)
    {
        throw new ArgumentNullException(nameof(documentTypeDescriptor));
    }

    if (allocator == null)
    {
        throw new ArgumentNullException(nameof(allocator));
    }

    m_logger = tracer;
    m_allocator = allocator;
    DocDesc = documentTypeDescriptor;
    DataContainerDescriptor = dataContainerDescriptor;
    ColumnStores = new ColumnDataBase[DocDesc.Fields.Length];
    DocumentKeys = new ExpandableArrayOfKeys(m_allocator);
    FieldIdToColumnStore = new Dictionary<int, int>(ColumnStores.Length * 2);
    PrimaryKeyFieldId = dataContainerDescriptor.RequireField(
        documentTypeDescriptor.DocumentType, documentTypeDescriptor.PrimaryKeyFieldName).FieldId;

    // one column store per declared field, plus a fieldId -> store index lookup
    for (var i = 0; i < DocDesc.Fields.Length; i++)
    {
        var field = dataContainerDescriptor.RequireField(DocDesc.Fields[i]);
        ColumnStores[i] = CreateColumnStore(field.DbType, m_allocator, null);
        FieldIdToColumnStore.Add(field.FieldId, i);
    }

    DocumentIdToIndex = new ConcurrentHashmapOfKeys(m_allocator);
    ValidDocumentsBitmap = new BitVector(m_allocator);
    SortIndexManager = new SortIndexManager(this);
    // recursion support: MigrateRAM and readers may re-enter the lock
    StructureLock = new ReaderWriterLockSlim(LockRecursionPolicy.SupportsRecursion);
}
// Re-creates every document container's unmanaged data inside a brand-new memory
// pool (one migration task per container), then makes the new pool current.
// Failure handling:
//  - if any migration task faults, the new pool is disposed and the error propagates;
//  - any failure here marks ALL containers invalid, since some containers may already
//    reference the new pool while others still point at the old one.
private void RebuildUnmanagedData()
{
    var newpool = new DynamicMemoryPool();
    Action <object> action = x => ((DocumentDataContainer)x).MigrateRAM(newpool);
    // x1 pins the loop variable so each task captures its own container
    var tasks = m_documentDataContainers.Values.Select(x => { var x1 = x; return(new Task(action, x1)); }).ToArray();
    try
    {
        try
        {
            foreach (var t in tasks)
            {
                t.Start();
            }
            Task.WaitAll(tasks);
        }
        catch
        {
            // migration did not complete for every container; reclaim the new pool
            // and let the original error propagate (outer catch invalidates containers)
            newpool.Dispose();
            throw;
        }

        try
        {
            m_memoryPool.Dispose();
        }
        finally
        {
            // the new pool becomes current even if disposing the old one threw
            m_memoryPool = newpool;
        }
    }
    catch
    {
        foreach (var c in m_documentDataContainers.Values)
        {
            c.MarkAsInvalid();
        }
        throw;
    }
}
/// <summary>
/// Migration constructor: copies the null-tracking bitmap into the new allocator.
/// </summary>
/// <param name="source">Existing store whose NotNulls bitmap is copied; must not be null.</param>
/// <param name="allocator">Target allocator for the copy; must not be null.</param>
/// <exception cref="ArgumentNullException">When source or allocator is null.</exception>
protected ColumnDataBase(ColumnDataBase source, IUnmanagedAllocator allocator)
{
    // explicit guards for consistency with the public constructors elsewhere in the
    // file (previously a null source surfaced as NullReferenceException)
    if (source == null)
    {
        throw new ArgumentNullException(nameof(source));
    }

    if (allocator == null)
    {
        throw new ArgumentNullException(nameof(allocator));
    }

    // may throw due to insufficient memory
    NotNulls = new BitVector(source.NotNulls, allocator);
}
/// <summary>
/// Creates an empty column store base with a fresh null-tracking bitmap.
/// </summary>
/// <param name="allocator">Unmanaged allocator backing the bitmap; must not be null.</param>
/// <exception cref="ArgumentNullException">When allocator is null.</exception>
protected ColumnDataBase(IUnmanagedAllocator allocator)
{
    // explicit guard for consistency with the public constructors elsewhere in the file
    if (allocator == null)
    {
        throw new ArgumentNullException(nameof(allocator));
    }

    NotNulls = new BitVector(allocator);
}
/// <summary>
/// Builds a ColumnData&lt;T&gt; whose element type is derived from the DbType.
/// Passing an existing store as <paramref name="migrated"/> selects the
/// migration constructor; passing null selects the empty-store constructor.
/// </summary>
private static ColumnDataBase CreateColumnStore(DbType dbType, IUnmanagedAllocator allocator, ColumnDataBase migrated)
{
    var closedType = typeof(ColumnData<>).MakeGenericType(DriverRowData.DeriveSystemType(dbType));

    // constructor choice: (DbType, allocator) for a new store,
    // (source, allocator) to migrate an existing one
    var args = migrated == null
        ? new object[] { dbType, allocator }
        : new object[] { migrated, allocator };

    return (ColumnDataBase)Activator.CreateInstance(closedType, args);
}
/// <summary>
/// Reconstructs all unmanaged data in the new pool.
/// Side effect is that all fragmentation is removed.
/// </summary>
/// <param name="newpool">The new memory pool to use.</param>
public void MigrateRAM(IUnmanagedAllocator newpool)
{
    CheckState();

    // keep the current structures so they can be disposed after a successful swap
    var vdb = ValidDocumentsBitmap;
    var dk = DocumentKeys;
    var diti = DocumentIdToIndex;
    var colstores = ColumnStores.ToArray();

    StructureLock.EnterWriteLock();
    try
    {
        // generate a new copy of the data
        // task layout: [0] validity bitmap, [1] document keys, [2..] one per column store
        var tasks = new List<Task>();
        tasks.Add(new Task<BitVector>(() => new BitVector(ValidDocumentsBitmap, newpool)));
        tasks.Add(new Task<ExpandableArrayOfKeys>(() => new ExpandableArrayOfKeys(DocumentKeys, newpool)));
        foreach (var c in ColumnStores)
        {
            tasks.Add(new Task<ColumnDataBase>(o => CreateColumnStore(((ColumnDataBase)o).DbType, newpool, (ColumnDataBase)o), c));
        }

        foreach (var t in tasks)
        {
            t.Start();
        }
        Task.WaitAll(tasks.ToArray());

        var newvdb = ((Task<BitVector>) tasks[0]).Result;
        var newdk = ((Task<ExpandableArrayOfKeys>) tasks[1]).Result;
        // hashmap is rebuilt synchronously against the freshly copied key array
        var newditi = new ConcurrentHashmapOfKeys(DocumentIdToIndex, newdk, newpool);

        // now, since no exception was thrown, let's consume results and dispose of old structures
        try
        {
            ValidDocumentsBitmap = newvdb;
            DocumentKeys = newdk;
            DocumentIdToIndex = newditi;
            for (var i = 2; i < tasks.Count; i++)
            {
                // tasks[2..] line up with ColumnStores[0..], hence the i-2 offset
                ColumnStores[i-2] = ((Task<ColumnDataBase>)tasks[i]).Result;
            }

            vdb.Dispose();
            dk.Dispose();
            diti.Dispose();
            foreach (var c in colstores)
            {
                c.Dispose();
            }
        }
        catch
        {
            // a partial swap or dispose leaves old and new structures mixed;
            // the container can no longer be trusted
            m_stateBroken = true;
            throw;
        }

        m_allocator = newpool;
    }
    finally
    {
        StructureLock.ExitWriteLock();
    }
}
/// <summary>
/// Reconstructs all unmanaged data in the new pool.
/// Side effect is that all fragmentation is removed.
/// </summary>
/// <param name="newpool">The new memory pool to use.</param>
public void MigrateRAM(IUnmanagedAllocator newpool)
{
    CheckState();

    // keep the current structures so they can be disposed after a successful swap
    var vdb = ValidDocumentsBitmap;
    var dk = DocumentKeys;
    var diti = DocumentIdToIndex;
    var colstores = ColumnStores.ToArray();

    StructureLock.EnterWriteLock();
    try
    {
        // generate a new copy of the data
        // task layout: [0] validity bitmap, [1] document keys, [2..] one per column store
        var tasks = new List <Task>();
        tasks.Add(new Task <BitVector>(() => new BitVector(ValidDocumentsBitmap, newpool)));
        tasks.Add(new Task <ExpandableArrayOfKeys>(() => new ExpandableArrayOfKeys(DocumentKeys, newpool)));
        foreach (var c in ColumnStores)
        {
            tasks.Add(new Task <ColumnDataBase>(o => CreateColumnStore(((ColumnDataBase)o).DbType, newpool, (ColumnDataBase)o), c));
        }

        foreach (var t in tasks)
        {
            t.Start();
        }
        Task.WaitAll(tasks.ToArray());

        var newvdb = ((Task <BitVector>)tasks[0]).Result;
        var newdk = ((Task <ExpandableArrayOfKeys>)tasks[1]).Result;
        // hashmap is rebuilt synchronously against the freshly copied key array
        var newditi = new ConcurrentHashmapOfKeys(DocumentIdToIndex, newdk, newpool);

        // now, since no exception was thrown, let's consume results and dispose of old structures
        try
        {
            ValidDocumentsBitmap = newvdb;
            DocumentKeys = newdk;
            DocumentIdToIndex = newditi;
            for (var i = 2; i < tasks.Count; i++)
            {
                // tasks[2..] line up with ColumnStores[0..], hence the i - 2 offset
                ColumnStores[i - 2] = ((Task <ColumnDataBase>)tasks[i]).Result;
            }

            vdb.Dispose();
            dk.Dispose();
            diti.Dispose();
            foreach (var c in colstores)
            {
                c.Dispose();
            }
        }
        catch
        {
            // a partial swap or dispose leaves old and new structures mixed;
            // the container can no longer be trusted
            m_stateBroken = true;
            throw;
        }

        m_allocator = newpool;
    }
    finally
    {
        StructureLock.ExitWriteLock();
    }
}
// Re-creates every document container's unmanaged data inside a brand-new memory
// pool (one migration task per container), then makes the new pool current.
// Failure handling:
//  - if any migration task faults, the new pool is disposed and the error propagates;
//  - any failure here marks ALL containers invalid, since some containers may already
//    reference the new pool while others still point at the old one.
private void RebuildUnmanagedData()
{
    var newpool = new DynamicMemoryPool();
    Action<object> action = x => ((DocumentDataContainer)x).MigrateRAM(newpool);
    // x1 pins the loop variable so each task captures its own container
    var tasks = m_documentDataContainers.Values.Select(x => { var x1 = x; return new Task(action, x1); }).ToArray();
    try
    {
        try
        {
            foreach (var t in tasks)
            {
                t.Start();
            }
            Task.WaitAll(tasks);
        }
        catch
        {
            // migration did not complete for every container; reclaim the new pool
            // and let the original error propagate (outer catch invalidates containers)
            newpool.Dispose();
            throw;
        }

        try
        {
            m_memoryPool.Dispose();
        }
        finally
        {
            // the new pool becomes current even if disposing the old one threw
            m_memoryPool = newpool;
        }
    }
    catch
    {
        foreach (var c in m_documentDataContainers.Values)
        {
            c.MarkAsInvalid();
        }
        throw;
    }
}
/// <summary>
/// Initializes the container and pre-creates one lock object per document type.
/// </summary>
/// <param name="tracer">Diagnostics tracer; must not be null.</param>
/// <param name="dataContainerDescriptor">Descriptor enumerating all document types; must not be null.</param>
/// <param name="storageRoot">Persistence root; may be null to run without persistence.</param>
/// <exception cref="ArgumentNullException">When tracer or dataContainerDescriptor is null.</exception>
public DataContainer(ITracer tracer, DataContainerDescriptor dataContainerDescriptor, string storageRoot)
{
    // nameof keeps the parameter names refactor-safe (was hard-coded string literals)
    if (tracer == null)
    {
        throw new ArgumentNullException(nameof(tracer));
    }

    if (dataContainerDescriptor == null)
    {
        throw new ArgumentNullException(nameof(dataContainerDescriptor));
    }

    // intentionally allowed to be null - in case if we don't want to use any persistence
    m_storageRoot = storageRoot;

    m_tracer = tracer;
    m_memoryPool = new DynamicMemoryPool();
    m_dataContainerDescriptor = dataContainerDescriptor;
    m_documentDataContainers = new Dictionary<int, DocumentDataContainer>(50);
    m_documentDataContainerLocks = new Dictionary<int, object>(50);

    // containers themselves are created lazily elsewhere; only the locks are eager
    foreach (var item in dataContainerDescriptor.EnumerateDocumentTypes())
    {
        m_documentDataContainerLocks.Add(item.DocumentType, new object());
    }
}