/// <summary>
/// Write data to disk in bulk mode using a caller-supplied I/O pool.
/// This overload performs no transaction-log backup itself; it only overwrites the
/// target data segments with data from the in-memory source provided, issuing the
/// writes asynchronously through <paramref name="writePool"/> (each chunk's write is
/// started via BeginWrite and completed by Transaction.Transaction.WriteCallback).
/// </summary>
/// <param name="writePool">Pool of concurrent file I/O instances used to issue the asynchronous chunk writes.</param>
/// <param name="parent">Collection on disk whose backing file receives the data.</param>
/// <param name="source">In-memory buffer holding the bytes to write; chunk Index/Size address into this buffer.</param>
/// <param name="dataChunks">Chunks describing source offsets/sizes and their target addresses on disk.</param>
public void Write(ConcurrentIOPoolManager writePool, Algorithm.Collection.ICollectionOnDisk parent,
                  byte[] source, List<DataChunk> dataChunks)
{
    Log.Logger.Instance.Log(Log.LogLevels.Information, "BulkWriter.Write begin.");
    byte[] data = source;

    #region Async write data segments from source onto respective target regions on disk...
#if (_WIN32)
    // Fast path (_WIN32 builds only): a single chunk no bigger than 512 bytes is written
    // synchronously on the parent's own stream, bypassing the async pool entirely.
    // 512 was hard-coded in place of DataBlockSize.Minimum (original reference kept below).
    if (dataChunks.Count == 1 && dataChunks[0].Size <= (int)512) // was: (int)DataBlockSize.Minimum
    {
        var chunk = dataChunks[0];
        long dataAddress = chunk.TargetDataAddress;
        int dataIndex = chunk.Index;
        int dataSize = chunk.Size;
        var writer = parent.FileStream;
        // Only seek when the stream is not already positioned at the target address.
        if (dataAddress != writer.GetPosition(true))
        {
            writer.Seek(dataAddress, SeekOrigin.Begin, true);
        }
        writer.Write(data, dataIndex, dataSize);
        Log.Logger.Instance.Log(Log.LogLevels.Information, "BulkWriter.Write end (Size <= {0}).", (int)512); // was: DataBlockSize.Minimum
        return;
    }
#endif
    bool initial = true;
    foreach (var chunk in dataChunks)
    {
        // Short-circuit: if a previously-issued async write already failed on a pool
        // thread, surface that exception here instead of queueing more work.
        if (writePool.AsyncThreadException != null)
        {
            throw writePool.AsyncThreadException;
        }
        long dataAddress = chunk.TargetDataAddress;
        int dataIndex = chunk.Index;
        int dataSize = chunk.Size;
        var writer = writePool.GetInstance(parent.File.Filename, null);
        // Extend file if needed, on the initial step of the loop only: seek to one past
        // the last byte the final chunk will occupy.
        // NOTE(review): this relies on the project FileStream wrapper's Seek growing the
        // file; a plain System.IO.FileStream would not grow until a write lands — confirm.
        if (initial)
        {
            initial = false;
            long targetLastByteOnFileOffset = dataChunks[dataChunks.Count - 1].TargetDataAddress + dataChunks[dataChunks.Count - 1].Size;
            if (writer.FileStream.Length < targetLastByteOnFileOffset)
            {
                writer.FileStream.Seek(targetLastByteOnFileOffset, SeekOrigin.Begin, true);
            }
        }
        // Mark the pooled stream busy; presumably cleared by WriteCallback when the
        // async write completes — verify against Transaction.Transaction.WriteCallback.
        writer.FileStream.InUse = true;
        if (dataAddress != writer.FileStream.GetPosition(true))
        {
            writer.FileStream.Seek(dataAddress, SeekOrigin.Begin, true);
        }
        // param[0] is intentionally null; WriteCallback receives { null, writer }.
        var param = new[] { null, writer };
        writer.FileStream.BeginWrite(data, dataIndex, dataSize, Transaction.Transaction.WriteCallback, param);
    }
    #endregion
    Log.Logger.Instance.Log(Log.LogLevels.Information, "BulkWriter.Write end.");
}
/// <summary>
/// Write data to disk in bulk mode.
/// This method can spin off multiple threads as part of doing asynchronous operations
/// to accomplish the following processes:
/// - back up existing target data segments that will be overwritten to the respective
///   transaction log file (only when the parent collection has an active transaction);
/// - overwrite target data segments with data from the in-memory source provided.
/// </summary>
/// <param name="parent">Collection on disk whose backing file receives the data.</param>
/// <param name="source">In-memory buffer holding the bytes to write; chunk Index/Size address into this buffer.</param>
/// <param name="dataChunks">Chunks describing source offsets/sizes and their target addresses on disk.</param>
public void Write(Algorithm.Collection.ICollectionOnDisk parent, byte[] source, List<DataChunk> dataChunks)
{
    Log.Logger.Instance.Log(Log.LogLevels.Information, "BulkWriter.Write begin.");
    byte[] data = source;
    ITransactionLogger trans = parent.Transaction;
    if (trans != null)
    {
        #region Async Backup target disk regions for update...
        // Back up the regions that are about to be overwritten into the transaction log.
        // Both pools are disposed (drained) before any overwrite below is issued.
        using (var writePool = new ConcurrentIOPoolManager())
        {
            using (var readPool = new ConcurrentIOPoolManager())
            {
                foreach (var chunk in dataChunks)
                {
                    // Identify regions that were not backed up and overwritten yet, then back them up...
                    ((TransactionBase)trans).RegisterSave((CollectionOnDisk)parent, chunk.TargetDataAddress,
                                                         chunk.Size, readPool, writePool);
                }
            }
        }
        #endregion
    }

    #region Async write data segments from source onto respective target regions on disk...
    // Fast path: a single chunk no bigger than 512 bytes is written synchronously
    // on the parent's own stream, bypassing the async pool entirely.
    if (dataChunks.Count == 1 && dataChunks[0].Size <= (int)DataBlockSize.FiveTwelve)
    {
        var chunk = dataChunks[0];
        long dataAddress = chunk.TargetDataAddress;
        int dataIndex = chunk.Index;
        int dataSize = chunk.Size;
        var writer = parent.FileStream;
        // Only seek when the stream is not already positioned at the target address.
        if (dataAddress != writer.Position)
        {
            writer.Seek(dataAddress, SeekOrigin.Begin);
        }
        writer.Write(data, dataIndex, dataSize);
        Log.Logger.Instance.Log(Log.LogLevels.Information, "BulkWriter.Write end (Size <= 512).");
        return;
    }
    using (var writePool2 = new ConcurrentIOPoolManager())
    {
        // FIX: loop-invariant — one past the last byte the final chunk will occupy;
        // previously recomputed on every iteration.
        long targetLastByteOnFileOffset = dataChunks[dataChunks.Count - 1].TargetDataAddress +
                                          dataChunks[dataChunks.Count - 1].Size;
        bool initial = true;
        foreach (var chunk in dataChunks)
        {
            // FIX (consistency with the pool-based overload): if a previously-issued
            // async write already failed on a pool thread, surface that exception here
            // instead of silently queueing more work.
            if (writePool2.AsyncThreadException != null)
            {
                throw writePool2.AsyncThreadException;
            }
            long dataAddress = chunk.TargetDataAddress;
            int dataIndex = chunk.Index;
            int dataSize = chunk.Size;
            var writer = writePool2.GetInstance(parent.File.Filename, null, dataSize);
            // Extend file if needed, on the initial step of the loop only.
            // FIX: 'initial' is now cleared unconditionally on the first pass (as in the
            // sibling overload); previously it stayed true whenever the file was already
            // long enough, re-running this check on every iteration.
            // NOTE(review): this relies on Seek past EOF growing the file; a plain
            // System.IO.FileStream would not grow until a write lands — confirm wrapper.
            if (initial)
            {
                initial = false;
                if (writer.FileStream.Length < targetLastByteOnFileOffset)
                {
                    writer.FileStream.Seek(targetLastByteOnFileOffset, SeekOrigin.Begin);
                }
            }
            if (dataAddress != writer.FileStream.Position)
            {
                writer.FileStream.Seek(dataAddress, SeekOrigin.Begin);
            }
            // param[0] is intentionally null; WriteCallback receives { null, writer }.
            var param = new[] { null, writer };
            writer.FileStream.BeginWrite(data, dataIndex, dataSize, Transaction.Transaction.WriteCallback, param);
        }
    }
    #endregion
    Log.Logger.Instance.Log(Log.LogLevels.Information, "BulkWriter.Write end.");
}