/// <summary>
/// Backs up a data area that has no intersecting nor mergeable backup log entries:
/// grows the transaction backup file to reserve room, records the backup log entry,
/// then asynchronously copies the source area into the backup file, deferring the
/// log line until the data has actually been backed up.
/// </summary>
private void BackupDataWithNoIntersection(
    IEnumerable<KeyValuePair<BackupDataLogKey, BackupDataLogValue>> intersectingLogs,
    BackupDataLogKey logKey, KeyValuePair<long, int> area, OnDisk.File.IFile f,
    string fFilename, ConcurrentIOPoolManager readPool, ConcurrentIOPoolManager writePool,
    RecordKey key)
{
    string systemBackupFilename = Server.Path + DataBackupFilename;
    int size = area.Value;
    key.Address = area.Key;

    // no intersection nor mergeable logs, add new log! backup and log the data area
    ConcurrentIOData reader = f != null
        ? readPool.GetInstance(f, size)
        : readPool.GetInstance(fFilename, null, size);
    ConcurrentIOData writer = writePool.GetInstance(systemBackupFilename, (TransactionRoot)Root);
    if (reader == null || writer == null)
    {
        throw new SopException("This program has a bug! Didn't get a reader or writer from the Async IO Pool.");
    }

    LogTracer.Verbose("BackupDataWithNoIntersection: Start for Thread {0}.", Thread.CurrentThread.ManagedThreadId);

    var logValue = new BackupDataLogValue();
    logValue.DataSize = size;
    logValue.TransactionId = Id;
    logValue.BackupFileHandle = GetLogBackupFileHandle(DataBackupFilename);

    // return the current backup file size and grow it to make room for data to be backed up...
    logValue.BackupDataAddress = GrowBackupFile(size, writer.FileStream);

    // save a record of the backed up data..
    LogCollection.Add(logKey, logValue);

    // log after data was backed up!!
    Sop.VoidFunc logBackedupData = () =>
    {
        UpdateLogger.LogLine("{0}{1}:{2} to {3}:{4} Size={5}",
            BackupFromToken, f != null ? f.Filename : fFilename, area.Key,
            DataBackupFilename, logValue.BackupDataAddress, size);
    };

    writer.FileStream.Seek(logValue.BackupDataAddress, SeekOrigin.Begin, true);
    reader.FileStream.Seek(area.Key, SeekOrigin.Begin, true);
    reader.FileStream.BeginRead(
        reader.Buffer, 0, size, ReadCallback,
        new object[] { new[] { reader, writer }, true, logKey, logBackedupData });
}
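/// <summary>
/// Variant of the no-intersection backup routine: it appends to the end of the
/// transaction backup file, logs the source and backup addresses up front, then
/// starts the asynchronous copy. Returns silently if no reader or writer could be
/// obtained from the I/O pools.
/// </summary>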
private void BackupDataWithNoIntersection(
    IEnumerable<KeyValuePair<BackupDataLogKey, BackupDataLogValue>> intersectingLogs,
    BackupDataLogKey logKey, KeyValuePair<long, int> area, OnDisk.File.IFile f,
    string fFilename, ConcurrentIOPoolManager readPool, ConcurrentIOPoolManager writePool,
    RecordKey key)
{
    string systemBackupFilename = Server.Path + DataBackupFilename;
    int size = area.Value;
    key.Address = area.Key;

    //if (RegisterAdd(_addStore, null, null, key, size, false))
    //{
    //    Logger.LogLine("Extending, skipping Backup...");
    //    return;
    //}

    //** no intersection nor mergeable logs, add new log!
    //** backup and log the data area
    ConcurrentIOData reader = f != null
        ? readPool.GetInstance(f, size)
        : readPool.GetInstance(fFilename, null, size);
    ConcurrentIOData writer = writePool.GetInstance(systemBackupFilename, (TransactionRoot)Root, size);
    if (reader == null || writer == null)
    {
        return;
    }

    var logValue = new BackupDataLogValue();
    logValue.DataSize = size;
    logValue.TransactionId = Id;

    //** todo: can we remove this block?
    //long readerFileSize = reader.FileStream.Length;
    //if (area.Key + size > readerFileSize)
    //{
    //    int appendSize = (int)(area.Key + size - readerFileSize);
    //    key.Address = readerFileSize;
    //    RegisterAdd(_addStore, null, null, key, appendSize, false);
    //    size = (int)(readerFileSize - area.Key);
    //    logValue.DataSize = size;
    //    reader.Buffer = new byte[size];
    //}
    //**

    reader.FileStream.Seek(area.Key, SeekOrigin.Begin);
    logValue.BackupFileHandle = GetLogBackupFileHandle(DataBackupFilename);
    logValue.BackupDataAddress = writer.FileStream.Seek(0, SeekOrigin.End);

    UpdateLogger.LogLine("{0}{1}:{2} to {3}:{4} Size={5}",
        BackupFromToken, f != null ? f.Filename : fFilename, area.Key,
        DataBackupFilename, logValue.BackupDataAddress, size);

    // resize target file to accommodate data to be copied.
    writer.FileStream.Seek(size, SeekOrigin.End);
    writer.FileStream.Seek(logValue.BackupDataAddress, SeekOrigin.Begin);

    reader.FileStream.BeginRead(
        reader.Buffer, 0, size, ReadCallback,
        new object[] { new[] { reader, writer }, true, logKey });

    //** save a record of the backed up data..
    LogCollection.Add(logKey, logValue);
}
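/// <summary>
/// Backs up the parts of a data area that fall outside already backed up
/// (intersecting) regions: conflicts with other transactions are processed, the
/// intersecting segments are subtracted from the requested region, and each
/// remaining sub-area is appended to the transaction backup file asynchronously
/// and recorded in the log collection.
/// </summary>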
private void BackupDataWithIntersection(
    IEnumerable<KeyValuePair<BackupDataLogKey, BackupDataLogValue>> intersectingLogs,
    BackupDataLogKey logKey, KeyValuePair<long, int> area, OnDisk.File.IFile f,
    string fFilename, ConcurrentIOPoolManager readPool, ConcurrentIOPoolManager writePool,
    RecordKey key)
{
    if (intersectingLogs == null)
    {
        //** process conflicts with other trans...
        ProcessTransactionConflicts(logKey, area.Value);
        //** area is within an already backed up area (intersectingLogs == null), do nothing...
        return;
    }

    //** get area(s) outside each intersecting segment and back it up...
    var newRegion = new Region(area.Key, area.Value);
    bool wasIntersected = false;
    foreach (KeyValuePair<BackupDataLogKey, BackupDataLogValue> entry in intersectingLogs)
    {
        //** process conflicts with other trans...
        ProcessTransactionConflicts(entry.Key, entry.Value.DataSize);
        if (newRegion.Subtract(entry.Key.SourceDataAddress, entry.Value.DataSize))
        {
            wasIntersected = true;
        }
    }

    //** copy
    if (!wasIntersected)
    {
        return;
    }
    foreach (KeyValuePair<long, int> newArea in newRegion)
    {
        var logKey2 = new BackupDataLogKey();
        logKey2.SourceFilename = logKey.SourceFilename;
        logKey2.SourceDataAddress = newArea.Key;

        var logValue = new BackupDataLogValue();
        logValue.DataSize = newArea.Value;
        logValue.TransactionId = Id;

        int newSize = newArea.Value;
        key.Address = newArea.Key;
        //if (RegisterAdd(_addStore, null, null, key, newArea.Value, false))
        //    return;

        logValue.BackupFileHandle = GetLogBackupFileHandle(DataBackupFilename);
        ConcurrentIOData reader = f != null
            ? readPool.GetInstance(f, newArea.Value)
            : readPool.GetInstance(fFilename, null, newArea.Value);
        if (reader == null)
        {
            throw new InvalidOperationException("Can't get ConcurrentIOData from ReadPool");
        }
        string systemBackupFilename = Server.Path + DataBackupFilename;
        ConcurrentIOData writer = writePool.GetInstance(systemBackupFilename, ((TransactionRoot)Root), newArea.Value);
        if (writer == null)
        {
            throw new InvalidOperationException("Can't get ConcurrentIOData from WritePool");
        }

        logValue.BackupDataAddress = writer.FileStream.Seek(0, SeekOrigin.End);

        //** todo: can we remove this block?
        //long readerFileSize = reader.FileStream.Length;
        //if (newArea.Key + newArea.Value > readerFileSize)
        //{
        //    int appendSize = (int)(newArea.Key + newArea.Value - readerFileSize);
        //    key.Address = readerFileSize;
        //    RegisterAdd(_addStore, null, null, key, appendSize, false);
        //    newSize = (int)(readerFileSize - newArea.Key);
        //    logValue.DataSize = newSize;
        //    reader.Buffer = new byte[newSize];
        //}
        //**

        reader.FileStream.Seek(newArea.Key, SeekOrigin.Begin);
        UpdateLogger.LogLine("{0}{1}:{2} to {3}:{4} Size={5}",
            BackupFromToken, logKey2.SourceFilename, logKey2.SourceDataAddress,
            DataBackupFilename, logValue.BackupDataAddress, newSize);

        // resize target file to accommodate data to be copied.
        writer.FileStream.Seek(newSize, SeekOrigin.End);
        writer.FileStream.Seek(logValue.BackupDataAddress, SeekOrigin.Begin);

        reader.FileStream.BeginRead(
            reader.Buffer, 0, newSize, ReadCallback,
            new object[] { new[] { reader, writer }, true, logKey2 });

        //** save a record of the backed up data..
        LogCollection.Add(logKey2, logValue);
    }
}
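/// <summary>
/// Variant of the intersecting-area backup routine: it copies the modified blocks
/// of the requested region to the transaction backup file, keeping the
/// intersecting-segment subtraction in a commented-out region for possible future
/// use. For each block it rethrows any pending async I/O pool exception, grows the
/// backup file to reserve space, records the backup log entry, and starts the
/// asynchronous copy, deferring the log line until the data has been backed up.
/// </summary>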
private void BackupDataWithIntersection(
    IEnumerable<KeyValuePair<BackupDataLogKey, BackupDataLogValue>> intersectingLogs,
    BackupDataLogKey logKey, KeyValuePair<long, int> area, OnDisk.File.IFile f,
    string fFilename, ConcurrentIOPoolManager readPool, ConcurrentIOPoolManager writePool,
    RecordKey key)
{
    if (intersectingLogs == null)
    {
        // process conflicts with other trans...
        //ProcessTransactionConflicts(logKey, area.Value);
        // area is within an already backed up area (intersectingLogs == null), do nothing...
        return;
    }

    LogTracer.Verbose("BackupDataWithIntersection: Start for Thread {0}.", Thread.CurrentThread.ManagedThreadId);

    // get area(s) outside each intersecting segment and back it up...
    var newRegion = new Region(area.Key, area.Value);

    #region for future implements... ?
    //bool wasIntersected = false;
    //foreach (KeyValuePair<BackupDataLogKey, BackupDataLogValue> entry in intersectingLogs)
    //{
    //    // process conflicts with other trans...
    //    ProcessTransactionConflicts(entry.Key, entry.Value.DataSize);
    //    if (newRegion.Subtract(entry.Key.SourceDataAddress, entry.Value.DataSize))
    //        wasIntersected = true;
    //}
    //if (!wasIntersected) return;
    #endregion

    // copy modified blocks to the transaction backup file.
    foreach (KeyValuePair<long, int> newArea in newRegion)
    {
        if (readPool.AsyncThreadException != null)
        {
            throw readPool.AsyncThreadException;
        }
        if (writePool.AsyncThreadException != null)
        {
            throw writePool.AsyncThreadException;
        }

        var logKey2 = new BackupDataLogKey();
        logKey2.SourceFilename = logKey.SourceFilename;
        logKey2.SourceDataAddress = newArea.Key;

        var logValue = new BackupDataLogValue();
        logValue.DataSize = newArea.Value;
        logValue.TransactionId = Id;

        int newSize = newArea.Value;
        key.Address = newArea.Key;
        //if (RegisterAdd(_addBlocksStore, null, null, key, newArea.Value, false))
        //    return;

        logValue.BackupFileHandle = GetLogBackupFileHandle(DataBackupFilename);
        ConcurrentIOData reader = f != null
            ? readPool.GetInstance(f, newArea.Value)
            : readPool.GetInstance(fFilename, null, newArea.Value);
        if (reader == null)
        {
            throw new InvalidOperationException("Can't get ConcurrentIOData from ReadPool");
        }
        string systemBackupFilename = Server.Path + DataBackupFilename;
        ConcurrentIOData writer = writePool.GetInstance(systemBackupFilename, ((TransactionRoot)Root));
        if (writer == null)
        {
            throw new InvalidOperationException("Can't get ConcurrentIOData from WritePool");
        }

        // return the current backup file size and grow it to make room for data to be backed up...
        logValue.BackupDataAddress = GrowBackupFile(newSize, writer.FileStream);

        // save a record of the backed up data..
        LogCollection.Add(logKey2, logValue);

        // prepare lambda expression to log after data was backed up!!
        Sop.VoidFunc logBackedupData = () =>
        {
            UpdateLogger.LogLine("{0}{1}:{2} to {3}:{4} Size={5}",
                BackupFromToken, logKey2.SourceFilename, logKey2.SourceDataAddress,
                DataBackupFilename, logValue.BackupDataAddress, newSize);
        };

        writer.FileStream.Seek(logValue.BackupDataAddress, SeekOrigin.Begin, true);
        reader.FileStream.Seek(newArea.Key, SeekOrigin.Begin, true);
        reader.FileStream.BeginRead(
            reader.Buffer, 0, newSize, ReadCallback,
            new object[] { new[] { reader, writer }, true, logKey2, logBackedupData });
    }
}
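// Note: ReadCallback, which completes these asynchronous copies, is not part of this
// listing. The commented-out sketch below only illustrates how such a completion handler
// could consume the state array built above ({ [reader, writer], isBackup, logKey, logAction });
// it assumes the FileStream wrapper mirrors the standard .NET EndRead/Write calls and is
// not the library's actual implementation.
//private void ReadCallback(IAsyncResult result)
//{
//    var state = (object[])result.AsyncState;
//    var pair = (ConcurrentIOData[])state[0];                // [0] = source reader, [1] = backup writer
//    int bytesRead = pair[0].FileStream.EndRead(result);     // finish the pending read
//    pair[1].FileStream.Write(pair[0].Buffer, 0, bytesRead); // copy the block into the backup file
//    if (state.Length > 3 && state[3] is Sop.VoidFunc logBackedupData)
//        logBackedupData();                                   // log only after the data is safely backed up
//}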