/// <summary>
/// Move parent Collection's file pointer to 'Address'.
/// </summary>
/// <param name="parent">Collection whose FileStream and cached-entry state get repositioned.</param>
/// <param name="address">Target file offset; must be non-negative.</param>
/// <returns>true when the pointer was moved (or already there); false for a negative address.</returns>
public bool MoveTo(Algorithm.Collection.ICollectionOnDisk parent, long address)
{
    // A negative address is invalid — nothing to move.
    if (address < 0)
    {
        return false;
    }

    FileStream fs = parent.FileStream;
    if (fs.Position != address)
    {
        fs.Seek(address, SeekOrigin.Begin);
    }

    // Cache the downcast once instead of repeating it per member access.
    var collection = (CollectionOnDisk)parent;
    if (collection.CurrentEntryDataAddress != address)
    {
        // Invalidate cached block/entry so they are re-read from the new position.
        var cachedBlock = collection.currentDataBlock;
        if (cachedBlock != null && cachedBlock.DataAddress > -1)
        {
            collection.currentDataBlock = null;
        }
        collection.currentEntry = null;
        collection.CurrentEntryDataAddress = address;
    }
    return true;
}
/// <summary>
/// Reserves 'growthSize' bytes at the end of the backup file and returns the offset
/// where the reserved region begins (the previous logical size). The physical file is
/// grown in 4 MB chunks so that repeated small reservations do not resize it each time.
/// </summary>
/// <param name="growthSize">Number of bytes to reserve.</param>
/// <param name="writer">Stream used to physically resize the file; when null only the logical size advances.</param>
/// <returns>Offset of the start of the newly reserved region.</returns>
private long GrowBackupFile(long growthSize, FileStream writer = null)
{
    const int GrowthSizeChunk = 4 * 1024 * 1024;
    long newSize, r;
    // File resize across threads is not safe (can corrupt/overstep), so serialize
    // the logical + physical resize.
    // NOTE(review): lock(this) is discouraged (external code could lock the same
    // instance); kept as-is for compatibility with the rest of the class.
    lock (this)
    {
        r = BackupFileSize;
        BackupFileSize += growthSize;
        newSize = BackupFileSize;
        // Physical file is already large enough, or there is no writer to resize with.
        if (newSize < ActualBackupFileSize || writer == null)
        {
            return (r);
        }
        // Round the physical size up to growth chunks; add an extra chunk when the
        // remainder is zero or at least half a chunk (keeps headroom ahead of demand).
        // Use long arithmetic throughout: the original int chunkCount * GrowthSizeChunk
        // product overflowed Int32 once the file size passed 2 GB.
        long chunkCount = 1;
        if (newSize > GrowthSizeChunk)
        {
            chunkCount = newSize / GrowthSizeChunk;
            long remainder = newSize % GrowthSizeChunk;
            if (remainder == 0 || remainder >= GrowthSizeChunk / 2)
            {
                chunkCount++;
            }
        }
        ActualBackupFileSize += chunkCount * GrowthSizeChunk;
        // writer is guaranteed non-null here (early return above), so the original
        // redundant null re-check was removed.
        writer.SetLength(ActualBackupFileSize, true);
    }
    return (r);
}
/// <summary>
/// Replays every "UpdateLog*.txt" file under 'serverRootPath', restoring backed-up
/// data regions from the system transaction backup files to their original files,
/// then (optionally) deletes the processed logs and the backup files.
/// </summary>
/// <param name="serverRootPath">Directory containing the update logs and backup files.</param>
/// <param name="cleanup">When true, deletes each processed log and the referenced backup files.</param>
private static void ProcessUpdateLog(string serverRootPath, bool cleanup)
{
    /** UpdateLogxx.txt
     * Backup d:\Sopbin\Sop\File.dta:62976 to _SystemTransactionDataBackup1:386048 Size=2560
     * Backup d:\Sopbin\Sop\File.dta:66048 to _SystemTransactionDataBackup2:388608 Size=2560
     */
    string[] updateLogs = null;
    try
    {
        updateLogs = Directory.GetFiles(serverRootPath, string.Format("{0}*.txt", UpdateLogLiteral));
    }
    catch
    {
        // Directory unreadable/missing: best-effort recovery, nothing to replay.
        return;
    }
    // Set of backup filenames referenced by any log entry (value unused; dictionary used as a set).
    var backupFiles = new Dictionary <string, object>();
    var restoreLookup = new List <CopyParams>();
    foreach (string s in updateLogs)
    {
        restoreLookup.Clear();
        //** open the file and do restore for each backed up entry
        FileStream fs = null;
        try
        {
            fs = new FileStream(s, FileMode.Open, FileAccess.Read, FileShare.None);
        }
        catch
        {
            // Best-effort: skip logs we cannot open exclusively.
            if (fs != null)
            {
                try
                {
                    fs.Dispose();
                }
                catch { }
            }
            fs = null;
        }
        if (fs == null)
        {
            continue;
        }
        using (fs)
        {
            using (var sr = new StreamReader(fs.RealStream))
            {
                // Parse each "Backup <target>:<addr> to <backupFile>:<addr> Size=<n>" line.
                while (sr.Peek() >= 0)
                {
                    string l = sr.ReadLine();
                    if (l.StartsWith(BackupFromToken))
                    {
                        const string ToToken = " to ";
                        // i2 = start of " to <DataBackupFilenameLiteral>..." section.
                        int i2 = l.IndexOf(ToToken + DataBackupFilenameLiteral, BackupFromToken.Length);
                        // from = "<targetFilename>:<targetAddress>".
                        string from = l.Substring(BackupFromToken.Length, i2 - BackupFromToken.Length);
                        long fromAddress;
                        int d = from.LastIndexOf(':');
                        if (d > 0 && long.TryParse(from.Substring(d + 1), out fromAddress))
                        {
                            from = from.Substring(0, d);
                            // s2 = "<backupFilename>:<backupAddress> Size=<n>".
                            string s2 = l.Substring(i2 + ToToken.Length); // + DataBackupFilenameLiteral.Length + 1);
                            if (!string.IsNullOrEmpty(s2))
                            {
                                string[] toP = s2.Split(new char[] { ' ' });
                                if (toP.Length > 1)
                                {
                                    int indexOfSemi = toP[0].IndexOf(':', DataBackupFilenameLiteral.Length);
                                    string addressText = toP[0].Substring(indexOfSemi + 1);
                                    long toAddress;
                                    if (long.TryParse(addressText, out toAddress))
                                    {
                                        int dataSize;
                                        // toP[1] is "Size=<n>"; skip the 5-char "Size=" prefix.
                                        // Only restore when the target file still exists.
                                        if (toP[1].Length > 5 && int.TryParse(toP[1].Substring(5), out dataSize) &&
                                            Sop.Utility.Utility.FileExists(from))
                                        {
                                            string sourceFilename = toP[0].Substring(0, indexOfSemi);
                                            if (!backupFiles.ContainsKey(sourceFilename))
                                            {
                                                backupFiles.Add(sourceFilename, null);
                                            }
                                            // Copy direction is backup -> original (undo of the logged backup).
                                            var rs = new CopyParams();
                                            rs.DataSize = dataSize;
                                            rs.TargetAddress = fromAddress;
                                            rs.TargetFilename = from;
                                            rs.SourceAddress = toAddress;
                                            rs.SourceFilename = sourceFilename;
                                            restoreLookup.Add(rs);
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
        if (restoreLookup.Count > 0)
        {
            CopyData(serverRootPath, restoreLookup);
        }
        //** delete the Update log file after processing its contents... (we're done with it)
        if (cleanup)
        {
            File.Delete(s);
        }
    }
    //** delete the system transaction backup file
    if (cleanup && backupFiles.Count > 0)
    {
        foreach (string s in backupFiles.Keys)
        {
            File.Delete(string.Format("{0}\\{1}", serverRootPath, s));
        }
    }
}
/// <summary>
/// Reads the data block at 'dataAddress' into 'target', preferring (in order) the
/// transaction backup, the read-ahead buffer, then the file itself. When the block
/// heads a multi-block chain, the remaining members are pre-fetched into the
/// read-ahead buffer.
/// </summary>
/// <param name="parent">Collection that owns the FileStream and encoding.</param>
/// <param name="dataAddress">File offset of the block to read.</param>
/// <param name="getForRemoval">When true, the block's dirty flag is left untouched.</param>
/// <param name="readMetaInfoOnly">When true, only the block header (OverheadSize bytes) is read.</param>
/// <param name="target">Block instance to populate and return.</param>
/// <returns>The populated 'target' block.</returns>
private Sop.DataBlock ReadBlockFromDisk(Algorithm.Collection.ICollectionOnDisk parent, long dataAddress,
                                        bool getForRemoval, bool readMetaInfoOnly, Sop.DataBlock target)
{
    // Prefer the transaction backup copy if one exists for this address.
    byte[] backedupData = Transaction.Transaction.ReadBlockFromBackup(parent, dataAddress, getForRemoval,
                                                                      readMetaInfoOnly);
    Sop.DataBlock block = target;
    // Next preference: the read-ahead buffer.
    if (backedupData == null && !_readAheadBuffer.IsEmpty)
    {
        backedupData = _readAheadBuffer.Get(dataAddress, block.Length);
    }
    FileStream fileStream = parent.FileStream;
    if (backedupData == null && fileStream.Position != dataAddress)
    {
        fileStream.Seek(dataAddress, SeekOrigin.Begin);
    }
    if (backedupData != null || fileStream.Length > 0)
    {
        if (block.DataAddress < 0)
        {
            block.DataAddress = dataAddress;
        }
        BinaryReader br;
        if (backedupData == null)
        {
            // Fall back to reading the raw bytes straight from the file.
            if (fileStream.Position != block.DataAddress)
            {
                fileStream.Seek(block.DataAddress, SeekOrigin.Begin);
            }
            br = fileStream.CreateBinaryReader(parent.File.Server.Encoding);
            backedupData = new byte[readMetaInfoOnly ? Sop.DataBlock.OverheadSize : block.Length];
            int countRead = fileStream.Read(backedupData, 0, backedupData.Length);
            if (countRead < backedupData.Length)
            {
                // BUGFIX: the format string has two placeholders; the original passed only
                // one argument, so this path threw FormatException instead of SopException.
                throw new SopException(string.Format("Expected to read {0} bytes but read {1} bytes instead.",
                                                     backedupData.Length, countRead));
            }
            br.Close();
        }
        //** read data from byte array!
        using (var ms = new MemoryStream(backedupData))
        {
            using (br = new BinaryReader(ms, parent.File.Server.Encoding))
            {
                //**** read Block Header and Data to disk
                // Byte 0 to 7: Next Item Address (64 bit long int) = 0 (no next item)
                block.NextItemAddress = br.ReadInt64();
                // Byte 8 to 11: Size Occupied
                block.SizeOccupied = br.ReadInt32();
                // Byte 12 to 19: Low-level next block address
                block.InternalNextBlockAddress = br.ReadInt64();
                byte memberCount = br.ReadByte();
                // An all-zero header means an unlinked block: normalize links to -1.
                if (block.SizeOccupied == 0 && block.NextItemAddress <= 0 && block.InternalNextBlockAddress <= 0)
                {
                    block.InternalNextBlockAddress = block.NextItemAddress = -1;
                }
                if (!readMetaInfoOnly)
                {
                    if (block.SizeOccupied > 0)
                    {
                        Array.Copy(backedupData, br.BaseStream.Position, block.Data, 0, block.SizeOccupied);
                    }
                    if (!getForRemoval)
                    {
                        SetIsDirty(block, false);
                    }
                    // A non-zero member count marks the head block of a chain.
                    if (!block.IsHead && memberCount > 0)
                    {
                        block.IsHead = true;
                    }
                    // encache rest of blocks' data as a read ahead buffer...
                    if (memberCount > 1)
                    {
                        _readAheadBuffer.Read(fileStream, block.NextItemAddress, (memberCount - 1) * block.Length);
                    }
                }
            }
        }
    }
    return (block);
}
/// <summary>
/// Walks the block chain starting at 'dataAddress', collecting one
/// Sop.DataBlock.Info (address + member count) per chain segment. Only header
/// bytes are read; block payloads are skipped. Follows NextItemAddress links
/// until a -1 terminator.
/// </summary>
/// <param name="parent">Collection that owns the FileStream and encoding.</param>
/// <param name="dataAddress">File offset of the first block header.</param>
/// <returns>Segment infos in chain order; empty when nothing is on disk at the address.</returns>
public List <Sop.DataBlock.Info> ReadBlockInfoFromDisk(Algorithm.Collection.ICollectionOnDisk parent, long dataAddress)
{
    // todo: support reading from Transaction backup segments...
    //byte[] backedupData = Transaction.Transaction.ReadBlockFromBackup(parent,
    //                                                                  dataAddress, getForRemoval,
    //                                                                  readMetaInfoOnly);
    FileStream fileStream = parent.FileStream;
    if (fileStream.Position != dataAddress)
    {
        fileStream.Seek(dataAddress, SeekOrigin.Begin);
    }
    List <Sop.DataBlock.Info> r = new List <Sop.DataBlock.Info>();
    if (fileStream.Length > 0)
    {
        var metaData = new byte[Sop.DataBlock.OverheadSize];
        var countRead = fileStream.Read(metaData, 0, metaData.Length);
        if (countRead < metaData.Length)
        {
            // check if data is not on disk, just return empty array to mean data not found...
            if (countRead == 0)
            {
                return (r);
            }
            throw new SopException(string.Format("Expected to read {0} bytes but read {1} bytes instead.",
                                                 metaData.Length, countRead));
        }
        var da = dataAddress;
        while (true)
        {
            using (var ms = new MemoryStream(metaData))
            {
                using (var br = new BinaryReader(ms, parent.File.Server.Encoding))
                {
                    Sop.DataBlock.Info blockInfo = new Sop.DataBlock.Info()
                    {
                        Address = da
                    };
                    // Byte 0 to 7: Next Item Address (64 bit long int) = 0 (no next item)
                    var nextItemAddress = br.ReadInt64();
                    // Byte 8 to 11: Size Occupied (value unused here; read advances the reader).
                    var sizeOccupied = br.ReadInt32();
                    // Byte 12 to 19: Low-level next block address (unused here; read advances the reader).
                    var internalNextBlockAddress = br.ReadInt64();
                    // NOTE(review): member count is read as UInt16 here but ReadBlockFromDisk
                    // reads it as a single byte — confirm the on-disk header width.
                    blockInfo.BlockCount = br.ReadUInt16();
                    r.Add(blockInfo);
                    // read next block segment info...
                    if (blockInfo.BlockCount > 1)
                    {
                        // Seek to the last member block of this segment and read its link
                        // to find where the next segment starts.
                        // (Removed an unused local that duplicated this seek offset.)
                        fileStream.Seek(blockInfo.Address + (blockInfo.BlockCount - 1) * (int)parent.DataBlockSize,
                                        SeekOrigin.Begin);
                        countRead = fileStream.Read(metaData, 0, sizeof(long));
                        if (countRead < sizeof(long))
                        {
                            throw new SopException(string.Format("Expected to read {0} bytes but read {1} bytes instead.",
                                                                 sizeof(long), countRead));
                        }
                        using (var ms2 = new MemoryStream(metaData))
                        {
                            using (var br2 = new BinaryReader(ms2, parent.File.Server.Encoding))
                            {
                                nextItemAddress = br2.ReadInt64();
                            }
                        }
                    }
                    if (nextItemAddress != -1)
                    {
                        // Position at the next segment; its header is read below.
                        fileStream.Seek(nextItemAddress, SeekOrigin.Begin);
                        da = fileStream.Position;
                    }
                    else
                    {
                        break;
                    }
                }
            }
            // Read the next segment's header for the following iteration.
            countRead = fileStream.Read(metaData, 0, metaData.Length);
            if (countRead < metaData.Length)
            {
                throw new SopException(string.Format("Expected to read {0} bytes but read {1} bytes instead.",
                                                     metaData.Length, countRead));
            }
        }
    }
    return (r);
}