// Splits a big file (size > maxChunkSize) into several chunks, each holding one
// cloned entry that references a distinct slice [FileStartPos, FileStartPos + maxChunkSize)
// of the original file. Returns the chunks in slice order.
private List <BChunk> GetBigFileChunks(IFSEntry bigFile, long filePosInChunk)
{
    List <BChunk> chunks = new List <BChunk>();
    long offset = 0;
    long bytesLeft = bigFile.FileSize;
    while (bytesLeft > 0)
    {
        chunkOrder++;
        // Each slice gets its own clone so per-chunk positions don't clash.
        IFSEntry slice = bigFile.Clone();
        slice.ChunkStartPos = 0;
        slice.FileStartPos = offset;
        BChunk chunk = new BChunk(this.TaskId);
        chunk.Order = chunkOrder;
        chunk.RootDriveId = this.backupRootDrive.ID;
        chunk.Add(slice);
        chunks.Add(chunk);
        // Advance by a full chunk, or by whatever remains for the last slice.
        offset += (bytesLeft > maxChunkSize) ? maxChunkSize : bytesLeft;
        bytesLeft = bigFile.FileSize - offset;
        //if(remaining <0)
        Logger.Append(Severity.TRIVIA, "GetNextChunk() : splitted file " + slice.SnapFullPath + " (size " + slice.FileSize + ") , remaining=" + bytesLeft + " to chunk - " + chunk.Name + " starting @ offset " + slice.FileStartPos);
    }
    return(chunks);
}
/// <summary>
/// Lazily streams every item stored in the index database: reads each gzipped
/// serialized blob from the 'items' table, inflates it and deserializes an IFSEntry.
/// </summary>
/// <returns>An enumerable of deserialized IFSEntry items (rows with empty blobs are skipped).</returns>
internal IEnumerable <IFSEntry> GetItemsEnumerator()
{
    string query = "SELECT i.data FROM items i";
    // BUGFIX: command and reader were never disposed, leaking DB resources for
    // every enumeration; both are now wrapped in using blocks.
    using (System.Data.IDbCommand itemC = indexDbConn.CreateCommand())
    {
        itemC.CommandText = query;
        using (System.Data.IDataReader reader = itemC.ExecuteReader())
        {
            while (reader.Read())
            {
                // Calling GetBytes with a null buffer returns the total blob length.
                int dataSize = (int)reader.GetBytes(0, 0, null, 0, 0);
                if (dataSize == 0)
                {
                    continue;
                }
                byte[] buffer = new byte[dataSize];
                int bytesRead = 0;
                while (bytesRead < dataSize)
                {
                    // BUGFIX: the old loop did 'bytesRead += GetBytes(..., offset, dataSize)'
                    // then 'offset += bytesRead', advancing the position twice and always
                    // requesting the full size — overrunning the buffer on any partial read.
                    // Read from the current position and ask only for the remaining bytes.
                    long n = reader.GetBytes(0, bytesRead, buffer, bytesRead, dataSize - bytesRead);
                    if (n <= 0)
                    {
                        break; // defensive: avoid an infinite loop on a truncated blob
                    }
                    bytesRead += (int)n;
                }
                using (dataMs = new MemoryStream())
                {
                    dataMs.Write(buffer, 0, bytesRead);
                    dataMs.Position = 0;
                    // leaveOpen:true so disposing the GZipStream doesn't close dataMs
                    // before the enclosing using block does.
                    using (gz = new System.IO.Compression.GZipStream(dataMs, System.IO.Compression.CompressionMode.Decompress, true))
                    {
                        IFSEntry item = (IFSEntry)dataFormatter.Deserialize(gz);
                        yield return(item);
                    }
                }
                // NOTE: the old trailing dataMs.Close() after the loop was redundant —
                // the using block above already disposed the stream.
            }
        }
    }
}
/// <summary>
/// Gets the next incremental/diff entry to backup. Compares a fresh full enumeration
/// of the filesystem against the in-memory reference index (refEntries), yielding each
/// entry tagged with its change status, then yields the entries that disappeared.
/// </summary>
/// <returns>
/// An IFSEntry per filesystem item, with ChangeStatus set (New / HasChanges /
/// MetadaOnly), followed by one Deleted placeholder per vanished reference entry.
/// </returns>
/// <param name='path'>
/// Path.
/// </param>
/// <param name='snapshottedPath'>
/// Snapshotted path.
/// </param>
public IEnumerable <IFSEntry> GetNextEntry(BasePath path, string snapshottedPath)
{
    // To efficiently reuse existing code, we instanciate
    // a "phantom" BackupRootDriveHandler an use its "GetFull()" FS items provider as an items source to check
    // for modifications.
    // Each entry already existing in the ref backup index is marked as 'found' (Tuple.Item2 = true)
    // to allow to detect not found items (deleted) after collecting curent FS items.
    phantomBrd = new BackupRootDriveHandler(rootDrive, this.taskId, 0, 0, 0, BackupLevel.Full, 0, 0, 0);
    phantomBrd.SetCurrentPath(path);
    phantomBrd.SubCompletionEvent += new BackupRootDriveHandler.SubCompletionHandler(BubbleUpSubCompletion);
    foreach (IFSEntry entry in phantomBrd.GetFull(true))
    {
        // New File (inode/id > last refMaxId known inode/id) or doesn't exist in ref backup
        if (entry.ID > refMaxId || !refEntries.ContainsKey(entry.ID)) //todo : also check CreationTime?
        {
            Console.WriteLine("Incremental GetFilesToBackup() added NEW entry : " + entry.SnapFullPath);
            entry.ChangeStatus = DataLayoutInfos.New;
            yield return(entry);
            continue;
        }
        // Data changed since the reference backup (Item1 = reference LastModifiedTime).
        else if (entry.LastModifiedTime > refEntries[entry.ID].Item1)
        {
            entry.ChangeStatus = DataLayoutInfos.HasChanges;
            refEntries[entry.ID].Item2 = true; // mark as found in current FS
            yield return(entry);
            continue;
        }
        // Only metadata changed since the reference backup started.
        else if (entry.LastMetadataModifiedTime > refBackupTimeStart)
        {
            entry.ChangeStatus = DataLayoutInfos.MetadaOnly;
            refEntries[entry.ID].Item2 = true; // mark as found in current FS
            yield return(entry);
            continue;
        }
        else
        {
            // Unchanged: just mark as found so it isn't reported as deleted below.
            refEntries[entry.ID].Item2 = true;
        }
    } // end foreach phantomBrd
    // last step : return deleted entries — any reference entry never marked as found
    // is gone from the filesystem; emit an empty placeholder tagged Deleted.
    foreach (var item in refEntries)
    {
        if (!item.Value.Item2)
        {
            IFSEntry deleted = prov.GetEmptyItem();
            deleted.ID = item.Key;
            deleted.ChangeStatus = DataLayoutInfos.Deleted;
            yield return(deleted);
        }
    }
    refEntries = null; // release the reference map; enumeration is complete
    yield break;
}
// Processes the data payload of the next archive entry from the pending byte buffer.
// Layout per entry: 4-byte length prefix followed by the (possibly compressed) payload.
// Returns true when a complete entry was consumed, false when more bytes are needed.
private bool DealEntryDatas()
{
    if (_waitToDealBytes.Count > 4 && (_dealEntryDataIndex < _ifsFile.EntryCount))
    {
        // Decode the 4-byte payload length.
        _dealOffset = 0;
        IFSEntry entry = _ifsFile.Entrys[_dealEntryDataIndex];
        byte[] lengthBytes = _waitToDealBytes.GetRange(0, 4).ToArray();
        int dataSize = IFSArchiver.ConvertBytesToInt(lengthBytes, ref _dealOffset);
        entry.DataSize = dataSize;
        if (_waitToDealBytes.Count < (4 + dataSize))
        {
            return(false); // payload not fully buffered yet
        }
        string outPath = JW.Res.FileUtil.CombinePaths(_unarchiveDir, entry.Name);
        string directory = Path.GetDirectoryName(outPath);
        if (!Directory.Exists(directory))
        {
            Directory.CreateDirectory(directory);
        }
        // Extract the payload bytes (skipping the 4-byte length prefix).
        byte[] payload = _waitToDealBytes.GetRange(4, dataSize).ToArray();
        if (_ifsFile.CompressType == IFSCompressType.None)
        {
            // Uncompressed: write the payload straight to the output file.
            FileStream output = new FileStream(outPath, FileMode.Create);
            try
            {
                output.Write(payload, 0, dataSize);
                output.Flush();
            }
            finally
            {
                output.Close();
                output.Dispose();
                output = null;
            }
        }
        else if (_ifsFile.CompressType == IFSCompressType.LZMA)
        {
            // LZMA-compressed payload: decompress directly to the output file.
            UnLzmaDataToFile(outPath, payload, dataSize);
        }
        _dealEntryDataIndex++;
        // Drop the consumed prefix + payload from the pending buffer.
        _waitToDealBytes.RemoveRange(0, dataSize + 4);
        return(true);
    }
    return(false);
}
// Decodes the entry data-position table (EntryCount * 4 bytes, one int per entry)
// from the pending byte buffer. Returns true once the whole table was consumed,
// false when not enough bytes are buffered yet.
private bool DealEntryDataPos()
{
    int tableLength = _ifsFile.EntryCount * 4;
    if (_waitToDealBytes.Count <= tableLength)
    {
        return(false); // position table not fully buffered yet
    }
    byte[] table = _waitToDealBytes.GetRange(0, tableLength).ToArray();
    _dealOffset = 0;
    for (int i = 0; i < _ifsFile.EntryCount; i++)
    {
        _ifsFile.Entrys[i].DataPos = IFSArchiver.ConvertBytesToInt(table, ref _dealOffset);
    }
    _waitToDealBytes.RemoveRange(0, tableLength);
    table = null;
    return(true);
}
// Decodes the entry-names section (_entryNameLength bytes) from the pending byte
// buffer, assigning one name per entry. Returns true once the section was consumed,
// false when not enough bytes are buffered yet.
private bool DealEntryNames()
{
    if (_waitToDealBytes.Count <= _entryNameLength)
    {
        return(false); // name section not fully buffered yet
    }
    byte[] nameBytes = _waitToDealBytes.GetRange(0, _entryNameLength).ToArray();
    _dealOffset = 0;
    // Read one name per entry; ConvertBytesToString advances _dealOffset.
    for (int i = 0; i < _ifsFile.EntryCount; i++)
    {
        IFSEntry entry = _ifsFile.Entrys[i];
        entry.Name = IFSArchiver.ConvertBytesToString(nameBytes, ref _dealOffset);
        JW.Common.Log.LogD("Get Entry Name:" + entry.Name);
    }
    _waitToDealBytes.RemoveRange(0, _entryNameLength);
    nameBytes = null;
    return(true);
}
// Looks up an entry, by its original full path, among the chunks of the task being
// merged. The chunk list is loaded lazily from the task index on first call.
// Returns the matching entry, or null when no chunk contains the path.
private IFSEntry SearchItemInActualIndex(IFSEntry entry)
{
    if (actualTaskChunks == null)
    {
        actualTaskChunks = taskIndex.ReadAllChunks();
    }
    string wantedPath = entry.OriginalFullPath;
    foreach (BChunk taskChunk in actualTaskChunks)
    {
        foreach (IFSEntry candidate in taskChunk.Files)
        {
            if (candidate.OriginalFullPath == wantedPath)
            {
                return(candidate);
            }
        }
    }
    return(null);
}
/// <summary>
/// Gets the next incremental/diff entry to backup. To efficiently reuse existing code, we instanciate
/// a "phantom" BackupRootDriveHandler an use its "GetFull()" FS items provider as an items source to check
/// for modifications. Walks the live filesystem and the reference index enumerator in
/// lock-step, yielding every entry with a ChangeStatus, then reports watched-but-unseen
/// reference ids as Deleted.
/// </summary>
/// <returns>
/// An IFSEntry per filesystem item (New entries have no status set — TODO confirm),
/// followed by Deleted placeholders for ids left in idsToWatch.
/// </returns>
/// <param name='path'>
/// Path.
/// </param>
/// <param name='snapshottedPath'>
/// Snapshotted path.
/// </param>
public IEnumerable <IFSEntry> GetNextEntry(BasePath path, string snapshottedPath)
{
    idsToWatch = new List <long>();
    fsProv = FSEnumeratorProvider.GetFSEnumeratorProvider().GetFSEnumerator(snapshottedPath);
    phantomBrd = new BackupRootDriveHandler(rootDrive, this.taskId, 0, 0, 0, BackupLevel.Full, 0, 0, 0);
    phantomBrd.SetCurrentPath(path);
    phantomBrd.SubCompletionEvent += new BackupRootDriveHandler.SubCompletionHandler(BubbleUpSubCompletion);
    // moveRefEnumerator == false means the current ref entry must be re-compared
    // against the next FS entry (the FS side got ahead by one new/unmatched item).
    bool moveRefEnumerator = true;
    IFSEntry refEntry = null;
    IFSEntry realRefEntry = null;
    foreach (IFSEntry entry in phantomBrd.GetFull(true))
    {
        Console.WriteLine("PhantomBrd current entry : " + entry.Name);
        // New File (inode/id > last refMaxId known inode/id)
        if (entry.ID > refMaxId /*|| entry.CreateTime > refBackupTimeStart*/)
        {
            Console.WriteLine("Incremental GetFilesToBackup() added NEW entry : " + entry.SnapFullPath);
            yield return(entry);
            continue;
        }
        //try{ //TOREMOVE
        // Advance the reference-index enumerator to stay aligned with the FS walk.
        if (moveRefEnumerator || refEntry == null)
        {
            if (idxEnumerator.MoveNext()) // try to position to the same entry, previously backuped
            {
                refEntry = idxEnumerator.Current;
                Console.WriteLine("Ref entry : " + refEntry.Name);
            }
            else
            {
                // NOTE(review): if MoveNext() fails on the very first iteration, refEntry
                // is still null and the line below throws — confirm this cannot happen.
                Console.WriteLine("CANNOT MoveNext() -- last entry is " + refEntry.Name);
            }
        }
        else
        {
            moveRefEnumerator = true;
            Console.WriteLine("moveRef = false");
        }
        /*} catch(Exception e){ // TOREMOVE — disabled catch kept out; see VCS history */
        // FS entry and ref entry ids diverge: figure out whether the FS entry is new,
        // moved, or reusing a freed inode/id.
        if (refEntry.ID != entry.ID)
        {
            Console.WriteLine("Incremental cur entry " + entry.ToString() + " DOESN'T_MATCH ref = " + refEntry.ToString());
            long refItemPos = 0;
            // new file potentially reusing an inode/ID number < ref max ID
            if (entry.CreateTime > refBackupTimeStart || entry.LastModifiedTime > refBackupTimeStart)
            {
                moveRefEnumerator = false; // ref side must wait for the next FS entry
                yield return(entry);
                continue;
            }
            //other cases : File RenamedOrMovedItem RenamedOrMovedItem deleted
            realRefEntry = refIndex.SearchItem(entry, rootDrive.SystemDrive.MountPoint, out refItemPos);
            if (realRefEntry == null)
            {
                // Not found in the reference index at all: treat as new.
                moveRefEnumerator = false;
                yield return(entry);
                continue;
            }
            // check if entry has been moved from outside to the current dir
            else
            {
                refEntry = realRefEntry;
                if (refEntry.ParentID != entry.ParentID) // moved
                {
                    moveRefEnumerator = false;
                    //entry.ChangeStatus =
                    //continue;
                }
            }
        }
        //refEntry = srch;
        // Entry already existed under the same id. if it also has the same name, continue checks.
        // else, it may have been (1)renamed, or (2)deleted + inode reused for new file.
        // (1) it it safe to assume it as only renamed if lastmetadata has changed but data hasn't.
        // (2) if oldname!=newname, and lastwritetime has change, we cannot decide what happened. Consider
        // it as a new entry, for safety.
        // (large block of commented-out rename-detection experiments trimmed; see VCS history)
        // Existing entry with modified data since ref backup
        // lastmod < ref lastmod : rename(?). lastmod < ref lastmod : entry data modified
        if (entry.LastModifiedTime != /*refBackupTimeStart*/ refEntry.LastModifiedTime || entry.FileSize != refEntry.FileSize)
        {
            Console.WriteLine("Incremental GetFilesToBackup() added MODIFIED (LastModifiedTime) entry : " + entry.SnapFullPath + ", entry.LastModifiedTime=" + entry.LastModifiedTime + ",refEntry.LastModifiedTime=" + refEntry.LastModifiedTime);
            entry.ChangeStatus = DataLayoutInfos.HasChanges;
            yield return(entry);
            continue;
        }
        else if (entry.LastMetadataModifiedTime > refEntry.LastMetadataModifiedTime)
        {
            entry.ChangeStatus = DataLayoutInfos.MetadaOnly;
            // moved entry
            //if(srch.ParentID != entry.ParentID || srch.Name != entry.Name){
            //entry.BlockMetadata.BlockMetadata.Add(new RenamedOrMovedItem());
            //entry.ChangeStatus = DataLayoutInfos.MetadaOnly; // .RenameOnly;
            //}
            //else
            //  entry.BlockMetadata.BlockMetadata.Add(new UnchangedDataItem());
            Console.WriteLine("Incremental GetFilesToBackup() added METADATACHANGE (LastMetadataModifiedTime) entry : " + entry.SnapFullPath);
            yield return(entry);
            continue;
        }
        // if we get there, entry hasn't changed.
        // But if we are asked to perform a full refresh, return it anyway, with appropriate ChangeFlag
        entry.ChangeStatus = DataLayoutInfos.NoChange;
        yield return(entry);
    } // end foreach phantomBrd
    Console.WriteLine("FileCompareProvider GetNextEntry : end foreach");
    // Fs has been enumerated. Now check if some ref index IDs remain unfound (== present inside idsToWatch)
    // If so, either they have been deleted, or they have been moved out of scope (out of FS backup root directory)
    // Anyways, we tag them as 'deleted', as, from the backup's root folder point of view, they are not part of
    // the backup anymore.
    if (idsToWatch.Count > 0)
    {
        Logger.Append(Severity.DEBUG, idsToWatch.Count + " entries seem to have been deleted.");
    }
    foreach (long id in idsToWatch)
    {
        long useless = 0;
        IFSEntry deleted = refIndex.SearchItem(id, rootDrive.SystemDrive.MountPoint, out useless);
        if (deleted != null)
        {
            Console.WriteLine("\t entry " + id + " has been deleted, was " + deleted.Name);
            deleted.ChangeStatus = DataLayoutInfos.Deleted;
            yield return(deleted);
        }
        else
        {
            Console.WriteLine("\t entry " + id + " has NOT BEEN DELETED - ERROR!!!");
        }
    }
    yield break;
}
/// <summary>
/// Gets the next incremental/diff entry to backup. To efficiently reuse existing code, we instanciate
/// a "phantom" BackupRootDriveHandler an use its "GetFull()" FS items provider as an items source to check
/// for modifications. Variant with a guarded (TOREMOVE) try/catch around the reference
/// enumerator advance and an isFullRefreshBackup gate on unchanged entries.
/// </summary>
/// <returns>
/// An IFSEntry per changed filesystem item (plus unchanged ones when a full refresh is
/// requested), followed by Deleted placeholders for ids left in idsToWatch.
/// </returns>
/// <param name='path'>
/// Path.
/// </param>
/// <param name='snapshottedPath'>
/// Snapshotted path.
/// </param>
public IEnumerable <IFSEntry> GetNextEntry(BasePath path, string snapshottedPath)
{
    idsToWatch = new List <long>();
    fsProv = FSEnumeratorProvider.GetFSEnumeratorProvider().GetFSEnumerator(snapshottedPath);
    phantomBrd = new BackupRootDriveHandler(rootDrive, this.taskId, 0, 0, 0, BackupLevel.Full, 0, 0, 0);
    phantomBrd.SetCurrentPath(path);
    phantomBrd.SubCompletionEvent += new BackupRootDriveHandler.SubCompletionHandler(BubbleUpSubCompletion);
    // moveRefEnumerator == false means the current ref entry must be re-compared
    // against the next FS entry (the FS side got ahead by one new/unmatched item).
    bool moveRefEnumerator = true;
    IFSEntry refEntry = null;
    IFSEntry realRefEntry = null;
    foreach (IFSEntry entry in phantomBrd.GetFull(true))
    {
        Console.WriteLine("PhantomBrd current entry : " + entry.Name);
        // New File (inode/id > last refMaxId known inode/id)
        if (entry.ID > refMaxId /*|| entry.CreateTime > refBackupTimeStart*/)
        {
            Console.WriteLine("Incremental GetFilesToBackup() added NEW entry : " + entry.SnapFullPath);
            yield return(entry);
            continue;
        }
        try{ //TOREMOVE
            // Advance the reference-index enumerator to stay aligned with the FS walk.
            if (moveRefEnumerator || refEntry == null)
            {
                if (idxEnumerator.MoveNext()) // try to position to the same entry, previously backuped
                {
                    refEntry = idxEnumerator.Current;
                    Console.WriteLine("Ref entry : " + refEntry.Name);
                }
                else
                {
                    // NOTE(review): refEntry may still be null here on the first iteration;
                    // the resulting NullReferenceException is swallowed by the catch below.
                    Console.WriteLine("CANNOT MoveNext() -- last entry is " + refEntry.Name);
                }
            }
            else
            {
                moveRefEnumerator = true;
                Console.WriteLine("moveRef = false");
            }
        }
        catch (Exception e) { // TOREMOVE
            Console.WriteLine("Incremental GetFilesToBackup() ERROR : " + e.ToString());
        }
        // FS entry and ref entry ids diverge: figure out whether the FS entry is new,
        // moved, or reusing a freed inode/id.
        if (refEntry.ID != entry.ID)
        {
            Console.WriteLine("Incremental cur entry " + entry.ToString() + " DOESN'T_MATCH ref = " + refEntry.ToString());
            long refItemPos = 0;
            // new file potentially reusing an inode/ID number < ref max ID
            if (entry.CreateTime > refBackupTimeStart || entry.LastModifiedTime > refBackupTimeStart)
            {
                moveRefEnumerator = false; // ref side must wait for the next FS entry
                yield return(entry);
                continue;
            }
            //other cases : File RenamedOrMovedItem RenamedOrMovedItem deleted
            realRefEntry = refIndex.SearchItem(entry, rootDrive.SystemDrive.MountPoint, out refItemPos);
            if (realRefEntry == null)
            {
                // Not found in the reference index at all: treat as new.
                moveRefEnumerator = false;
                yield return(entry);
                continue;
            }
            // check if entry has been moved from outside to the current dir
            else
            {
                refEntry = realRefEntry;
                if (refEntry.ParentID != entry.ParentID) // moved
                {
                    moveRefEnumerator = false;
                    //entry.ChangeStatus =
                    //continue;
                }
            }
            // (large blocks of commented-out reuse/deleted/offset-search experiments
            // trimmed; see VCS history)
        }
        //refEntry = srch;
        // Entry already existed under the same id. if it also has the same name, continue checks.
        // else, it may have been (1)renamed, or (2)deleted + inode reused for new file.
        // (1) it it safe to assume it as only renamed if lastmetadata has changed but data hasn't.
        // (2) if oldname!=newname, and lastwritetime has change, we cannot decide what happened. Consider
        // it as a new entry, for safety.
        // Existing entry with modified data since ref backup
        // lastmod < ref lastmod : rename(?). lastmod < ref lastmod : entry data modified
        if (entry.LastModifiedTime != /*refBackupTimeStart*/ refEntry.LastModifiedTime || entry.LastMetadataModifiedTime != refEntry.LastMetadataModifiedTime)
        {
            Console.WriteLine("Incremental GetFilesToBackup() added MODIFIED (LastModifiedTime) entry : " + entry.SnapFullPath + ", entry.LastModifiedTime=" + entry.LastModifiedTime + ",refEntry.LastModifiedTime=" + refEntry.LastModifiedTime);
            entry.ChangeStatus = DataLayoutInfos.HasChanges;
            yield return(entry);
            continue;
        }
        // (commented-out metadata-only branch trimmed; see VCS history)
        // if we get there, entry hasn't changed.
        // If we get there, FS entry didn't change since last backup.
        // But if we are asked to perform a full refresh, return it anyway, with appropriate ChangeFlag
        if (this.isFullRefreshBackup)
        {
            entry.ChangeStatus = DataLayoutInfos.NoChange;
            yield return(entry);
        }
    } // end foreach phantomBrd
    Console.WriteLine("FileCompareProvider GetNextEntry : end foreach");
    // Fs has been enumerated. Now check if some ref index IDs remain unfound (== present inside idsToWatch)
    // If so, either they have been deleted, or they have been moved out of scope (out of FS backup root directory)
    // Anyways, we tag them as 'deleted', as, from the backup's root folder point of view, they are not part of
    // the backup anymore.
    if (idsToWatch.Count > 0)
    {
        Logger.Append(Severity.DEBUG, idsToWatch.Count + " entries seem to have been deleted.");
    }
    foreach (long id in idsToWatch)
    {
        long useless = 0;
        IFSEntry deleted = refIndex.SearchItem(id, rootDrive.SystemDrive.MountPoint, out useless);
        if (deleted != null)
        {
            Console.WriteLine("\t entry " + id + " has been deleted, was " + deleted.Name);
            deleted.ChangeStatus = DataLayoutInfos.Deleted;
            yield return(deleted);
        }
        else
        {
            Console.WriteLine("\t entry " + id + " has NOT BEEN DELETED - ERROR!!!");
        }
    }
    yield break;
}
/// <summary>
/// Incremental enumeration: recursively walks the snapshotted filesystem, compares each
/// entry against the reference backup index, and yields entries considered new or
/// modified. Directories are yielded before being recursed into; excluded paths are
/// skipped and removed from the exclusion list once matched.
/// </summary>
/// <param name='path'>Backup base path holding the exclusion list.</param>
/// <param name='snapshottedPath'>Snapshotted path to enumerate (reassigned during directory recursion).</param>
/// <returns>IFSEntry items to back up.</returns>
public IEnumerable <IFSEntry> GetNextEntry(BasePath path, string snapshottedPath)
{
    fsProv = FSEnumeratorProvider.GetFSEnumeratorProvider().GetFSEnumerator(snapshottedPath);
    bool exclude = false;
    foreach (var item in fsProv)
    {
        IFSEntry entry = null;
        IFSEntry refEntry = null;
        try{
            entry = prov.GetItem(item);
        }
        catch (Exception e) { // permission errors, deleted file...
            // BUGFIX: 'entry' is always null when GetItem() throws, so the old message
            // built from entry.SnapFullPath raised a NullReferenceException inside this
            // handler. Log the raw enumerator item instead.
            Logger.Append(Severity.ERROR, "Could not add element '" + item + "' to backup : " + e.Message);
            Logger.Append(Severity.INFO, "TODO : report exception to hub for task logentry");
            continue;
        }
        // new File (inode/id > last refMaxId known inode/id)
        if (entry.ID > refMaxId || entry.CreateTime > refBackupTimeStart)
        {
            Console.WriteLine("Incremental GetFilesToBackup() added NEW entry : " + entry.SnapFullPath);
            yield return(entry);
            continue;
        }
        // Diagnostic pass: try to line the ref-index enumerator up with this entry.
        try{
            if (idxEnumerator.MoveNext()) // try to position to the same entry, previously backuped
            {
                refEntry = idxEnumerator.Current;
            }
            // NOTE(review): if MoveNext() returned false, refEntry stays null and the
            // dereference below throws; the exception is swallowed by the catch block.
            if (refEntry.ID == entry.ID)
            {
                Console.WriteLine("Incremental GetFilesToBackup() entry " + entry.SnapFullPath + " MATCHED in ref backup");
            }
            else
            {
                Console.WriteLine("Incremental GetFilesToBackup() entry " + entry.SnapFullPath + "(" + entry.ID + ") DOES NOT MATCH, got " + refEntry.Name + " (" + refEntry.ID + ")");
                IFSEntry srch;
                if ((srch = refIndex.SearchItem(entry, rootDrive.systemDrive.MountPoint)) != null)
                {
                    Console.WriteLine("\t found matching ID, ref name=" + srch.Name);
                }
                else
                {
                    Console.WriteLine("\t NOT found matching ID, ref name=" + entry.Name);
                }
            }
        }
        catch (Exception e) {
            Console.WriteLine(" *** ERROR : " + e.ToString());
        }
        // entry modified (or created using a used-and-freed id)
        if (entry.LastModifiedTime >= refBackupTimeStart)
        {
            Console.WriteLine("Incremental GetFilesToBackup() added MODIFIED (LastModifiedTime) entry : " + entry.SnapFullPath);
            yield return(entry);
        }
        else if (entry.LastMetadataModifiedTime >= refBackupTimeStart)
        {
            // Metadata-only change. Rename detection is best-effort: on *nix a rename
            // keeps the inode but changes ctime/name (not mtime); a deleted file whose
            // inode was reused for a new one is indistinguishable from rename+modify.
            // On NTFS the unique file ID is hard to obtain from directory enumeration.
            Console.WriteLine("Incremental GetFilesToBackup() : item " + entry.SnapFullPath + " has undergone METADATA (LastMetadataModifiedTime) change");
        }
        if (entry.Kind == FileType.Directory)
        {
            // Skip (and consume) any matching exclusion; iterate backwards so
            // RemoveAt() doesn't shift the unvisited items.
            for (int i = path.ExcludedPaths.Count - 1; i >= 0; i--)
            {
                if (entry.SnapFullPath.IndexOf(path.ExcludedPaths[i]) == 0)
                {
                    Logger.Append(Severity.INFO, "Ignoring path " + entry.SnapFullPath);
                    path.ExcludedPaths.RemoveAt(i);
                    exclude = true;
                    break;
                }
            }
            depth++;
            if (depth == 1)
            {
                // Report sub-completion for top-level directories only.
                SubCompletionEvent(entry.SnapshottedFullPath.Replace(snapshottedPath, currentPath.Path));
            }
            if (!exclude)
            {
                // recurse using found directory as basepath
                snapshottedPath = entry.SnapFullPath;
                yield return(entry); // return top-dir before entering and browse it
                foreach (IFSEntry e in GetNextEntry(path, snapshottedPath))
                {
                    yield return(e);
                }
            }
            depth--;
        }
        exclude = false;
    }
}
// Builds a "synthetic full" index for <task> by merging the reference index
// (previous full/synthetic backup) with the chunks of the just-finished task:
// reference entries superseded by a newer copy are dropped, the surviving
// reference chunks are kept, and the new task's chunks are appended last.
internal void CreateSyntheticFullIndex(long referenceTask, long task)
{
    BackupIndex refIndex = null;
    BackupIndex mergeIndex = null;
    try{
        refIndex = new BackupIndex();
        mergeIndex = new BackupIndex(task);
        refIndex.OpenByTaskId(referenceTask);
        Logger.Append(Severity.DEBUG, "Opened reference index " + referenceTask + "...");
        taskIndex.OpenByTaskId(task);
        Console.WriteLine("CreateSyntheticFullIndex() : opened indexes");
        // Sanity check: the opened reference index must describe the expected task.
        if (refIndex.Header.TaskId != referenceTask)
        {
            Logger.Append(Severity.ERROR, "Reference index doesn't handle expected task (wanted " + referenceTask + ", got " + refIndex.Header.TaskId);
            return;
        }
        // synthetic index will have the header of the just-ended task
        mergeIndex.Header = taskIndex.Header;
        mergeIndex.WriteHeaders();
        BChunk refChunk;
        // walk the reference (synthetic) index and merge changes from the new backup index.
        while ((refChunk = refIndex.ReadChunk()) != null)
        {
            Console.WriteLine("CreateSFI() : reading chunk " + refChunk.Name + ", " + refChunk.Files.Count + " items");
            // Iterate backwards so RemoveAt() doesn't shift the unvisited items.
            for (int i = refChunk.Files.Count - 1; i >= 0; i--)
            {
                // An entry also present in the new task's chunks is superseded:
                // drop the stale reference copy.
                IFSEntry newEntry = SearchItemInActualIndex(refChunk.Files[i]);
                if (newEntry != null)
                {
                    Console.WriteLine("CreateSFI() : found updated entry " + newEntry.OriginalFullPath);
                    refChunk.Files.RemoveAt(i);
                }
            }
            // Keep the reference chunk only if it still holds at least one entry.
            if (refChunk.Files.Count > 0)
            {
                mergeIndex.AddChunk(refChunk);
            }
        }
        // Finally, append all chunks produced by the just-finished task.
        foreach (BChunk newChunk in actualTaskChunks)
        {
            mergeIndex.AddChunk(newChunk);
        }
    }
    catch (Exception e) {
        Logger.Append(Severity.ERROR, "Error creating synthetic full index : " + e.Message + " ---- " + e.StackTrace);
    }
    finally{
        try{
            // NOTE(review): if the constructors above threw, refIndex/mergeIndex can be
            // null here; the NullReferenceException would land in the catch below.
            refIndex.Terminate();
            taskIndex.Terminate();
            mergeIndex.Terminate();
        }
        catch (Exception e) {
            // harmless for backup and indexes consistency, but will leave open files descriptors.
            // However this case should not happen in real-life
            Logger.Append(Severity.ERROR, "Error closing indexes : " + e.Message);
        }
    }
}
// Recursively enumerates the current snapshotted path, yielding every filesystem
// entry. On the root call the base path itself is yielded first (when readable);
// directories are recursed into (children get ParentID fixed to the directory's ID)
// and excluded paths are skipped, each exclusion being consumed once matched.
// NOTE(review): snapshottedPath is a field mutated during recursion — the exact
// interleaving with yield is load-bearing; do not reorder.
internal IEnumerable <IFSEntry> GetFull(bool rootCall)
{
    if (snapshottedPath == null)
    {
        throw new ArgumentNullException("Current path is null, ensure SetCurrentPath() has been called");
    }
    bool exclude = false;
    // before enumerating sub-entries, add root path itself to the backup
    if (rootCall)
    {
        IFSEntry basePathEntry = null;
        try{
            Console.WriteLine("brdh GetFilesToBackup() 1: rootCall entry. snapshottedPath=" + snapshottedPath);
            basePathEntry = prov.GetItemByPath(snapshottedPath);
            parentId = basePathEntry.ID;
            Console.WriteLine("brdh GetFilesToBackup() 2: rootCall entry ID=" + basePathEntry.ID);
        }
        catch (FileNotFoundException fnf) {
            Logger.Append(Severity.WARNING, "Basepath '" + snapshottedPath + "' doesn't exist (" + fnf.Message + ")");
            if (LogEvent != null)
            {
                LogEvent(this, new LogEventArgs(911, Severity.WARNING, snapshottedPath));
            }
        }
        catch (Exception e) {
            // Unreadable base path is reported but not fatal: enumeration continues.
            Logger.Append(Severity.WARNING, "Basepath '" + snapshottedPath + "' couldn't be opened (" + e.Message + ")");
            if (LogEvent != null)
            {
                LogEvent(this, new LogEventArgs(911, Severity.WARNING, snapshottedPath));
            }
        }
        if (basePathEntry != null)
        {
            yield return(basePathEntry);
        }
    }
    fsprov = fsEnumerator.GetFSEnumerator(snapshottedPath);
    foreach (var backupItem in fsprov)
    {
        IFSEntry entry = null;
        try{
            entry = prov.GetItem(backupItem);
            if (entry == null)
            {
                continue;
            }
        }
        catch (Exception e) { // permission errors, deleted file...
            // NOTE(review): 'entry' is null when GetItem() threw, so the first log
            // attempt below dereferences null; the inner catch then produces the
            // "<UNKNOWN>" fallback message — confirm this indirection is intended.
            try{
                Logger.Append(Severity.WARNING, "Could not add element '" + entry.SnapFullPath + "' to backup : " + e.Message);
                if (LogEvent != null)
                {
                    LogEvent(this, new LogEventArgs(912, Severity.WARNING, entry.SnapFullPath));
                }
            }
            catch (Exception) {
                Logger.Append(Severity.WARNING, "Could not add element (with unknown name) in folder " + snapshottedPath + " to backup : " + e.Message);
                if (LogEvent != null)
                {
                    LogEvent(this, new LogEventArgs(912, Severity.WARNING, "<UNKNOWN>"));
                }
            }
            continue;
        }
        if (entry.Kind == FileType.Directory)
        {
            // Skip (and consume) any matching exclusion; iterate backwards so
            // RemoveAt() doesn't shift the unvisited items.
            for (int i = currentPath.ExcludedPaths.Count - 1; i >= 0; i--)
            {
                if (entry.SnapFullPath.IndexOf(currentPath.ExcludedPaths[i]) == 0)
                {
                    Logger.Append(Severity.INFO, "Ignoring path " + entry.SnapFullPath);
                    currentPath.ExcludedPaths.RemoveAt(i);
                    exclude = true;
                    break;
                }
            }
            depth++;
            if (depth == 2)
            {
                // Report sub-completion when entering a first-level subdirectory.
                if (SubCompletionEvent != null)
                {
                    SubCompletionEvent(currentPath.Path);
                }
            }
            // Recurse using this directory as the new base path.
            snapshottedPath = entry.SnapFullPath;
            if (!exclude)
            {
                foreach (IFSEntry e in GetFull(false))
                {
                    // Children's ParentID is fixed up to the enclosing directory's ID.
                    e.ParentID = entry.ID;
                    yield return(e);
                }
            }
            depth--;
        }
        // The entry itself is yielded after its children, excluded directories included.
        yield return(entry);
        exclude = false;
    }
    yield break;
}
// Packs the entries produced by itemIterator into BChunks and yields each chunk as
// soon as it is full. Small files are accumulated into the current chunk; a file
// bigger than maxChunkSize is split via GetBigFileChunks() into dedicated chunks.
// The final (possibly partial) chunk is always yielded at the end.
internal IEnumerable <BChunk> GetNextChunk()
{
    chunkOrder++;
    BChunk chunk = new BChunk(this.TaskId);
    chunk.Order = chunkOrder;
    chunk.RootDriveId = this.backupRootDrive.ID;
    uint filePosInChunk = 0;
    long currentSize = 0;
    while (itemIterator.MoveNext())
    {
        IFSEntry ent = itemIterator.Current;
        if (ent == null)
        {
            continue;
        }
        // 1st case : we can add more files to the chunk
        if (ent.FileSize < maxChunkSize)
        {
            filePosInChunk += (uint)ent.FileSize;
            // NOTE(review): ChunkStartPos is assigned AFTER filePosInChunk was advanced
            // by this file's size, so it points past the file's data rather than at its
            // start — looks like an ordering bug, confirm against the restore side.
            ent.ChunkStartPos = filePosInChunk;
            ent.FileStartPos = 0; // whole file fits: no intra-file offset
            chunk.Add(ent);
            currentSize += ent.FileSize;
        }
        //2nd case : a file is too big to fit into one chunk, split it
        else
        {
            // Flush the chunk being built (if non-empty) before emitting the
            // dedicated big-file chunks.
            if (chunk.Items.Count > 0)
            {
                yield return(chunk);
            }
            foreach (BChunk bigFileChunk in GetBigFileChunks(itemIterator.Current, filePosInChunk))
            {
                yield return(bigFileChunk);
            }
            // Start a fresh chunk for the entries following the big file.
            chunkOrder++;
            chunk = new BChunk(this.TaskId);
            chunk.Order = chunkOrder;
            chunk.RootDriveId = this.backupRootDrive.ID;
            filePosInChunk = 0;
            currentSize = 0;
        }
        // 3rd case : if a chunk reaches its max packSize, we create another one
        if (currentSize > maxChunkSize || chunk.Items.Count > 0 && currentSize > maxPackSize /*|| currentSize == 0 && chunk.Files.Count ==0 */ || chunk.Items.Count > maxChunkFiles)
        {
            yield return(chunk);
            chunkOrder++;
            chunk = new BChunk(this.TaskId);
            chunk.Order = chunkOrder;
            chunk.RootDriveId = this.backupRootDrive.ID;
            filePosInChunk = 0;
            currentSize = 0;
        }
    }
    //4th case : done processing file list but chunk not complete
    Logger.Append(Severity.TRIVIA, "GetNextChunk() : Done gathering files inside '" + snapshottedPath + "' without reaching chunk max size. " + chunk.Items.Count + " files, " + currentSize / 1024 + "k");
    // Always emit the trailing chunk, even when empty — callers rely on it
    // (TODO confirm; the old '//if(currentSize > 0)' guard was left disabled).
    yield return(chunk);
}
// Appends a filesystem entry to this chunk's entry list.
internal void Add(IFSEntry bf)
{
    entries.Add(bf);
}