/// <summary>
/// Called by a producer that has finished its drive: starts a producer for the next
/// root drive (if any remain), and completes the shared chunk feed once the drive
/// enumerator is exhausted and every started producer has reported done.
/// </summary>
private void ContinueProducing() {
	doneBdhProducers++;
	if (brdEnumerator.MoveNext()) {
		bdhProducersCount++;
		BackupRootDrive brdn = (BackupRootDrive)brdEnumerator.Current;
		var continuedProducer = System.Threading.Tasks.Task.Factory.StartNew(() => {
			Produce(brdn);
		}, TaskCreationOptions.LongRunning);
		// FIX: observe faults exactly like the initial producers started in Run();
		// without this continuation an exception thrown by Produce() would be an
		// unobserved task exception and the task chain would silently stall.
		continuedProducer.ContinueWith(o => UnexpectedError(continuedProducer), TaskContinuationOptions.OnlyOnFaulted);
	}
	else {
		areBrdDone = true;
	}
	// Last producer standing (and no more drives): close the feed so consumers
	// taking from chunkBuilderFeed can drain and terminate.
	if (areBrdDone && (doneBdhProducers >= bdhProducersCount)) {
		if (!cancellationTokenSource.IsCancellationRequested) {
			Logger.Append(Severity.INFO, "Done gathering items to backup.");
		}
		chunkBuilderFeed.CompleteAdding();
		return;
	}
}
/// <summary>
/// Entry point of the backup run: starts up to 'Parallelism' producer tasks
/// (one per root drive) plus the indexer task. Remaining drives are handed out
/// later by ContinueProducing() as producers finish.
/// </summary>
internal void Run() {
	areBrdDone = false;
	// Report the task start (-1 progress) and the configured parallelism.
	User.SendPut(backup.TaskId, -1, (int)backup.Parallelism.Value);
	brdEnumerator = backup.RootDrives.GetEnumerator();
	User.StorageSessionReceivedEvent += new User.StorageSessionReceivedHandler(this.SessionReceived);
	// Start at most 'Parallelism' producers; if there are fewer drives than that,
	// mark the enumerator as exhausted right away.
	for (int i = 0; i < backup.Parallelism.Value; i++) {
		if (!brdEnumerator.MoveNext()) {
			areBrdDone = true;
			break;
		}
		// 'brd' is declared inside the loop so each closure captures its own drive.
		BackupRootDrive brd = (BackupRootDrive)brdEnumerator.Current;
		bdhProducersCount++;
		var producer = System.Threading.Tasks.Task.Factory.StartNew(() => {
			Produce(brd);
		}, TaskCreationOptions.LongRunning); // WRONG : split paths first
		// Route producer faults to the central error handler.
		producer.ContinueWith(o => UnexpectedError(producer), TaskContinuationOptions.OnlyOnFaulted);
	}
	// Start indexer task
	var indexer = System.Threading.Tasks.Task.Factory.StartNew(() => {
		DoIndex();
	}, cancellationTokenSource.Token, TaskCreationOptions.None, TaskScheduler.Default);
	indexer.ContinueWith(o => UnexpectedError(indexer), TaskContinuationOptions.OnlyOnFaulted);
	// Post-index processing only runs when indexing completed normally.
	indexer.ContinueWith(o => ProcessIndex(), TaskContinuationOptions.OnlyOnRanToCompletion /*| TaskContinuationOptions.NotOnFaulted| TaskContinuationOptions.NotOnCanceled*/);
	/*System.Threading.Tasks.Task.Factory.ContinueWhenAll(producers, z=>{
	 *
	 * });*/
}
/// <summary>
/// Selects the registered incremental provider with the highest priority that
/// declares itself capable of handling the given drive for this task.
/// </summary>
/// <param name='rd'>drive to select a provider for; candidates are matched on its original mountpoint</param>
/// <param name='initMetadata'>per-provider reference metadata gathered during the reference backup; may be null or empty</param>
/// <returns>the chosen provider, or null when no provider is selectable</returns>
internal static IIncrementalProvider GetProviderByPriority(BackupRootDrive rd, Dictionary <string, byte[]> initMetadata) {
	// Without reference metadata no provider can compute an incremental delta.
	if (initMetadata == null || initMetadata.Count == 0) {
		return(null);
	}
	IIncrementalProvider iip = null;
	int maxPrio = 0;
	foreach (KeyValuePair <string, IIncrementalProvider> prov in selectablePlugins) {
		// Only consider plugins registered for this drive, and only those that
		// could beat the best priority found so far.
		if (prov.Key == rd.SystemDrive.OriginalMountPoint && prov.Value.Priority > maxPrio) {
			Logger.Append(Severity.DEBUG, "Testing incr provider '" + prov.Value.Name + "' for drive '" + rd.SystemDrive.OriginalMountPoint + "'");
			foreach (KeyValuePair <string, byte[]> kp in initMetadata) {
				Logger.Append(Severity.TRIVIA, "Got reference metadata for provider " + kp.Key);
			}
			// Hand the provider its reference metadata (if present) before probing
			// capability. TryGetValue avoids the ContainsKey + indexer double lookup.
			byte[] refMetadata;
			if (initMetadata.TryGetValue(prov.Value.Name, out refMetadata)) {
				prov.Value.SetReferenceMetadata(refMetadata);
			}
			if (prov.Value.CheckCapability()) {
				iip = prov.Value;
				maxPrio = prov.Value.Priority;
				Logger.Append(Severity.INFO, "Incremental provider " + prov.Value.Name + " with priority " + prov.Value.Priority + " is selectable for this task.");
			}
			else {
				Logger.Append(Severity.INFO, "Incremental provider " + prov.Value.Name + " with priority " + prov.Value.Priority + " is NOT selectable for this task.");
			}
		}
	}
	if (iip != null) {
		Logger.Append(Severity.INFO, "Incremental/Differential provider " + iip.GetType().ToString() + ", priority " + iip.Priority + " choosen.");
	}
	return(iip);
}
/// <summary>
/// One 'Produce' task generates chunks for one BackupRootDrive (ie 1 mountpoint/filesystem).
/// This variant drains a queue of drives, pushing each drive's chunks into its own
/// dedicated bounded feed registered in 'chunksFeeds'.
/// </summary>
/// <param name='queue'>
/// queue of BackupRootDrives to scan for items
/// </param>
private void Produce(Queue <BackupRootDrive> queue /*, BlockingCollection<BChunk> myFeed*/) {
	// Bounded (capacity 1) feed: the producer blocks until a consumer takes the chunk.
	BlockingCollection <BChunk> myFeed = new BlockingCollection <BChunk>(new ConcurrentQueue <BChunk>(), 1);
	Console.WriteLine(" ------- Producer() has " + queue.Count + " drive items in its queue");
	chunksFeeds.Add(myFeed);
	//IEnumerator<BChunk> chunkEnumerator = bdh.GetNextChunk().GetEnumerator();//backup.GetNextChunk().GetEnumerator();
	while (queue.Count > 0) {
		BackupRootDrive bdr = queue.Dequeue();
		Logger.Append(Severity.INFO, "Collecting items to backup for drive " + bdr.SystemDrive.MountPoint);
		BackupRootDriveHandler bdh = new BackupRootDriveHandler(bdr, this.backup.Id, backup.BackupSet.MaxChunkSize, backup.BackupSet.MaxChunkSize, backup.BackupSet.MaxChunkFiles, backup.Level, backup.RefStartDate, backup.RefEndDate, backup.ParentTrackingId);
		bdh.LogEvent += LogReceived;
		bdh.SubCompletionEvent += new BackupRootDriveHandler.SubCompletionHandler(IncrementSubCompletion);
		foreach (P2PBackup.Common.BasePath baseP in bdr.Paths) {
			bdh.SetCurrentPath(baseP);
			// Lazily enumerate chunks for this base path; stops early on cancellation.
			IEnumerator <BChunk> chunkEnumerator = bdh.GetNextChunk().GetEnumerator();
			while (chunkEnumerator.MoveNext() && !cancellationTokenSource.IsCancellationRequested) {
				BChunk chunk = chunkEnumerator.Current;
				try{
					// Blocks until the consumer has taken the previous chunk; honors cancellation.
					myFeed.Add(chunk, cancellationTokenSource.Token);
				}
				catch (OperationCanceledException) {
					Logger.Append(Severity.TRIVIA, "Producer has been manually cancelled on purpose, stopping...");
					return;
				}
				catch (Exception e) {
					Logger.Append(Severity.ERROR, "###################### Produce() : add refused : " + e.Message + " ---- " + e.StackTrace);
					return;
				}
				// stats
				foreach (IFSEntry item in chunk.Items) {
					backup.ItemsByType[(int)item.Kind]++;
				}
				Logger.Append(Severity.DEBUG, "Basepath " + baseP.Path + " : Added chunk " + chunk.Name + " containing " + chunk.Items.Count + " items ");
			}
		}
		bdh.SubCompletionEvent -= new BackupRootDriveHandler.SubCompletionHandler(IncrementSubCompletion);
		bdh.LogEvent -= LogReceived;
		if (!cancellationTokenSource.IsCancellationRequested) {
			ContinueProducing();
		}
		else {
			// Cancelled: just release the handler's resources.
			bdh.Dispose();
		}
	}
	Console.WriteLine("------------------------- PRODUCE(): done collecting ALL, complete feed adding, cancellationTokenSource.IsCancellationRequested=" + cancellationTokenSource.IsCancellationRequested);
	myFeed.CompleteAdding();
}
/// <summary>
/// Builds a file-comparison incremental provider bound to one root drive.
/// </summary>
/// <param name='task'>id of the backup task this provider works for</param>
/// <param name='rd'>root drive whose items will be compared</param>
internal FileCompareProvider(long task, BackupRootDrive rd) {
	this.taskId = task;
	this.rootDrive = rd;
	// Platform-specific filesystem item access.
	prov = ItemProvider.GetProvider();
}
/// <summary>
/// One 'Produce' task generates chunks for one BackupRootDrive (ie 1 mountpoint).
/// Chunks are pushed into the shared 'chunkBuilderFeed'; when done (and not cancelled)
/// the producer calls ContinueProducing() to pick up the next drive, if any.
/// </summary>
/// <param name='bdr'>
/// the BackupRootDrive to scan for items
/// </param>
private void Produce(BackupRootDrive bdr) {
	Logger.Append(Severity.INFO, "Collecting items to backup for drive " + bdr.SystemDrive.MountPoint);
	BackupRootDriveHandler bdh = new BackupRootDriveHandler(bdr, this.backup.TaskId, backup.MaxChunkSize, backup.MaxChunkSize, backup.MaxChunkFiles, backup.Level, backup.RefStartDate, backup.RefEndDate, backup.RefTaskId);
	bdh.LogEvent += LogReceived;
	bdh.SubCompletionEvent += new BackupRootDriveHandler.SubCompletionHandler(IncrementSubCompletion);
	//IEnumerator<BChunk> chunkEnumerator = bdh.GetNextChunk().GetEnumerator();//backup.GetNextChunk().GetEnumerator();
	foreach (P2PBackup.Common.BasePath baseP in bdr.Paths) {
		bdh.SetCurrentPath(baseP);
		// Lazily enumerate chunks for this base path; stops early on cancellation.
		IEnumerator <BChunk> chunkEnumerator = bdh.GetNextChunk().GetEnumerator();
		while (chunkEnumerator.MoveNext() && !cancellationTokenSource.IsCancellationRequested) {
			BChunk chunk = chunkEnumerator.Current;
			try{
				// Blocks until the feed has room; honors cancellation.
				chunkBuilderFeed.Add(chunk, cancellationTokenSource.Token);
			}
			catch (OperationCanceledException) {
				Logger.Append(Severity.DEBUG2, "Producer has been manually cancelled on purpose, stopping...");
				return;
			}
			catch (Exception e) {
				Logger.Append(Severity.ERROR, "###################### Produce() : add refused : " + e.Message + " ---- " + e.StackTrace);
				return;
			}
			// stats
			foreach (IFSEntry item in chunk.Files) {
				backup.ItemsByType[(int)item.Kind]++;
			}
			Logger.Append(Severity.DEBUG, "Added chunk " + chunk.Name + " containing " + chunk.Files.Count + " items ");
		}
	}
	bdh.SubCompletionEvent -= new BackupRootDriveHandler.SubCompletionHandler(IncrementSubCompletion);
	bdh.LogEvent -= LogReceived;
	Logger.Append(Severity.INFO, "Producer has done collecting items to backup for drive " + bdr.SystemDrive.MountPoint);
	if (!cancellationTokenSource.IsCancellationRequested) {
		ContinueProducing();
	}
	else {
		// Cancelled: no follow-up producer is started; just release the handler.
		bdh.Dispose();
	}
}
/// <summary>
/// Legacy file-comparison provider bound to one root drive.
/// </summary>
/// <param name='task'>id of the backup task this provider works for</param>
/// <param name='rd'>root drive whose items will be compared</param>
internal FileCompareProvider_old(long task, BackupRootDrive rd) {
	this.taskId = task;
	this.rootDrive = rd;
	// Comparison starts at the drive root.
	depth = 0;
	// Platform-specific filesystem item access.
	prov = ItemProvider.GetProvider();
}
/// <summary>
/// Prepares a handler that walks one root drive and cuts its items into chunks,
/// constrained by the given size/count limits and backup level.
/// </summary>
internal BackupRootDriveHandler(BackupRootDrive brd, long taskId, long maxChkSize, long maxPack, int maxChunkFiles, P2PBackup.Common.BackupLevel bType, long refStartDate, long refEndDate, long referenceTaskid /*, long refMaxId*/) {
	// Target drive, task identity and backup level.
	backupRootDrive = brd;
	this.TaskId = taskId;
	this.backupType = bType;
	// Chunking limits.
	maxChunkSize = maxChkSize;
	maxPackSize = maxPack;
	this.maxChunkFiles = maxChunkFiles;
	// Reference (previous backup) window used by incremental/differential levels.
	this.refStartDate = refStartDate;
	this.refEndDate = refEndDate;
	this.referenceTaskid = referenceTaskid;
	// Counters all start from zero.
	nbItems = 0;
	this.depth = 0;
	this.subCompletionNb = 0;
	this.completionBase = 0;
	this.MaxItemID = 0; //refMaxId;
	// Platform-specific item and filesystem-enumeration providers.
	prov = ItemProvider.GetProvider();
	fsEnumerator = FSEnumeratorProvider.GetFSEnumeratorProvider();
}
/// <summary>
/// Signals all providers that a backup is going to occur.
/// Used and unused providers can then gather their metadata, that we will store into index for later use.
/// For example, Usnjournal provider will give us USN journal ID and position (last usn record number) allowing to perform
/// subsequent incr backup based on these reference IDs.
/// </summary>
/// <returns>
/// a list of providername-metadata pairs (name-drivename-metadata)
/// </returns>
/// <param name='taskId'>
/// id of the backup task being started
/// </param>
/// <param name='rd'>
/// root drive the providers should gather reference metadata for
/// </param>
internal static Dictionary <string, byte[]> SignalBackupBegin(long taskId, BackupRootDrive rd) {
	Dictionary <string, byte[]> provMetadataPairs = new Dictionary <string, byte[]>();
	try{
#if OS_WIN
		// Windows build: the USN journal provider records journal id/position as reference metadata.
		if (!Utilities.PlatForm.IsUnixClient()) {
			UsnJournalProvider usnp = new UsnJournalProvider(taskId, rd);
			usnp.SignalBackup();
			//if(usnp.CheckCapability()){
			provMetadataPairs.Add(usnp.Name, usnp.GetMetadata());
			selectablePlugins.Add(new KeyValuePair <string, IIncrementalProvider>(rd.SystemDrive.OriginalMountPoint, usnp));
			//}
			//else // re-set it to null, ,as we won't use it
			//usnp = null;
		}
#endif
#if OS_UNIX
		/*else{
		 *
		 * }*/
		//FileCompareProvider fcp = new FileCompareProvider(taskId, rd);
		// Unix build: file-comparison provider gathers its reference metadata instead.
		FileComparer fcp = new FileComparer(taskId, rd);
		fcp.SignalBackup();
		//if(fcp.CheckCapability()){
		provMetadataPairs.Add(fcp.Name, fcp.GetMetadata());
		selectablePlugins.Add(new KeyValuePair <string, IIncrementalProvider>(rd.SystemDrive.OriginalMountPoint, fcp));
		//}
#endif
	}
	catch (Exception e) {
		// Best-effort: a failing provider is only logged at TRIVIA level.
		Logger.Append(Severity.TRIVIA, e.ToString());
	}
	// Yes. In case of exception the provider is simply not added, and that's an expected behavior (as of now)
	return(provMetadataPairs);
}
/// <summary>
/// Creates a USN-journal based incremental provider for one root drive.
/// </summary>
/// <param name='refTaskId'>reference task id (NOTE(review): not stored here — confirm it is intentionally unused)</param>
/// <param name='rd'>root drive whose journal will be consulted</param>
internal UsnJournalProvider(long refTaskId, BackupRootDrive rd) {
	brd = rd;
	// No journal position known yet.
	prevJournalId = 0;
	prevTransactionId = 0;
}
/// <summary>
/// Same as the two-argument constructor, additionally flagging whether this is a
/// full-refresh backup.
/// </summary>
internal FileCompareProvider(long task, BackupRootDrive rd, bool isFullRefreshBackup)
	: this(task, rd) {
	this.isFullRefreshBackup = isFullRefreshBackup;
}
/// <summary>
/// File-comparison based change detector bound to one root drive.
/// </summary>
/// <param name='task'>id of the backup task this comparer works for</param>
/// <param name='rd'>root drive whose items will be compared</param>
internal FileComparer(long task, BackupRootDrive rd) {
	this.taskId = task;
	this.rootDrive = rd;
	// Platform-specific filesystem item access.
	prov = ItemProvider.GetProvider();
}
/// <summary>
/// Builds the list of root drives (filesystems) involved in this backup:
/// expands the configured base paths against the discovered storage layout (to
/// handle '*' and nested mountpoints), applies system-wide exclude rules, then
/// groups the resulting paths per filesystem into BackupRootDrive entries.
/// </summary>
/// <returns>one BackupRootDrive per involved filesystem, each carrying its paths</returns>
private List <BackupRootDrive> GetBackupInvolvedRootDrives() {
	// DEBUG print basepaths
	/*foreach(BasePath bsp in bs.BasePaths)
	 *      Console.WriteLine ("2##### bp path="+bsp.Path+", type="+bsp.Type);
	 * Console.ReadLine();*/
	Int16 rootDriveId = 0;
	char[] pathSeparators = new char[] { Path.DirectorySeparatorChar, Path.AltDirectorySeparatorChar, '/', '\\' };
	// gather all paths to backup and send them to the storagelayout (allows it to build a partial layout including only the layout necessary for this task)
	var taskPathsToBackup = (from bp in this.BackupSet.BasePaths
	                         where bp != null && !string.IsNullOrEmpty(bp.Type) && bp.Type.ToLower().StartsWith("fs")
	                         select bp.Path).ToList();
	slManager = new StorageLayoutManager(taskPathsToBackup);
	slManager.LogEvent += this.LogReceivedEvent;
	//List<FileSystem> allFSes = new List<FileSystem>();
	//string storageP = bs.BasePaths[0].
	//Logger.Append (Severity.TRIVIA, "Building storage layout using provider '"+storageProvs[0]+"'...");
	this.StorageLayout = slManager.BuildStorageLayout(this.BackupSet.StorageLayoutProvider, this.BackupSet.ProxyingInfo);
	//sl.GetAllFileSystems(sl.Entries, ref allFSes);
	Logger.Append(Severity.DEBUG, "Got " + this.StorageLayout.GetAllFileSystems(null).Count() + " raw FSes");
	// we sort drives by reverse mountpoint length : on unix this allows us to find, for example,
	// '/home' before '/' when asked to backup '/home/user'
	var fsMountsByNameLength = from sd in this.StorageLayout.GetAllFileSystems(null)
	                           where (!string.IsNullOrEmpty(sd.MountPoint))
	                           orderby sd.MountPoint.Length descending
	                           select sd;
	Logger.Append(Severity.DEBUG, "Got " + fsMountsByNameLength.Count() + " FSes");
	List <BackupRootDrive> drives = new List <BackupRootDrive>();
	// We first expand the basepaths defined by backupset configuration
	// in order to manage nested mountpoints (eg : backups tells to save /usr, but this path contains /usr/local
	// which is a mountpoint to another drive
	// Iterate backwards: entries are appended and removed while expanding.
	for (int i = this.BackupSet.BasePaths.Count - 1; i >= 0; i--) {
		// object-type paths are not filesystem paths; skip expansion for them
		if (this.BackupSet.BasePaths[i].Type != null && this.BackupSet.BasePaths[i].Type.ToLower().StartsWith("object:")) //BasePath.PathType.OBJECT)
		{
			continue;
		}
		foreach (FileSystem filesystem in fsMountsByNameLength) {
			// pseudo/virtual filesystems can never be backed up; record them as excluded
			if (filesystem.DriveFormat == "proc" || filesystem.DriveFormat == "sysfs" || filesystem.DriveFormat == "debugfs" || filesystem.DriveFormat == "devpts" || filesystem.DriveFormat == "procfs") {
				Logger.Append(Severity.TRIVIA, "GetBackupInvolvedRootDrives() : excluded non-backupable fs " + filesystem.MountPoint);
				Logger.Append(Severity.INFO, "Excluded fs " + filesystem.MountPoint + " from " + this.BackupSet.BasePaths[i].Path + " (non-backupable fs)");
				this.BackupSet.BasePaths[i].ExcludedPaths.Add(filesystem.MountPoint);
				continue;
			}
			//Console.WriteLine ("basepath :i="+i+", lulute 1");
			//if(IsDriveExcluded(bs.BasePaths[i], filesystem)) continue;
			if (string.IsNullOrEmpty(filesystem.OriginalMountPoint)) {
				Logger.Append(Severity.NOTICE, "Proxied Filesystem '" + filesystem.MountPoint + "' has unknown original mountpoint, will be backuped with current mountpoint as root");
				filesystem.OriginalMountPoint = filesystem.MountPoint;
			}
			//Console.WriteLine ("basepath :i="+i+", lulute 2");
			// '*' matches every filesystem; otherwise a filesystem mounted below the
			// configured path gets its own BasePath entry inheriting the rules.
			if (this.BackupSet.BasePaths[i].Path == "*" || filesystem.OriginalMountPoint.IndexOf(this.BackupSet.BasePaths[i].Path) == 0 && this.BackupSet.BasePaths[i].Path != filesystem.Path) {
				BasePath bp = new BasePath();
				bp.Path = filesystem.MountPoint;
				bp.Type = "FS"; //BasePath.PathType.FS;
				// inherit include/exclude rules
				bp.IncludePolicy = this.BackupSet.BasePaths[i].IncludePolicy;
				bp.ExcludePolicy = this.BackupSet.BasePaths[i].ExcludePolicy;
				this.BackupSet.BasePaths.Add(bp);
				Logger.Append(Severity.TRIVIA, "Expanded config path " + this.BackupSet.BasePaths[i].Path + " to " + bp.Path);
			}
			//Console.WriteLine ("basepath :i="+i+", lulute 3");
		}
		// remove original wildcard basepaths, now they have been expanded
		if (this.BackupSet.BasePaths[i].Path == "*") {
			this.BackupSet.BasePaths.RemoveAt(i);
		}
	}
	// Windows paths compare case-insensitively; unix paths do not.
	StringComparison sComp;
	if (!Utilities.PlatForm.IsUnixClient()) {
		sComp = StringComparison.InvariantCultureIgnoreCase;
	}
	else {
		sComp = StringComparison.InvariantCulture;
	}
	// NOTE(review): FactorizePaths is invoked twice in a row — presumably a second
	// pass is needed to reach a fixed point after the first merge; confirm this is
	// intentional and not a duplicated call.
	FactorizePaths(sComp);
	FactorizePaths(sComp);
	//debug print raw basepaths
	foreach (BasePath basep in this.BackupSet.BasePaths) {
		Console.WriteLine("¤¤¤¤¤¤¤ raw basepath : " + basep.ToString());
	}
	// SYSTEM EXCLUDES HERE!!!
	// get system-wide exclusions rules, expand and them apply them.
	// we re-generate the BasePaths list for the last time.
	List <string> systemExcludes = PathExcluderFactory.GetPathsExcluder().GetPathsToExclude(/*this.HandledBy>0*/);
	for (int i = 0; i < systemExcludes.Count; i++) {
		systemExcludes[i] = ExpandFilesystemPath(systemExcludes[i]);
		Console.WriteLine("sys excl : ~~~~~~~~~~~~~" + systemExcludes[i]);
	}
	// order paths by length desc : avoids adding an exclude rule 'c:\mydatadir\2' on 'c:\' instead of 'c:\mydatadir\'
	// since DESC length ordering will return' c:\mydatadir' before 'c:\'
	//var basePathsByLength = bs.BasePaths.OrderByDescending( path=> path.Path.Length);
	foreach (BasePath sortedBp in this.BackupSet.BasePaths.OrderByDescending(path => path.Path.Length)) {
		for (int j = systemExcludes.Count - 1; j >= 0; j--) {
			// TODO!! on windows and only windows, use StringComparison.InvariantCultureIgnoreCase
			if (systemExcludes[j].IndexOf(sortedBp.Path, sComp) == 0) {
				sortedBp.ExcludedPaths.Add(systemExcludes[j]);
				systemExcludes.RemoveAt(j);
			}
		}
	}
	foreach (BasePath path in this.BackupSet.BasePaths) {
		// search if filesystem matches wanted backup path
		foreach (FileSystem fsMount in fsMountsByNameLength) {
			//Console.WriteLine ("basepath.Path="+path.Path+", current fs path="+fsMount.Path+", mntpt="+fsMount.MountPoint+", origmntpt="+fsMount.OriginalMountPoint);
			// if drive is explicitely deselected (excluded), don't add it to list
			//if(IsDriveExcluded(path, sysDrive))
			//      continue;
			// special case of "*" has been treated before and splitted into 1 BasePath per filesystem, so ignore it
			if (path.Path == "*") {
				Console.WriteLine("WOW WOW WOW met '*' entry, should have been expanded before!!!! " + path.Path + ", type=" + path.Type);
				//Console.ReadLine();
				continue;
			}
			// NOTE(review): Path.PathSeparator is the path-LIST separator (';' on
			// windows, ':' on unix), not a directory separator — confirm the
			// EndsWith test below really intends it and not DirectorySeparatorChar.
			if (/*path.Path == "*" ||*/ path.Path.IndexOf(fsMount.MountPoint, sComp) == 0 ||
			    (path.Path.EndsWith(Path.PathSeparator + "*") && path.Path.Substring(0, path.Path.LastIndexOfAny(pathSeparators)) == fsMount.Path.Substring(0, fsMount.Path.LastIndexOfAny(pathSeparators)))
			    ) {
				//Console.WriteLine ("basepath.Path="+path.Path+" MATCHES");
				// 1/2 exclude weird/pseudo fs
				if (fsMount.DriveFormat == "proc" || fsMount.DriveFormat == "sysfs" || fsMount.DriveFormat == "debugfs" || fsMount.DriveFormat == "devpts" || fsMount.DriveFormat == "procfs") {
					Console.WriteLine("GetBackupInvolvedRootDrives() : excluded mountpoint " + path.Path + " (non-backupable fs)");
					continue;
				}
				/*if(udi.DriveFormat */
				// first pass : add basepaths as defined in backupset configuration
				// if drive doesn't exist yet in rootdrives, add it. Else, add path to existing rootdrive
				bool found = false;
				foreach (BackupRootDrive rd in drives) {
					//Console.WriteLine (" @@@@ GetBInvoldedRd :cur rd="+rd.SystemDrive.OriginalMountPoint+", fsmountpath="+fsMount.Path);
					if (rd.SystemDrive.OriginalMountPoint == fsMount.OriginalMountPoint) {
						rd.Paths.Add(path);
						found = true;
					}
				}
				if (found == false /* && !IsDriveExcluded(path, sysDrive)*/) {
					Console.WriteLine(" @@@@ GetBInvoldedRd : added new rootdrives to list : " + fsMount.OriginalMountPoint + " for path " + path.Path);
					BackupRootDrive rootDrive = new BackupRootDrive();
					rootDrive.SystemDrive = fsMount;
					rootDrive.Paths.Add(path);
					rootDrive.ID = rootDriveId;
					drives.Add(rootDrive);
					rootDriveId++;
				}
				//Console.WriteLine("match : path "+path.Path+", drive "+sysDrive.MountPoint);
				break; //avoid continuing scanning until '/' (would be false positive)
			}
		}
	}
	return(drives);
}