/// <summary>
/// Adds a single file's data into the parity set: finds (or allocates) a block range for it,
/// XORs its contents into parity, then records it in the drive's file list.
/// </summary>
/// <param name="r">Record describing the file to add; its StartBlock is assigned here.</param>
/// <returns>true if the file was fully added; false if it was skipped for any reason
/// (vanished, no space, parity extension failed, or XOR failed).</returns>
private bool AddToParity(FileRecord r)
{
  string fullPath = r.FullPath;
  // file may have been deleted, or attributes may have changed since we scanned, so refresh
  if (!r.RefreshAttributes()) {
    LogFile.Log("{0} no longer exists.", r.FullPath);
    return false;
  }
  // grow the meta file (filesX.dat) first; this also creates the backup that the
  // finally clause below restores if anything goes wrong
  if (!r.Drive.ExtendMetaFile(r)) {
    FireErrorMessage(String.Format("Unable to expand {0} to add {1}. File will be skipped this update.", r.Drive.MetaFile, fullPath));
    return false;
  }
  bool success = false;
  try {
    // zero-length files occupy no parity blocks; they are only recorded in the file list
    if (r.Length > 0) {
      // See if we can find an empty chunk in the parity we can re-use.
      // We don't want just any empty spot, we want the smallest one
      // that is large enough to contain the file, to minimize
      // fragmentation. A chunk that is exactly the same size is ideal.
      List<FreeNode> freeList = r.Drive.GetFreeList();
      UInt32 startBlock = FreeNode.FindBest(freeList, r.LengthInBlocks);
      if (startBlock == FreeNode.INVALID_BLOCK)
        // no suitable free chunk; append at the end of this drive's used range
        startBlock = r.Drive.MaxBlock;
      UInt32 endBlock = startBlock + r.LengthInBlocks;

      // compute how much space this update is going to require
      long required = 0;
      if (endBlock > parity.MaxBlock)
        // File is going on the end, so we are going to need additional space for the growing parityX.dat file
        required = ((long)(endBlock - parity.MaxBlock)) * Parity.BLOCK_SIZE;
      long available = parity.FreeSpace; // -1 means free space could not be determined
      if ((available != -1) && (available < required)) {
        FireErrorMessage(String.Format("Insufficient space available on {0} to process " + "{1}. File will be skipped this update. (Required: {2} " + "Available: {3})", Config.ParityDir, fullPath, Utils.SmartSize(required), Utils.SmartSize(available)));
        return false;
      }

      r.StartBlock = startBlock;
      if (LogFile.Verbose)
        LogFile.Log("Adding {0} to blocks {1} to {2}...", fullPath, startBlock, endBlock - 1);
      else
        LogFile.Log("Adding {0}...", fullPath);
      r.Drive.Status = "Adding " + fullPath;

      // pre-allocate actual needed parity space before even trying to add the file
      if (endBlock > parity.MaxBlock) {
        LogFile.Log(String.Format("Extending parity by {0} blocks for add...", endBlock - parity.MaxBlock));
        if (!ExtendParity(endBlock)) {
          if (!cancel)
            FireErrorMessage(String.Format("Unable to extend parity space for {0}. File will be skipped this update.", fullPath));
          return false;
        }
        LogFile.Log("Parity extended");
      }

      if (!XORFileWithParity(r, false)) {
        if (!cancel)
          // assume FireErrorMessage was already called
          LogFile.Log("Could not add {0} to parity. File will be skipped.", r.FullPath);
        return false;
      }
    }
    // commit: record the file and persist the updated file list
    r.Drive.AddFile(r);
    r.Drive.SaveFileList();
    success = true;
  }
  finally {
    if (!success)
      r.Drive.RestoreMetaFile(); // restore the backup filesX.dat created by ExtendMetaFile()
  }
  return true;
}
/// <summary>
/// Writes the meta file (filesX.dat) header: the format version followed by the record count.
/// </summary>
/// <param name="f">Destination stream, positioned where the header should go.</param>
/// <param name="count">Number of file records in the file (0 when not yet known).</param>
private void WriteFileHeader(FileStream f, UInt32 count)
{
  FileRecord.WriteUInt32(f, META_FILE_VERSION);
  FileRecord.WriteUInt32(f, count);
}
/// <summary>
/// XORs the data from the given file with the parity data. This either adds the file to
/// parity or removes it from parity if it was already there. If checkHash is true,
/// it verifies the file's hash matches the hash on record before committing the parity.
/// If false, it updates the file's hash on record.
/// </summary>
/// <param name="r">Record describing the file; StartBlock/LengthInBlocks define the parity range.</param>
/// <param name="checkHash">true to verify the on-record hash (remove path); false to update it (add path).</param>
/// <returns>true if the parity change was computed and flushed; false on any error, cancel, or hash mismatch.</returns>
private bool XORFileWithParity(FileRecord r, bool checkHash)
{
  if (!File.Exists(r.FullPath))
    return false;
  if (r.Length == 0)
    return true; // zero-length files occupy no parity blocks

  using (ParityChange change = new ParityChange(parity, Config, r.StartBlock, r.LengthInBlocks))
  using (MD5 hash = MD5.Create()) { // FIX: MD5 is IDisposable and was previously leaked on every call (including all early returns)
    hash.Initialize();
    byte[] data = new byte[Parity.BLOCK_SIZE];
    UInt32 endBlock = r.StartBlock + r.LengthInBlocks;
    // progress denominator includes an estimate of the time spent flushing the temp parity at the end
    UInt32 totalProgresBlocks = r.LengthInBlocks + (UInt32)(TEMP_FLUSH_PERCENT * r.LengthInBlocks);

    FileStream f;
    try {
      f = new FileStream(r.FullPath, FileMode.Open, FileAccess.Read, FileShare.Read);
    }
    catch (Exception e) {
      FireErrorMessage(String.Format("Error opening {0}: {1}", r.FullPath, e.Message));
      return false;
    }
    try {
      for (UInt32 b = r.StartBlock; b < endBlock; b++) {
        Int32 bytesRead;
        try {
          bytesRead = f.Read(data, 0, Parity.BLOCK_SIZE);
        }
        catch (Exception e) {
          FireErrorMessage(String.Format("Error reading {0}: {1}", r.FullPath, e.Message));
          return false;
        }
        // hash exactly the bytes read; the final block is usually partial
        if (b == (endBlock - 1))
          hash.TransformFinalBlock(data, 0, bytesRead);
        else
          hash.TransformBlock(data, 0, bytesRead, data, 0);
        // zero-pad the block before XORing so parity is computed over full blocks
        while (bytesRead < Parity.BLOCK_SIZE)
          data[bytesRead++] = 0;
        change.Reset(true);
        change.AddData(data);
        change.Write();
        currentUpdateBlocks++;
        r.Drive.Progress = (double)(b - r.StartBlock) / totalProgresBlocks;
        Progress = (double)currentUpdateBlocks / totalUpdateBlocks;
        if (cancel)
          return false; // parity change is simply discarded (never flushed)
      }
    }
    catch (Exception e) {
      env.LogCrash(e);
      FireErrorMessage(String.Format("Unexpected error while processing {0}: {1}", r.FullPath, e.Message));
      return false;
    }
    finally {
      f.Dispose();
    }

    if (checkHash) {
      // removing: refuse to commit if the file on disk no longer matches the recorded hash
      if (!Utils.HashCodesMatch(hash.Hash, r.HashCode)) {
        LogFile.Log("Tried to remove existing file but hash codes don't match.");
        return false;
      }
    }
    else
      r.HashCode = hash.Hash; // adding: record the freshly computed hash

    FlushTempParity(r.Drive, change); // commit the parity change to disk
  }
  r.Drive.Progress = 0;
  return true;
}
/// <summary>
/// Update a parity set to reflect the latest changes: processes all pending deletes,
/// then all pending adds, then trims any unused parity space off the end.
/// </summary>
/// <param name="scanFirst">If true, rescan all drives first to refresh the add/delete lists.</param>
public void Update(bool scanFirst = false)
{
  cancel = false;
  if (Empty) {
    // nothing to update against; build parity from scratch instead
    LogFile.Log("No existing parity data found. Creating new snapshot.");
    Create();
    return;
  }
  try {
    if (scanFirst)
      // get the current list of files on each drive and compare to old state
      ScanAll();
    if (cancel)
      return;
    Progress = 0;
    // count total blocks for this update, for progress reporting
    currentUpdateBlocks = 0;
    totalUpdateBlocks = 0;
    foreach (DataDrive d in drives) {
      foreach (FileRecord r in d.Adds)
        totalUpdateBlocks += r.LengthInBlocks;
      foreach (FileRecord r in d.Deletes)
        totalUpdateBlocks += r.LengthInBlocks;
    }
    // now process deletes
    int deleteCount = 0;
    long deleteSize = 0;
    DateTime start = DateTime.Now;
    foreach (DataDrive d in drives) {
      // iterate over a copy since successful removes mutate d.Deletes
      FileRecord[] deleteList = new FileRecord[d.Deletes.Count];
      d.Deletes.CopyTo(deleteList);
      foreach (FileRecord r in deleteList) {
        if (RemoveFromParity(r)) {
          deleteCount++;
          deleteSize += r.Length;
          d.Deletes.Remove(r);
        }
        if (cancel)
          return;
      }
      d.UpdateStatus();
    }
    if (deleteCount > 0) {
      TimeSpan elapsed = DateTime.Now - start;
      LogFile.Log("{0} file{1} ({2}) removed in {3:F2} sec", deleteCount, deleteCount == 1 ? "" : "s", Utils.SmartSize(deleteSize), elapsed.TotalSeconds);
    }
    // now process adds
    int addCount = 0;
    long addSize = 0;
    start = DateTime.Now;
    foreach (DataDrive d in drives) {
      // iterate over a copy since successful adds mutate d.Adds
      FileRecord[] addList = new FileRecord[d.Adds.Count];
      d.Adds.CopyTo(addList);
      foreach (FileRecord r in addList) {
        if (AddToParity(r)) {
          addCount++;
          addSize += r.Length;
          d.Adds.Remove(r);
        }
        if (cancel)
          return;
      }
      d.UpdateStatus();
    }
    if (addCount > 0) {
      TimeSpan elapsed = DateTime.Now - start;
      LogFile.Log("{0} file{1} ({2}) added in {3:F2} sec", addCount, addCount == 1 ? "" : "s", Utils.SmartSize(addSize), elapsed.TotalSeconds);
    }
    // possibly reclaim unused parity space if any files were deleted off the end
    UInt32 maxParityBlock = MaxParityBlock();
    if (maxParityBlock < parity.MaxBlock) {
      UInt32 blocks = parity.MaxBlock - maxParityBlock;
      LogFile.Log(String.Format("Reclaiming {0} blocks of unused parity space...", blocks));
      parity.Trim(MaxParityBlock());
      LogFile.Log(Utils.SmartSize((long)blocks * Parity.BLOCK_SIZE) + " freed on parity drive.");
    }
  }
  finally {
    foreach (DataDrive d in drives) {
      d.UpdateStatus();
      d.Status = "";
    }
    // make sure all progress bars are reset
    foreach (DataDrive d in drives)
      d.UpdateFinished();
    parity.Close();
  }
}
/// <summary>
/// Adds the file to the master files list, keyed by its lower-cased name.
/// </summary>
public void AddFile(FileRecord r)
{
  string key = r.Name.ToLower();
  // a record with this (case-insensitive) name must not already be present
  Debug.Assert(!files.ContainsKey(key));
  files[key] = r;
  FileCount++;
}
/// <summary>
/// Removes a file's contribution from the parity set, then removes it from the drive's file list.
/// Fast path: if the file on disk is unmodified, simply XOR it back out. Slow path: recompute
/// parity for its block range from all other drives' data.
/// </summary>
/// <param name="r">Record describing the file to remove.</param>
/// <returns>true if the file was removed; false on error or cancel (meta file restored from backup).</returns>
private bool RemoveFromParity(FileRecord r)
{
  // make a backup copy of the meta file first. If this fails, we know we won't be able to complete the remove.
  if (!r.Drive.BackupMetaFile()) {
    FireErrorMessage("Error removing " + r.FullPath + ": could not back up " + r.Drive.MetaFile);
    return false;
  }
  bool success = false;
  try {
    // zero-length files occupy no parity blocks; just drop them from the file list
    if (r.Length > 0) {
      string fullPath = r.FullPath;
      UInt32 startBlock = r.StartBlock;
      UInt32 endBlock = startBlock + r.LengthInBlocks;
      if (LogFile.Verbose)
        LogFile.Log("Removing {0} from blocks {1} to {2}...", fullPath, startBlock, endBlock - 1);
      else
        LogFile.Log("Removing {0}...", fullPath);
      r.Drive.Status = "Removing " + fullPath;

      // Optimization: if the file still exists and is unmodified, we can remove it much faster this way
      if (!r.Modified && XORFileWithParity(r, true)) {
        r.Drive.RemoveFile(r);
        r.Drive.SaveFileList();
        success = true; // so finally() clause doesn't try to restore backup
        return true;
      }

      // progress denominator includes an estimate of the time spent flushing the temp parity at the end
      UInt32 totalProgresBlocks = r.LengthInBlocks + (UInt32)(TEMP_FLUSH_PERCENT * r.LengthInBlocks);
      // Recalulate parity from scratch for all blocks that contained the deleted file's data.
      using (ParityChange change = new ParityChange(parity, Config, startBlock, r.LengthInBlocks))
        try {
          byte[] data = new byte[Parity.BLOCK_SIZE];
          for (UInt32 b = startBlock; b < endBlock; b++) {
            change.Reset(false);
            // XOR together the corresponding block from every OTHER drive
            foreach (DataDrive d in drives) {
              if (d == r.Drive)
                continue;
              // Note it's possible that this file may also have been deleted. That's OK, ReadFileData
              // returns false and we don't try to add the deleted file to the parity.
              FileRecord f;
              try {
                if (d.ReadBlock(b, data, out f))
                  change.AddData(data);
              }
              catch (Exception e) {
                FireErrorMessage(e.Message);
                return false;
              }
            }
            change.Write();
            currentUpdateBlocks++;
            r.Drive.Progress = (double)(b - startBlock) / totalProgresBlocks;
            Progress = (double)currentUpdateBlocks / totalUpdateBlocks;
            if (cancel)
              return false;
          }
          FlushTempParity(r.Drive, change); // commit the recomputed parity to disk
        }
        catch (Exception e) {
          env.LogCrash(e);
          FireErrorMessage(String.Format("Error removing {0}: {1}", r.FullPath, e.Message));
          return false;
        }
    }
    r.Drive.RemoveFile(r);
    r.Drive.SaveFileList();
    success = true;
  }
  finally {
    if (!success)
      r.Drive.RestoreMetaFile(); // restore the backup filesX.dat created by BackupMetaFile()
  }
  return true;
}
/// <summary>
/// Compute the MD5 hash of the file on disk, reading it one parity block at a time.
/// Returns null if the scan was cancelled while hashing.
/// </summary>
/// <param name="r">Record identifying the file to hash.</param>
/// <param name="showProgress">If true, publish hashing progress as a fraction of the file's blocks.</param>
private byte[] ComputeHash(FileRecord r, bool showProgress = false)
{
  using (FileStream stream = new FileStream(r.FullPath, FileMode.Open, FileAccess.Read))
  using (MD5 md5 = MD5.Create()) {
    md5.Initialize();
    byte[] buffer = new byte[Parity.BLOCK_SIZE];
    int blocksHashed = 0;
    if (showProgress)
      Progress = 0;
    while (!cancelScan) {
      int bytesRead = stream.Read(buffer, 0, Parity.BLOCK_SIZE);
      if (bytesRead <= 0)
        break; // end of file
      md5.TransformBlock(buffer, 0, bytesRead, buffer, 0);
      if (showProgress)
        Progress = (double)++blocksHashed / r.LengthInBlocks;
    }
    // a cancel at any point yields no hash
    if (cancelScan)
      return null;
    md5.TransformFinalBlock(buffer, 0, 0);
    return md5.Hash;
  }
}
/// <summary>
/// Comparison function for ordering FileRecords by their starting parity block.
/// </summary>
/// <returns>Negative, zero, or positive as r1's StartBlock is less than, equal to, or greater than r2's.</returns>
public static int CompareByStartBlock(FileRecord r1, FileRecord r2)
{
  // FIX: the original returned 1 when the start blocks were equal, violating the
  // comparison contract (Compare(a,b) and Compare(b,a) must not both be positive).
  // Array.Sort/List<T>.Sort may throw "comparer returned inconsistent results" on such input.
  return r1.StartBlock.CompareTo(r2.StartBlock);
}
/// <summary>
/// Removes the file from the master files list, keyed by its lower-cased name.
/// </summary>
public void RemoveFile(FileRecord r)
{
  string key = r.Name.ToLower();
  // the record must have been added before it can be removed
  Debug.Assert(files.ContainsKey(key));
  files.Remove(key);
  FileCount--;
}
/// <summary>
/// Appends a single file record to the end of the meta file, creating the file
/// (with an empty header) first if it does not exist yet.
/// </summary>
private void AppendFileRecord(FileRecord r)
{
  // create the meta file with a placeholder header if it isn't there yet
  if (!File.Exists(MetaFilePath)) {
    using (FileStream headerStream = new FileStream(MetaFilePath, FileMode.Create, FileAccess.Write))
      WriteFileHeader(headerStream, 0); // record count not known yet
  }
  using (FileStream appendStream = new FileStream(MetaFilePath, FileMode.Append, FileAccess.Write)) {
    r.WriteToFile(appendStream);
  }
}
/// <summary>
/// Reads a block of data from the drive. Returns the File containing the block, if any, in r.
/// Returns true if data was read.
/// Returns false if no file contains this block. r will be null.
/// If the file doesn't exist, returns false, and sets r to the file.
/// If the file exists but has been modified, reads the block and returns true. It is the
/// caller's responsibility to check r.Modified and handle appropriately.
/// Throws an exception if opening or reading the file fails.
/// </summary>
public bool ReadBlock(UInt32 block, byte[] data, out FileRecord r)
{
  r = FindFileContaining(block);
  if (r == null)
    return false;
  // serialize against the idle-close timer (HandleFileCloseTimer) which disposes the cached stream
  lock (fileCloseLock) {
    // cache the open stream across calls; only (re)open when the target file changes
    if (r != currentOpenFile) {
      if (currentOpenFileStream != null) {
        currentOpenFileStream.Dispose();
        currentOpenFileStream = null;
      }
      if (!File.Exists(r.FullPath))
        return false;
      try {
        currentOpenFileStream = new FileStream(r.FullPath, FileMode.Open, FileAccess.Read, FileShare.Read);
      }
      catch (Exception e) {
        throw new Exception(String.Format("Error opening {0}: {1}", r.FullPath, e.Message), e);
      }
      currentOpenFile = r;
    }
    // restart the idle timer so the cached stream stays open while reads keep coming
    fileCloseTimer.Stop();
    fileCloseTimer.Start();
    Status = "Reading " + currentOpenFile.FullPath;
    DriveStatus = DriveStatus.ReadingFile;
    try {
      // seek to the requested block relative to the file's starting parity block
      currentOpenFileStream.Position = ((long)(block - r.StartBlock)) * Parity.BLOCK_SIZE;
      int bytesRead = currentOpenFileStream.Read(data, 0, data.Length);
      // zero-pad a short read (final block of the file)
      while (bytesRead < data.Length)
        data[bytesRead++] = 0;
    }
    catch (Exception e) {
      throw new Exception(String.Format("Error reading {0}: {1}", r.FullPath, e.Message), e);
    }
  }
  return true;
}
/// <summary>
/// Verify that the hash on record for this file matches the actual file currently on disk.
/// </summary>
public bool HashCheck(FileRecord r)
{
  // zero length files cannot fail a hash check; otherwise compare a fresh hash to the record
  return r.Length == 0 || Utils.HashCodesMatch(ComputeHash(r), r.HashCode);
}
/// <summary>
/// Backs up and then extends the existing filesX.dat by the amount necessary to store
/// the given new file record.
/// </summary>
/// <param name="add">The record whose serialized size determines how much to extend.</param>
/// <returns>true on success; false if the backup or the extension failed (backup restored).</returns>
public bool ExtendMetaFile(FileRecord add)
{
  if (!BackupMetaFile())
    return false;
  string metaPath = MetaFilePath;
  // a missing meta file is a valid case if a new drive has just been added to the array
  if (!File.Exists(metaPath)) {
    using (FileStream created = new FileStream(metaPath, FileMode.Create, FileAccess.Write))
      WriteFileHeader(created, 0);
  }
  long currentLength = new FileInfo(metaPath).Length;
  try {
    using (FileStream meta = new FileStream(metaPath, FileMode.Open, FileAccess.Write))
      meta.SetLength(currentLength + add.RecordSize);
  }
  catch (Exception e) {
    LogFile.Log(String.Format("Could not extend {0}: {1}", metaPath, e.Message));
    RestoreMetaFile();
    return false;
  }
  return true;
}
/// <summary>
/// Recursively scans a directory, recording every non-ignored file into scanFiles and
/// seenFileNames, while publishing progress. Hidden/system folders and files, ignore-pattern
/// matches, and the parity folder itself are skipped.
/// </summary>
/// <param name="dir">Directory to scan.</param>
/// <param name="ignores">Pre-compiled ignore patterns, matched against lower-cased file names.</param>
/// <param name="progress">Parent progress phase; null only for the root call.</param>
private void Scan(DirectoryInfo dir, List <Regex> ignores, ProgressEstimator progress = null)
{
  if (cancelScan) {
    return;
  }
  // never allow scanning of our own parity folder
  if (Utils.PathsAreEqual(dir.FullName, config.ParityDir)) {
    LogFile.Log("Warning: skipping " + dir.FullName + " because it is the parity folder.");
    return;
  }
  Status = "Scanning " + dir.FullName;
  if (scanProgress != null) {
    Progress = scanProgress.Progress;
  }
  DirectoryInfo[] subDirs;
  try {
    subDirs = dir.GetDirectories();
  }
  catch (Exception e) {
    if (progress == null) {
      // failure at the root is fatal; deeper failures are logged and skipped
      throw;
    }
    LogFile.Log("Warning: Could not enumerate subdirectories of {0}: {1}", dir.FullName, e.Message);
    return;
  }
  FileInfo[] fileInfos;
  try {
    fileInfos = dir.GetFiles();
  }
  catch (Exception e) {
    LogFile.Log("Warning: Could not enumerate files in {0}: {1}", dir.FullName, e.Message);
    return;
  }
  // the root call creates the shared progress estimator; recursive calls open a sub-phase
  ProgressEstimator folderProgress;
  if (scanProgress == null) {
    scanProgress = new ProgressEstimator();
    scanProgress.Reset(subDirs.Length);
    folderProgress = scanProgress;
  } else {
    folderProgress = progress.BeginSubPhase(subDirs.Length);
  }
  foreach (DirectoryInfo d in subDirs) {
    if (cancelScan) {
      return;
    }
    // skip hidden folders (if configured) and system folders entirely
    if ((config.IgnoreHidden && (d.Attributes & FileAttributes.Hidden) != 0) || ((d.Attributes & FileAttributes.System) != 0)) {
      folderProgress.EndPhase();
      continue;
    }
    string subDir = Path.Combine(dir.FullName, d.Name);
    // NOTE(review): the MAX_FOLDER length guard present in the other Scan variant is
    // disabled here — long folder paths will recurse; confirm this is intentional
    //if (subDir.Length >= MAX_FOLDER)
    //  LogFile.Log("Warning: skipping folder \"" + subDir + "\" because the path is too long.");
    //else
    Scan(d, ignores, folderProgress);
    folderProgress.EndPhase();
  }
  Progress = scanProgress.Progress;
  string relativePath = Utils.StripRoot(root, dir.FullName);
  foreach (FileInfo f in fileInfos) {
    if (cancelScan) {
      return;
    }
    // have to use Path.Combine here because accessing the f.FullName property throws
    // an exception if the path is too long
    string fullName = Path.Combine(dir.FullName, f.Name);
    try {
      // NOTE(review): the MAX_PATH guard is disabled here but active in the other
      // Scan variant; long file paths fall through to the catch below instead
      //if (fullName.Length >= MAX_PATH) {
      //  LogFile.Log("Warning: skipping file \"" + fullName + "\" because the path is too long");
      //  continue;
      //}
      // (FileAttributes)(-1) means the attributes could not be read
      if (f.Attributes == (FileAttributes)(-1)) {
        continue;
      }
      if (config.IgnoreHidden && (f.Attributes & FileAttributes.Hidden) != 0) {
        continue;
      }
      if ((f.Attributes & FileAttributes.System) != 0) {
        continue;
      }
      bool ignore = false;
      foreach (Regex regex in ignores) {
        if (regex.IsMatch(f.Name.ToLower())) {
          ignore = true;
          break;
        }
      }
      if (ignore) {
        if (LogFile.Verbose) {
          LogFile.Log("Skipping \"{0}\" because it matches an ignore", f.FullName);
        }
        ignoreCount++;
        continue;
      }
      FileRecord r = new FileRecord(f, relativePath, this);
      scanFiles.Add(r);
      seenFileNames[r.Name.ToLower()] = r;
    }
    catch (Exception e) {
      // remember the failure so Compare() doesn't treat this file as deleted
      errorFiles.Add(fullName.ToLower());
      FireErrorMessage(String.Format("Error scanning \"{0}\": {1}", fullName, e.Message));
    }
  }
}
/// <summary>
/// Timer callback that closes the cached read stream after a period of inactivity.
/// </summary>
private void HandleFileCloseTimer(object sender, ElapsedEventArgs args)
{
  lock (fileCloseLock) {
    if (currentOpenFileStream == null)
      return; // nothing cached; nothing to do
    currentOpenFileStream.Dispose();
    currentOpenFileStream = null;
    currentOpenFile = null;
    UpdateStatus();
  }
}
/// <summary>
/// Compare the old list of files with the new list in order to
/// determine which files had been added, removed, moved, or edited.
/// </summary>
private void Compare()
{
  adds.Clear();
  deletes.Clear();
  moves.Clear();
  editCount = 0;
  // build list of new files we haven't seen before (adds)
  foreach (FileRecord r in scanFiles) {
    if (!files.ContainsKey(r.Name.ToLower())) {
      adds.Add(r);
    }
  }
  // build list of old files we don't see now (deletes); files that failed to scan are
  // excluded so an access error is not mistaken for a deletion
  foreach (var kvp in files) {
    if (!seenFileNames.ContainsKey(kvp.Key) && !errorFiles.Contains(kvp.Value.FullPath.ToLower())) {
      deletes.Add(kvp.Value);
    }
  }
  // some of the files in add/delete list might actually be moves, check for that
  foreach (FileRecord a in adds) {
    byte[] hashCode = null; // lazily computed at most once per added file
    if (a.Length > 0) {
      foreach (FileRecord d in deletes) {
        if (a.Length == d.Length && a.LastWriteTime == d.LastWriteTime) {
          // probably the same file, but we need to check the hash to be sure
          if (hashCode == null) {
            Status = "Checking " + a.FullPath;
            hashCode = ComputeHash(a, true);
          }
          if (cancelScan) {
            return;
          }
          if (Utils.HashCodesMatch(hashCode, d.HashCode)) {
            LogFile.Log("{0} moved to {1}", Utils.MakeFullPath(root, d.Name), Utils.MakeFullPath(root, a.Name));
            moves[d.Name.ToLower()] = a;
          }
        }
      }
    }
  }
  // remove the moved files from the add and delete lists
  foreach (var kvp in moves) {
    FileRecord delete = null;
    foreach (FileRecord r in deletes) {
      if (String.Equals(kvp.Key, r.Name.ToLower())) {
        delete = r;
        break;
      }
    }
    if (delete != null) {
      deletes.Remove(delete);
    }
    adds.Remove(kvp.Value);
  }
  // now check for edits
  bool saveFileList = false;
  foreach (var kvp in files) {
    if (cancelScan) {
      return;
    }
    FileRecord n;
    // a file can only be edited if the file name was seen this scan
    if (seenFileNames.TryGetValue(kvp.Key, out n)) {
      // if we detect an edit, we add the "new" version of the file to the adds list,
      // because it has the new attributes and we want those saved later. The old value goes
      // into the edits and deletes lists.
      if (kvp.Value.Length != n.Length) {
        editCount++;
        deletes.Add(kvp.Value);
        adds.Add(n);
      } else if (kvp.Value.LastWriteTime != n.LastWriteTime) {
        // length hasn't changed but timestamp says file was modified, check hash code to be sure it has changed
        Status = "Checking " + kvp.Value.FullPath;
        if (!HashCheck(kvp.Value)) {
          editCount++;
          deletes.Add(kvp.Value);
          adds.Add(n);
        } else {
          // file hasn't actually changed, but we still need to update the LastWriteTime so we don't
          // keep re-checking it on every scan
          kvp.Value.LastWriteTime = n.LastWriteTime;
          saveFileList = true;
        }
      }
    }
  }
  if (saveFileList) {
    SaveFileList();
  }
}
/// <summary>
/// Reconstructs a single file from parity data into the given destination directory,
/// restores its timestamps/attributes, and verifies the recovered data against the
/// hash on record.
/// </summary>
/// <param name="r">Record describing the file to recover.</param>
/// <param name="path">Destination root directory for the recovered file.</param>
/// <returns>true if the file was recovered and (for non-empty files) its hash verified.</returns>
private bool RecoverFile(FileRecord r, string path)
{
  string fullPath = Utils.MakeFullPath(path, r.Name);
  r.Drive.Status = "Recovering " + r.Name + " ...";
  LogFile.Log(r.Drive.Status);
  r.Drive.Progress = 0;
  try {
    // make sure the destination directory exists
    Directory.CreateDirectory(Path.GetDirectoryName(fullPath));
    // FIX: MD5 is IDisposable and was previously leaked on every recovery
    using (MD5 hash = MD5.Create()) {
      hash.Initialize();
      using (FileStream f = new FileStream(fullPath, FileMode.Create, FileAccess.Write)) {
        ParityBlock parityBlock = new ParityBlock(parity);
        long leftToWrite = r.Length;
        UInt32 block = r.StartBlock;
        while (leftToWrite > 0) {
          // rebuild this block from parity plus the other drives' data
          RecoverBlock(r.Drive, block, parityBlock);
          // final block of the file may be partial
          int blockSize = leftToWrite > Parity.BLOCK_SIZE ? Parity.BLOCK_SIZE : (int)leftToWrite;
          f.Write(parityBlock.Data, 0, blockSize);
          hash.TransformBlock(parityBlock.Data, 0, blockSize, parityBlock.Data, 0);
          leftToWrite -= Parity.BLOCK_SIZE;
          block++;
          r.Drive.Progress = (double)(block - r.StartBlock) / r.LengthInBlocks;
          Progress = (double)(recoverBlocks + (block - r.StartBlock)) / recoverTotalBlocks;
          if (cancel) {
            // don't leave a partial file behind
            f.Close();
            File.Delete(fullPath);
            return false;
          }
        }
        hash.TransformFinalBlock(parityBlock.Data, 0, 0);
      }
      r.Drive.Progress = 0;
      File.SetCreationTime(fullPath, r.CreationTime);
      File.SetLastWriteTime(fullPath, r.LastWriteTime);
      File.SetAttributes(fullPath, r.Attributes);
      if (r.Length > 0 && !Utils.HashCodesMatch(hash.Hash, r.HashCode)) {
        FireErrorMessage("Hash verify failed for \"" + fullPath + "\". Recovered file is probably corrupt.");
        return false;
      }
      else
        return true;
    }
  }
  catch (Exception e) {
    FireErrorMessage("Error recovering \"" + fullPath + "\": " + e.Message);
    return false;
  }
  finally {
    // no matter what happens, keep the progress bar advancing by the right amount
    recoverBlocks += r.LengthInBlocks;
    Progress = (double)recoverBlocks / recoverTotalBlocks;
  }
}
/// <summary>
/// Reads the next FileRecord from the given meta file stream.
/// Returns null if a valid, complete record could not be read.
/// </summary>
/// <param name="f">Stream positioned at the start of a serialized record.</param>
/// <param name="drive">The drive this record belongs to.</param>
public static FileRecord LoadFromFile(FileStream f, DataDrive drive)
{
  FileRecord result = new FileRecord();
  result.drive = drive;

  // fields must be read in exactly the order they were written
  result.Name = ReadString(f);
  if (result.Name.Length == 0)
    return null; // empty name marks an invalid record
  result.Length = ReadLong(f);
  result.Attributes = (FileAttributes)ReadUInt32(f);
  result.CreationTime = ReadDateTime(f);
  result.LastWriteTime = ReadDateTime(f);
  result.StartBlock = ReadUInt32(f);

  // the 16-byte MD5 hash follows the fixed fields
  result.HashCode = new byte[16];
  if (f.Read(result.HashCode, 0, 16) != 16)
    return null; // truncated record

  // stored names are relative paths; strip any leading backslash
  if (result.Name[0] == '\\')
    result.Name = result.Name.TrimStart('\\');
  return result;
}
/// <summary>
/// Recursively scans a directory, recording every non-ignored file into scanFiles and
/// seenFileNames, while publishing progress. Skips the parity folder, hidden/system
/// entries (per config), ignore-pattern matches, and over-length paths.
/// </summary>
/// <param name="dir">Directory to scan.</param>
/// <param name="ignores">Pre-compiled ignore patterns, matched against lower-cased file names.</param>
/// <param name="progress">Parent progress phase; null only for the root call.</param>
private void Scan(DirectoryInfo dir, List<Regex> ignores, ProgressEstimator progress = null)
{
  if (cancelScan)
    return;
  // never allow scanning of our own parity folder
  if (Utils.PathsAreEqual(dir.FullName, config.ParityDir)) {
    LogFile.Log("Warning: skipping " + dir.FullName + " because it is the parity folder.");
    return;
  }
  Status = "Scanning " + dir.FullName;
  if (scanProgress != null)
    Progress = scanProgress.Progress;
  DirectoryInfo[] subDirs;
  try {
    subDirs = dir.GetDirectories();
  }
  catch (Exception e) {
    if (progress == null)
      // failure at the root is fatal; deeper failures are logged and skipped
      throw;
    LogFile.Log("Warning: Could not enumerate subdirectories of {0}: {1}", dir.FullName, e.Message);
    return;
  }
  FileInfo[] fileInfos;
  try {
    fileInfos = dir.GetFiles();
  }
  catch (Exception e) {
    LogFile.Log("Warning: Could not enumerate files in {0}: {1}", dir.FullName, e.Message);
    return;
  }
  // the root call creates the shared progress estimator; recursive calls open a sub-phase
  ProgressEstimator folderProgress;
  if (scanProgress == null) {
    scanProgress = new ProgressEstimator();
    scanProgress.Reset(subDirs.Length);
    folderProgress = scanProgress;
  } else
    folderProgress = progress.BeginSubPhase(subDirs.Length);
  foreach (DirectoryInfo d in subDirs) {
    if (cancelScan)
      return;
    // skip hidden folders (if configured) and system folders entirely
    if ((config.IgnoreHidden && (d.Attributes & FileAttributes.Hidden) != 0) || ((d.Attributes & FileAttributes.System) != 0)) {
      folderProgress.EndPhase();
      continue;
    }
    string subDir = Path.Combine(dir.FullName, d.Name);
    if (subDir.Length >= MAX_FOLDER)
      LogFile.Log("Warning: skipping folder \"" + subDir + "\" because the path is too long.");
    else
      Scan(d, ignores, folderProgress);
    folderProgress.EndPhase();
  }
  Progress = scanProgress.Progress;
  string relativePath = Utils.StripRoot(root, dir.FullName);
  foreach (FileInfo f in fileInfos) {
    if (cancelScan)
      return;
    // have to use Path.Combine here because accessing the f.FullName property throws
    // an exception if the path is too long
    string fullName = Path.Combine(dir.FullName, f.Name);
    try {
      if (fullName.Length >= MAX_PATH) {
        LogFile.Log("Warning: skipping file \"" + fullName + "\" because the path is too long");
        continue;
      }
      // (FileAttributes)(-1) means the attributes could not be read
      if (f.Attributes == (FileAttributes)(-1))
        continue;
      if (config.IgnoreHidden && (f.Attributes & FileAttributes.Hidden) != 0)
        continue;
      if ((f.Attributes & FileAttributes.System) != 0)
        continue;
      bool ignore = false;
      foreach (Regex regex in ignores)
        if (regex.IsMatch(f.Name.ToLower())) {
          ignore = true;
          break;
        }
      if (ignore) {
        if (LogFile.Verbose)
          LogFile.Log("Skipping \"{0}\" because it matches an ignore", f.FullName);
        ignoreCount++;
        continue;
      }
      FileRecord r = new FileRecord(f, relativePath, this);
      scanFiles.Add(r);
      seenFileNames[r.Name.ToLower()] = r;
    }
    catch (Exception e) {
      // remember the failure so Compare() doesn't treat this file as deleted
      errorFiles.Add(fullName.ToLower());
      FireErrorMessage(String.Format("Error scanning \"{0}\": {1}", fullName, e.Message));
    }
  }
}