/// <summary>
/// Overwrites old MFT-resident file data on an NTFS volume by repeatedly
/// creating files smaller than one MFT record, stretching them to occupy the
/// resident space, and erasing their contents. The attempted file size shrinks
/// by one byte on every out-of-space error until no record can be claimed.
/// </summary>
/// <param name="volume">The NTFS volume whose MFT-resident space is cleaned.</param>
/// <param name="tempDirectory">The directory in which the temporary files are created.</param>
/// <param name="method">The erasure method used to overwrite each file's contents.</param>
/// <param name="callback">Optional progress callback; receives a rough estimate only.</param>
public override void EraseOldFileSystemResidentFiles(VolumeInfo volume,
	DirectoryInfo tempDirectory, IErasureMethod method,
	FileSystemEntriesEraseProgress callback)
{
	//Squeeze files smaller than one MFT record until the volume and the MFT is full.
	long MFTRecordSize = NtfsApi.GetMftRecordSegmentSize(volume);
	long lastFileSize = MFTRecordSize;

	try
	{
		for ( ; ;)
		{
			//Open this stream
			string fileName = GenerateRandomFileName(tempDirectory, 18);
			// WriteThrough with a tiny buffer so the data actually reaches the disk
			// rather than lingering in the OS cache.
			FileStream strm = new FileStream(fileName, FileMode.CreateNew,
				FileAccess.Write, FileShare.None, 8, FileOptions.WriteThrough);

			try
			{
				//Stretch the file size to use up some of the resident space.
				strm.SetLength(lastFileSize);

				//Then run the erase task
				method.Erase(strm, long.MaxValue, Host.Instance.Prngs.ActivePrng, null);

				//Call the callback function if one is provided. We'll provide a dummy
				//value since we really have no idea how much of the MFT we can clean.
				if (callback != null)
				{
					callback((int)(MFTRecordSize - lastFileSize), (int)MFTRecordSize);
				}
			}
			catch (IOException)
			{
				// Could not write a file of this size: retry with one byte less.
				// Once the size has already reached zero, nothing smaller can be
				// tried, so stop squeezing.
				if (lastFileSize-- == 0)
				{
					break;
				}
			}
			finally
			{
				//Close the stream handle
				strm.Close();

				//Then reset the time the file was created.
				ResetFileTimes(new FileInfo(fileName));
			}
		}
	}
	catch (IOException)
	{
		//OK, enough squeezing: there isn't enough space to even create a new MFT record.
	}
}
/// <summary>
/// Overwrites old directory records in the NTFS Master File Table by flooding
/// a temporary directory with empty files until the MFT is observed to grow,
/// then deletes every temporary file again.
/// </summary>
/// <param name="info">The NTFS volume whose directory structures are cleaned.</param>
/// <param name="callback">Optional progress callback; may be null.</param>
public override void EraseDirectoryStructures(VolumeInfo info,
	FileSystemEntriesEraseProgress callback)
{
	// Create a randomly-named scratch directory on the volume's first mount point.
	DirectoryInfo tempDir = new DirectoryInfo(FileSystem.GenerateRandomFileName(
		new DirectoryInfo(info.MountPoints[0]), 32));
	tempDir.Create();

	try
	{
		long initialMftSize = NtfsApi.GetMftValidSize(info);
		long recordSegmentSize = NtfsApi.GetMftRecordSegmentSize(info);

		// Query the MFT size only every few files; interval clamped to [1, 128].
		int pollingInterval = (int)Math.Min(
			Math.Max(1, initialMftSize / info.ClusterSize / 20), 128);

		// Rough estimate of how many records (thus files) fit into the MFT.
		int estimatedFiles = (int)Math.Max(1L, initialMftSize / recordSegmentSize);

		for (int created = 1; ; ++created)
		{
			// Each empty file consumes one MFT record.
			using (FileStream filler = new FileStream(
				FileSystem.GenerateRandomFileName(tempDir, 220),
				FileMode.CreateNew, FileAccess.Write))
			{
			}

			if (created % pollingInterval != 0)
				continue;

			// This is the first half of the operation, so cap progress at 50%.
			if (callback != null)
			{
				int progress = created / 2;
				callback(progress, Math.Max(progress, estimatedFiles));
			}

			// The MFT grew: the old directory records have been reused.
			if (initialMftSize < NtfsApi.GetMftValidSize(info))
				break;
		}
	}
	catch (IOException)
	{
		// Volume full: the MFT cannot grow any further, stop creating files.
	}
	finally
	{
		// Second half: remove the filler files, reporting progress from 50% upward.
		FileInfo[] fillers = tempDir.GetFiles("*", SearchOption.AllDirectories);
		for (int i = 0; i < fillers.Length; ++i)
		{
			if (callback != null && i % 50 == 0)
				callback(fillers.Length + i, fillers.Length * 2);

			DeleteFile(fillers[i]);
		}

		DeleteFolder(tempDir);
	}
}
/// <summary>
/// Clears deleted directory entries on a FAT volume by walking every directory
/// breadth-first (while holding a volume lock) and calling
/// <c>ClearDeletedEntries</c> on each.
/// </summary>
/// <param name="info">The FAT volume whose directory structures are cleaned.</param>
/// <param name="callback">Optional progress callback; may be null.</param>
public override void EraseDirectoryStructures(VolumeInfo info,
	FileSystemEntriesEraseProgress callback)
{
	// FIX: the FatApi instance and the queued FatDirectoryEntry objects are
	// disposable (see the sibling implementation that disposes them) but were
	// previously leaked; wrap the API in a using block and dispose entries as
	// they are dequeued, plus any left over if the traversal is interrupted.
	using (FileStream stream = info.Open(FileAccess.ReadWrite, FileShare.ReadWrite))
	using (FatApi api = GetFatApi(info, stream))
	{
		int directoriesCleaned = 0;

		// Clusters already queued, so long/8.3 aliases of the same directory
		// are not visited twice.
		HashSet<uint> eraseQueueClusters = new HashSet<uint>();
		List<FatDirectoryEntry> eraseQueue = new List<FatDirectoryEntry>();

		try
		{
			// Seed the traversal with the root directory.
			{
				FatDirectoryEntry entry = api.LoadDirectory(string.Empty);
				eraseQueue.Add(entry);
				eraseQueueClusters.Add(entry.Cluster);
			}

			// Lock the volume so the directory structures cannot change under us.
			using (VolumeLock volumeLock = info.LockVolume(stream))
			{
				while (eraseQueue.Count != 0)
				{
					if (callback != null)
						callback(directoriesCleaned, directoriesCleaned + eraseQueue.Count);

					FatDirectoryBase currentDir = api.LoadDirectory(eraseQueue[0].FullName);
					eraseQueue[0].Dispose();
					eraseQueue.RemoveAt(0);

					// Queue the subdirectories of the current directory.
					foreach (KeyValuePair<string, FatDirectoryEntry> entry in currentDir.Items)
						if (entry.Value.EntryType == FatDirectoryEntryType.Directory)
						{
							if (eraseQueueClusters.Contains(entry.Value.Cluster))
								continue;

							eraseQueueClusters.Add(entry.Value.Cluster);
							eraseQueue.Add(entry.Value);
						}

					currentDir.ClearDeletedEntries();
					++directoriesCleaned;
				}
			}
		}
		finally
		{
			// Release any entries still queued if an exception aborted the walk.
			foreach (FatDirectoryEntry entry in eraseQueue)
				entry.Dispose();
		}
	}
}
/// <summary>
/// Overwrites old directory records in the NTFS Master File Table by creating
/// empty temporary files (with their timestamps reset) until the MFT grows,
/// then deletes the temporary files.
/// </summary>
/// <param name="info">The NTFS volume whose directory structures are cleaned.</param>
/// <param name="callback">Optional progress callback; may be null.</param>
public override void EraseDirectoryStructures(VolumeInfo info,
	FileSystemEntriesEraseProgress callback)
{
	//Create a directory to hold all the temporary files
	DirectoryInfo tempDir = new DirectoryInfo(GenerateRandomFileName(
		info.MountPoints[0], 32));
	tempDir.Create();

	try
	{
		//Get the size of the MFT
		long mftSize = NtfsApi.GetMftValidSize(info);
		long mftRecordSegmentSize = NtfsApi.GetMftRecordSegmentSize(info);
		// Re-query the MFT size only every few files; interval clamped to [1, 128].
		int pollingInterval = (int)Math.Min(
			Math.Max(1, mftSize / info.ClusterSize / 20), 128);
		// Rough estimate of how many records (thus files) fit into the MFT.
		int totalFiles = (int)Math.Max(1L, mftSize / mftRecordSegmentSize);
		int filesCreated = 0;

		while (true)
		{
			++filesCreated;
			// Each empty file consumes one MFT record; reset its timestamps so
			// the temporary file leaves no telling creation time behind.
			string fileName = GenerateRandomFileName(tempDir, 220);
			File.Create(fileName).Close();
			ResetFileTimes(new FileInfo(fileName));

			if (filesCreated % pollingInterval == 0)
			{
				//Call back to our progress function: this is the first half of the
				//procedure so divide the effective progress by 2.
				if (callback != null)
				{
					int halfFilesCreated = filesCreated / 2;
					callback(halfFilesCreated, Math.Max(halfFilesCreated, totalFiles));
				}

				//Check if the MFT has grown.
				if (mftSize < NtfsApi.GetMftValidSize(info))
				{
					break;
				}
			}
		}
	}
	catch (IOException)
	{
		// Volume full: the MFT cannot grow further; proceed to cleanup.
	}
	finally
	{
		//Clear up all the temporary files
		FileInfo[] files = tempDir.GetFiles("*", SearchOption.AllDirectories);
		for (int i = 0; i < files.Length; ++i)
		{
			// Second half of the operation: progress runs from 50% to 100%.
			if (callback != null && i % 50 == 0)
			{
				callback(files.Length + i, files.Length * 2);
			}

			files[i].Delete();
		}

		DeleteFolder(tempDir, true);
	}
}
/// <summary>
/// Overwrites old MFT-resident file data by creating temporary files and
/// growing each one byte at a time — erasing the contents after every growth —
/// until an IOException indicates the record (or volume) is exhausted. New
/// files are created until the MFT itself is observed to grow.
/// </summary>
/// <param name="volume">The NTFS volume whose MFT-resident space is cleaned.</param>
/// <param name="tempDirectory">The directory in which the temporary files are created.</param>
/// <param name="method">The erasure method used to overwrite each file's contents.</param>
/// <param name="callback">Progress callback; unused by this implementation.</param>
public override void EraseOldFileSystemResidentFiles(VolumeInfo volume,
	DirectoryInfo tempDirectory, ErasureMethod method,
	FileSystemEntriesEraseProgress callback)
{
	try
	{
		// Remember the MFT size so we can detect when our files force it to grow.
		long oldMFTSize = NtfsApi.GetMftValidSize(volume);

		for (; ; )
		{
			// WriteThrough with a tiny buffer so writes actually reach the disk.
			using (FileStream strm = new FileStream(
				GenerateRandomFileName(tempDirectory, 18),
				FileMode.CreateNew, FileAccess.Write, FileShare.None, 8,
				FileOptions.WriteThrough))
			{
				long streamSize = 0;
				try
				{
					// Grow the file one byte at a time, erasing after each growth,
					// until the write fails (record full or volume full).
					while (true)
					{
						strm.SetLength(++streamSize);
						method.Erase(strm, long.MaxValue,
							PrngManager.GetInstance(ManagerLibrary.Settings.ActivePrng),
							null);
					}
				}
				catch (IOException)
				{
					// Even a one-byte file failed: no space left at all, so stop.
					if (streamSize == 1)
						return;
				}
			}

			// The MFT grew, meaning the old resident records have been reused.
			if (NtfsApi.GetMftValidSize(volume) > oldMFTSize)
				break;
		}
	}
	catch (IOException)
	{
		// Out of space while creating a new file: nothing more can be squeezed in.
	}
}
/// <summary>
/// Erases old file data kept inside the file system's own metadata structures
/// (e.g. files resident in the NTFS Master File Table.)
/// </summary>
/// <param name="volume">The volume to clean.</param>
/// <param name="tempDirectory">A directory implementations may use for temporary files.</param>
/// <param name="method">The erasure method used to overwrite the data.</param>
/// <param name="callback">Progress callback; implementations treat null as "no reporting".</param>
public abstract void EraseOldFileSystemResidentFiles(VolumeInfo volume,
	DirectoryInfo tempDirectory, ErasureMethod method,
	FileSystemEntriesEraseProgress callback);
/// <summary>
/// Erases old directory structure records on the given volume.
/// </summary>
/// <param name="info">The volume whose directory structures are cleaned.</param>
/// <param name="callback">Progress callback; implementations treat null as "no reporting".</param>
public abstract void EraseDirectoryStructures(VolumeInfo info,
	FileSystemEntriesEraseProgress callback);
/// <summary>
/// Clears deleted directory entries on a FAT volume by walking every directory
/// breadth-first and calling <c>ClearDeletedEntries</c> on each, disposing the
/// directory entries as they are processed.
/// </summary>
/// <param name="info">The FAT volume whose directory structures are cleaned.</param>
/// <param name="callback">Optional progress callback; may be null.</param>
public override void EraseDirectoryStructures(VolumeInfo info,
	FileSystemEntriesEraseProgress callback)
{
	using (FileStream stream = info.Open(FileAccess.ReadWrite, FileShare.ReadWrite))
	using (FatApi api = GetFatApi(info, stream))
	{
		int directoriesCleaned = 0;

		// Clusters already queued, so long/8.3 aliases of the same directory
		// are not visited twice.
		HashSet<uint> eraseQueueClusters = new HashSet<uint>();
		List<FatDirectoryEntry> eraseQueue = new List<FatDirectoryEntry>();
		try
		{
			// Seed the traversal with the root directory.
			{
				FatDirectoryEntry entry = api.LoadDirectory(string.Empty);
				eraseQueue.Add(entry);
				eraseQueueClusters.Add(entry.Cluster);
			}

			while (eraseQueue.Count != 0)
			{
				if (callback != null)
				{
					callback(directoriesCleaned, directoriesCleaned + eraseQueue.Count);
				}

				FatDirectoryBase currentDir = api.LoadDirectory(eraseQueue[0].FullName);
				eraseQueue[0].Dispose();
				eraseQueue.RemoveAt(0);

				//Queue the subfolders in this directory
				foreach (KeyValuePair<string, FatDirectoryEntry> entry in currentDir.Items)
				{
					if (entry.Value.EntryType == FatDirectoryEntryType.Directory)
					{
						//Check that we don't have the same cluster queued twice (e.g. for
						//long/8.3 file names)
						if (eraseQueueClusters.Contains(entry.Value.Cluster))
						{
							continue;
						}

						eraseQueueClusters.Add(entry.Value.Cluster);
						eraseQueue.Add(entry.Value);
					}
				}

				currentDir.ClearDeletedEntries();
				++directoriesCleaned;
			}
		}
		catch (SharingViolationException)
		{
			// FIX: the localised message contains a {0} placeholder but no argument
			// was ever substituted, so the literal "{0}" was logged. Format in the
			// volume's mount point so the user knows which volume was in use.
			Logger.Log(string.Format(
				S._("Could not erase directory entries on the volume {0} because " +
					"the volume is currently in use."), info.MountPoints[0]));
		}
		finally
		{
			// Release any entries still queued if the traversal was interrupted.
			foreach (FatDirectoryEntry entry in eraseQueue)
			{
				entry.Dispose();
			}
		}
	}
}
/// <summary>
/// Intentionally a no-op: unlike NTFS, FAT does not keep file contents inside
/// its file allocation table, so there is no file-system-resident data to erase.
/// </summary>
/// <param name="volume">Unused.</param>
/// <param name="tempDirectory">Unused.</param>
/// <param name="method">Unused.</param>
/// <param name="callback">Unused.</param>
public override void EraseOldFileSystemResidentFiles(VolumeInfo volume,
	DirectoryInfo tempDirectory, IErasureMethod method,
	FileSystemEntriesEraseProgress callback)
{
	//Nothing to be done here. FAT doesn't store files in its FAT.
}
/// <summary>
/// Erases old directory structure records on the given volume.
/// </summary>
/// <param name="info">The volume whose directory structures are cleaned.</param>
/// <param name="callback">Progress callback; implementations treat null as "no reporting".</param>
public abstract void EraseDirectoryStructures(VolumeInfo info,
	FileSystemEntriesEraseProgress callback);
/// <summary>
/// Erases old file data kept inside the file system's own metadata structures
/// (e.g. files resident in the NTFS Master File Table.)
/// </summary>
/// <param name="volume">The volume to clean.</param>
/// <param name="tempDirectory">A directory implementations may use for temporary files.</param>
/// <param name="method">The erasure method used to overwrite the data.</param>
/// <param name="callback">Progress callback; implementations treat null as "no reporting".</param>
public abstract void EraseOldFileSystemResidentFiles(Util.VolumeInfo volume,
	DirectoryInfo tempDirectory, IErasureMethod method,
	FileSystemEntriesEraseProgress callback);