public bool Equals(ParityBlock block)
{
  for (int i = 0; i < Parity.BLOCK_SIZE; i++)
    if (data[i] != block.Data[i])
      return false;
  return true;
}
/// <summary>
/// Sets up storage for accumulating parity changes, buffering in a memory-mapped file
/// when possible and falling back to a temp file if the MMF cannot be created
/// </summary>
public ParityChange(Parity parity, Config config, UInt32 startBlock, UInt32 lengthInBlocks)
{
  this.parity = parity;
  this.startBlock = startBlock;
  tempDir = config.TempDir;
  writingToMMF = true;
  UInt32 maxMMFBlocks = Parity.LengthInBlocks((long)config.MaxTempRAM * 1024 * 1024);
  UInt32 mmfBlocks = (lengthInBlocks < maxMMFBlocks) ? lengthInBlocks : maxMMFBlocks;
  try {
    mmf = MemoryMappedFile.CreateNew("disparity.tmp", (long)mmfBlocks * Parity.BLOCK_SIZE);
    mmfStream = mmf.CreateViewStream();
  }
  catch (Exception e) {
    LogFile.Log("Could not create memory mapped file: " + e.Message);
    // We'll use a temp file only
    mmf = null;
    mmfStream = null;
    writingToMMF = false;
  }
  tempFileStream = null;
  parityBlock = new ParityBlock(parity);
  block = startBlock;
}
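// Illustrative sketch (not part of the original source): the constructor above buffers
// parity changes in a non-persisted memory-mapped file when it can, falling back to a
// temp file otherwise. The standalone example below shows the same
// MemoryMappedFile.CreateNew / CreateViewStream pattern in isolation; the map name
// "example.tmp" and the 64 KB capacity are arbitrary values chosen for the example.
// Requires: using System.IO; using System.IO.MemoryMappedFiles;
private static void MemoryMappedBufferExample()
{
  using (MemoryMappedFile mmf = MemoryMappedFile.CreateNew("example.tmp", 64 * 1024))
  using (MemoryMappedViewStream stream = mmf.CreateViewStream()) {
    byte[] outgoing = new byte[] { 1, 2, 3, 4 };
    stream.Write(outgoing, 0, outgoing.Length); // write into the in-memory view
    stream.Seek(0, SeekOrigin.Begin);           // rewind the view
    byte[] incoming = new byte[outgoing.Length];
    stream.Read(incoming, 0, incoming.Length);  // read the same bytes back
  }
}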
/// <summary>
/// Reconstructs the given block of the specified drive by combining the stored parity
/// block with the corresponding block from every other drive
/// </summary>
private void RecoverBlock(DataDrive drive, UInt32 block, ParityBlock parity)
{
  FileRecord r;
  parity.Load(block);
  foreach (DataDrive d in drives)
    if (d != drive) {
      string error = "";
      try {
        if (d.ReadBlock(block, tempBuf, out r)) {
          parity.Add(tempBuf);
          if (r.Modified)
            error = String.Format("Warning: {0} has been modified. Some recovered files may be corrupt.", r.FullPath);
        }
        else if (r != null && !File.Exists(r.FullPath))
          error = String.Format("Warning: {0} could not be found. Some recovered files may be corrupt.", r.FullPath);
      }
      catch (Exception e) {
        error = e.Message; // ReadBlock should have constructed a nice error message for us
      }
      if (error != "" && errorFiles.Add(error))
        FireErrorMessage(error);
    }
}
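// Illustrative sketch (not part of the original source): RecoverBlock relies on the
// single-drive parity identity  parity = b0 ^ b1 ^ ... ^ bn,  so XOR-ing the stored
// parity with the matching block from every surviving drive leaves exactly the missing
// drive's block. This assumes ParityBlock.Add performs a byte-wise XOR, which is how a
// single-parity scheme like this one is normally implemented; the helper name below is
// hypothetical. Requires: using System.Collections.Generic;
private static byte[] XorRecoverExample(byte[] parityData, IEnumerable<byte[]> survivingBlocks)
{
  byte[] recovered = (byte[])parityData.Clone();
  foreach (byte[] blockData in survivingBlocks)
    for (int i = 0; i < recovered.Length; i++)
      recovered[i] ^= blockData[i];      // cancel out each surviving drive's contribution
  return recovered;                      // what remains is the lost drive's block
}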
/// <summary>
/// Create a new snapshot from scratch
/// </summary>
private void Create()
{
  DateTime start = DateTime.Now;
  // TODO: check free space on parity drive here?
  UInt32 totalBlocks = 1; // make it one so no chance of divide-by-zero below
  foreach (DataDrive d in drives) {
    d.BeginFileEnum();
    UInt32 scanBlocks = d.TotalScanBlocks;
    if (scanBlocks > totalBlocks)
      totalBlocks = scanBlocks;
  }
  try {
    ParityBlock parityBlock = new ParityBlock(parity);
    byte[] dataBuf = new byte[Parity.BLOCK_SIZE];
    UInt32 block = 0;
    bool done = false;
    while (!done) {
      done = true;
      // the first drive to return data for this block writes directly into the parity
      // buffer; each subsequent drive's block is then combined into it with parityBlock.Add
      foreach (DataDrive d in drives)
        if (d.GetNextBlock(done ? parityBlock.Data : dataBuf)) {
          if (done)
            done = false;
          else
            parityBlock.Add(dataBuf);
        }
      if (!done)
        parityBlock.Write(block);
      Progress = (double)block / totalBlocks;
      block++;
      if (cancel) {
        // we can't salvage an initial update that was cancelled, so we'll have to start
        // again from scratch next time
        LogFile.Log("Initial update cancelled. Resetting parity to empty.");
        Erase();
        return;
      }
    }
  }
  catch (Exception e) {
    LogFile.Log("Fatal error on initial update: " + e.Message);
    LogFile.Log(e.StackTrace);
    // can't recover from errors either, must also start over from scratch
    Erase();
    throw new UpdateFailedException(e.Message);
  }
  finally {
    foreach (DataDrive d in drives)
      d.EndFileEnum();
    parity.Close();
    if (!cancel)
      Empty = false;
  }
}
/// <summary>
/// Verifies every parity block against the parity recalculated from the data drives,
/// repairing mismatched parity blocks where possible
/// </summary>
public void Verify()
{
  cancel = false;
  VerifyErrors = 0;
  VerifyRecovers = 0;
  UInt32 maxBlock = MaxParityBlock();
  List<FileRecord> suspectFiles = new List<FileRecord>();
  DateTime lastStatus = DateTime.Now;
  TimeSpan minTimeDelta = TimeSpan.FromMilliseconds(100); // don't update status more than 10x per second
  Progress = 0;
  FileRecord r;
  ParityBlock parityBlock = new ParityBlock(parity);
  ParityBlock calculatedParityBlock = new ParityBlock(parity);
  byte[] buf = new byte[Parity.BLOCK_SIZE];
  for (UInt32 block = 0; block < maxBlock; block++) {
    parityBlock.Load(block);
    bool firstRead = true;
    foreach (DataDrive d in drives)
      try {
        if (firstRead) {
          if (d.ReadBlock(block, calculatedParityBlock.Data, out r))
            firstRead = false;
        }
        else if (d.ReadBlock(block, buf, out r))
          calculatedParityBlock.Add(buf);
      }
      catch (Exception e) {
        FireErrorMessage(e.Message);
      }
    if (firstRead)
      // no blocks were read, this block should be empty
      calculatedParityBlock.Clear();
    if (!calculatedParityBlock.Equals(parityBlock)) {
      FireErrorMessage(String.Format("Block {0} does not match", block));
      VerifyErrors++;
      bool reported = false;
      bool canRecover = true;
      foreach (DataDrive dr in drives) {
        FileRecord f = dr.FileFromBlock(block);
        if (f == null)
          continue;
        if (f.Modified)
          canRecover = false;
        if (!suspectFiles.Contains(f)) {
          suspectFiles.Add(f);
          if (!reported) {
            FireErrorMessage("Block " + block + " contains data from the following file or files (each file will only be reported once per verify pass):");
            reported = true;
          }
          string error = f.FullPath;
          if (!File.Exists(f.FullPath))
            error += " (MISSING)";
          else if (f.Modified)
            error += " (MODIFIED)";
          FireErrorMessage(error);
        }
      }
      if (canRecover) {
        parity.WriteBlock(block, calculatedParityBlock.Data);
        FireErrorMessage("Block " + block + " repaired.");
        VerifyRecovers++;
      }
      else
        FireErrorMessage("Cannot repair block " + block + " because one or more files are modified or missing.");
    }
    if ((DateTime.Now - lastStatus) > minTimeDelta) {
      Status = String.Format("{0} of {1} parity blocks verified. Errors found: {2} Errors fixed: {3}",
        block, maxBlock, VerifyErrors, VerifyRecovers);
      lastStatus = DateTime.Now;
    }
    Progress = (double)block / maxBlock;
    if (cancel)
      break;
  }
}
private bool RecoverFile(FileRecord r, string path)
{
  string fullPath = Utils.MakeFullPath(path, r.Name);
  r.Drive.Status = "Recovering " + r.Name + " ...";
  LogFile.Log(r.Drive.Status);
  r.Drive.Progress = 0;
  try {
    // make sure the destination directory exists
    Directory.CreateDirectory(Path.GetDirectoryName(fullPath));
    MD5 hash = MD5.Create();
    hash.Initialize();
    using (FileStream f = new FileStream(fullPath, FileMode.Create, FileAccess.Write)) {
      ParityBlock parityBlock = new ParityBlock(parity);
      long leftToWrite = r.Length;
      UInt32 block = r.StartBlock;
      while (leftToWrite > 0) {
        RecoverBlock(r.Drive, block, parityBlock);
        int blockSize = leftToWrite > Parity.BLOCK_SIZE ? Parity.BLOCK_SIZE : (int)leftToWrite;
        f.Write(parityBlock.Data, 0, blockSize);
        hash.TransformBlock(parityBlock.Data, 0, blockSize, parityBlock.Data, 0);
        leftToWrite -= Parity.BLOCK_SIZE;
        block++;
        r.Drive.Progress = (double)(block - r.StartBlock) / r.LengthInBlocks;
        Progress = (double)(recoverBlocks + (block - r.StartBlock)) / recoverTotalBlocks;
        if (cancel) {
          f.Close();
          File.Delete(fullPath);
          return false;
        }
      }
      hash.TransformFinalBlock(parityBlock.Data, 0, 0);
    }
    r.Drive.Progress = 0;
    File.SetCreationTime(fullPath, r.CreationTime);
    File.SetLastWriteTime(fullPath, r.LastWriteTime);
    File.SetAttributes(fullPath, r.Attributes);
    if (r.Length > 0 && !Utils.HashCodesMatch(hash.Hash, r.HashCode)) {
      FireErrorMessage("Hash verify failed for \"" + fullPath + "\". Recovered file is probably corrupt.");
      return false;
    }
    else
      return true;
  }
  catch (Exception e) {
    FireErrorMessage("Error recovering \"" + fullPath + "\": " + e.Message);
    return false;
  }
  finally {
    // no matter what happens, keep the progress bar advancing by the right amount
    recoverBlocks += r.LengthInBlocks;
    Progress = (double)recoverBlocks / recoverTotalBlocks;
  }
}
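// Illustrative sketch (not part of the original source): RecoverFile hashes the file
// incrementally with MD5.TransformBlock / TransformFinalBlock as each block is written,
// so the recovered file never needs to be re-read to verify its hash. The standalone
// example below shows the same streaming-hash pattern over an arbitrary sequence of
// buffers; the method and parameter names are only for the example.
// Requires: using System.Collections.Generic; using System.Security.Cryptography;
private static byte[] StreamingMd5Example(IEnumerable<byte[]> chunks)
{
  using (MD5 md5 = MD5.Create()) {
    foreach (byte[] chunk in chunks)
      md5.TransformBlock(chunk, 0, chunk.Length, null, 0);  // feed each buffer as it arrives
    md5.TransformFinalBlock(new byte[0], 0, 0);             // finish with an empty final block
    return md5.Hash;                                        // same result as hashing the concatenation
  }
}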