/// <summary>
/// Re-reads the finalized edit log file named <paramref name="logFileName"/>
/// from every EDITS storage directory and asserts that each copy contains the
/// same number of transactions. Corruption typically surfaces as an exception
/// thrown while the transactions are being loaded.
/// </summary>
/// <param name="namesystem">namesystem the loader applies edits against</param>
/// <param name="fsimage">image whose storage directories are scanned</param>
/// <param name="logFileName">file name of the edit log within each current dir</param>
/// <param name="startTxId">transaction id to start loading from</param>
/// <returns>the (agreed-upon) number of edits read from each directory</returns>
/// <exception cref="System.IO.IOException"/>
private long VerifyEditLogs(FSNamesystem namesystem, FSImage fsimage,
    string logFileName, long startTxId)
{
    long editCount = -1;
    // Verify that we can read back all the transactions we wrote; any
    // corruption is likely to throw during loading.
    foreach (Storage.StorageDirectory storageDir in
        fsimage.GetStorage().DirIterable(NNStorage.NameNodeDirType.Edits))
    {
        FilePath logFile = new FilePath(storageDir.GetCurrentDir(), logFileName);
        System.Console.Out.WriteLine("Verifying file: " + logFile);
        FSEditLogLoader logLoader = new FSEditLogLoader(namesystem, startTxId);
        long editsInThisLog = logLoader.LoadFSEdits(
            new EditLogFileInputStream(logFile), startTxId);
        System.Console.Out.WriteLine("Number of edits: " + editsInThisLog);
        // Every directory must report the same transaction count.
        NUnit.Framework.Assert.IsTrue(editCount == -1 || editsInThisLog == editCount);
        editCount = editsInThisLog;
    }
    // At least one edits directory must have been visited.
    NUnit.Framework.Assert.IsTrue(editCount != -1);
    return editCount;
}
/// <summary>
/// Validates the given edit log file. A malformed or wrong-valued header is
/// treated as corruption and reported via the returned validation object
/// rather than an exception.
/// </summary>
/// <param name="file">the edit log file to validate</param>
/// <returns>a validation result describing the readable extent of the log</returns>
/// <exception cref="System.IO.IOException"/>
internal static FSEditLogLoader.EditLogValidation ValidateEditLog(FilePath file)
{
    Org.Apache.Hadoop.Hdfs.Server.Namenode.EditLogFileInputStream inputStream;
    try
    {
        inputStream = new Org.Apache.Hadoop.Hdfs.Server.Namenode.EditLogFileInputStream(file);
        // Forces the header to be read, so header corruption is caught here.
        inputStream.GetVersion(true);
    }
    catch (EditLogFileInputStream.LogHeaderCorruptException e)
    {
        // If the header is malformed or has the wrong value, the log is corrupt.
        Log.Warn("Log file " + file + " has no valid header", e);
        return new FSEditLogLoader.EditLogValidation(0, HdfsConstants.InvalidTxid, true);
    }
    try
    {
        return FSEditLogLoader.ValidateEditLog(inputStream);
    }
    finally
    {
        IOUtils.CloseStream(inputStream);
    }
}
/// <summary>Apply the batch of edits to the local namespace.</summary>
/// <param name="firstTxId">transaction id of the first txn in the batch;
/// must be exactly lastAppliedTxId + 1</param>
/// <param name="numTxns">number of transactions the batch is expected to contain</param>
/// <param name="data">serialized edit records for the batch</param>
/// <exception cref="System.IO.IOException">if the batch advances fewer txns
/// than expected, or loading the records fails</exception>
private void ApplyEdits(long firstTxId, int numTxns, byte[] data)
{
    // Instance lock retained: other members of this class synchronize on
    // 'this' as well, so switching to a private gate would break mutual
    // exclusion with them.
    lock (this)
    {
        Preconditions.CheckArgument(firstTxId == lastAppliedTxId + 1,
            "Received txn batch starting at %s but expected %s",
            firstTxId, lastAppliedTxId + 1);
        System.Diagnostics.Debug.Assert(backupInputStream.Length() == 0,
            "backup input stream is not empty");
        try
        {
            // FIX: the guard previously checked IsTraceEnabled() while the
            // call logged at Debug level, so the hex dump was silently
            // skipped whenever trace was off but debug was on. Guard and
            // call now agree on the Debug level.
            if (Log.IsDebugEnabled())
            {
                Log.Debug("data:" + StringUtils.ByteToHexString(data));
            }
            FSEditLogLoader logLoader = new FSEditLogLoader(GetNamesystem(), lastAppliedTxId);
            int logVersion = storage.GetLayoutVersion();
            backupInputStream.SetBytes(data, logVersion);
            long numTxnsAdvanced = logLoader.LoadEditRecords(
                backupInputStream, true, lastAppliedTxId + 1, null, null);
            if (numTxnsAdvanced != numTxns)
            {
                throw new IOException("Batch of txns starting at txnid " + firstTxId
                    + " was supposed to contain " + numTxns
                    + " transactions, but we were only able to advance by " + numTxnsAdvanced);
            }
            lastAppliedTxId = logLoader.GetLastAppliedTxId();
            // Recompute quota usage after applying edits.
            FSImage.UpdateCountForQuota(
                GetNamesystem().dir.GetBlockStoragePolicySuite(),
                GetNamesystem().dir.rootDir);
        }
        finally
        {
            // inefficient!
            backupInputStream.Clear();
        }
    }
}
/// <summary>
/// Starts a mini cluster with delegation tokens enabled, runs NumThreads
/// concurrent transaction workers against the namesystem, then closes the
/// edit log and verifies that every edits directory contains the expected
/// number of transactions.
/// </summary>
public virtual void TestEditLog()
{
    // start a cluster
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    FileSystem fileSys = null;
    try
    {
        conf.SetBoolean(DFSConfigKeys.DfsNamenodeDelegationTokenAlwaysUseKey, true);
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes).Build();
        cluster.WaitActive();
        fileSys = cluster.GetFileSystem();
        FSNamesystem namesystem = cluster.GetNamesystem();
        // Print the name directories (diagnostic output only).
        for (IEnumerator<URI> it = cluster.GetNameDirs(0).GetEnumerator(); it.HasNext();)
        {
            FilePath dir = new FilePath(it.Next().GetPath());
            System.Console.Out.WriteLine(dir);
        }
        FSImage fsimage = namesystem.GetFSImage();
        FSEditLog editLog = fsimage.GetEditLog();
        // set small size of flush buffer
        editLog.SetOutputBufferCapacity(2048);
        // Create threads and make them run transactions concurrently.
        Sharpen.Thread[] threadId = new Sharpen.Thread[NumThreads];
        for (int i = 0; i < NumThreads; i++)
        {
            TestSecurityTokenEditLog.Transactions trans = new TestSecurityTokenEditLog.Transactions(namesystem, NumTransactions);
            threadId[i] = new Sharpen.Thread(trans, "TransactionThread-" + i);
            threadId[i].Start();
        }
        // wait for all transactions to get over
        for (int i_1 = 0; i_1 < NumThreads; i_1++)
        {
            try
            {
                threadId[i_1].Join();
            }
            catch (Exception)
            {
                // Join was interrupted: decrement the index so the same
                // thread is joined again on the next iteration.
                // NOTE(review): retries forever if Join keeps throwing.
                i_1--;
            }
        }
        // retry
        editLog.Close();
        // Verify that we can read in all the transactions that we have written.
        // If there were any corruptions, it is likely that the reading in
        // of these transactions will throw an exception.
        // namesystem.GetDelegationTokenSecretManager().StopThreads();
        int numKeys = namesystem.GetDelegationTokenSecretManager().GetNumberOfKeys();
        int expectedTransactions = NumThreads * opsPerTrans * NumTransactions + numKeys + 2;
        // + 2 for BEGIN and END txns
        foreach (Storage.StorageDirectory sd in fsimage.GetStorage().DirIterable(NNStorage.NameNodeDirType.Edits))
        {
            FilePath editFile = NNStorage.GetFinalizedEditsFile(sd, 1, 1 + expectedTransactions - 1);
            System.Console.Out.WriteLine("Verifying file: " + editFile);
            FSEditLogLoader loader = new FSEditLogLoader(namesystem, 0);
            long numEdits = loader.LoadFSEdits(new EditLogFileInputStream(editFile), 1);
            // Each edits directory must hold exactly the expected txn count.
            NUnit.Framework.Assert.AreEqual("Verification for " + editFile, expectedTransactions, numEdits);
        }
    }
    finally
    {
        // Always tear down the filesystem and cluster, even on failure.
        if (fileSys != null)
        {
            fileSys.Close();
        }
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/// <summary>
/// Attempts to bring the backup node from JOURNAL_ONLY into IN_SYNC by
/// loading finalized edit segments (unsynchronized) and then, under the
/// instance lock, replaying the current in-progress segment. Returns false
/// if the logs rolled mid-catch-up or the in-progress stream cannot be
/// located, so the caller can retry.
/// </summary>
/// <returns>true if the backup image converged to IN_SYNC, false to retry</returns>
/// <exception cref="System.IO.IOException"/>
private bool TryConvergeJournalSpool()
{
    Preconditions.CheckState(bnState == BackupImage.BNState.JournalOnly, "bad state: %s", bnState);
    // This section is unsynchronized so we can continue to apply
    // ahead of where we're reading, concurrently. Since the state
    // is JOURNAL_ONLY at this point, we know that lastAppliedTxId
    // doesn't change, and curSegmentTxId only increases
    while (lastAppliedTxId < editLog.GetCurSegmentTxId() - 1)
    {
        long target = editLog.GetCurSegmentTxId();
        Log.Info("Loading edits into backupnode to try to catch up from txid " + lastAppliedTxId + " to " + target);
        FSImageTransactionalStorageInspector inspector = new FSImageTransactionalStorageInspector();
        storage.InspectStorageDirs(inspector);
        editLog.RecoverUnclosedStreams();
        IEnumerable<EditLogInputStream> editStreamsAll = editLog.SelectInputStreams(lastAppliedTxId, target - 1);
        // remove inprogress
        IList<EditLogInputStream> editStreams = Lists.NewArrayList();
        foreach (EditLogInputStream s in editStreamsAll)
        {
            // Keep only finalized streams; the current (in-progress)
            // segment is handled separately under the lock below.
            if (s.GetFirstTxId() != editLog.GetCurSegmentTxId())
            {
                editStreams.AddItem(s);
            }
        }
        LoadEdits(editStreams, GetNamesystem());
    }
    // now, need to load the in-progress file
    lock (this)
    {
        if (lastAppliedTxId != editLog.GetCurSegmentTxId() - 1)
        {
            Log.Debug("Logs rolled while catching up to current segment");
            // drop lock and try again to load local logs
            return(false);
        }
        EditLogInputStream stream = null;
        ICollection<EditLogInputStream> editStreams = GetEditLog().SelectInputStreams(GetEditLog().GetCurSegmentTxId(), GetEditLog().GetCurSegmentTxId());
        foreach (EditLogInputStream s in editStreams)
        {
            if (s.GetFirstTxId() == GetEditLog().GetCurSegmentTxId())
            {
                stream = s;
            }
            // NOTE(review): this break is unconditional, so only the FIRST
            // stream in the collection is ever examined. Presumably the
            // selector returns at most one matching stream — confirm, or
            // move the break inside the if.
            break;
        }
        if (stream == null)
        {
            Log.Warn("Unable to find stream starting with " + editLog.GetCurSegmentTxId() + ". This indicates that there is an error in synchronization in BackupImage");
            return(false);
        }
        try
        {
            long remainingTxns = GetEditLog().GetLastWrittenTxId() - lastAppliedTxId;
            Log.Info("Going to finish converging with remaining " + remainingTxns + " txns from in-progress stream " + stream);
            FSEditLogLoader loader = new FSEditLogLoader(GetNamesystem(), lastAppliedTxId);
            loader.LoadFSEdits(stream, lastAppliedTxId + 1);
            lastAppliedTxId = loader.GetLastAppliedTxId();
            // After replaying the in-progress stream we must be fully caught up.
            System.Diagnostics.Debug.Assert(lastAppliedTxId == GetEditLog().GetLastWrittenTxId());
        }
        finally
        {
            FSEditLog.CloseAllStreams(editStreams);
        }
        Log.Info("Successfully synced BackupNode with NameNode at txnid " + lastAppliedTxId);
        SetState(BackupImage.BNState.InSync);
    }
    return(true);
}
/// <summary>
/// Counts the transactions contained in the given edit log stream, using the
/// validated end txid and the stream's first txid (inclusive range).
/// </summary>
/// <param name="in">the edit log input stream to count transactions in</param>
/// <returns>the number of transactions in the stream</returns>
/// <exception cref="System.IO.IOException"/>
public static long CountTransactionsInStream(EditLogInputStream @in)
{
    FSEditLogLoader.EditLogValidation result = FSEditLogLoader.ValidateEditLog(@in);
    long firstTxId = @in.GetFirstTxId();
    // Both endpoints are inclusive, hence the +1.
    return result.GetEndTxId() - firstTxId + 1;
}