/// <summary>
/// Analyze this storage directory under the regular startup option and,
/// when it turns out to be in a normal state, load its properties
/// (the VERSION file) into this object. The resulting state is kept
/// in the <c>state</c> field.
/// </summary>
/// <exception cref="System.IO.IOException"/>
internal virtual void AnalyzeStorage()
{
    state = sd.AnalyzeStorage(HdfsServerConstants.StartupOption.Regular, this);
    if (state != Storage.StorageState.Normal)
    {
        // Nothing to read unless the directory is fully consistent.
        return;
    }
    ReadProperties(sd);
}
/// <summary>Load one storage directory.</summary>
/// <remarks>Load one storage directory. Recover from previous transitions if required.
/// </remarks>
/// <param name="datanode">datanode instance</param>
/// <param name="nsInfo">namespace information</param>
/// <param name="dataDir">the root path of the storage directory</param>
/// <param name="startOpt">startup option</param>
/// <returns>the StorageDirectory successfully loaded.</returns>
/// <exception cref="System.IO.IOException"/>
private Storage.StorageDirectory LoadStorageDirectory(DataNode datanode, NamespaceInfo
    nsInfo, FilePath dataDir, HdfsServerConstants.StartupOption startOpt)
{
    Storage.StorageDirectory sd = new Storage.StorageDirectory(dataDir, null, true);
    try
    {
        // 1. Analyze the on-disk state; the directory ends up locked but not opened.
        Storage.StorageState curState = sd.AnalyzeStorage(startOpt, this);
        switch (curState)
        {
            case Storage.StorageState.Normal:
            {
                // sd is locked but not opened
                break;
            }

            case Storage.StorageState.NonExistent:
            {
                Log.Info("Block pool storage directory " + dataDir + " does not exist");
                throw new IOException("Storage directory " + dataDir + " does not exist");
            }

            case Storage.StorageState.NotFormatted:
            {
                // format
                Log.Info("Block pool storage directory " + dataDir + " is not formatted for " + nsInfo
                    .GetBlockPoolID());
                Log.Info("Formatting ...");
                Format(sd, nsInfo);
                break;
            }

            default:
            {
                // recovery part is common
                sd.DoRecover(curState);
                break;
            }
        }
        // 2. Do transitions
        // Each storage directory is treated individually.
        // During startup some of them can upgrade or roll back
        // while others could be up-to-date for the regular startup.
        DoTransition(datanode, sd, nsInfo, startOpt);
        if (GetCTime() != nsInfo.GetCTime())
        {
            throw new IOException("Data-node and name-node CTimes must be the same.");
        }
        // 3. Update successfully loaded storage.
        SetServiceLayoutVersion(GetServiceLayoutVersion());
        WriteProperties(sd);
        return sd;
    }
    catch (IOException)
    {
        // Release the directory lock before propagating; bare `throw;`
        // preserves the original stack trace. The exception variable was
        // unused, so it is not declared (avoids warning CS0168).
        sd.Unlock();
        throw;
    }
}
/// <summary>
/// Analyze backup storage directories for consistency.<br />
/// Recover from incomplete checkpoints if required.<br />
/// Read VERSION and fstime files if exist.<br />
/// Do not load image or edits.
/// </summary>
/// <exception cref="System.IO.IOException">if the node should shutdown.</exception>
internal virtual void RecoverCreateRead()
{
    for (IEnumerator<Storage.StorageDirectory> it = storage.DirIterator(); it.HasNext(); )
    {
        Storage.StorageDirectory sd = it.Next();
        Storage.StorageState curState;
        try
        {
            curState = sd.AnalyzeStorage(HdfsServerConstants.StartupOption.Regular, storage);
            switch (curState)
            {
                case Storage.StorageState.NonExistent:
                {
                    // sd is locked but not opened
                    // fail if any of the configured storage dirs are inaccessible
                    throw new InconsistentFSStateException(sd.GetRoot(), "checkpoint directory does not exist or is not accessible."
                        );
                }

                case Storage.StorageState.NotFormatted:
                {
                    // for backup node all directories may be unformatted initially
                    Log.Info("Storage directory " + sd.GetRoot() + " is not formatted.");
                    Log.Info("Formatting ...");
                    sd.ClearDirectory();
                    // create empty current
                    break;
                }

                case Storage.StorageState.Normal:
                {
                    break;
                }

                default:
                {
                    // recovery is possible
                    sd.DoRecover(curState);
                    break;
                }
            }
            if (curState != Storage.StorageState.NotFormatted)
            {
                // read and verify consistency with other directories
                storage.ReadProperties(sd);
            }
        }
        catch (IOException)
        {
            // Unlock this directory before rethrowing; `throw;` keeps the
            // stack trace intact. The exception variable was unused, so it
            // is not declared (avoids warning CS0168).
            sd.Unlock();
            throw;
        }
    }
}
/// <summary>Analyze checkpoint directories.</summary>
/// <remarks>
/// Analyze checkpoint directories.
/// Create directories if they do not exist.
/// Recover from an unsuccessful checkpoint if necessary.
/// </remarks>
/// <param name="format">when true, each checkpoint directory is cleared before use.</param>
/// <exception cref="System.IO.IOException"/>
internal virtual void RecoverCreate(bool format)
{
    storage.AttemptRestoreRemovedStorage();
    storage.UnlockAll();
    for (IEnumerator<Storage.StorageDirectory> it = storage.DirIterator(); it.HasNext(); )
    {
        Storage.StorageDirectory sd = it.Next();
        bool isAccessible = true;
        try
        {
            // create directories if don't exist yet; the return value is
            // deliberately ignored — Mkdirs() also returns false when the
            // directory already exists, and AnalyzeStorage below detects a
            // genuinely missing directory.
            sd.GetRoot().Mkdirs();
        }
        catch (SecurityException)
        {
            // cannot even probe/create the directory — report it below
            isAccessible = false;
        }
        if (!isAccessible)
        {
            throw new InconsistentFSStateException(sd.GetRoot(), "cannot access checkpoint directory."
                );
        }
        if (format)
        {
            // Don't confirm, since this is just the secondary namenode.
            Log.Info("Formatting storage directory " + sd);
            sd.ClearDirectory();
        }
        Storage.StorageState curState;
        try
        {
            curState = sd.AnalyzeStorage(HdfsServerConstants.StartupOption.Regular, storage);
            switch (curState)
            {
                case Storage.StorageState.NonExistent:
                {
                    // sd is locked but not opened
                    // fail if any of the configured checkpoint dirs are inaccessible
                    throw new InconsistentFSStateException(sd.GetRoot(), "checkpoint directory does not exist or is not accessible."
                        );
                }

                case Storage.StorageState.NotFormatted:
                {
                    // it's ok since initially there is no current and VERSION
                    break;
                }

                case Storage.StorageState.Normal:
                {
                    // Read the VERSION file. This verifies that:
                    // (a) the VERSION file for each of the directories is the same,
                    // and (b) when we connect to a NN, we can verify that the remote
                    // node matches the same namespace that we ran on previously.
                    storage.ReadProperties(sd);
                    break;
                }

                default:
                {
                    // recovery is possible
                    sd.DoRecover(curState);
                    break;
                }
            }
        }
        catch (IOException)
        {
            // Unlock before rethrowing; `throw;` preserves the stack trace.
            // The exception variable was unused, so it is not declared
            // (avoids warning CS0168).
            sd.Unlock();
            throw;
        }
    }
}