Example No. 1
        /// <summary>invalidate storage by removing the second and third storage directories</summary>
        /// <exception cref="System.IO.IOException"/>
        public virtual void InvalidateStorage(FSImage fi, ICollection <FilePath> filesToInvalidate
                                              )
        {
            AList <Storage.StorageDirectory>       al = new AList <Storage.StorageDirectory>(2);
            IEnumerator <Storage.StorageDirectory> it = fi.GetStorage().DirIterator();

            while (it.HasNext())
            {
                Storage.StorageDirectory sd = it.Next();
                if (filesToInvalidate.Contains(sd.GetRoot()))
                {
                    Log.Info("causing IO error on " + sd.GetRoot());
                    al.AddItem(sd);
                }
            }
            // simulate an error
            fi.GetStorage().ReportErrorsOnDirectories(al);
            foreach (JournalSet.JournalAndStream j in fi.GetEditLog().GetJournals())
            {
                if (j.GetManager() is FileJournalManager)
                {
                    FileJournalManager fm = (FileJournalManager)j.GetManager();
                    if (fm.GetStorageDirectory().GetRoot().Equals(path2) || fm.GetStorageDirectory().
                        GetRoot().Equals(path3))
                    {
                        EditLogOutputStream mockStream = Org.Mockito.Mockito.Spy(j.GetCurrentStream());
                        j.SetCurrentStreamForTests(mockStream);
                        Org.Mockito.Mockito.DoThrow(new IOException("Injected fault: write")).When(mockStream
                                                                                                   ).Write(Org.Mockito.Mockito.AnyObject <FSEditLogOp>());
                    }
                }
            }
        }
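A possible call site, shown only as a hedged sketch: fi would be the active FSImage, path2 and path3 are the same test fields referenced inside the method above, and the AList collection type mirrors the one used at the top of the example.

        /// <summary>Sketch (not from the source): invalidate the second and third storage directories.</summary>
        /// <exception cref="System.IO.IOException"/>
        public virtual void InvalidateSecondAndThirdDirs(FSImage fi)
        {
            // Collect the roots to fail, then hand them to the helper above.
            AList<FilePath> filesToInvalidate = new AList<FilePath>(2);
            filesToInvalidate.AddItem(path2);
            filesToInvalidate.AddItem(path3);
            InvalidateStorage(fi, filesToInvalidate);
        }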
Example No. 2
        /// <summary>
        /// Analyze backup storage directories for consistency.<br />
        /// Recover from incomplete checkpoints if required.<br />
        /// Read VERSION and fstime files if exist.<br />
        /// Do not load image or edits.
        /// </summary>
        /// <exception cref="System.IO.IOException">if the node should shutdown.</exception>
        internal virtual void RecoverCreateRead()
        {
            for (IEnumerator <Storage.StorageDirectory> it = storage.DirIterator(); it.HasNext
                     ();)
            {
                Storage.StorageDirectory sd = it.Next();
                Storage.StorageState     curState;
                try
                {
                    curState = sd.AnalyzeStorage(HdfsServerConstants.StartupOption.Regular, storage);
                    // sd is locked but not opened
                    switch (curState)
                    {
                    case Storage.StorageState.NonExistent:
                    {
                        // fail if any of the configured storage dirs are inaccessible
                        throw new InconsistentFSStateException(sd.GetRoot(), "checkpoint directory does not exist or is not accessible."
                                                               );
                    }

                    case Storage.StorageState.NotFormatted:
                    {
                        // for backup node all directories may be unformatted initially
                        Log.Info("Storage directory " + sd.GetRoot() + " is not formatted.");
                        Log.Info("Formatting ...");
                        sd.ClearDirectory();
                        // create empty current
                        break;
                    }

                    case Storage.StorageState.Normal:
                    {
                        break;
                    }

                    default:
                    {
                        // recovery is possible
                        sd.DoRecover(curState);
                        break;
                    }
                    }
                    if (curState != Storage.StorageState.NotFormatted)
                    {
                        // read and verify consistency with other directories
                        storage.ReadProperties(sd);
                    }
                }
                catch (IOException ioe)
                {
                    sd.Unlock();
                    throw;
                }
            }
        }
Example No. 3
        /// <summary>Upgrade to any release after 0.22 (0.22 included), e.g. 0.22 =&gt; 0.23.</summary>
        /// <remarks>
        /// Upgrade to any release after 0.22 (0.22 included) release e.g. 0.22 =&gt; 0.23
        /// Upgrade procedure is as follows:
        /// <ol>
        /// <li>If <SD>/current/<bpid>/previous exists then delete it</li>
        /// <li>Rename <SD>/current/<bpid>/current to
        /// <SD>/current/<bpid>/previous.tmp</li>
        /// <li>Create new <SD>/current/<bpid>/current directory</li>
        /// <ol>
        /// <li>Hard links for block files are created from previous.tmp to current</li>
        /// <li>Save new version file in current directory</li>
        /// </ol>
        /// <li>Rename previous.tmp to previous</li> </ol>
        /// </remarks>
        /// <param name="bpSd">storage directory <SD>/current/<bpid></param>
        /// <param name="nsInfo">Namespace Info from the namenode</param>
        /// <exception cref="System.IO.IOException">on error</exception>
        internal virtual void DoUpgrade(DataNode datanode, Storage.StorageDirectory bpSd,
                                        NamespaceInfo nsInfo)
        {
            // Upgrading is applicable only to releases with federation support or later
            if (!DataNodeLayoutVersion.Supports(LayoutVersion.Feature.Federation, layoutVersion
                                                ))
            {
                return;
            }
            Log.Info("Upgrading block pool storage directory " + bpSd.GetRoot() + ".\n   old LV = "
                     + this.GetLayoutVersion() + "; old CTime = " + this.GetCTime() + ".\n   new LV = "
                     + HdfsConstants.DatanodeLayoutVersion + "; new CTime = " + nsInfo.GetCTime());
            // get <SD>/previous directory
            string dnRoot = GetDataNodeStorageRoot(bpSd.GetRoot().GetCanonicalPath());

            Storage.StorageDirectory dnSdStorage = new Storage.StorageDirectory(new FilePath(
                                                                                    dnRoot));
            FilePath dnPrevDir = dnSdStorage.GetPreviousDir();

            // If <SD>/previous directory exists delete it
            if (dnPrevDir.Exists())
            {
                DeleteDir(dnPrevDir);
            }
            FilePath bpCurDir  = bpSd.GetCurrentDir();
            FilePath bpPrevDir = bpSd.GetPreviousDir();

            System.Diagnostics.Debug.Assert(bpCurDir.Exists(), "BP level current directory must exist."
                                            );
            CleanupDetachDir(new FilePath(bpCurDir, DataStorage.StorageDirDetached));
            // 1. Delete <SD>/current/<bpid>/previous dir before upgrading
            if (bpPrevDir.Exists())
            {
                DeleteDir(bpPrevDir);
            }
            FilePath bpTmpDir = bpSd.GetPreviousTmp();

            System.Diagnostics.Debug.Assert(!bpTmpDir.Exists(), "previous.tmp directory must not exist."
                                            );
            // 2. Rename <SD>/current/<bpid>/current to
            //    <SD>/current/<bpid>/previous.tmp
            Rename(bpCurDir, bpTmpDir);
            // 3. Create new <SD>/current with block files hardlinks and VERSION
            LinkAllBlocks(datanode, bpTmpDir, bpCurDir);
            this.layoutVersion = HdfsConstants.DatanodeLayoutVersion;
            System.Diagnostics.Debug.Assert(this.namespaceID == nsInfo.GetNamespaceID(), "Data-node and name-node namespace IDs must be the same."
                                            );
            this.cTime = nsInfo.GetCTime();
            WriteProperties(bpSd);
            // 4. Rename <SD>/current/<bpid>/previous.tmp to
            //    <SD>/current/<bpid>/previous
            Rename(bpTmpDir, bpPrevDir);
            Log.Info("Upgrade of block pool " + blockpoolID + " at " + bpSd.GetRoot() + " is complete"
                     );
        }
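As a hedged sketch only (the storageDirs field and iterator pattern are borrowed from the Remove example further down), the per-directory upgrade above could be driven for every block pool storage directory like this; the wrapper name is hypothetical.

        /// <summary>Sketch (not from the source): run the upgrade above on every block pool storage directory.</summary>
        /// <exception cref="System.IO.IOException"/>
        internal virtual void DoUpgradeAll(DataNode datanode, NamespaceInfo nsInfo)
        {
            for (IEnumerator<Storage.StorageDirectory> it = this.storageDirs.GetEnumerator();
                 it.HasNext();)
            {
                // Each entry is a <SD>/current/<bpid> directory, as required by DoUpgrade.
                DoUpgrade(datanode, it.Next(), nsInfo);
            }
        }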
Example No. 4
 /// <summary>Check whether the path is a valid DataNode data directory.</summary>
 private static void CheckDir(FilePath dataDir)
 {
     Storage.StorageDirectory sd = new Storage.StorageDirectory(dataDir);
     NUnit.Framework.Assert.IsTrue(sd.GetRoot().IsDirectory());
     NUnit.Framework.Assert.IsTrue(sd.GetCurrentDir().IsDirectory());
     NUnit.Framework.Assert.IsTrue(sd.GetVersionFile().IsFile());
 }
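A small hedged sketch of how the check might be applied to several volumes at once; the list argument is an assumption, and each entry should be the root of an already formatted DataNode volume.

 /// <summary>Sketch (not from the source): validate every configured volume root.</summary>
 private static void CheckAllDirs(IList<FilePath> dataDirs)
 {
     foreach (FilePath dataDir in dataDirs)
     {
         // CheckDir asserts that the root, its current/ subdirectory and the VERSION file exist.
         CheckDir(dataDir);
     }
 }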
Example No. 5
        public virtual void TestAddVolumeFailures()
        {
            StartDFSCluster(1, 1);
            string         dataDir    = cluster.GetDataDirectory();
            DataNode       dn         = cluster.GetDataNodes()[0];
            IList <string> newDirs    = Lists.NewArrayList();
            int            NumNewDirs = 4;

            for (int i = 0; i < NumNewDirs; i++)
            {
                FilePath newVolume = new FilePath(dataDir, "new_vol" + i);
                newDirs.AddItem(newVolume.ToString());
                if (i % 2 == 0)
                {
                    // Make addVolume() fail.
                    newVolume.CreateNewFile();
                }
            }
            string newValue = dn.GetConf().Get(DFSConfigKeys.DfsDatanodeDataDirKey) + "," + Joiner
                              .On(",").Join(newDirs);

            try
            {
                dn.ReconfigurePropertyImpl(DFSConfigKeys.DfsDatanodeDataDirKey, newValue);
                NUnit.Framework.Assert.Fail("Expect to throw IOException.");
            }
            catch (ReconfigurationException e)
            {
                string   errorMessage = e.InnerException.Message;
                string[] messages     = errorMessage.Split("\\r?\\n");
                NUnit.Framework.Assert.AreEqual(2, messages.Length);
                Assert.AssertThat(messages[0], CoreMatchers.ContainsString("new_vol0"));
                Assert.AssertThat(messages[1], CoreMatchers.ContainsString("new_vol2"));
            }
            // Make sure that vol0 and vol2's metadata are not left in memory.
            FsDatasetSpi <object> dataset = dn.GetFSDataset();

            foreach (FsVolumeSpi volume in dataset.GetVolumes())
            {
                Assert.AssertThat(volume.GetBasePath(), IS.Is(CoreMatchers.Not(CoreMatchers.AnyOf
                                                                                   (IS.Is(newDirs[0]), IS.Is(newDirs[2])))));
            }
            DataStorage storage = dn.GetStorage();

            for (int i_1 = 0; i_1 < storage.GetNumStorageDirs(); i_1++)
            {
                Storage.StorageDirectory sd = storage.GetStorageDir(i_1);
                Assert.AssertThat(sd.GetRoot().ToString(), IS.Is(CoreMatchers.Not(CoreMatchers.AnyOf
                                                                                      (IS.Is(newDirs[0]), IS.Is(newDirs[2])))));
            }
            // The newly effective conf does not have vol0 and vol2.
            string[] effectiveVolumes = dn.GetConf().Get(DFSConfigKeys.DfsDatanodeDataDirKey)
                                        .Split(",");
            NUnit.Framework.Assert.AreEqual(4, effectiveVolumes.Length);
            foreach (string ev in effectiveVolumes)
            {
                Assert.AssertThat(StorageLocation.Parse(ev).GetFile().GetCanonicalPath(), IS.Is(CoreMatchers.Not
                                                                                                    (CoreMatchers.AnyOf(IS.Is(newDirs[0]), IS.Is(newDirs[2])))));
            }
        }
Example No. 6
		/// <exception cref="System.Exception"/>
		public virtual void TestJournalLocking()
		{
			Assume.AssumeTrue(journal.GetStorage().GetStorageDir(0).IsLockSupported());
			Storage.StorageDirectory sd = journal.GetStorage().GetStorageDir(0);
			FilePath lockFile = new FilePath(sd.GetRoot(), Storage.StorageFileLock);
			// Journal should be locked, since the format() call locks it.
			GenericTestUtils.AssertExists(lockFile);
			journal.NewEpoch(FakeNsinfo, 1);
			try
			{
				new Journal(conf, TestLogDir, Jid, HdfsServerConstants.StartupOption.Regular, mockErrorReporter
					);
				NUnit.Framework.Assert.Fail("Did not fail to create another journal in same dir");
			}
			catch (IOException ioe)
			{
				GenericTestUtils.AssertExceptionContains("Cannot lock storage", ioe);
			}
			journal.Close();
			// Journal should no longer be locked after the close() call.
			// Hence, should be able to create a new Journal in the same dir.
			Journal journal2 = new Journal(conf, TestLogDir, Jid, HdfsServerConstants.StartupOption
				.Regular, mockErrorReporter);
			journal2.NewEpoch(FakeNsinfo, 2);
			journal2.Close();
		}
Example No. 7
        /*
         * Finalize the block pool storage by deleting <BP>/previous directory
         * that holds the snapshot.
         */
        /// <exception cref="System.IO.IOException"/>
        internal virtual void DoFinalize(FilePath dnCurDir)
        {
            FilePath bpRoot = GetBpRoot(blockpoolID, dnCurDir);

            Storage.StorageDirectory bpSd = new Storage.StorageDirectory(bpRoot);
            // block pool level previous directory
            FilePath prevDir = bpSd.GetPreviousDir();

            if (!prevDir.Exists())
            {
                // already finalized
                return;
            }
            string dataDirPath = bpSd.GetRoot().GetCanonicalPath();

            Log.Info("Finalizing upgrade for storage directory " + dataDirPath + ".\n   cur LV = "
                     + this.GetLayoutVersion() + "; cur CTime = " + this.GetCTime());
            System.Diagnostics.Debug.Assert(bpSd.GetCurrentDir().Exists(), "Current directory must exist."
                                            );
            // rename previous to finalized.tmp
            FilePath tmpDir = bpSd.GetFinalizedTmp();

            Rename(prevDir, tmpDir);
            // delete finalized.tmp dir in a separate thread
            new Daemon(new _Runnable_618(tmpDir, dataDirPath)).Start();
        }
Example No. 8
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="Sharpen.TimeoutException"/>
        /// <exception cref="System.Exception"/>
        /// <exception cref="Org.Apache.Hadoop.Conf.ReconfigurationException"/>
        public virtual void TestAddBackRemovedVolume()
        {
            StartDFSCluster(1, 2);
            // Create some data on every volume.
            CreateFile(new Path("/test"), 32);
            DataNode      dn            = cluster.GetDataNodes()[0];
            Configuration conf          = dn.GetConf();
            string        oldDataDir    = conf.Get(DFSConfigKeys.DfsDatanodeDataDirKey);
            string        keepDataDir   = oldDataDir.Split(",")[0];
            string        removeDataDir = oldDataDir.Split(",")[1];

            dn.ReconfigurePropertyImpl(DFSConfigKeys.DfsDatanodeDataDirKey, keepDataDir);
            for (int i = 0; i < cluster.GetNumNameNodes(); i++)
            {
                string bpid = cluster.GetNamesystem(i).GetBlockPoolId();
                BlockPoolSliceStorage bpsStorage = dn.GetStorage().GetBPStorage(bpid);
                // Make sure that there is no block pool level storage under removeDataDir.
                for (int j = 0; j < bpsStorage.GetNumStorageDirs(); j++)
                {
                    Storage.StorageDirectory sd = bpsStorage.GetStorageDir(j);
                    NUnit.Framework.Assert.IsFalse(sd.GetRoot().GetAbsolutePath().StartsWith(new FilePath
                                                                                                 (removeDataDir).GetAbsolutePath()));
                }
                NUnit.Framework.Assert.AreEqual(dn.GetStorage().GetBPStorage(bpid).GetNumStorageDirs
                                                    (), 1);
            }
            // Bring the removed directory back. It only succeeds if all metadata about
            // this directory was removed in the previous step.
            dn.ReconfigurePropertyImpl(DFSConfigKeys.DfsDatanodeDataDirKey, oldDataDir);
        }
Example No. 9
        /* Flag if there is at least one storage dir that doesn't contain the newest
         * fstime */
        /* Flag set false if there are any "previous" directories found */
        // Track the name and edits dir with the latest times
        /// <exception cref="System.IO.IOException"/>
        internal override void InspectDirectory(Storage.StorageDirectory sd)
        {
            // Was the file just formatted?
            if (!sd.GetVersionFile().Exists())
            {
                hasOutOfDateStorageDirs = true;
                return;
            }
            bool imageExists = false;
            bool editsExists = false;

            // Determine if sd is image, edits or both
            if (sd.GetStorageDirType().IsOfType(NNStorage.NameNodeDirType.Image))
            {
                imageExists = NNStorage.GetStorageFile(sd, NNStorage.NameNodeFile.Image).Exists();
                imageDirs.AddItem(sd.GetRoot().GetCanonicalPath());
            }
            if (sd.GetStorageDirType().IsOfType(NNStorage.NameNodeDirType.Edits))
            {
                editsExists = NNStorage.GetStorageFile(sd, NNStorage.NameNodeFile.Edits).Exists();
                editsDirs.AddItem(sd.GetRoot().GetCanonicalPath());
            }
            long checkpointTime = ReadCheckpointTime(sd);

            checkpointTimes.AddItem(checkpointTime);
            if (sd.GetStorageDirType().IsOfType(NNStorage.NameNodeDirType.Image) && (latestNameCheckpointTime
                                                                                     < checkpointTime) && imageExists)
            {
                latestNameCheckpointTime = checkpointTime;
                latestNameSD             = sd;
            }
            if (sd.GetStorageDirType().IsOfType(NNStorage.NameNodeDirType.Edits) && (latestEditsCheckpointTime
                                                                                     < checkpointTime) && editsExists)
            {
                latestEditsCheckpointTime = checkpointTime;
                latestEditsSD             = sd;
            }
            // check that we have a valid, non-default checkpointTime
            if (checkpointTime <= 0L)
            {
                hasOutOfDateStorageDirs = true;
            }
            // set finalized flag
            isUpgradeFinalized = isUpgradeFinalized && !sd.GetPreviousDir().Exists();
        }
Example No. 10
        /// <summary>Check whether the root is a valid BlockPoolSlice storage.</summary>
        private static void CheckDir(FilePath root, string bpid)
        {
            Storage.StorageDirectory sd = new Storage.StorageDirectory(root);
            FilePath bpRoot             = new FilePath(sd.GetCurrentDir(), bpid);

            Storage.StorageDirectory bpSd = new Storage.StorageDirectory(bpRoot);
            NUnit.Framework.Assert.IsTrue(bpSd.GetRoot().IsDirectory());
            NUnit.Framework.Assert.IsTrue(bpSd.GetCurrentDir().IsDirectory());
            NUnit.Framework.Assert.IsTrue(bpSd.GetVersionFile().IsFile());
        }
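A hedged usage sketch along the same lines; cluster is assumed to be a running MiniDFSCluster test fixture, and the block pool id lookup mirrors the one used in the volume-failure test later in this list.

        /// <summary>Sketch (not from the source): validate the block pool slice under each volume root.</summary>
        private void CheckAllBlockPoolDirs(IList<FilePath> roots)
        {
            // The block pool id comes from the running namesystem, as in TestFailedVolumeBeingRemovedFromDataNode.
            string bpid = cluster.GetNamesystem().GetBlockPoolId();
            foreach (FilePath root in roots)
            {
                CheckDir(root, bpid);
            }
        }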
Example No. 11
        /*
         * Roll back to old snapshot at the block pool level
         * If previous directory exists:
         * <ol>
         * <li>Rename <SD>/current/<bpid>/current to removed.tmp</li>
         * <li>Rename * <SD>/current/<bpid>/previous to current</li>
         * <li>Remove removed.tmp</li>
         * </ol>
         *
         * Do nothing if previous directory does not exist.
         * @param bpSd Block pool storage directory at <SD>/current/<bpid>
         */
        /// <exception cref="System.IO.IOException"/>
        internal virtual void DoRollback(Storage.StorageDirectory bpSd, NamespaceInfo nsInfo
                                         )
        {
            FilePath prevDir = bpSd.GetPreviousDir();

            // regular startup if previous dir does not exist
            if (!prevDir.Exists())
            {
                return;
            }
            // read attributes out of the VERSION file of previous directory
            Org.Apache.Hadoop.Hdfs.Server.Datanode.BlockPoolSliceStorage prevInfo = new Org.Apache.Hadoop.Hdfs.Server.Datanode.BlockPoolSliceStorage
                                                                                        ();
            prevInfo.ReadPreviousVersionProperties(bpSd);
            // We allow rollback to a state that is either consistent with
            // the namespace state or can be further upgraded to it.
            // In other words, we can only roll back when (storedLV >= software LV)
            // && (DN.previousCTime <= NN.ctime)
            if (!(prevInfo.GetLayoutVersion() >= HdfsConstants.DatanodeLayoutVersion && prevInfo
                  .GetCTime() <= nsInfo.GetCTime()))
            {
                // cannot rollback
                throw new InconsistentFSStateException(bpSd.GetRoot(), "Cannot rollback to a newer state.\nDatanode previous state: LV = "
                                                       + prevInfo.GetLayoutVersion() + " CTime = " + prevInfo.GetCTime() + " is newer than the namespace state: LV = "
                                                       + HdfsConstants.DatanodeLayoutVersion + " CTime = " + nsInfo.GetCTime());
            }
            Log.Info("Rolling back storage directory " + bpSd.GetRoot() + ".\n   target LV = "
                     + nsInfo.GetLayoutVersion() + "; target CTime = " + nsInfo.GetCTime());
            FilePath tmpDir = bpSd.GetRemovedTmp();

            System.Diagnostics.Debug.Assert(!tmpDir.Exists(), "removed.tmp directory must not exist."
                                            );
            // 1. rename current to tmp
            FilePath curDir = bpSd.GetCurrentDir();

            System.Diagnostics.Debug.Assert(curDir.Exists(), "Current directory must exist.");
            Rename(curDir, tmpDir);
            // 2. rename previous to current
            Rename(prevDir, curDir);
            // 3. delete removed.tmp dir
            DeleteDir(tmpDir);
            Log.Info("Rollback of " + bpSd.GetRoot() + " is complete");
        }
Example No. 12
        /// <exception cref="System.IO.IOException"/>
        protected internal override void SetFieldsFromProperties(Properties props, Storage.StorageDirectory
                                                                 sd)
        {
            SetLayoutVersion(props, sd);
            SetNamespaceID(props, sd);
            SetcTime(props, sd);
            string sbpid = props.GetProperty("blockpoolID");

            SetBlockPoolID(sd.GetRoot(), sbpid);
        }
Example No. 13
        /// <summary>Finalize the upgrade.</summary>
        /// <remarks>
        /// Finalize the upgrade. The previous dir, if any, will be renamed and
        /// removed. After this is completed, rollback is no longer allowed.
        /// </remarks>
        /// <param name="sd">the storage directory to finalize</param>
        /// <exception cref="System.IO.IOException">in the event of error</exception>
        internal static void DoFinalize(Storage.StorageDirectory sd)
        {
            FilePath prevDir = sd.GetPreviousDir();

            if (!prevDir.Exists())
            {
                // already discarded
                Log.Info("Directory " + prevDir + " does not exist.");
                Log.Info("Finalize upgrade for " + sd.GetRoot() + " is not required.");
                return;
            }
            Log.Info("Finalizing upgrade of storage directory " + sd.GetRoot());
            Preconditions.CheckState(sd.GetCurrentDir().Exists(), "Current directory must exist."
                                     );
            FilePath tmpDir = sd.GetFinalizedTmp();

            // rename previous to tmp and remove
            NNStorage.Rename(prevDir, tmpDir);
            NNStorage.DeleteDir(tmpDir);
            Log.Info("Finalize upgrade for " + sd.GetRoot() + " is complete.");
        }
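A hedged sketch of driving the finalize step over every directory; it reuses the DirIterator pattern from the RecoverCreateRead example above and is not how the NameNode itself sequences finalization.

        /// <summary>Sketch (not from the source): finalize every storage directory known to NNStorage.</summary>
        /// <exception cref="System.IO.IOException"/>
        internal static void DoFinalizeAll(NNStorage storage)
        {
            for (IEnumerator<Storage.StorageDirectory> it = storage.DirIterator(); it.HasNext();)
            {
                DoFinalize(it.Next());
            }
        }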
Example No. 14
 /// <summary>Perform the upgrade of the storage dir to the given storage info.</summary>
 /// <remarks>
 /// Perform the upgrade of the storage dir to the given storage info. The new
 /// storage info is written into the current directory, and the previous.tmp
 /// directory is renamed to previous.
 /// </remarks>
 /// <param name="sd">the storage directory to upgrade</param>
 /// <param name="storage">info about the new upgraded versions.</param>
 /// <exception cref="System.IO.IOException">in the event of error</exception>
 public static void DoUpgrade(Storage.StorageDirectory sd, Storage storage)
 {
     Log.Info("Performing upgrade of storage directory " + sd.GetRoot());
     try
     {
         // Write the version file, since saveFsImage only makes the
         // fsimage_<txid>, and the directory is otherwise empty.
         storage.WriteProperties(sd);
         FilePath prevDir = sd.GetPreviousDir();
         FilePath tmpDir  = sd.GetPreviousTmp();
         Preconditions.CheckState(!prevDir.Exists(), "previous directory must not exist for upgrade."
                                  );
         Preconditions.CheckState(tmpDir.Exists(), "previous.tmp directory must exist for upgrade."
                                  );
         // rename tmp to previous
         NNStorage.Rename(tmpDir, prevDir);
     }
     catch (IOException ioe)
     {
         Log.Error("Unable to rename temp to previous for " + sd.GetRoot(), ioe);
         throw;
     }
 }
Example No. 15
 /// <summary>Remove block pool level storage directory.</summary>
 /// <param name="absPathToRemove">
 /// the absolute path of the root for the block pool
 /// level storage to remove.
 /// </param>
 internal virtual void Remove(FilePath absPathToRemove)
 {
     Preconditions.CheckArgument(absPathToRemove.IsAbsolute());
     Log.Info("Removing block level storage: " + absPathToRemove);
     for (IEnumerator <Storage.StorageDirectory> it = this.storageDirs.GetEnumerator();
          it.HasNext();)
     {
         Storage.StorageDirectory sd = it.Next();
         if (sd.GetRoot().GetAbsoluteFile().Equals(absPathToRemove))
         {
             it.Remove();
             break;
         }
     }
 }
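A hedged sketch of one way the removal could be reached from a volume path; GetBpRoot is the helper used in the block pool finalize example earlier, and the wrapper itself is an assumption, not part of the source.

 /// <summary>Sketch (not from the source): remove the block pool slice that lives under one volume's current dir.</summary>
 internal virtual void RemoveBlockPoolSlice(string bpid, FilePath dnCurDir)
 {
     // GetBpRoot resolves <volume>/current/<bpid>; Remove requires an absolute path.
     FilePath bpRoot = GetBpRoot(bpid, dnCurDir);
     Remove(bpRoot.GetAbsoluteFile());
 }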
Example No. 16
 /// <summary>
 /// Validate and set clusterId from
 /// <see cref="Sharpen.Properties"/>
 ///
 /// </summary>
 /// <exception cref="Org.Apache.Hadoop.Hdfs.Server.Common.InconsistentFSStateException
 ///     "/>
 protected internal virtual void SetClusterId(Properties props, int layoutVersion,
                                              Storage.StorageDirectory sd)
 {
     // Set cluster ID in version that supports federation
     if (LayoutVersion.Supports(GetServiceLayoutFeatureMap(), LayoutVersion.Feature.Federation
                                , layoutVersion))
     {
         string cid = GetProperty(props, sd, "clusterID");
         if (!(clusterID.Equals(string.Empty) || cid.Equals(string.Empty) || clusterID.Equals
                   (cid)))
         {
             throw new InconsistentFSStateException(sd.GetRoot(), "cluster Id is incompatible with others."
                                                    );
         }
         clusterID = cid;
     }
 }
Example No. 17
        /// <summary>
        /// Perform any steps that must succeed across all storage dirs/JournalManagers
        /// involved in an upgrade before proceeding onto the actual upgrade stage.
        /// </summary>
        /// <remarks>
        /// Perform any steps that must succeed across all storage dirs/JournalManagers
        /// involved in an upgrade before proceeding onto the actual upgrade stage. If
        /// a call to any JM's or local storage dir's doPreUpgrade method fails, then
        /// doUpgrade will not be called for any JM. The existing current dir is
        /// renamed to previous.tmp, and then a new, empty current dir is created.
        /// </remarks>
        /// <param name="conf">
        /// configuration for creating
        /// <see cref="EditLogFileOutputStream"/>
        /// </param>
        /// <param name="sd">the storage directory to perform the pre-upgrade procedure.</param>
        /// <exception cref="System.IO.IOException">in the event of error</exception>
        internal static void DoPreUpgrade(Configuration conf, Storage.StorageDirectory sd
                                          )
        {
            Log.Info("Starting upgrade of storage directory " + sd.GetRoot());
            // rename current to tmp
            RenameCurToTmp(sd);
            FilePath       curDir       = sd.GetCurrentDir();
            FilePath       tmpDir       = sd.GetPreviousTmp();
            IList <string> fileNameList = IOUtils.ListDirectory(tmpDir, new _FilenameFilter_121
                                                                    (tmpDir));

            foreach (string s in fileNameList)
            {
                FilePath prevFile = new FilePath(tmpDir, s);
                FilePath newFile  = new FilePath(curDir, prevFile.GetName());
                Files.CreateLink(newFile.ToPath(), prevFile.ToPath());
            }
        }
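A hedged sketch pairing this pre-upgrade step with the DoUpgrade method from Example No. 14; the real NameNode saves a new fsimage between the two steps, so this shows illustrative ordering only and the wrapper name is hypothetical.

        /// <summary>Sketch (not from the source): pre-upgrade a directory and then promote previous.tmp to previous.</summary>
        /// <exception cref="System.IO.IOException"/>
        internal static void PreUpgradeThenUpgrade(Configuration conf, Storage.StorageDirectory sd,
                                                   Storage storage)
        {
            // Rename current to previous.tmp and hard-link the old files back into a fresh current dir.
            DoPreUpgrade(conf, sd);
            // Write the new VERSION into current, then rename previous.tmp to previous.
            DoUpgrade(sd, storage);
        }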
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="System.Exception"/>
        public virtual void TestFinalizeErrorReportedToNNStorage()
        {
            FilePath f = new FilePath(TestEditLog.TestDir + "/filejournaltestError");
            // abort after 10th roll
            NNStorage storage = TestEditLog.SetupEdits(Sharpen.Collections.SingletonList <URI>
                                                           (f.ToURI()), 10, new TestEditLog.AbortSpec(10, 0));

            Storage.StorageDirectory sd = storage.DirIterator(NNStorage.NameNodeDirType.Edits
                                                              ).Next();
            FileJournalManager jm         = new FileJournalManager(conf, sd, storage);
            string             sdRootPath = sd.GetRoot().GetAbsolutePath();

            FileUtil.Chmod(sdRootPath, "-w", true);
            try
            {
                jm.FinalizeLogSegment(0, 1);
            }
            finally
            {
                FileUtil.Chmod(sdRootPath, "+w", true);
                NUnit.Framework.Assert.IsTrue(storage.GetRemovedStorageDirs().Contains(sd));
            }
        }
Example No. 19
        /// <summary>
        /// Return true if this storage dir can roll back to the previous storage
        /// state, false otherwise.
        /// </summary>
        /// <remarks>
        /// Return true if this storage dir can roll back to the previous storage
        /// state, false otherwise. The NN will refuse to run the rollback operation
        /// unless at least one JM or fsimage storage directory can roll back.
        /// </remarks>
        /// <param name="storage">the storage info for the current state</param>
        /// <param name="prevStorage">the storage info for the previous (unupgraded) state</param>
        /// <param name="targetLayoutVersion">the layout version we intend to roll back to</param>
        /// <returns>true if this JM can roll back, false otherwise.</returns>
        /// <exception cref="System.IO.IOException">in the event of error</exception>
        internal static bool CanRollBack(Storage.StorageDirectory sd, StorageInfo storage
                                         , StorageInfo prevStorage, int targetLayoutVersion)
        {
            FilePath prevDir = sd.GetPreviousDir();

            if (!prevDir.Exists())
            {
                // use current directory then
                Log.Info("Storage directory " + sd.GetRoot() + " does not contain previous fs state."
                         );
                // read and verify consistency with other directories
                storage.ReadProperties(sd);
                return(false);
            }
            // read and verify consistency of the prev dir
            prevStorage.ReadPreviousVersionProperties(sd);
            if (prevStorage.GetLayoutVersion() != targetLayoutVersion)
            {
                throw new IOException("Cannot rollback to storage version " + prevStorage.GetLayoutVersion
                                          () + " using this version of the NameNode, which uses storage version " + targetLayoutVersion
                                      + ". " + "Please use the previous version of HDFS to perform the rollback.");
            }
            return(true);
        }
Example No. 20
        /// <summary>Perform rollback of the storage dir to the previous state.</summary>
        /// <remarks>
        /// Perform rollback of the storage dir to the previous state. The existing
        /// current dir is removed, and the previous dir is renamed to current.
        /// </remarks>
        /// <param name="sd">the storage directory to roll back.</param>
        /// <exception cref="System.IO.IOException">in the event of error</exception>
        internal static void DoRollBack(Storage.StorageDirectory sd)
        {
            FilePath prevDir = sd.GetPreviousDir();

            if (!prevDir.Exists())
            {
                return;
            }
            FilePath tmpDir = sd.GetRemovedTmp();

            Preconditions.CheckState(!tmpDir.Exists(), "removed.tmp directory must not exist for rollback."
                                     + "Consider restarting for recovery.");
            // rename current to tmp
            FilePath curDir = sd.GetCurrentDir();

            Preconditions.CheckState(curDir.Exists(), "Current directory must exist for rollback."
                                     );
            NNStorage.Rename(curDir, tmpDir);
            // rename previous to current
            NNStorage.Rename(prevDir, curDir);
            // delete tmp dir
            NNStorage.DeleteDir(tmpDir);
            Log.Info("Rollback of " + sd.GetRoot() + " is complete.");
        }
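A hedged sketch combining this method with CanRollBack from Example No. 19: a directory is rolled back only if the preceding check passes. The wrapper name is hypothetical.

        /// <summary>Sketch (not from the source): roll back a single directory only when it is eligible.</summary>
        /// <exception cref="System.IO.IOException"/>
        internal static void RollBackIfPossible(Storage.StorageDirectory sd, StorageInfo storage,
                                                StorageInfo prevStorage, int targetLayoutVersion)
        {
            if (CanRollBack(sd, storage, prevStorage, targetLayoutVersion))
            {
                DoRollBack(sd);
            }
        }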
Example No. 21
 internal virtual FilePath GetRoot()
 {
     return(sd.GetRoot());
 }
Example No. 22
 /// <summary>
 /// Analyze whether a transition of the BP state is required and
 /// perform it if necessary.
 /// </summary>
 /// <remarks>
 /// Analyze whether a transition of the BP state is required and
 /// perform it if necessary.
 /// <br />
 /// Rollback if previousLV &gt;= LAYOUT_VERSION && prevCTime &lt;= namenode.cTime.
 /// Upgrade if this.LV &gt; LAYOUT_VERSION || this.cTime &lt; namenode.cTime.
 /// Regular startup if this.LV = LAYOUT_VERSION && this.cTime = namenode.cTime.
 /// </remarks>
 /// <param name="sd">storage directory <SD>/current/<bpid></param>
 /// <param name="nsInfo">namespace info</param>
 /// <param name="startOpt">startup option</param>
 /// <exception cref="System.IO.IOException"/>
 private void DoTransition(DataNode datanode, Storage.StorageDirectory sd, NamespaceInfo
                           nsInfo, HdfsServerConstants.StartupOption startOpt)
 {
     if (startOpt == HdfsServerConstants.StartupOption.Rollback && sd.GetPreviousDir()
         .Exists())
     {
         Preconditions.CheckState(!GetTrashRootDir(sd).Exists(), sd.GetPreviousDir() + " and "
                                  + GetTrashRootDir(sd) + " should not " + " both be present.");
         DoRollback(sd, nsInfo);
     }
     else
     {
         // rollback if applicable
         if (startOpt == HdfsServerConstants.StartupOption.Rollback && !sd.GetPreviousDir(
                 ).Exists())
         {
             // Restore all the files in the trash. The restored files are retained
             // during rolling upgrade rollback. They are deleted during rolling
             // upgrade downgrade.
             int restored = RestoreBlockFilesFromTrash(GetTrashRootDir(sd));
             Log.Info("Restored " + restored + " block files from trash.");
         }
     }
     ReadProperties(sd);
     CheckVersionUpgradable(this.layoutVersion);
     System.Diagnostics.Debug.Assert(this.layoutVersion >= HdfsConstants.DatanodeLayoutVersion
                                     , "Future version is not allowed");
     if (GetNamespaceID() != nsInfo.GetNamespaceID())
     {
         throw new IOException("Incompatible namespaceIDs in " + sd.GetRoot().GetCanonicalPath
                                   () + ": namenode namespaceID = " + nsInfo.GetNamespaceID() + "; datanode namespaceID = "
                               + GetNamespaceID());
     }
     if (!blockpoolID.Equals(nsInfo.GetBlockPoolID()))
     {
         throw new IOException("Incompatible blockpoolIDs in " + sd.GetRoot().GetCanonicalPath
                                   () + ": namenode blockpoolID = " + nsInfo.GetBlockPoolID() + "; datanode blockpoolID = "
                               + blockpoolID);
     }
     if (this.layoutVersion == HdfsConstants.DatanodeLayoutVersion && this.cTime == nsInfo
         .GetCTime())
     {
         return;
     }
     // regular startup
     if (this.layoutVersion > HdfsConstants.DatanodeLayoutVersion)
     {
         int restored = RestoreBlockFilesFromTrash(GetTrashRootDir(sd));
         Log.Info("Restored " + restored + " block files from trash " + "before the layout upgrade. These blocks will be moved to "
                  + "the previous directory during the upgrade");
     }
     if (this.layoutVersion > HdfsConstants.DatanodeLayoutVersion || this.cTime < nsInfo
         .GetCTime())
     {
         DoUpgrade(datanode, sd, nsInfo);
         // upgrade
         return;
     }
     // layoutVersion == LAYOUT_VERSION && this.cTime > nsInfo.cTime
     // must shutdown
     throw new IOException("Datanode state: LV = " + this.GetLayoutVersion() + " CTime = "
                           + this.GetCTime() + " is newer than the namespace state: LV = " + nsInfo.GetLayoutVersion
                               () + " CTime = " + nsInfo.GetCTime());
 }
Example No. 23
        /// <summary>
        /// Test that DataStorage and BlockPoolSliceStorage remove the failed volume
        /// after failure.
        /// </summary>
        /// <exception cref="System.Exception"/>
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="Sharpen.TimeoutException"/>
        public virtual void TestFailedVolumeBeingRemovedFromDataNode()
        {
            Path file1 = new Path("/test1");

            DFSTestUtil.CreateFile(fs, file1, 1024, (short)2, 1L);
            DFSTestUtil.WaitReplication(fs, file1, (short)2);
            FilePath dn0Vol1 = new FilePath(dataDir, "data" + (2 * 0 + 1));

            DataNodeTestUtils.InjectDataDirFailure(dn0Vol1);
            DataNode dn0 = cluster.GetDataNodes()[0];
            long     lastDiskErrorCheck = dn0.GetLastDiskErrorCheck();

            dn0.CheckDiskErrorAsync();
            // Wait for the checkDiskError thread to finish and discover the volume failure.
            while (dn0.GetLastDiskErrorCheck() == lastDiskErrorCheck)
            {
                Sharpen.Thread.Sleep(100);
            }
            // Verify dn0Vol1 has been completely removed from DN0.
            // 1. dn0Vol1 is removed from DataStorage.
            DataStorage storage = dn0.GetStorage();

            NUnit.Framework.Assert.AreEqual(1, storage.GetNumStorageDirs());
            for (int i = 0; i < storage.GetNumStorageDirs(); i++)
            {
                Storage.StorageDirectory sd = storage.GetStorageDir(i);
                NUnit.Framework.Assert.IsFalse(sd.GetRoot().GetAbsolutePath().StartsWith(dn0Vol1.
                                                                                         GetAbsolutePath()));
            }
            string bpid = cluster.GetNamesystem().GetBlockPoolId();
            BlockPoolSliceStorage bpsStorage = storage.GetBPStorage(bpid);

            NUnit.Framework.Assert.AreEqual(1, bpsStorage.GetNumStorageDirs());
            for (int i_1 = 0; i_1 < bpsStorage.GetNumStorageDirs(); i_1++)
            {
                Storage.StorageDirectory sd = bpsStorage.GetStorageDir(i_1);
                NUnit.Framework.Assert.IsFalse(sd.GetRoot().GetAbsolutePath().StartsWith(dn0Vol1.
                                                                                         GetAbsolutePath()));
            }
            // 2. dn0Vol1 is removed from FsDataset
            FsDatasetSpi <FsVolumeSpi> data = dn0.GetFSDataset();

            foreach (FsVolumeSpi volume in data.GetVolumes())
            {
                Assert.AssertNotEquals(new FilePath(volume.GetBasePath()).GetAbsoluteFile(), dn0Vol1
                                       .GetAbsoluteFile());
            }
            // 3. all blocks on dn0Vol1 have been removed.
            foreach (ReplicaInfo replica in FsDatasetTestUtil.GetReplicas(data, bpid))
            {
                NUnit.Framework.Assert.IsNotNull(replica.GetVolume());
                Assert.AssertNotEquals(new FilePath(replica.GetVolume().GetBasePath()).GetAbsoluteFile
                                           (), dn0Vol1.GetAbsoluteFile());
            }
            // 4. dn0Vol1 is not in DN0's configuration and dataDirs anymore.
            string[] dataDirStrs = dn0.GetConf().Get(DFSConfigKeys.DfsDatanodeDataDirKey).Split
                                       (",");
            NUnit.Framework.Assert.AreEqual(1, dataDirStrs.Length);
            NUnit.Framework.Assert.IsFalse(dataDirStrs[0].Contains(dn0Vol1.GetAbsolutePath())
                                           );
        }
Example No. 24
            /// <summary>Analyze checkpoint directories.</summary>
            /// <remarks>
            /// Analyze checkpoint directories.
            /// Create directories if they do not exist.
            /// Recover from an unsuccessful checkpoint if necessary.
            /// </remarks>
            /// <exception cref="System.IO.IOException"/>
            internal virtual void RecoverCreate(bool format)
            {
                storage.AttemptRestoreRemovedStorage();
                storage.UnlockAll();
                for (IEnumerator <Storage.StorageDirectory> it = storage.DirIterator(); it.HasNext
                         ();)
                {
                    Storage.StorageDirectory sd = it.Next();
                    bool isAccessible           = true;
                    try
                    {
                        // create the directory if it doesn't exist yet
                        if (!sd.GetRoot().Mkdirs())
                        {
                            // do nothing, directory is already created
                        }
                    }
                    catch (SecurityException)
                    {
                        isAccessible = false;
                    }
                    if (!isAccessible)
                    {
                        throw new InconsistentFSStateException(sd.GetRoot(), "cannot access checkpoint directory."
                                                               );
                    }
                    if (format)
                    {
                        // Don't confirm, since this is just the secondary namenode.
                        Log.Info("Formatting storage directory " + sd);
                        sd.ClearDirectory();
                    }
                    Storage.StorageState curState;
                    try
                    {
                        curState = sd.AnalyzeStorage(HdfsServerConstants.StartupOption.Regular, storage);
                        // sd is locked but not opened
                        switch (curState)
                        {
                        case Storage.StorageState.NonExistent:
                        {
                            // fail if any of the configured checkpoint dirs are inaccessible
                            throw new InconsistentFSStateException(sd.GetRoot(), "checkpoint directory does not exist or is not accessible."
                                                                   );
                        }

                        case Storage.StorageState.NotFormatted:
                        {
                            // it's ok since initially there is no current and VERSION
                            break;
                        }

                        case Storage.StorageState.Normal:
                        {
                            // Read the VERSION file. This verifies that:
                            // (a) the VERSION file for each of the directories is the same,
                            // and (b) when we connect to a NN, we can verify that the remote
                            // node matches the same namespace that we ran on previously.
                            storage.ReadProperties(sd);
                            break;
                        }

                        default:
                        {
                            // recovery is possible
                            sd.DoRecover(curState);
                            break;
                        }
                        }
                    }
                    catch (IOException ioe)
                    {
                        sd.Unlock();
                        throw;
                    }
                }
            }
Example No. 25
 public override string ToString()
 {
     return(string.Format("FileJournalManager(root={0})", sd.GetRoot()));
 }
Example No. 26
 private FilePath GetTrashRootDir(Storage.StorageDirectory sd)
 {
     return(new FilePath(sd.GetRoot(), TrashRootDir));
 }
Example No. 27
        /// <summary>This is called when using bootstrapStandby for HA upgrade.</summary>
        /// <remarks>
        /// This is called when using bootstrapStandby for HA upgrade. The SBN should
        /// also create previous directory so that later when it starts, it understands
        /// that the cluster is in the upgrade state. This function renames the old
        /// current directory to previous.tmp.
        /// </remarks>
        /// <exception cref="System.IO.IOException"/>
        private bool DoPreUpgrade(NNStorage storage, NamespaceInfo nsInfo)
        {
            bool isFormatted = false;
            IDictionary <Storage.StorageDirectory, Storage.StorageState> dataDirStates = new Dictionary
                                                                                         <Storage.StorageDirectory, Storage.StorageState>();

            try
            {
                isFormatted = FSImage.RecoverStorageDirs(HdfsServerConstants.StartupOption.Upgrade
                                                         , storage, dataDirStates);
                if (dataDirStates.Values.Contains(Storage.StorageState.NotFormatted))
                {
                    // recoverStorageDirs returns true if there is a formatted directory
                    isFormatted = false;
                    System.Console.Error.WriteLine("The original storage directory is not formatted."
                                                   );
                }
            }
            catch (InconsistentFSStateException e)
            {
                // if the storage is in a bad state,
                Log.Warn("The storage directory is in an inconsistent state", e);
            }
            finally
            {
                storage.UnlockAll();
            }
            // if there is InconsistentFSStateException or the storage is not formatted,
            // format the storage. Although this format is done through the new
            // software, since in HA setup the SBN is rolled back through
            // "-bootstrapStandby", we should still be fine.
            if (!isFormatted && !Format(storage, nsInfo))
            {
                return(false);
            }
            // make sure there is no previous directory
            FSImage.CheckUpgrade(storage);
            // Do preUpgrade for each directory
            for (IEnumerator <Storage.StorageDirectory> it = storage.DirIterator(false); it.HasNext
                     ();)
            {
                Storage.StorageDirectory sd = it.Next();
                try
                {
                    NNUpgradeUtil.RenameCurToTmp(sd);
                }
                catch (IOException e)
                {
                    Log.Error("Failed to move aside pre-upgrade storage " + "in image directory " + sd
                              .GetRoot(), e);
                    throw;
                }
            }
            storage.SetStorageInfo(nsInfo);
            storage.SetBlockPoolID(nsInfo.GetBlockPoolID());
            return(true);
        }
Example No. 28
        /// <exception cref="System.IO.IOException"/>
        internal override void InspectDirectory(Storage.StorageDirectory sd)
        {
            // Was the directory just formatted?
            if (!sd.GetVersionFile().Exists())
            {
                Log.Info("No version file in " + sd.GetRoot());
                needToSave |= true;
                return;
            }
            // Check for a seen_txid file, which marks a minimum transaction ID that
            // must be included in our load plan.
            try
            {
                maxSeenTxId = Math.Max(maxSeenTxId, NNStorage.ReadTransactionIdFile(sd));
            }
            catch (IOException ioe)
            {
                Log.Warn("Unable to determine the max transaction ID seen by " + sd, ioe);
                return;
            }
            FilePath currentDir = sd.GetCurrentDir();

            FilePath[] filesInStorage;
            try
            {
                filesInStorage = FileUtil.ListFiles(currentDir);
            }
            catch (IOException ioe)
            {
                Log.Warn("Unable to inspect storage directory " + currentDir, ioe);
                return;
            }
            foreach (FilePath f in filesInStorage)
            {
                Log.Debug("Checking file " + f);
                string name = f.GetName();
                // Check for fsimage_*
                Matcher imageMatch = this.MatchPattern(name);
                if (imageMatch != null)
                {
                    if (sd.GetStorageDirType().IsOfType(NNStorage.NameNodeDirType.Image))
                    {
                        try
                        {
                            long txid = long.Parse(imageMatch.Group(1));
                            foundImages.AddItem(new FSImageStorageInspector.FSImageFile(sd, f, txid));
                        }
                        catch (FormatException)
                        {
                            Log.Error("Image file " + f + " has improperly formatted " + "transaction ID");
                        }
                    }
                    else
                    {
                        // skip
                        Log.Warn("Found image file at " + f + " but storage directory is " + "not configured to contain images."
                                 );
                    }
                }
            }
            // set finalized flag
            isUpgradeFinalized = isUpgradeFinalized && !sd.GetPreviousDir().Exists();
        }