public virtual void TestClearQuota()
        {
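            // Setting or clearing a quota must not affect snapshot state: the
            // directory stays snapshottable and no DirectoryDiff is recorded
            // until a snapshot is actually created.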
            Path dir = new Path("/TestSnapshot");

            hdfs.Mkdirs(dir);
            hdfs.AllowSnapshot(dir);
            hdfs.SetQuota(dir, HdfsConstants.QuotaDontSet, HdfsConstants.QuotaDontSet);
            INodeDirectory dirNode = fsdir.GetINode4Write(dir.ToString()).AsDirectory();

            NUnit.Framework.Assert.IsTrue(dirNode.IsSnapshottable());
            NUnit.Framework.Assert.AreEqual(0, dirNode.GetDiffs().AsList().Count);
            hdfs.SetQuota(dir, HdfsConstants.QuotaDontSet - 1, HdfsConstants.QuotaDontSet - 1);
            dirNode = fsdir.GetINode4Write(dir.ToString()).AsDirectory();
            NUnit.Framework.Assert.IsTrue(dirNode.IsSnapshottable());
            NUnit.Framework.Assert.AreEqual(0, dirNode.GetDiffs().AsList().Count);
            hdfs.SetQuota(dir, HdfsConstants.QuotaReset, HdfsConstants.QuotaReset);
            dirNode = fsdir.GetINode4Write(dir.ToString()).AsDirectory();
            NUnit.Framework.Assert.IsTrue(dirNode.IsSnapshottable());
            NUnit.Framework.Assert.AreEqual(0, dirNode.GetDiffs().AsList().Count);
            // allow snapshot on dir and create snapshot s1
            SnapshotTestHelper.CreateSnapshot(hdfs, dir, "s1");
            // clear quota of dir
            hdfs.SetQuota(dir, HdfsConstants.QuotaReset, HdfsConstants.QuotaReset);
            // dir should still be a snapshottable directory
            dirNode = fsdir.GetINode4Write(dir.ToString()).AsDirectory();
            NUnit.Framework.Assert.IsTrue(dirNode.IsSnapshottable());
            NUnit.Framework.Assert.AreEqual(1, dirNode.GetDiffs().AsList().Count);
            SnapshottableDirectoryStatus[] status = hdfs.GetSnapshottableDirListing();
            NUnit.Framework.Assert.AreEqual(1, status.Length);
            NUnit.Framework.Assert.AreEqual(dir, status[0].GetFullPath());
            Path subDir = new Path(dir, "sub");

            hdfs.Mkdirs(subDir);
            hdfs.CreateSnapshot(dir, "s2");
            Path file = new Path(subDir, "file");

            DFSTestUtil.CreateFile(hdfs, file, Blocksize, Replication, seed);
            hdfs.SetQuota(dir, HdfsConstants.QuotaReset, HdfsConstants.QuotaReset);
            INode subNode = fsdir.GetINode4Write(subDir.ToString());

            NUnit.Framework.Assert.IsTrue(subNode.AsDirectory().IsWithSnapshot());
            IList<DirectoryWithSnapshotFeature.DirectoryDiff> diffList = subNode.AsDirectory().GetDiffs().AsList();

            NUnit.Framework.Assert.AreEqual(1, diffList.Count);
            Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot s2 = dirNode.GetSnapshot(DFSUtil.String2Bytes("s2"));
            NUnit.Framework.Assert.AreEqual(s2.GetId(), diffList[0].GetSnapshotId());
            IList<INode> createdList = diffList[0].GetChildrenDiff().GetList(Diff.ListType.Created);

            NUnit.Framework.Assert.AreEqual(1, createdList.Count);
            NUnit.Framework.Assert.AreSame(fsdir.GetINode4Write(file.ToString()), createdList[0]);
        }
Example #2
        /// <summary>
        /// Remove the snapshot with the given name from <see cref="snapshotsByNames"/>,
        /// and delete all the corresponding DirectoryDiffs.
        /// </summary>
        /// <param name="snapshotRoot">The directory where we take snapshots</param>
        /// <param name="snapshotName">The name of the snapshot to be removed</param>
        /// <param name="collectedBlocks">Used to collect information to update blocksMap</param>
        /// <returns>
        /// The removed snapshot. Null if no snapshot with the given name
        /// exists.
        /// </returns>
        /// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.SnapshotException"/>
        public virtual Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot RemoveSnapshot(
            BlockStoragePolicySuite bsps, INodeDirectory snapshotRoot, string snapshotName,
            INode.BlocksMapUpdateInfo collectedBlocks, IList<INode> removedINodes)
        {
            int i = SearchSnapshot(DFSUtil.String2Bytes(snapshotName));

            if (i < 0)
            {
                throw new SnapshotException("Cannot delete snapshot " + snapshotName + " from path "
                                            + snapshotRoot.GetFullPathName() + ": the snapshot does not exist.");
            }
            else
            {
                Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot snapshot = snapshotsByNames[i];
                int prior = Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.FindLatestSnapshot(snapshotRoot, snapshot.GetId());
                try
                {
                    QuotaCounts counts = snapshotRoot.CleanSubtree(bsps, snapshot.GetId(), prior, collectedBlocks, removedINodes);
                    INodeDirectory parent = snapshotRoot.GetParent();
                    if (parent != null)
                    {
                        // there will not be any WithName node corresponding to the deleted
                        // snapshot, thus only update the quota usage in the current tree
                        parent.AddSpaceConsumed(counts.Negation(), true);
                    }
                }
                catch (QuotaExceededException e)
                {
                    INode.Log.Error("BUG: removeSnapshot increases namespace usage.", e);
                }
                // remove from snapshotsByNames after successfully cleaning the subtree
                snapshotsByNames.Remove(i);
                return(snapshot);
            }
        }
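For context, a hedged usage sketch (not part of the original example) of the client-side path that ends in RemoveSnapshot, reusing the hdfs DistributedFileSystem handle and the dir path from the first example; DeleteSnapshot is the public API whose NameNode-side handling performs this removal:

        hdfs.AllowSnapshot(dir);              // make the directory snapshottable
        hdfs.CreateSnapshot(dir, "s1");       // registers "s1" in snapshotsByNames
        hdfs.DeleteSnapshot(dir, "s1");       // removes "s1" and its DirectoryDiffs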
Example #3
        /// <exception cref="System.IO.IOException"/>
        private static FileDiff LoadFileDiff(FileDiff posterior, DataInput @in, FSImageFormat.Loader loader)
        {
            // 1. Read the id of the Snapshot root to identify the Snapshot
            Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot snapshot = loader.GetSnapshot(@in);
            // 2. Load file size
            long fileSize = @in.ReadLong();
            // 3. Load snapshotINode
            INodeFileAttributes snapshotINode = @in.ReadBoolean() ? loader.LoadINodeFileAttributes(@in) : null;

            return(new FileDiff(snapshot.GetId(), snapshotINode, posterior, fileSize));
        }
Example #4
        /// <summary>
        /// Read snapshot information from the fsimage and rebuild the map from
        /// snapshot id to <see cref="Snapshot"/>.
        /// </summary>
        /// <exception cref="System.IO.IOException"/>
        public virtual IDictionary<int, Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot> Read(DataInput @in, FSImageFormat.Loader loader)
        {
            snapshotCounter = @in.ReadInt();
            numSnapshots.Set(@in.ReadInt());
            // read snapshots
            IDictionary<int, Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot> snapshotMap =
                new Dictionary<int, Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot>();

            for (int i = 0; i < numSnapshots.Get(); i++)
            {
                Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot s = Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.Read(@in, loader);
                snapshotMap[s.GetId()] = s;
            }
            return(snapshotMap);
        }
Example #5
        /// <summary>
        /// Load <see cref="DirectoryDiff"/> from fsimage.
        /// </summary>
        /// <param name="parent">The directory that the SnapshotDiff belongs to.</param>
        /// <param name="in">The <see cref="System.IO.DataInput"/> instance to read.</param>
        /// <param name="loader">The <see cref="Loader"/> instance that this loading procedure is using.</param>
        /// <returns>A <see cref="DirectoryDiff"/>.</returns>
        /// <exception cref="System.IO.IOException"/>
        private static DirectoryWithSnapshotFeature.DirectoryDiff LoadDirectoryDiff(
            INodeDirectory parent, DataInput @in, FSImageFormat.Loader loader)
        {
            // 1. Read the full path of the Snapshot root to identify the Snapshot
            Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot snapshot = loader.GetSnapshot(@in);
            // 2. Load DirectoryDiff#childrenSize
            int childrenSize = @in.ReadInt();
            // 3. Load DirectoryDiff#snapshotINode
            INodeDirectoryAttributes snapshotINode = LoadSnapshotINodeInDirectoryDiff(snapshot, @in, loader);
            // 4. Load the created list in SnapshotDiff#Diff
            IList <INode> createdList = LoadCreatedList(parent, @in);
            // 5. Load the deleted list in SnapshotDiff#Diff
            IList <INode> deletedList = LoadDeletedList(parent, createdList, @in, loader);
            // 6. Compose the SnapshotDiff
            IList<DirectoryWithSnapshotFeature.DirectoryDiff> diffs = parent.GetDiffs().AsList();

            DirectoryWithSnapshotFeature.DirectoryDiff sdiff = new DirectoryWithSnapshotFeature.DirectoryDiff(
                snapshot.GetId(), snapshotINode, diffs.IsEmpty() ? null : diffs[0],
                childrenSize, createdList, deletedList, snapshotINode == snapshot.GetRoot());
            return(sdiff);
        }
Example #6
 public static int GetSnapshotId(Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot s)
 {
     return(s == null ? CurrentStateId : s.GetId());
 }
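A minimal illustration of the null convention (a hedged sketch, not from the original source), where Snapshot abbreviates Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot and s2 is a concrete snapshot reference such as the one resolved in the first example:

 int currentId = Snapshot.GetSnapshotId(null);   // null stands for the current state: equals Snapshot.CurrentStateId
 int s2Id      = Snapshot.GetSnapshotId(s2);     // equals s2.GetId()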
Example #7
 /// <summary>
 /// Recursively compute the difference between snapshots under a given
 /// directory/file.
 /// </summary>
 /// <param name="snapshotRoot">The directory where snapshots were taken.</param>
 /// <param name="node">The directory/file under which the diff is computed.</param>
 /// <param name="parentPath">
 /// Relative path (corresponding to the snapshot root) of
 /// the node's parent.
 /// </param>
 /// <param name="diffReport">data structure used to store the diff.</param>
 private void ComputeDiffRecursively(INodeDirectory snapshotRoot, INode node,
                                     IList<byte[]> parentPath, SnapshotDiffInfo diffReport)
 {
     Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot earlierSnapshot =
         diffReport.IsFromEarlier() ? diffReport.GetFrom() : diffReport.GetTo();
     Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot laterSnapshot =
         diffReport.IsFromEarlier() ? diffReport.GetTo() : diffReport.GetFrom();
     byte[][] relativePath = Sharpen.Collections.ToArray(parentPath, new byte[parentPath.Count][]);
     if (node.IsDirectory())
     {
         DirectoryWithSnapshotFeature.ChildrenDiff diff = new DirectoryWithSnapshotFeature.ChildrenDiff();
         INodeDirectory dir = node.AsDirectory();
         DirectoryWithSnapshotFeature sf = dir.GetDirectoryWithSnapshotFeature();
         if (sf != null)
         {
             bool change = sf.ComputeDiffBetweenSnapshots(earlierSnapshot, laterSnapshot, diff, dir);
             if (change)
             {
                 diffReport.AddDirDiff(dir, relativePath, diff);
             }
         }
         ReadOnlyList<INode> children = dir.GetChildrenList(earlierSnapshot.GetId());
         foreach (INode child in children)
         {
             byte[] name      = child.GetLocalNameBytes();
             bool   toProcess = diff.SearchIndex(Diff.ListType.Deleted, name) < 0;
             if (!toProcess && child is INodeReference.WithName)
             {
                 byte[][] renameTargetPath = FindRenameTargetPath(
                     snapshotRoot, (INodeReference.WithName)child,
                     laterSnapshot == null
                         ? Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId
                         : laterSnapshot.GetId());
                 if (renameTargetPath != null)
                 {
                     toProcess = true;
                     diffReport.SetRenameTarget(child.GetId(), renameTargetPath);
                 }
             }
             if (toProcess)
             {
                 parentPath.AddItem(name);
                 ComputeDiffRecursively(snapshotRoot, child, parentPath, diffReport);
                 parentPath.Remove(parentPath.Count - 1);
             }
         }
     }
     else
     {
         if (node.IsFile() && node.AsFile().IsWithSnapshot())
         {
             INodeFile file   = node.AsFile();
             bool      change = file.GetFileWithSnapshotFeature().ChangedBetweenSnapshots(file, earlierSnapshot, laterSnapshot);
             if (change)
             {
                 diffReport.AddFileDiff(file, relativePath);
             }
         }
     }
 }
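For orientation, a hedged sketch (not part of the original example) of the client call whose NameNode-side handling runs this recursion, reusing the hdfs handle and dir path from the first example and assuming snapshots "s1" and "s2" already exist on dir:

 // Ask the NameNode for the difference between the two snapshots of dir.
 SnapshotDiffReport report = hdfs.GetSnapshotDiffReport(dir, "s1", "s2");
 foreach (SnapshotDiffReport.DiffReportEntry entry in report.GetDiffList())
 {
     System.Console.WriteLine(entry);   // one line per created/deleted/modified/renamed path
 }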