Example #1
        internal virtual void Validate()
        {
            // check parent up to snapshotRootIndex if this is a snapshot path
            int i = 0;

            if (inodes[i] != null)
            {
                for (i++; i < inodes.Length && inodes[i] != null; i++)
                {
                    INodeDirectory parent_i   = inodes[i].GetParent();
                    INodeDirectory parent_i_1 = inodes[i - 1].GetParent();
                    if (parent_i != inodes[i - 1] &&
                        (parent_i_1 == null || !parent_i_1.IsSnapshottable() || parent_i != parent_i_1))
                    {
                        throw new Exception("inodes[" + i + "].getParent() != inodes[" + (i - 1) + "]\n  inodes["
                                            + i + "]=" + inodes[i].ToDetailString() + "\n  inodes[" + (i - 1) + "]=" + inodes
                                            [i - 1].ToDetailString() + "\n this=" + ToString(false));
                    }
                }
            }
            if (i != inodes.Length)
            {
                throw new Exception("i = " + i + " != " + inodes.Length + ", this=" + ToString(false
                                                                                               ));
            }
        }
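Validate above enforces the basic invariant of a resolved inode path: each element's GetParent() must be the previous element, with an extra allowance for snapshot boundaries. Stripped of the snapshot case, the same check on a toy node type might look like this (PathNode and its fields are illustrative only, not part of the HDFS code):

        class PathNode
        {
            public PathNode Parent;
            public string   Name;
        }

        static class PathChecker
        {
            // Throws if any element of the path does not hang off the previous one.
            internal static void Validate(PathNode[] nodes)
            {
                for (int i = 1; i < nodes.Length && nodes[i] != null; i++)
                {
                    if (nodes[i].Parent != nodes[i - 1])
                    {
                        throw new System.Exception("nodes[" + i + "].Parent != nodes[" + (i - 1) + "]");
                    }
                }
            }
        }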
Example #2
            private void LoadRootINode(FsImageProto.INodeSection.INode p)
            {
                INodeDirectory root    = LoadINodeDirectory(p, parent.GetLoaderContext());
                QuotaCounts    q       = root.GetQuotaCounts();
                long           nsQuota = q.GetNameSpace();
                long           dsQuota = q.GetStorageSpace();

                if (nsQuota != -1 || dsQuota != -1)
                {
                    dir.rootDir.GetDirectoryWithQuotaFeature().SetQuota(nsQuota, dsQuota);
                }
                EnumCounters <StorageType> typeQuotas = q.GetTypeSpaces();

                if (typeQuotas.AnyGreaterOrEqual(0))
                {
                    dir.rootDir.GetDirectoryWithQuotaFeature().SetQuota(typeQuotas);
                }
                dir.rootDir.CloneModificationTime(root);
                dir.rootDir.ClonePermissionStatus(root);
                AclFeature af = root.GetFeature(typeof(AclFeature));

                if (af != null)
                {
                    dir.rootDir.AddAclFeature(af);
                }
                // root dir supports having extended attributes according to POSIX
                XAttrFeature f = root.GetXAttrFeature();

                if (f != null)
                {
                    dir.rootDir.AddXAttrFeature(f);
                }
                dir.AddRootDirToEncryptionZone(f);
            }
Example #3
        /// <summary>Get a listing of all the snapshots of a snapshottable directory</summary>
        /// <exception cref="System.IO.IOException"/>
        private static DirectoryListing GetSnapshotsListing(FSDirectory fsd, string src,
                                                            byte[] startAfter)
        {
            Preconditions.CheckState(fsd.HasReadLock());
            Preconditions.CheckArgument(src.EndsWith(HdfsConstants.SeparatorDotSnapshotDir),
                                        "%s does not end with %s", src, HdfsConstants.SeparatorDotSnapshotDir);
            string dirPath = FSDirectory.NormalizePath(
                Sharpen.Runtime.Substring(src, 0, src.Length - HdfsConstants.DotSnapshotDir.Length));
            INode          node              = fsd.GetINode(dirPath);
            INodeDirectory dirNode           = INodeDirectory.ValueOf(node, dirPath);
            DirectorySnapshottableFeature sf = dirNode.GetDirectorySnapshottableFeature();

            if (sf == null)
            {
                throw new SnapshotException("Directory is not a snapshottable directory: " + dirPath);
            }
            ReadOnlyList <Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot> snapshots =
                sf.GetSnapshotList();
            int skipSize = ReadOnlyList.Util.BinarySearch(snapshots, startAfter);

            skipSize = skipSize < 0 ? -skipSize - 1 : skipSize + 1;
            int numOfListing = Math.Min(snapshots.Size() - skipSize, fsd.GetLsLimit());

            HdfsFileStatus[] listing = new HdfsFileStatus[numOfListing];
            for (int i = 0; i < numOfListing; i++)
            {
                Snapshot.Root sRoot = snapshots.Get(i + skipSize).GetRoot();
                listing[i] = CreateFileStatus(fsd, src, sRoot.GetLocalNameBytes(), sRoot,
                                              BlockStoragePolicySuite.IdUnspecified,
                                              Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId,
                                              false, INodesInPath.FromINode(sRoot));
            }
            return(new DirectoryListing(listing, snapshots.Size() - skipSize - numOfListing));
        }
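The skipSize adjustment above uses the standard binary-search convention: a non-negative result is the index of an exact match (skip past it), while a negative result encodes the insertion point as -(insertionPoint) - 1 (skip everything that sorts before startAfter). A small self-contained illustration of the same idiom with Array.BinarySearch; the snapshot names below are made up for the demo:

        using System;

        static class SkipSizeDemo
        {
            static void Main()
            {
                string[] snapshots = { "s1", "s3", "s5" };
                foreach (string startAfter in new[] { "s3", "s2" })
                {
                    int idx = Array.BinarySearch(snapshots, startAfter, StringComparer.Ordinal);
                    // Exact match: resume after it. No match: -idx - 1 is the insertion point,
                    // i.e. the number of entries that sort before startAfter.
                    int skipSize = idx < 0 ? -idx - 1 : idx + 1;
                    Console.WriteLine("startAfter=" + startAfter + " -> skip " + skipSize);
                }
            }
        }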
Example #4
        /// <summary>
        /// Guarded by
        /// <see cref="FSNamesystem.ReadLock()"/>
        ///
        /// </summary>
        /// <exception cref="Org.Apache.Hadoop.Security.AccessControlException"/>
        private void CheckSubAccess(byte[][] pathByNameArr, int pathIdx, INode inode, int
                                    snapshotId, FsAction access, bool ignoreEmptyDir)
        {
            if (inode == null || !inode.IsDirectory())
            {
                return;
            }
            Stack <INodeDirectory> directories = new Stack <INodeDirectory>();

            for (directories.Push(inode.AsDirectory()); !directories.IsEmpty();)
            {
                INodeDirectory       d     = directories.Pop();
                ReadOnlyList <INode> cList = d.GetChildrenList(snapshotId);
                if (!(cList.IsEmpty() && ignoreEmptyDir))
                {
                    //TODO have to figure this out with inodeattribute provider
                    Check(GetINodeAttrs(pathByNameArr, pathIdx, d, snapshotId), inode.GetFullPathName(), access);
                }
                foreach (INode child in cList)
                {
                    if (child.IsDirectory())
                    {
                        directories.Push(child.AsDirectory());
                    }
                }
            }
        }
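CheckSubAccess avoids recursion: it walks the subtree with an explicit Stack, so a very deep directory tree cannot overflow the call stack. The bare traversal pattern, separated from the permission logic, on an illustrative Node type:

        using System;
        using System.Collections.Generic;

        class Node
        {
            public string     Name;
            public List<Node> Children = new List<Node>();
        }

        static class SubtreeWalk
        {
            static void Visit(Node root)
            {
                Stack<Node> directories = new Stack<Node>();
                for (directories.Push(root); directories.Count > 0; )
                {
                    Node d = directories.Pop();
                    Console.WriteLine(d.Name);        // the per-directory check goes here
                    foreach (Node child in d.Children)
                    {
                        directories.Push(child);      // descend later, iteratively
                    }
                }
            }
        }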
Example #5
 /// <summary>Set the namespace, storagespace and typespace quota for a directory.</summary>
 /// <remarks>
 /// Set the namespace, storagespace and typespace quota for a directory.
 /// Note: This does not support ".inodes" relative path.
 /// </remarks>
 /// <exception cref="System.IO.IOException"/>
 internal static void SetQuota(FSDirectory fsd, string src, long nsQuota, long ssQuota
                               , StorageType type)
 {
     if (fsd.IsPermissionEnabled())
     {
         FSPermissionChecker pc = fsd.GetPermissionChecker();
         pc.CheckSuperuserPrivilege();
     }
     fsd.WriteLock();
     try
     {
         INodeDirectory changed = UnprotectedSetQuota(fsd, src, nsQuota, ssQuota, type);
         if (changed != null)
         {
             QuotaCounts q = changed.GetQuotaCounts();
             if (type == null)
             {
                 fsd.GetEditLog().LogSetQuota(src, q.GetNameSpace(), q.GetStorageSpace());
             }
             else
             {
                 fsd.GetEditLog().LogSetQuotaByStorageType(src, q.GetTypeSpaces().Get(type), type);
             }
         }
     }
     finally
     {
         fsd.WriteUnlock();
     }
 }
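SetQuota wraps the namespace mutation in WriteLock()/try/finally so the directory lock is always released, even if UnprotectedSetQuota throws. The same idiom with the standard ReaderWriterLockSlim, as a hedged stand-in for FSDirectory's internal lock:

     using System.Threading;

     class QuotaHolder
     {
         private readonly ReaderWriterLockSlim rwLock = new ReaderWriterLockSlim();
         private long nsQuota;

         public void SetQuota(long value)
         {
             rwLock.EnterWriteLock();
             try
             {
                 nsQuota = value;              // mutate shared state under the write lock
             }
             finally
             {
                 rwLock.ExitWriteLock();       // released on success and on exception alike
             }
         }
     }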
Example #6
 /// <summary>
 /// Serialize a
 /// <see cref="INodeDirectory"/>
 /// </summary>
 /// <param name="node">The node to write</param>
 /// <param name="out">
 /// The
 /// <see cref="System.IO.DataOutput"/>
 /// where the fields are written
 /// </param>
 /// <exception cref="System.IO.IOException"/>
 public static void WriteINodeDirectory(INodeDirectory node, DataOutput @out)
 {
     WriteLocalName(node, @out);
      @out.WriteLong(node.GetId());
      @out.WriteShort(0);                          // replication
      @out.WriteLong(node.GetModificationTime());
      @out.WriteLong(0);                           // access time
      @out.WriteLong(0);                           // preferred block size
      @out.WriteInt(-1);                           // # of blocks
     WriteQuota(node.GetQuotaCounts(), @out);
     if (node.IsSnapshottable())
     {
         @out.WriteBoolean(true);
     }
     else
     {
         @out.WriteBoolean(false);
         @out.WriteBoolean(node.IsWithSnapshot());
     }
     WritePermissionStatus(node, @out);
 }
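WriteINodeDirectory writes fixed placeholder fields (replication 0, access time 0, preferred block size 0, block count -1) so that directory records share the field layout of file records, with the -1 block count marking a directory. A rough sketch of that field order using System.IO.BinaryWriter; note that BinaryWriter is little-endian while Java's DataOutput is big-endian, so this only illustrates the layout and is not byte-compatible with the real image format:

     using System.IO;

     static class DirRecordSketch
     {
         // Placeholder values mirror the directory case in the example above.
         static void WriteDirectoryRecord(BinaryWriter w, byte[] localName, long id, long mtime)
         {
             w.Write((short)localName.Length);
             w.Write(localName);              // local name bytes
             w.Write(id);                     // inode id
             w.Write((short)0);               // replication (unused for directories)
             w.Write(mtime);                  // modification time
             w.Write(0L);                     // access time (unused)
             w.Write(0L);                     // preferred block size (unused)
             w.Write(-1);                     // block count: -1 marks a directory
         }
     }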
Example #7
        /// <summary>Is this inode in the latest snapshot?</summary>
        public bool IsInLatestSnapshot(int latestSnapshotId)
        {
            if (latestSnapshotId == Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId ||
                latestSnapshotId == Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.NoSnapshotId)
            {
                return(false);
            }
            // if parent is a reference node, parent must be a renamed node. We can
            // stop the check at the reference node.
            if (parent != null && parent.IsReference())
            {
                return(true);
            }
            INodeDirectory parentDir = GetParent();

            if (parentDir == null)
            {
                // root
                return(true);
            }
            if (!parentDir.IsInLatestSnapshot(latestSnapshotId))
            {
                return(false);
            }
            Org.Apache.Hadoop.Hdfs.Server.Namenode.INode child =
                parentDir.GetChild(GetLocalNameBytes(), latestSnapshotId);
            if (this == child)
            {
                return(true);
            }
            return(child != null && child.IsReference() && this == child.AsReference().GetReferredINode());
        }
Example #8
            /// <exception cref="System.IO.IOException"/>
            internal void LoadINodeDirectorySection(InputStream @in)
            {
                IList <INodeReference> refList = parent.GetLoaderContext().GetRefList();

                while (true)
                {
                    FsImageProto.INodeDirectorySection.DirEntry e =
                        FsImageProto.INodeDirectorySection.DirEntry.ParseDelimitedFrom(@in);
                    // note that in is a LimitedInputStream
                    if (e == null)
                    {
                        break;
                    }
                    INodeDirectory p = dir.GetInode(e.GetParent()).AsDirectory();
                    foreach (long id in e.GetChildrenList())
                    {
                        INode child = dir.GetInode(id);
                        AddToParent(p, child);
                    }
                    foreach (int refId in e.GetRefChildrenList())
                    {
                        INodeReference @ref = refList[refId];
                        AddToParent(p, @ref);
                    }
                }
            }
Example #9
        public virtual void TestAclGroupTraverseDenyOnlyDefaultEntries()
        {
            INodeDirectory inodeDir = CreateINodeDirectory(inodeRoot, "dir1", "bruce", "execs", (short)0x1ed);
            INodeFile inodeFile = CreateINodeFile(inodeDir, "file1", "bruce", "execs", (short)0x1a4);

            AddAcl(inodeDir,
                   AclTestHelpers.AclEntry(AclEntryScope.Access, AclEntryType.User, FsAction.All),
                   AclTestHelpers.AclEntry(AclEntryScope.Access, AclEntryType.Group, FsAction.None),
                   AclTestHelpers.AclEntry(AclEntryScope.Access, AclEntryType.Other, FsAction.ReadExecute),
                   AclTestHelpers.AclEntry(AclEntryScope.Default, AclEntryType.User, FsAction.All),
                   AclTestHelpers.AclEntry(AclEntryScope.Default, AclEntryType.Group, "sales", FsAction.None),
                   AclTestHelpers.AclEntry(AclEntryScope.Default, AclEntryType.Group, FsAction.None),
                   AclTestHelpers.AclEntry(AclEntryScope.Default, AclEntryType.Other, FsAction.ReadExecute));
            AssertPermissionGranted(Bruce, "/dir1/file1", FsAction.ReadWrite);
            AssertPermissionGranted(Diana, "/dir1/file1", FsAction.Read);
            AssertPermissionDenied(Clark, "/dir1/file1", FsAction.Read);
            AssertPermissionDenied(Clark, "/dir1/file1", FsAction.Write);
            AssertPermissionDenied(Clark, "/dir1/file1", FsAction.Execute);
            AssertPermissionDenied(Clark, "/dir1/file1", FsAction.ReadWrite);
            AssertPermissionDenied(Clark, "/dir1/file1", FsAction.ReadExecute);
            AssertPermissionDenied(Clark, "/dir1/file1", FsAction.WriteExecute);
            AssertPermissionDenied(Clark, "/dir1/file1", FsAction.All);
        }
Example #10
 /// <summary>
 /// Check if the given INode (or one of its descendants) is snapshottable and
 /// already has snapshots.
 /// </summary>
 /// <param name="target">The given INode</param>
 /// <param name="snapshottableDirs">
 /// The list of directories that are snapshottable
 /// but do not have snapshots yet
 /// </param>
 /// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.SnapshotException"/>
 internal static void CheckSnapshot(INode target, IList <INodeDirectory> snapshottableDirs
                                    )
 {
     if (target.IsDirectory())
     {
         INodeDirectory targetDir         = target.AsDirectory();
         DirectorySnapshottableFeature sf = targetDir.GetDirectorySnapshottableFeature();
         if (sf != null)
         {
             if (sf.GetNumSnapshots() > 0)
             {
                 string fullPath = targetDir.GetFullPathName();
                 throw new SnapshotException("The directory " + fullPath + " cannot be deleted since "
                                             + fullPath + " is snapshottable and already has snapshots");
             }
             else
             {
                 if (snapshottableDirs != null)
                 {
                     snapshottableDirs.AddItem(targetDir);
                 }
             }
         }
          foreach (INode child in targetDir.GetChildrenList(
                       Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId))
         {
             CheckSnapshot(child, snapshottableDirs);
         }
     }
 }
Example #11
 public DstReference(INodeDirectory parent, INodeReference.WithCount referred, int
                     dstSnapshotId)
     : base(parent, referred)
 {
     this.dstSnapshotId = dstSnapshotId;
     referred.AddReference(this);
 }
Example #12
 private void CheckStoragespace(INodeDirectory dir, long computed)
 {
     if (-1 != quota.GetStorageSpace() && usage.GetStorageSpace() != computed)
     {
          NameNode.Log.Error("BUG: Inconsistent storagespace for directory " + dir.GetFullPathName()
                             + ". Cached = " + usage.GetStorageSpace() + " != Computed = " + computed);
     }
 }
Example #13
 public WithName(INodeDirectory parent, INodeReference.WithCount referred, byte[]
                 name, int lastSnapshotId)
     : base(parent, referred)
 {
     this.name           = name;
     this.lastSnapshotId = lastSnapshotId;
     referred.AddReference(this);
 }
Example #14
            public CopyWithQuota(INodeDirectory dir)
                : base(dir)
            {
                Preconditions.CheckArgument(dir.IsQuotaSet());
                QuotaCounts q = dir.GetQuotaCounts();

                this.quota = new QuotaCounts.Builder().QuotaCount(q).Build();
            }
Example #15
        internal static void AddFiles(FSEditLog editLog, int numFiles, short replication,
                                      int blocksPerFile, long startingBlockId, long blockSize, FileNameGenerator nameGenerator
                                      )
        {
            PermissionStatus p = new PermissionStatus("joeDoe", "people", new FsPermission((short)0x1ff));
            INodeId        inodeId  = new INodeId();
            INodeDirectory dirInode = new INodeDirectory(inodeId.NextValue(), null, p, 0L);

            editLog.LogMkDir(BasePath, dirInode);
            BlockInfoContiguous[] blocks = new BlockInfoContiguous[blocksPerFile];
            for (int iB = 0; iB < blocksPerFile; ++iB)
            {
                blocks[iB] = new BlockInfoContiguous(new Block(0, blockSize, BlockGenerationStamp), replication);
            }
            long currentBlockId = startingBlockId;
            long bidAtSync      = startingBlockId;

            for (int iF = 0; iF < numFiles; iF++)
            {
                for (int iB_1 = 0; iB_1 < blocksPerFile; ++iB_1)
                {
                    blocks[iB_1].SetBlockId(currentBlockId++);
                }
                INodeFile inode = new INodeFile(inodeId.NextValue(), null, p, 0L, 0L, blocks,
                                                replication, blockSize, unchecked((byte)0));
                inode.ToUnderConstruction(string.Empty, string.Empty);
                // Append path to filename with information about blockIDs
                string path = "_" + iF + "_B" + blocks[0].GetBlockId() + "_to_B" + blocks[blocksPerFile
                                                                                          - 1].GetBlockId() + "_";
                string filePath = nameGenerator.GetNextFileName(string.Empty);
                filePath = filePath + path;
                // Log the new sub directory in edits
                if ((iF % nameGenerator.GetFilesPerDirectory()) == 0)
                {
                    string currentDir = nameGenerator.GetCurrentDir();
                    dirInode = new INodeDirectory(inodeId.NextValue(), null, p, 0L);
                    editLog.LogMkDir(currentDir, dirInode);
                }
                INodeFile fileUc = new INodeFile(inodeId.NextValue(), null, p, 0L, 0L,
                                                 BlockInfoContiguous.EmptyArray, replication, blockSize);
                fileUc.ToUnderConstruction(string.Empty, string.Empty);
                editLog.LogOpenFile(filePath, fileUc, false, false);
                editLog.LogCloseFile(filePath, inode);
                if (currentBlockId - bidAtSync >= 2000)
                {
                    // sync every 2K blocks
                    editLog.LogSync();
                    bidAtSync = currentBlockId;
                }
            }
            System.Console.Out.WriteLine("Created edits log in directory " + edits_dir);
            System.Console.Out.WriteLine(" containing " + numFiles + " File-Creates, each file with "
                                         + blocksPerFile + " blocks");
            System.Console.Out.WriteLine(" blocks range: " + startingBlockId + " to " + (currentBlockId
                                                                                         - 1));
        }
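AddFiles does not sync the edit log after every file; it calls LogSync() only once roughly 2,000 block IDs have been handed out since the last sync, trading durability granularity for throughput. The batching skeleton in isolation (the counters and the flush callback are illustrative):

        static class BatchedSync
        {
            const long SyncInterval = 2000;

            static void Run(long totalItems, System.Action flush)
            {
                long current = 0, lastSynced = 0;
                for (long i = 0; i < totalItems; i++)
                {
                    current++;                              // one unit of work (one block id)
                    if (current - lastSynced >= SyncInterval)
                    {
                        flush();                            // editLog.LogSync() in the example above
                        lastSynced = current;
                    }
                }
            }
        }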
Example #16
 /// <exception cref="System.IO.IOException"/>
 private void Save(OutputStream @out, INodeDirectory n)
 {
      FsImageProto.INodeSection.INodeDirectory.Builder b =
          BuildINodeDirectory(n, parent.GetSaverContext());
      FsImageProto.INodeSection.INode r = ((FsImageProto.INodeSection.INode)BuildINodeCommon(n)
                                           .SetType(FsImageProto.INodeSection.INode.Type.Directory)
                                           .SetDirectory(b).Build());
     r.WriteDelimitedTo(@out);
 }
Example #17
 internal override void RecordModification(int latestSnapshotId)
 {
     if (IsInLatestSnapshot(latestSnapshotId))
     {
         INodeDirectory parent = GetParent();
          parent.SaveChild2Snapshot(this, latestSnapshotId,
                                    new Org.Apache.Hadoop.Hdfs.Server.Namenode.INodeSymlink(this));
     }
 }
Example #18
        public virtual void SetUp()
        {
            Configuration conf = new Configuration();
            FSNamesystem  fsn  = Org.Mockito.Mockito.Mock <FSNamesystem>();

            Org.Mockito.Mockito.DoAnswer(new _Answer_81()).When(fsn)
                .CreateFsOwnerPermissions(Matchers.Any<FsPermission>());
            dir       = new FSDirectory(fsn, conf);
            inodeRoot = dir.GetRoot();
        }
Example #19
        /// <exception cref="System.IO.IOException"/>
        private static void SetDirStoragePolicy(FSDirectory fsd, INodeDirectory inode, byte
                                                policyId, int latestSnapshotId)
        {
            IList <XAttr> existingXAttrs = XAttrStorage.ReadINodeXAttrs(inode);
            XAttr         xAttr          = BlockStoragePolicySuite.BuildXAttr(policyId);
            IList <XAttr> newXAttrs      = FSDirXAttrOp.SetINodeXAttrs(fsd, existingXAttrs, Arrays.AsList(xAttr),
                                                                       EnumSet.Of(XAttrSetFlag.Create, XAttrSetFlag.Replace));

            XAttrStorage.UpdateINodeXAttrs(inode, newXAttrs, latestSnapshotId);
        }
Example #20
        /// <exception cref="System.IO.IOException"/>
        private static INodeDirectory CreateINodeDirectory(INodeDirectory parent, string
                                                           name, string owner, string group, short perm)
        {
            PermissionStatus permStatus = PermissionStatus.CreateImmutable(owner, group,
                                                                           FsPermission.CreateImmutable(perm));
            INodeDirectory inodeDirectory = new INodeDirectory(INodeId.GrandfatherInodeId,
                                                               Sharpen.Runtime.GetBytesForString(name, "UTF-8"),
                                                               permStatus, 0L);

            parent.AddChild(inodeDirectory);
            return(inodeDirectory);
        }
Example #21
 /// <returns>true if the given inode is an ancestor directory of this inode.</returns>
 public bool IsAncestorDirectory(INodeDirectory dir)
 {
     for (INodeDirectory p = GetParent(); p != null; p = p.GetParent())
     {
         if (p == dir)
         {
             return(true);
         }
     }
     return(false);
 }
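IsAncestorDirectory simply walks the parent chain and compares references. The same loop on an illustrative node type:

     class TreeNode
     {
         public TreeNode Parent;

         // True if dir appears anywhere on the parent chain above this node.
         public bool IsAncestor(TreeNode dir)
         {
             for (TreeNode p = Parent; p != null; p = p.Parent)
             {
                 if (ReferenceEquals(p, dir))
                 {
                     return true;
                 }
             }
             return false;
         }
     }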
Example #22
        internal static Org.Apache.Hadoop.Hdfs.Server.Namenode.INodeMap NewInstance(INodeDirectory rootDir)
        {
            // Compute the map capacity by allocating 1% of total memory
            int capacity = LightWeightGSet.ComputeCapacity(1, "INodeMap");
            GSet<INode, INodeWithAdditionalFields> map =
                new LightWeightGSet<INode, INodeWithAdditionalFields>(capacity);

            map.Put(rootDir);
            return(new Org.Apache.Hadoop.Hdfs.Server.Namenode.INodeMap(map));
        }
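NewInstance sizes the inode map as a percentage of the available memory (here 1%). The sketch below shows one way such a calculation could look in .NET using GC.GetGCMemoryInfo(); this is an assumption for illustration, not what LightWeightGSet.ComputeCapacity actually does internally:

        using System;

        static class CapacitySketch
        {
            // Roughly: (percentage of total memory) / (bytes per table entry). Illustrative only.
            static int ComputeCapacity(double percentage, int bytesPerEntry)
            {
                long total  = GC.GetGCMemoryInfo().TotalAvailableMemoryBytes;
                long budget = (long)(total * percentage / 100.0);
                return (int)Math.Min(int.MaxValue, budget / bytesPerEntry);
            }
        }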
Example #23
        /// <exception cref="System.IO.IOException"/>
        private static INodeFile CreateINodeFile(INodeDirectory parent, string name, string
                                                 owner, string group, short perm)
        {
            PermissionStatus permStatus = PermissionStatus.CreateImmutable(owner, group,
                                                                           FsPermission.CreateImmutable(perm));
            INodeFile inodeFile = new INodeFile(INodeId.GrandfatherInodeId,
                                                Sharpen.Runtime.GetBytesForString(name, "UTF-8"),
                                                permStatus, 0L, 0L, null, Replication, PreferredBlockSize,
                                                unchecked((byte)0));

            parent.AddChild(inodeFile);
            return(inodeFile);
        }
Example #24
            /// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.QuotaExceededException"/>
            internal RenameOperation(FSDirectory fsd, string src, string dst, INodesInPath srcIIP
                                     , INodesInPath dstIIP)
            {
                this.fsd          = fsd;
                this.src          = src;
                this.dst          = dst;
                this.srcIIP       = srcIIP;
                this.dstIIP       = dstIIP;
                this.srcParentIIP = srcIIP.GetParentINodesInPath();
                this.dstParentIIP = dstIIP.GetParentINodesInPath();
                BlockStoragePolicySuite bsps = fsd.GetBlockStoragePolicySuite();

                srcChild     = this.srcIIP.GetLastINode();
                srcChildName = srcChild.GetLocalNameBytes();
                int srcLatestSnapshotId = srcIIP.GetLatestSnapshotId();

                isSrcInSnapshot     = srcChild.IsInLatestSnapshot(srcLatestSnapshotId);
                srcChildIsReference = srcChild.IsReference();
                srcParent           = this.srcIIP.GetINode(-2).AsDirectory();
                // Record the snapshot on srcChild. After the rename, before any new
                // snapshot is taken on the dst tree, changes will be recorded in the
                // latest snapshot of the src tree.
                if (isSrcInSnapshot)
                {
                    srcChild.RecordModification(srcLatestSnapshotId);
                }
                // check srcChild for reference
                srcRefDstSnapshot = srcChildIsReference
                                        ? srcChild.AsReference().GetDstSnapshotId()
                                        : Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId;
                oldSrcCounts = new QuotaCounts.Builder().Build();
                if (isSrcInSnapshot)
                {
                    INodeReference.WithName withName =
                        srcParent.ReplaceChild4ReferenceWithName(srcChild, srcLatestSnapshotId);
                    withCount   = (INodeReference.WithCount)withName.GetReferredINode();
                    srcChild    = withName;
                    this.srcIIP = INodesInPath.Replace(srcIIP, srcIIP.Length() - 1, srcChild);
                    // get the counts before rename
                    withCount.GetReferredINode().ComputeQuotaUsage(bsps, oldSrcCounts, true);
                }
                else
                {
                    if (srcChildIsReference)
                    {
                        // srcChild is reference but srcChild is not in latest snapshot
                        withCount = (INodeReference.WithCount)srcChild.AsReference().GetReferredINode();
                    }
                    else
                    {
                        withCount = null;
                    }
                }
            }
Example #25
 /// <summary>
 /// <inheritDoc/>
 /// <br/>
 /// To destroy a DstReference node, we first remove its link with the
 /// referred node. If the reference number of the referred node is &lt;= 0, we
 /// destroy the subtree of the referred node. Otherwise, we clean the
 /// referred node's subtree and delete everything created after the last
 /// rename operation, i.e., everything outside of the scope of the prior
 /// WithName nodes.
 /// </summary>
 public override void DestroyAndCollectBlocks(BlockStoragePolicySuite bsps, INode.BlocksMapUpdateInfo
                                              collectedBlocks, IList <INode> removedINodes)
 {
     if (RemoveReference(this) <= 0)
     {
         GetReferredINode().DestroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
     }
     else
     {
         // we will clean everything, including files, directories, and
         // snapshots, that were created after this prior snapshot
         int prior = GetPriorSnapshot(this);
         // prior must be non-null, otherwise we do not have any previous
         // WithName nodes, and the reference number will be 0.
          Preconditions.CheckState(prior != Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.NoSnapshotId);
         // identify the snapshot created after prior
         int   snapshot = GetSelfSnapshot(prior);
         INode referred = GetReferredINode().AsReference().GetReferredINode();
         if (referred.IsFile())
         {
             // if referred is a file, it must be a file with snapshot since we did
             // recordModification before the rename
             INodeFile file = referred.AsFile();
             Preconditions.CheckState(file.IsWithSnapshot());
             // make sure we mark the file as deleted
             file.GetFileWithSnapshotFeature().DeleteCurrentFile();
             // when calling cleanSubtree of the referred node, since we
             // compute quota usage updates before calling this destroy
             // function, we use true for countDiffChange
             referred.CleanSubtree(bsps, snapshot, prior, collectedBlocks, removedINodes);
         }
         else
         {
             if (referred.IsDirectory())
             {
                 // similarly, if referred is a directory, it must be an
                 // INodeDirectory with snapshot
                 INodeDirectory dir = referred.AsDirectory();
                 Preconditions.CheckState(dir.IsWithSnapshot());
                 try
                 {
                      DirectoryWithSnapshotFeature.DestroyDstSubtree(bsps, dir, snapshot, prior,
                                                                     collectedBlocks, removedINodes);
                 }
                 catch (QuotaExceededException e)
                 {
                     Log.Error("should not exceed quota while snapshot deletion", e);
                 }
             }
         }
     }
 }
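The branch on RemoveReference(this) <= 0 is a reference-counting release: only the holder that drops the last reference destroys the referred subtree, every other holder merely cleans up its own slice of state. The bare shape of that pattern (types and names are illustrative):

     class SharedSubtree
     {
         private int refCount;

         public void AddReference()    { refCount++; }
         public int  RemoveReference() { return --refCount; }   // remaining holders after release
     }

     class Reference
     {
         private readonly SharedSubtree referred;

         public Reference(SharedSubtree s)
         {
             referred = s;
             s.AddReference();
         }

         public void Destroy()
         {
             if (referred.RemoveReference() <= 0)
             {
                 // last reference: tear the shared subtree down completely
             }
             else
             {
                 // other references remain: only clean up this reference's own state
             }
         }
     }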
Example #26
        public virtual void TestSnapshotOnRoot()
        {
            Path root = new Path("/");

            hdfs.AllowSnapshot(root);
            hdfs.CreateSnapshot(root, "s1");
            cluster.Shutdown();
            cluster = new MiniDFSCluster.Builder(conf).Format(false).NumDataNodes(Replication).Build();
            cluster.WaitActive();
            fsn  = cluster.GetNamesystem();
            hdfs = cluster.GetFileSystem();
            // save namespace and restart cluster
            hdfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
            hdfs.SaveNamespace();
            hdfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
            cluster.Shutdown();
            cluster = new MiniDFSCluster.Builder(conf).Format(false).NumDataNodes(Replication).Build();
            cluster.WaitActive();
            fsn  = cluster.GetNamesystem();
            hdfs = cluster.GetFileSystem();
            INodeDirectory rootNode = fsn.dir.GetINode4Write(root.ToString()).AsDirectory();

            NUnit.Framework.Assert.IsTrue("The children list of root should be empty", rootNode
                                          .GetChildrenList(Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId
                                                           ).IsEmpty());
            // one snapshot on root: s1
            IList <DirectoryWithSnapshotFeature.DirectoryDiff> diffList = rootNode.GetDiffs().AsList();

            NUnit.Framework.Assert.AreEqual(1, diffList.Count);
            Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot s1 =
                rootNode.GetSnapshot(DFSUtil.String2Bytes("s1"));
            NUnit.Framework.Assert.AreEqual(s1.GetId(), diffList[0].GetSnapshotId());
            // check SnapshotManager's snapshottable directory list
            NUnit.Framework.Assert.AreEqual(1, fsn.GetSnapshotManager().GetNumSnapshottableDirs());
            SnapshottableDirectoryStatus[] sdirs = fsn.GetSnapshotManager().GetSnapshottableDirListing(null);
            NUnit.Framework.Assert.AreEqual(root, sdirs[0].GetFullPath());
            // save namespace and restart cluster
            hdfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
            hdfs.SaveNamespace();
            hdfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
            cluster.Shutdown();
            cluster = new MiniDFSCluster.Builder(conf).Format(false).NumDataNodes(Replication).Build();
            cluster.WaitActive();
            fsn  = cluster.GetNamesystem();
            hdfs = cluster.GetFileSystem();
        }
Example #27
        /// <summary>
        /// Delete a path from the name space
        /// Update the count at each ancestor directory with quota
        /// </summary>
        /// <param name="iip">the inodes resolved from the path</param>
        /// <param name="collectedBlocks">blocks collected from the deleted path</param>
        /// <param name="removedINodes">inodes that should be removed from inodeMap</param>
        /// <param name="mtime">the time the inode is removed</param>
        /// <returns>the number of inodes deleted; 0 if no inodes are deleted.</returns>
        private static long UnprotectedDelete(FSDirectory fsd, INodesInPath iip, INode.BlocksMapUpdateInfo
                                              collectedBlocks, IList <INode> removedINodes, long mtime)
        {
            System.Diagnostics.Debug.Assert(fsd.HasWriteLock());
            // check if target node exists
            INode targetNode = iip.GetLastINode();

            if (targetNode == null)
            {
                return(-1);
            }
            // record modification
            int latestSnapshot = iip.GetLatestSnapshotId();

            targetNode.RecordModification(latestSnapshot);
            // Remove the node from the namespace
            long removed = fsd.RemoveLastINode(iip);

            if (removed == -1)
            {
                return(-1);
            }
            // set the parent's modification time
            INodeDirectory parent = targetNode.GetParent();

            parent.UpdateModificationTime(mtime, latestSnapshot);
            fsd.UpdateCountForDelete(targetNode, iip);
            if (removed == 0)
            {
                return(0);
            }
            // collect block and update quota
            if (!targetNode.IsInLatestSnapshot(latestSnapshot))
            {
                targetNode.DestroyAndCollectBlocks(fsd.GetBlockStoragePolicySuite(),
                                                   collectedBlocks, removedINodes);
            }
            else
            {
                QuotaCounts counts = targetNode.CleanSubtree(fsd.GetBlockStoragePolicySuite(),
                                                             Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId,
                                                             latestSnapshot, collectedBlocks, removedINodes);
                removed = counts.GetNameSpace();
                fsd.UpdateCountNoQuotaCheck(iip, iip.Length() - 1, counts.Negation());
            }
            if (NameNode.stateChangeLog.IsDebugEnabled())
            {
                NameNode.stateChangeLog.Debug("DIR* FSDirectory.unprotectedDelete: "
                                              + iip.GetPath() + " is removed");
            }
            return(removed);
        }
Example #28
        internal ContentSummaryComputationContext ComputeContentSummary(INodeDirectory dir,
                                                                        ContentSummaryComputationContext summary)
        {
            long original      = summary.GetCounts().GetStoragespace();
            long oldYieldCount = summary.GetYieldCount();

            dir.ComputeDirectoryContentSummary(summary,
                                               Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId);
            // Check only when the content has not changed in the middle.
            if (oldYieldCount == summary.GetYieldCount())
            {
                CheckStoragespace(dir, summary.GetCounts().GetStoragespace() - original);
            }
            return(summary);
        }
Example #29
        /// <summary>Test if the quota can be correctly updated for append</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestUpdateQuotaForAppend()
        {
            Path foo            = new Path(dir, "foo");
            Path bar            = new Path(foo, "bar");
            long currentFileLen = Blocksize;

            DFSTestUtil.CreateFile(dfs, bar, currentFileLen, Replication, seed);
            dfs.SetQuota(foo, long.MaxValue - 1, long.MaxValue - 1);
            // append half of the block data, the previous file length is at block
            // boundary
            DFSTestUtil.AppendFile(dfs, bar, Blocksize / 2);
            currentFileLen += (Blocksize / 2);
            INodeDirectory fooNode = fsdir.GetINode4Write(foo.ToString()).AsDirectory();

            NUnit.Framework.Assert.IsTrue(fooNode.IsQuotaSet());
            QuotaCounts quota = fooNode.GetDirectoryWithQuotaFeature().GetSpaceConsumed();
            long        ns    = quota.GetNameSpace();
            long        ds    = quota.GetStorageSpace();

            NUnit.Framework.Assert.AreEqual(2, ns);
            // foo and bar
            NUnit.Framework.Assert.AreEqual(currentFileLen * Replication, ds);
            ContentSummary c = dfs.GetContentSummary(foo);

            NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), ds);
            // append another block, the previous file length is not at block boundary
            DFSTestUtil.AppendFile(dfs, bar, Blocksize);
            currentFileLen += Blocksize;
            quota           = fooNode.GetDirectoryWithQuotaFeature().GetSpaceConsumed();
            ns              = quota.GetNameSpace();
            ds              = quota.GetStorageSpace();
            NUnit.Framework.Assert.AreEqual(2, ns);
            // foo and bar
            NUnit.Framework.Assert.AreEqual(currentFileLen * Replication, ds);
            c = dfs.GetContentSummary(foo);
            NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), ds);
            // append several blocks
            DFSTestUtil.AppendFile(dfs, bar, Blocksize * 3 + Blocksize / 8);
            currentFileLen += (Blocksize * 3 + Blocksize / 8);
            quota           = fooNode.GetDirectoryWithQuotaFeature().GetSpaceConsumed();
            ns              = quota.GetNameSpace();
            ds              = quota.GetStorageSpace();
            NUnit.Framework.Assert.AreEqual(2, ns);
            // foo and bar
            NUnit.Framework.Assert.AreEqual(currentFileLen * Replication, ds);
            c = dfs.GetContentSummary(foo);
            NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), ds);
        }
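The assertions come down to simple quota arithmetic: namespace usage counts inodes (the directory foo plus the file bar, hence 2), and storage-space usage is the current file length times the replication factor. A one-off check of that arithmetic with assumed values for Blocksize and Replication:

            static class QuotaArithmetic
            {
                const long  Blocksize   = 1024;                          // assumed for illustration
                const short Replication = 3;                             // assumed for illustration

                static void Check()
                {
                    long currentFileLen = Blocksize + Blocksize / 2;     // after the first append
                    long expectedDs     = currentFileLen * Replication;  // 1536 * 3 = 4608
                    long expectedNs     = 2;                             // inodes counted: foo and bar
                }
            }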
Example #30
 private void AddToParent(INodeDirectory parent, INode child)
 {
     if (parent == dir.rootDir && FSDirectory.IsReservedName(child))
     {
         throw new HadoopIllegalArgumentException("File name \"" + child.GetLocalName() +
                                                  "\" is reserved. Please " + " change the name of the existing file or directory to another "
                                                  + "name before upgrading to this release.");
     }
     // NOTE: This does not update space counts for parents
     if (!parent.AddChild(child))
     {
         return;
     }
     dir.CacheName(child);
     if (child.IsFile())
     {
         UpdateBlocksMap(child.AsFile(), fsn.GetBlockManager());
     }
 }