Example #1
            private int GetSelfSnapshot(int prior)
            {
                INodeReference.WithCount wc = (INodeReference.WithCount)GetReferredINode().AsReference();
                INode referred = wc.GetReferredINode();
                int lastSnapshot = Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId;
                if (referred.IsFile() && referred.AsFile().IsWithSnapshot())
                {
                    lastSnapshot = referred.AsFile().GetDiffs().GetLastSnapshotId();
                }
                else if (referred.IsDirectory())
                {
                    DirectoryWithSnapshotFeature sf = referred.AsDirectory().GetDirectoryWithSnapshotFeature();
                    if (sf != null)
                    {
                        lastSnapshot = sf.GetLastSnapshotId();
                    }
                }
                if (lastSnapshot != Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId &&
                    lastSnapshot != prior)
                {
                    return lastSnapshot;
                }
                return Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId;
            }
        /// <summary>Create FileStatus by file INode</summary>
        /// <exception cref="System.IO.IOException"/>
        internal static HdfsFileStatus CreateFileStatus(FSDirectory fsd, string fullPath,
                                                        byte[] path, INode node, byte storagePolicy,
                                                        int snapshot, bool isRawPath, INodesInPath iip)
        {
            long size = 0;   // length is zero for directories
            short replication = 0;
            long blocksize = 0;
            bool isEncrypted;
            FileEncryptionInfo feInfo = isRawPath ? null : fsd.GetFileEncryptionInfo(node, snapshot, iip);
            if (node.IsFile())
            {
                INodeFile fileNode = node.AsFile();
                size = fileNode.ComputeFileSize(snapshot);
                replication = fileNode.GetFileReplication(snapshot);
                blocksize = fileNode.GetPreferredBlockSize();
                isEncrypted = (feInfo != null) ||
                              (isRawPath && fsd.IsInAnEZ(INodesInPath.FromINode(node)));
            }
            else
            {
                isEncrypted = fsd.IsInAnEZ(INodesInPath.FromINode(node));
            }
            int childrenNum = node.IsDirectory() ? node.AsDirectory().GetChildrenNum(snapshot) : 0;
            INodeAttributes nodeAttrs = fsd.GetAttributes(fullPath, path, node, snapshot);
            return new HdfsFileStatus(size, node.IsDirectory(), replication, blocksize,
                                      node.GetModificationTime(snapshot), node.GetAccessTime(snapshot),
                                      GetPermissionForFileStatus(nodeAttrs, isEncrypted),
                                      nodeAttrs.GetUserName(), nodeAttrs.GetGroupName(),
                                      node.IsSymlink() ? node.AsSymlink().GetSymlink() : null,
                                      path, node.GetId(), childrenNum, feInfo, storagePolicy);
        }
 /// <summary>Save one inode's attributes to the image.</summary>
 /// <exception cref="System.IO.IOException"/>
 public static void SaveINode2Image(INode node, DataOutput @out, bool writeUnderConstruction,
                                    SnapshotFSImageFormat.ReferenceMap referenceMap)
 {
     // Write the inode in the serialized form that matches its concrete type.
     if (node.IsReference())
     {
         WriteINodeReference(node.AsReference(), @out, writeUnderConstruction, referenceMap);
     }
     else if (node.IsDirectory())
     {
         WriteINodeDirectory(node.AsDirectory(), @out);
     }
     else if (node.IsSymlink())
     {
         WriteINodeSymlink(node.AsSymlink(), @out);
     }
     else if (node.IsFile())
     {
         WriteINodeFile(node.AsFile(), @out, writeUnderConstruction);
     }
 }
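Nearly every snippet on this page narrows an INode the same way SaveINode2Image does above: test the concrete kind with IsFile()/IsDirectory()/IsSymlink()/IsReference(), then cast with the matching AsFile()/AsDirectory()/AsSymlink()/AsReference(). The stand-alone sketch below mirrors that dispatch with hypothetical stand-in classes (they are not the HDFS types), just to isolate the pattern:

    // Hypothetical stand-in hierarchy, not the HDFS INode classes.
    using System;

    abstract class Node
    {
        public virtual bool IsFile() { return false; }
        public virtual bool IsDirectory() { return false; }
        public virtual FileNode AsFile() { throw new InvalidOperationException("not a file"); }
        public virtual DirNode AsDirectory() { throw new InvalidOperationException("not a directory"); }
    }

    class FileNode : Node
    {
        public override bool IsFile() { return true; }
        public override FileNode AsFile() { return this; }
    }

    class DirNode : Node
    {
        public override bool IsDirectory() { return true; }
        public override DirNode AsDirectory() { return this; }
    }

    static class DispatchDemo
    {
        // Same shape as SaveINode2Image: test the kind, then use the narrowing cast.
        internal static string Describe(Node n)
        {
            if (n.IsDirectory())
            {
                return "directory: " + n.AsDirectory();
            }
            else if (n.IsFile())
            {
                return "file: " + n.AsFile();
            }
            return "other";
        }

        static void Main()
        {
            Console.WriteLine(Describe(new FileNode()));  // file: FileNode
            Console.WriteLine(Describe(new DirNode()));   // directory: DirNode
        }
    }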
        /// <summary>Create FileStatus with location info by file INode</summary>
        /// <exception cref="System.IO.IOException"/>
        private static HdfsLocatedFileStatus CreateLocatedFileStatus(FSDirectory fsd, string fullPath,
                                                                     byte[] path, INode node, byte storagePolicy,
                                                                     int snapshot, bool isRawPath, INodesInPath iip)
        {
            System.Diagnostics.Debug.Assert(fsd.HasReadLock());
            long size = 0;   // length is zero for directories
            short replication = 0;
            long blocksize = 0;
            LocatedBlocks loc = null;
            bool isEncrypted;
            FileEncryptionInfo feInfo = isRawPath ? null : fsd.GetFileEncryptionInfo(node, snapshot, iip);
            if (node.IsFile())
            {
                INodeFile fileNode = node.AsFile();
                size = fileNode.ComputeFileSize(snapshot);
                replication = fileNode.GetFileReplication(snapshot);
                blocksize = fileNode.GetPreferredBlockSize();
                bool inSnapshot = snapshot != Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId;
                bool isUc = !inSnapshot && fileNode.IsUnderConstruction();
                long fileSize = !inSnapshot && isUc
                    ? fileNode.ComputeFileSizeNotIncludingLastUcBlock() : size;
                loc = fsd.GetFSNamesystem().GetBlockManager().CreateLocatedBlocks(
                    fileNode.GetBlocks(snapshot), fileSize, isUc, 0L, size, false, inSnapshot, feInfo);
                if (loc == null)
                {
                    loc = new LocatedBlocks();
                }
                isEncrypted = (feInfo != null) ||
                              (isRawPath && fsd.IsInAnEZ(INodesInPath.FromINode(node)));
            }
            else
            {
                isEncrypted = fsd.IsInAnEZ(INodesInPath.FromINode(node));
            }
            int childrenNum = node.IsDirectory() ? node.AsDirectory().GetChildrenNum(snapshot) : 0;
            INodeAttributes nodeAttrs = fsd.GetAttributes(fullPath, path, node, snapshot);
            HdfsLocatedFileStatus status = new HdfsLocatedFileStatus(size, node.IsDirectory(),
                replication, blocksize, node.GetModificationTime(snapshot), node.GetAccessTime(snapshot),
                GetPermissionForFileStatus(nodeAttrs, isEncrypted), nodeAttrs.GetUserName(),
                nodeAttrs.GetGroupName(), node.IsSymlink() ? node.AsSymlink().GetSymlink() : null,
                path, node.GetId(), loc, childrenNum, feInfo, storagePolicy);
            // Set caching information for the located blocks.
            if (loc != null)
            {
                CacheManager cacheManager = fsd.GetFSNamesystem().GetCacheManager();
                foreach (LocatedBlock lb in loc.GetLocatedBlocks())
                {
                    cacheManager.SetCachedLocations(lb);
                }
            }
            return status;
        }
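One detail in CreateLocatedFileStatus above is easy to miss: when the file is under construction and is being read through the current state rather than a snapshot, the length handed to CreateLocatedBlocks excludes the still-open last block, while size keeps the bytes already written into it (assuming ComputeFileSize counts the partial last block, which is what the separate ComputeFileSizeNotIncludingLastUcBlock call suggests). A stand-alone sketch of that choice with made-up numbers, no HDFS calls involved:

    // Hedged sketch of the fileSize selection, with hypothetical values.
    bool inSnapshot = false;                            // reading the current state, not a snapshot
    bool underConstruction = true;                      // the last block is still being written
    bool isUc = !inSnapshot && underConstruction;
    long completedBlocksLen = 2L * 128 * 1024 * 1024;   // two finished 128 MB blocks
    long lastUcBlockLen = 7L * 1024 * 1024;             // 7 MB already written into the open block
    long size = completedBlocksLen + lastUcBlockLen;    // stands in for ComputeFileSize here
    long fileSize = isUc ? completedBlocksLen : size;   // stands in for ComputeFileSizeNotIncludingLastUcBlock
    System.Console.WriteLine(fileSize);                 // 268435456: the open block is excluded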
Example #5
 /// <summary>
 /// <inheritDoc/>
 /// <br/>
 /// To destroy a DstReference node, we first remove its link with the
 /// referred node. If the reference number of the referred node is &lt;= 0, we
 /// destroy the subtree of the referred node. Otherwise, we clean the
 /// referred node's subtree and delete everything created after the last
 /// rename operation, i.e., everything outside of the scope of the prior
 /// WithName nodes.
 /// </summary>
 public override void DestroyAndCollectBlocks(BlockStoragePolicySuite bsps,
                                              INode.BlocksMapUpdateInfo collectedBlocks,
                                              IList<INode> removedINodes)
 {
     if (RemoveReference(this) <= 0)
     {
         GetReferredINode().DestroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
     }
     else
     {
         // we will clean everything, including files, directories, and
         // snapshots, that were created after this prior snapshot
         int prior = GetPriorSnapshot(this);
         // prior must be non-null, otherwise we do not have any previous
         // WithName nodes, and the reference number will be 0.
         Preconditions.CheckState(prior != Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.NoSnapshotId);
         // identify the snapshot created after prior
         int snapshot = GetSelfSnapshot(prior);
         INode referred = GetReferredINode().AsReference().GetReferredINode();
         if (referred.IsFile())
         {
             // if referred is a file, it must be a file with snapshot since we did
             // recordModification before the rename
             INodeFile file = referred.AsFile();
             Preconditions.CheckState(file.IsWithSnapshot());
             // make sure we mark the file as deleted
             file.GetFileWithSnapshotFeature().DeleteCurrentFile();
             // when calling cleanSubtree of the referred node, since we
             // compute quota usage updates before calling this destroy
             // function, we use true for countDiffChange
             referred.CleanSubtree(bsps, snapshot, prior, collectedBlocks, removedINodes);
         }
         else if (referred.IsDirectory())
         {
             // similarly, if referred is a directory, it must be an
             // INodeDirectory with snapshot
             INodeDirectory dir = referred.AsDirectory();
             Preconditions.CheckState(dir.IsWithSnapshot());
             try
             {
                 DirectoryWithSnapshotFeature.DestroyDstSubtree(bsps, dir, snapshot, prior,
                                                                collectedBlocks, removedINodes);
             }
             catch (QuotaExceededException e)
             {
                 Log.Error("should not exceed quota while snapshot deletion", e);
             }
         }
     }
 }
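The summary above describes the decision DestroyAndCollectBlocks makes: once the last reference to a shared (renamed) node is removed, the whole referred subtree is destroyed; while other references remain, only the changes made after the prior snapshot are cleaned. A toy, self-contained sketch of that branch (the counter is hypothetical; the real count comes from RemoveReference(this)):

    // Toy model of the destroy-vs-clean decision; plain ints, no HDFS calls.
    int referenceCount = 2;   // hypothetical: two references still share the node
    referenceCount--;         // one DstReference is being destroyed
    if (referenceCount <= 0)
    {
        System.Console.WriteLine("last reference gone: destroy the referred subtree and collect its blocks");
    }
    else
    {
        System.Console.WriteLine("other references remain: only clean changes made after the prior snapshot");
    }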
        /// <summary>for snapshot file after deleting the original file.</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestSnapshotPathINodesAfterDeletion()
        {
            // Create a snapshot for the dir, and check the inodes for the path
            // pointing to a snapshot file
            hdfs.AllowSnapshot(sub1);
            hdfs.CreateSnapshot(sub1, "s2");
            // Delete the original file /TestSnapshot/sub1/file1
            hdfs.Delete(file1, false);
            Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot snapshot;
            {
                // Resolve the path for the snapshot file
                // /TestSnapshot/sub1/.snapshot/s2/file1
                string       snapshotPath = sub1.ToString() + "/.snapshot/s2/file1";
                string[]     names        = INode.GetPathNames(snapshotPath);
                byte[][]     components   = INode.GetPathComponents(names);
                INodesInPath nodesInPath  = INodesInPath.Resolve(fsdir.rootDir, components, false);
                // Length of inodes should be (components.length - 1), since we will ignore
                // ".snapshot"
                NUnit.Framework.Assert.AreEqual(nodesInPath.Length(), components.Length - 1);
                // SnapshotRootIndex should be 3: {root, Testsnapshot, sub1, s2, file1}
                snapshot = GetSnapshot(nodesInPath, "s2", 3);
                AssertSnapshot(nodesInPath, true, snapshot, 3);
                // Check the INode for file1 (snapshot file)
                INode inode = nodesInPath.GetLastINode();
                NUnit.Framework.Assert.AreEqual(file1.GetName(), inode.GetLocalName());
                NUnit.Framework.Assert.IsTrue(inode.AsFile().IsWithSnapshot());
            }
            // Check the INodes for path /TestSnapshot/sub1/file1
            string[]     names_1       = INode.GetPathNames(file1.ToString());
            byte[][]     components_1  = INode.GetPathComponents(names_1);
            INodesInPath nodesInPath_1 = INodesInPath.Resolve(fsdir.rootDir, components_1, false);

            // The length of inodes should be equal to components.length
            NUnit.Framework.Assert.AreEqual(nodesInPath_1.Length(), components_1.Length);
            // The number of non-null elements should be components.length - 1 since
            // file1 has been deleted
            NUnit.Framework.Assert.AreEqual(GetNumNonNull(nodesInPath_1), components_1.Length - 1);
            // The returned nodesInPath should be non-snapshot
            AssertSnapshot(nodesInPath_1, false, snapshot, -1);
            // The last INode should be null, and the one before should be associated
            // with sub1
            NUnit.Framework.Assert.IsNull(nodesInPath_1.GetINode(components_1.Length - 1));
            NUnit.Framework.Assert.AreEqual(nodesInPath_1.GetINode(components_1.Length - 2).GetFullPathName(),
                                            sub1.ToString());
            NUnit.Framework.Assert.AreEqual(nodesInPath_1.GetINode(components_1.Length - 3).GetFullPathName(),
                                            dir.ToString());
            hdfs.DeleteSnapshot(sub1, "s2");
            hdfs.DisallowSnapshot(sub1);
        }
Example #7
 /// <summary>
 /// When destroying a reference node (WithName or DstReference), we call this
 /// method to identify the snapshot which is the latest snapshot before the
 /// reference node's creation.
 /// </summary>
 internal static int GetPriorSnapshot(Org.Apache.Hadoop.Hdfs.Server.Namenode.INodeReference @ref)
 {
     INodeReference.WithCount wc = (INodeReference.WithCount)@ref.GetReferredINode();
     INodeReference.WithName wn = null;
     if (@ref is INodeReference.DstReference)
     {
         wn = wc.GetLastWithName();
     }
     else if (@ref is INodeReference.WithName)
     {
         wn = wc.GetPriorWithName((INodeReference.WithName)@ref);
     }
     if (wn != null)
     {
         INode referred = wc.GetReferredINode();
         if (referred.IsFile() && referred.AsFile().IsWithSnapshot())
         {
             return referred.AsFile().GetDiffs().GetPrior(wn.lastSnapshotId);
         }
         else if (referred.IsDirectory())
         {
             DirectoryWithSnapshotFeature sf = referred.AsDirectory().GetDirectoryWithSnapshotFeature();
             if (sf != null)
             {
                 return sf.GetDiffs().GetPrior(wn.lastSnapshotId);
             }
         }
     }
     return Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.NoSnapshotId;
 }
Example #8
            private int GetSelfSnapshot()
            {
                INode referred = GetReferredINode().AsReference().GetReferredINode();
                int snapshot = Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.NoSnapshotId;
                if (referred.IsFile() && referred.AsFile().IsWithSnapshot())
                {
                    snapshot = referred.AsFile().GetDiffs().GetPrior(lastSnapshotId);
                }
                else if (referred.IsDirectory())
                {
                    DirectoryWithSnapshotFeature sf = referred.AsDirectory().GetDirectoryWithSnapshotFeature();
                    if (sf != null)
                    {
                        snapshot = sf.GetDiffs().GetPrior(lastSnapshotId);
                    }
                }
                return snapshot;
            }
Example #9
 private void AddToParent(INodeDirectory parent, INode child)
 {
     if (parent == dir.rootDir && FSDirectory.IsReservedName(child))
     {
         throw new HadoopIllegalArgumentException("File name \"" + child.GetLocalName() +
                                                  "\" is reserved. Please " + " change the name of the existing file or directory to another "
                                                  + "name before upgrading to this release.");
     }
     // NOTE: This does not update space counts for parents
     if (!parent.AddChild(child))
     {
         return;
     }
     dir.CacheName(child);
     if (child.IsFile())
     {
         UpdateBlocksMap(child.AsFile(), fsn.GetBlockManager());
     }
 }
Example #10
        /// <exception cref="System.IO.IOException"/>
        internal static void UnprotectedSetStoragePolicy(FSDirectory fsd, BlockManager bm,
                                                         INodesInPath iip, byte policyId)
        {
            System.Diagnostics.Debug.Assert(fsd.HasWriteLock());
            INode inode = iip.GetLastINode();
            if (inode == null)
            {
                throw new FileNotFoundException("File/Directory does not exist: " + iip.GetPath());
            }
            int snapshotId = iip.GetLatestSnapshotId();
            if (inode.IsFile())
            {
                BlockStoragePolicy newPolicy = bm.GetStoragePolicy(policyId);
                if (newPolicy.IsCopyOnCreateFile())
                {
                    throw new HadoopIllegalArgumentException("Policy " + newPolicy +
                                                             " cannot be set after file creation.");
                }
                BlockStoragePolicy currentPolicy = bm.GetStoragePolicy(inode.GetLocalStoragePolicyID());
                if (currentPolicy != null && currentPolicy.IsCopyOnCreateFile())
                {
                    throw new HadoopIllegalArgumentException("Existing policy " + currentPolicy.GetName() +
                                                             " cannot be changed after file creation.");
                }
                inode.AsFile().SetStoragePolicyID(policyId, snapshotId);
            }
            else if (inode.IsDirectory())
            {
                SetDirStoragePolicy(fsd, inode.AsDirectory(), policyId, snapshotId);
            }
            else
            {
                throw new FileNotFoundException(iip.GetPath() + " is not a file or directory");
            }
        }
Example #11
 /// <summary>Cast INode to INodeFile.</summary>
 /// <exception cref="System.IO.FileNotFoundException"/>
 public static Org.Apache.Hadoop.Hdfs.Server.Namenode.INodeFile ValueOf(INode inode,
                                                                        string path, bool acceptNull)
 {
     if (inode == null)
     {
         if (acceptNull)
         {
             return null;
         }
         throw new FileNotFoundException("File does not exist: " + path);
     }
     if (!inode.IsFile())
     {
         throw new FileNotFoundException("Path is not a file: " + path);
     }
     return inode.AsFile();
 }
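ValueOf is typically used at a call site that has just resolved the last INode of a path and wants the null/type checking done in one place. A hedged sketch of such a caller (the helper name and the locking context are hypothetical; the lookup calls are the ones that appear in UnprotectedSetReplication further down):

    // Hypothetical call site; assumes the namenode types used throughout these examples
    // are available and that 'fsd' is accessed under the appropriate lock.
    internal static long PreferredBlockSizeOf(FSDirectory fsd, string src)
    {
        INodesInPath iip = fsd.GetINodesInPath4Write(src, true);             // resolve the path to its inodes
        INodeFile file = INodeFile.ValueOf(iip.GetLastINode(), src, false);  // throws FileNotFoundException if absent or not a file
        return file.GetPreferredBlockSize();
    }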
Example #12
 /// <exception cref="System.IO.IOException"/>
 private void Save(OutputStream @out, INode n)
 {
     // Dispatch to the Save overload that matches the concrete inode type.
     if (n.IsDirectory())
     {
         Save(@out, n.AsDirectory());
     }
     else if (n.IsFile())
     {
         Save(@out, n.AsFile());
     }
     else if (n.IsSymlink())
     {
         Save(@out, n.AsSymlink());
     }
 }
Example #13
        /// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.QuotaExceededException"/>
        /// <exception cref="Org.Apache.Hadoop.FS.UnresolvedLinkException"/>
        /// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.SnapshotAccessControlException"/
        ///     >
        internal static Block[] UnprotectedSetReplication(FSDirectory fsd, string src, short
                                                          replication, short[] blockRepls)
        {
            System.Diagnostics.Debug.Assert(fsd.HasWriteLock());
            INodesInPath iip   = fsd.GetINodesInPath4Write(src, true);
            INode        inode = iip.GetLastINode();

            if (inode == null || !inode.IsFile())
            {
                return null;
            }
            INodeFile file  = inode.AsFile();
            short     oldBR = file.GetBlockReplication();

            // before setFileReplication, check for increasing block replication.
            // if replication > oldBR, then newBR == replication.
            // if replication < oldBR, we don't know newBR yet.
            if (replication > oldBR)
            {
                long dsDelta = file.StoragespaceConsumed() / oldBR;
                fsd.UpdateCount(iip, 0L, dsDelta, oldBR, replication, true);
            }
            file.SetFileReplication(replication, iip.GetLatestSnapshotId());
            short newBR = file.GetBlockReplication();

            // check newBR < oldBR case.
            if (newBR < oldBR)
            {
                long dsDelta = file.StoragespaceConsumed() / newBR;
                fsd.UpdateCount(iip, 0L, dsDelta, oldBR, newBR, true);
            }
            if (blockRepls != null)
            {
                blockRepls[0] = oldBR;
                blockRepls[1] = newBR;
            }
            return file.GetBlocks();
        }
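The comments in UnprotectedSetReplication above describe a two-phase quota update: when replication grows, the extra space is charged up front from the old replica count; when it shrinks, the refund is computed only after the new count is known. A stand-alone sketch of the arithmetic for the increasing case, with hypothetical numbers (plain values, not HDFS API calls):

    // Hedged sketch of the quota delta when raising replication from 2 to 3.
    long storagespaceConsumed = 600L * 1024 * 1024;   // 600 MB total across the 2 existing replicas
    short oldBR = 2;
    short newBR = 3;                                  // requested replication, higher than oldBR
    long dsDelta = storagespaceConsumed / oldBR;      // per-replica data size: 300 MB
    // UpdateCount(iip, 0L, dsDelta, oldBR, newBR, true) would then charge roughly
    // dsDelta * (newBR - oldBR) of additional storage against the directory quota.
    System.Console.WriteLine(dsDelta * (newBR - oldBR));  // 314572800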
        /// <summary>for snapshot file while modifying file after snapshot.</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestSnapshotPathINodesAfterModification()
        {
            // First check the INode for /TestSnapshot/sub1/file1
            string[]     names       = INode.GetPathNames(file1.ToString());
            byte[][]     components  = INode.GetPathComponents(names);
            INodesInPath nodesInPath = INodesInPath.Resolve(fsdir.rootDir, components, false);

            // The number of inodes should be equal to components.length
            NUnit.Framework.Assert.AreEqual(nodesInPath.Length(), components.Length);
            // The last INode should be associated with file1
            NUnit.Framework.Assert.AreEqual(nodesInPath.GetINode(components.Length - 1).GetFullPathName(),
                                            file1.ToString());
            // record the modification time of the inode
            long modTime = nodesInPath.GetINode(nodesInPath.Length() - 1).GetModificationTime();

            // Create a snapshot for the dir, and check the inodes for the path
            // pointing to a snapshot file
            hdfs.AllowSnapshot(sub1);
            hdfs.CreateSnapshot(sub1, "s3");
            // Modify file1
            DFSTestUtil.AppendFile(hdfs, file1, "the content for appending");
            // Check the INodes for snapshot of file1
            string snapshotPath = sub1.ToString() + "/.snapshot/s3/file1";

            names      = INode.GetPathNames(snapshotPath);
            components = INode.GetPathComponents(names);
            INodesInPath ssNodesInPath = INodesInPath.Resolve(fsdir.rootDir, components, false);

            // Length of ssInodes should be (components.length - 1), since we will
            // ignore ".snapshot"
            NUnit.Framework.Assert.AreEqual(ssNodesInPath.Length(), components.Length - 1);
            Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot s3 = GetSnapshot(ssNodesInPath, "s3", 3);
            AssertSnapshot(ssNodesInPath, true, s3, 3);
            // Check the INode for snapshot of file1
            INode snapshotFileNode = ssNodesInPath.GetLastINode();

            NUnit.Framework.Assert.AreEqual(snapshotFileNode.GetLocalName(), file1.GetName());
            NUnit.Framework.Assert.IsTrue(snapshotFileNode.AsFile().IsWithSnapshot());
            // The modification time of the snapshot INode should be the same with the
            // original INode before modification
            NUnit.Framework.Assert.AreEqual(modTime,
                                            snapshotFileNode.GetModificationTime(ssNodesInPath.GetPathSnapshotId()));
            // Check the INode for /TestSnapshot/sub1/file1 again
            names      = INode.GetPathNames(file1.ToString());
            components = INode.GetPathComponents(names);
            INodesInPath newNodesInPath = INodesInPath.Resolve(fsdir.rootDir, components, false);

            AssertSnapshot(newNodesInPath, false, s3, -1);
            // The number of inodes should be equal to components.length
            NUnit.Framework.Assert.AreEqual(newNodesInPath.Length(), components.Length);
            // The last INode should be associated with file1
            int last = components.Length - 1;

            NUnit.Framework.Assert.AreEqual(newNodesInPath.GetINode(last).GetFullPathName(),
                                            file1.ToString());
            // The modification time of the INode for file1 should have been changed
            NUnit.Framework.Assert.IsFalse(modTime == newNodesInPath.GetINode(last).GetModificationTime());
            hdfs.DeleteSnapshot(sub1, "s3");
            hdfs.DisallowSnapshot(sub1);
        }