Example #1
        public virtual void TestDeletionOfLaterBlocksWithZeroSizeFirstBlock()
        {
            Path foo = new Path("/foo");
            Path bar = new Path(foo, "bar");

            byte[] testData = Sharpen.Runtime.GetBytesForString("foo bar baz");
            // Create a zero-length file.
            DFSTestUtil.CreateFile(hdfs, bar, 0, Replication, 0L);
            NUnit.Framework.Assert.AreEqual(0, fsdir.GetINode4Write(bar.ToString()).AsFile().
                                            GetBlocks().Length);
            // Create a snapshot that includes that file.
            SnapshotTestHelper.CreateSnapshot(hdfs, foo, "s0");
            // Extend that file.
            FSDataOutputStream @out = hdfs.Append(bar);

            @out.Write(testData);
            @out.Close();
            INodeFile barNode = fsdir.GetINode4Write(bar.ToString()).AsFile();

            BlockInfoContiguous[] blks = barNode.GetBlocks();
            NUnit.Framework.Assert.AreEqual(1, blks.Length);
            NUnit.Framework.Assert.AreEqual(testData.Length, blks[0].GetNumBytes());
            // Delete the file.
            hdfs.Delete(bar, true);
            // Now make sure that the NN can still save an fsimage successfully.
            cluster.GetNameNode().GetRpcServer().SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter
                                                             , false);
            cluster.GetNameNode().GetRpcServer().SaveNamespace();
        }
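        // A minimal usage sketch, assuming the same MiniDFSCluster fixture as
        // above and that a SafemodeLeave action mirrors the SafemodeEnter used
        // in the test: SaveNamespace requires safe mode, and leaving it again
        // restores normal operation once the fsimage has been written.
        private void SaveNamespaceSketch()
        {
            var nn = cluster.GetNameNode().GetRpcServer();
            nn.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter, false);
            nn.SaveNamespace();
            nn.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave, false);
        }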
        /// <summary>Test snapshot after file appending</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestSnapshotAfterAppending()
        {
            Path file = new Path(dir, "file");

            // 1. create snapshot --> create file --> append
            SnapshotTestHelper.CreateSnapshot(hdfs, dir, "s0");
            DFSTestUtil.CreateFile(hdfs, file, Blocksize, Replication, seed);
            DFSTestUtil.AppendFile(hdfs, file, Blocksize);
            INodeFile fileNode = (INodeFile)fsdir.GetINode(file.ToString());

            // 2. create snapshot --> modify the file --> append
            hdfs.CreateSnapshot(dir, "s1");
            hdfs.SetReplication(file, (short)(Replication - 1));
            DFSTestUtil.AppendFile(hdfs, file, Blocksize);
            // check corresponding inodes
            fileNode = (INodeFile)fsdir.GetINode(file.ToString());
            NUnit.Framework.Assert.AreEqual(Replication - 1, fileNode.GetFileReplication());
            NUnit.Framework.Assert.AreEqual(Blocksize * 3, fileNode.ComputeFileSize());
            // 3. create snapshot --> append
            hdfs.CreateSnapshot(dir, "s2");
            DFSTestUtil.AppendFile(hdfs, file, Blocksize);
            // check corresponding inodes
            fileNode = (INodeFile)fsdir.GetINode(file.ToString());
            NUnit.Framework.Assert.AreEqual(Replication - 1, fileNode.GetFileReplication());
            NUnit.Framework.Assert.AreEqual(Blocksize * 4, fileNode.ComputeFileSize());
        }
Example #3
        public virtual void TestDeletionWithZeroSizeBlock2()
        {
            Path foo    = new Path("/foo");
            Path subDir = new Path(foo, "sub");
            Path bar    = new Path(subDir, "bar");

            DFSTestUtil.CreateFile(hdfs, bar, Blocksize, Replication, 0L);
            hdfs.Append(bar);
            INodeFile barNode = fsdir.GetINode4Write(bar.ToString()).AsFile();

            BlockInfoContiguous[] blks = barNode.GetBlocks();
            NUnit.Framework.Assert.AreEqual(1, blks.Length);
            ExtendedBlock previous = new ExtendedBlock(fsn.GetBlockPoolId(), blks[0]);

            cluster.GetNameNodeRpc().AddBlock(bar.ToString(), hdfs.GetClient().GetClientName(
                                                  ), previous, null, barNode.GetId(), null);
            SnapshotTestHelper.CreateSnapshot(hdfs, foo, "s1");
            barNode = fsdir.GetINode4Write(bar.ToString()).AsFile();
            blks    = barNode.GetBlocks();
            NUnit.Framework.Assert.AreEqual(2, blks.Length);
            NUnit.Framework.Assert.AreEqual(Blocksize, blks[0].GetNumBytes());
            NUnit.Framework.Assert.AreEqual(0, blks[1].GetNumBytes());
            hdfs.Delete(subDir, true);
            Path sbar = SnapshotTestHelper.GetSnapshotPath(foo, "s1", "sub/bar");

            barNode = fsdir.GetINode(sbar.ToString()).AsFile();
            blks    = barNode.GetBlocks();
            NUnit.Framework.Assert.AreEqual(1, blks.Length);
            NUnit.Framework.Assert.AreEqual(Blocksize, blks[0].GetNumBytes());
        }
        /// <summary>
        /// If some blocks at the end of the block list no longer belong to
        /// any inode, collect them and update the block list.
        /// </summary>
        public virtual void CollectBlocksAndClear(BlockStoragePolicySuite bsps, INodeFile
                                                  file, INode.BlocksMapUpdateInfo info, IList <INode> removedINodes)
        {
            // check if everything is deleted.
            if (IsCurrentFileDeleted() && GetDiffs().AsList().IsEmpty())
            {
                file.DestroyAndCollectBlocks(bsps, info, removedINodes);
                return;
            }
            // find max file size.
            long     max;
            FileDiff diff = GetDiffs().GetLast();

            if (IsCurrentFileDeleted())
            {
                max = diff == null ? 0 : diff.GetFileSize();
            }
            else
            {
                max = file.ComputeFileSize();
            }
            // Collect blocks that should be deleted
            FileDiff last = diffs.GetLast();

            BlockInfoContiguous[] snapshotBlocks = last == null ? null : last.GetBlocks();
            if (snapshotBlocks == null)
            {
                file.CollectBlocksBeyondMax(max, info);
            }
            else
            {
                file.CollectBlocksBeyondSnapshot(snapshotBlocks, info);
            }
        }
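        // An illustrative sketch (hypothetical names, not HDFS API) of the
        // "collect blocks beyond max" case above: keep the shortest prefix of
        // blocks that covers `max` bytes; every block after that prefix is
        // collectable for deletion.
        internal static int BlocksToKeepSketch(long[] blockSizes, long max)
        {
            long covered = 0;
            int  keep    = 0;
            while (keep < blockSizes.Length && covered < max)
            {
                covered += blockSizes[keep++];
            }
            return keep;  // blocks[keep..] would go into BlocksMapUpdateInfo
        }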
Example #5
 /// <exception cref="System.Exception"/>
 internal virtual void AssertAllNull(INodeFile inode, Path path, string[] snapshots
                                     )
 {
     NUnit.Framework.Assert.IsNull(inode.GetBlocks());
     AssertINodeNull(path.ToString());
     AssertINodeNullInSnapshots(path, snapshots);
 }
Example #6
 internal static void AssertBlockCollection(BlockManager blkManager, INodeFile file
                                            , BlockInfoContiguous b)
 {
     NUnit.Framework.Assert.AreSame(b, blkManager.GetStoredBlock(b));
     NUnit.Framework.Assert.AreSame(file, blkManager.GetBlockCollection(b));
     NUnit.Framework.Assert.AreSame(file, b.GetBlockCollection());
 }
Example #7
            /// <exception cref="System.IO.IOException"/>
            private void SerializeFileDiffList(INodeFile file, OutputStream @out)
            {
                FileWithSnapshotFeature sf = file.GetFileWithSnapshotFeature();

                if (sf != null)
                {
                    IList <FileDiff> diffList = sf.GetDiffs().AsList();
                    FsImageProto.SnapshotDiffSection.DiffEntry entry =
                        (FsImageProto.SnapshotDiffSection.DiffEntry)FsImageProto.SnapshotDiffSection.DiffEntry
                            .NewBuilder()
                            .SetInodeId(file.GetId())
                            .SetType(FsImageProto.SnapshotDiffSection.DiffEntry.Type.Filediff)
                            .SetNumOfDiff(diffList.Count)
                            .Build();
                    entry.WriteDelimitedTo(@out);
                    for (int i = diffList.Count - 1; i >= 0; i--)
                    {
                        FileDiff diff = diffList[i];
                        FsImageProto.SnapshotDiffSection.FileDiff.Builder fb =
                            FsImageProto.SnapshotDiffSection.FileDiff.NewBuilder()
                                .SetSnapshotId(diff.GetSnapshotId())
                                .SetFileSize(diff.GetFileSize());
                        if (diff.GetBlocks() != null)
                        {
                            foreach (Block block in diff.GetBlocks())
                            {
                                fb.AddBlocks(PBHelper.Convert(block));
                            }
                        }
                        INodeFileAttributes copy = diff.snapshotINode;
                        if (copy != null)
                        {
                            fb.SetName(ByteString.CopyFrom(copy.GetLocalNameBytes()))
                              .SetSnapshotCopy(FSImageFormatPBINode.Saver.BuildINodeFile(copy, parent.GetSaverContext()));
                        }
                        ((FsImageProto.SnapshotDiffSection.FileDiff)fb.Build()).WriteDelimitedTo(@out);
                    }
                }
            }
Example #8
        /// <summary>
        /// Test replication for a file with snapshots, also including the scenario
        /// where the original file is deleted
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestReplicationAfterDeletion()
        {
            // Create file1, set its replication to 3
            DFSTestUtil.CreateFile(hdfs, file1, Blocksize, Replication, seed);
            IDictionary <Path, short> snapshotRepMap = new Dictionary <Path, short>();

            // Take 3 snapshots of sub1
            for (int i = 1; i <= 3; i++)
            {
                Path root   = SnapshotTestHelper.CreateSnapshot(hdfs, sub1, "s" + i);
                Path ssFile = new Path(root, file1.GetName());
                snapshotRepMap[ssFile] = Replication;
            }
            // Check replication
            CheckFileReplication(file1, Replication, Replication);
            CheckSnapshotFileReplication(file1, snapshotRepMap, Replication);
            // Delete file1
            hdfs.Delete(file1, true);
            // Check replication of snapshots
            foreach (Path ss in snapshotRepMap.Keys)
            {
                INodeFile ssInode = GetINodeFile(ss);
                // The replication number derived from the
                // INodeFileWithLink#getBlockReplication should always == expectedBlockRep
                NUnit.Framework.Assert.AreEqual(Replication, ssInode.GetBlockReplication());
                // Also check the number derived from INodeFile#getFileReplication
                NUnit.Framework.Assert.AreEqual(snapshotRepMap[ss], ssInode.GetFileReplication());
            }
        }
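        // A condensed sketch (illustrative, not HDFS API) of the invariant the
        // loop above asserts: once the original file is deleted, the effective
        // block replication is the maximum replication recorded across the
        // remaining snapshot copies.
        internal static short EffectiveBlockReplicationSketch(short[] snapshotReplications)
        {
            short max = 0;
            foreach (short r in snapshotReplications)
            {
                if (r > max)
                {
                    max = r;  // GetBlockReplication() takes the max over diffs
                }
            }
            return max;
        }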
Example #9
            /// <summary>Load FileDiff list for a file with snapshot feature</summary>
            /// <exception cref="System.IO.IOException"/>
            private void LoadFileDiffList(InputStream @in, INodeFile file, int size)
            {
                FileDiffList diffs = new FileDiffList();

                FSImageFormatProtobuf.LoaderContext state = parent.GetLoaderContext();
                for (int i = 0; i < size; i++)
                {
                    FsImageProto.SnapshotDiffSection.FileDiff pbf = FsImageProto.SnapshotDiffSection.FileDiff
                                                                    .ParseDelimitedFrom(@in);
                    INodeFileAttributes copy = null;
                    if (pbf.HasSnapshotCopy())
                    {
                        FsImageProto.INodeSection.INodeFile fileInPb = pbf.GetSnapshotCopy();
                        PermissionStatus permission = FSImageFormatPBINode.Loader.LoadPermission(
                            fileInPb.GetPermission(), state.GetStringTable());
                        AclFeature acl = null;
                        if (fileInPb.HasAcl())
                        {
                            int[] entries = AclEntryStatusFormat.ToInt(
                                FSImageFormatPBINode.Loader.LoadAclEntries(fileInPb.GetAcl(), state.GetStringTable()));
                            acl = new AclFeature(entries);
                        }
                        XAttrFeature xAttrs = null;
                        if (fileInPb.HasXAttrs())
                        {
                            xAttrs = new XAttrFeature(
                                FSImageFormatPBINode.Loader.LoadXAttrs(fileInPb.GetXAttrs(), state.GetStringTable()));
                        }
                        copy = new INodeFileAttributes.SnapshotCopy(
                            pbf.GetName().ToByteArray(), permission, acl,
                            fileInPb.GetModificationTime(), fileInPb.GetAccessTime(),
                            (short)fileInPb.GetReplication(), fileInPb.GetPreferredBlockSize(),
                            unchecked((byte)fileInPb.GetStoragePolicyID()), xAttrs);
                    }
                    FileDiff diff = new FileDiff(pbf.GetSnapshotId(), copy, null, pbf.GetFileSize());
                    IList <HdfsProtos.BlockProto> bpl    = pbf.GetBlocksList();
                    BlockInfoContiguous[]         blocks = new BlockInfoContiguous[bpl.Count];
                    for (int j = 0; j < bpl.Count; ++j)
                    {
                        Block blk = PBHelper.Convert(bpl[j]);
                        BlockInfoContiguous storedBlock = fsn.GetBlockManager().GetStoredBlock(blk);
                        if (storedBlock == null)
                        {
                            storedBlock = fsn.GetBlockManager().AddBlockCollection(new BlockInfoContiguous(blk
                                                                                                           , copy.GetFileReplication()), file);
                        }
                        blocks[j] = storedBlock;
                    }
                    if (blocks.Length > 0)
                    {
                        diff.SetBlocks(blocks);
                    }
                    diffs.AddFirst(diff);
                }
                file.AddSnapshotFeature(diffs);
            }
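            // A round-trip sketch (plain ints, not HDFS types): the serializer
            // in Example #7 writes diffs from last to first, and this loader
            // re-inserts them with AddFirst, so the original list order
            // survives the fsimage round trip.
            private static int[] DiffOrderRoundTripSketch(int[] diffs)
            {
                var written = new System.Collections.Generic.List<int>();
                for (int i = diffs.Length - 1; i >= 0; i--)
                {
                    written.Add(diffs[i]);   // writer emits the newest entry first
                }
                var loaded = new System.Collections.Generic.LinkedList<int>();
                foreach (int d in written)
                {
                    loaded.AddFirst(d);      // loader prepends, like AddFirst above
                }
                int[] result = new int[diffs.Length];
                loaded.CopyTo(result, 0);
                return result;               // same order as the input
            }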
Example #10
        /// <exception cref="System.Exception"/>
        internal static INodeFile AssertBlockCollection(string path, int numBlocks, FSDirectory
                                                        dir, BlockManager blkManager)
        {
            INodeFile file = INodeFile.ValueOf(dir.GetINode(path), path);

            NUnit.Framework.Assert.AreEqual(numBlocks, file.GetBlocks().Length);
            foreach (BlockInfoContiguous b in file.GetBlocks())
            {
                AssertBlockCollection(blkManager, file, b);
            }
            return(file);
        }
        /// <summary>
        /// Test snapshot during file appending, before the corresponding
        /// <see cref="Org.Apache.Hadoop.FS.FSDataOutputStream"/>
        /// instance closes.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestSnapshotWhileAppending()
        {
            Path file = new Path(dir, "file");

            DFSTestUtil.CreateFile(hdfs, file, Blocksize, Replication, seed);
            // 1. append without closing stream --> create snapshot
            HdfsDataOutputStream @out = AppendFileWithoutClosing(file, Blocksize);

            @out.Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag.UpdateLength));
            SnapshotTestHelper.CreateSnapshot(hdfs, dir, "s0");
            @out.Close();
            // check: an INodeFileUnderConstructionWithSnapshot should be stored into s0's
            // deleted list, with size BLOCKSIZE*2
            INodeFile fileNode = (INodeFile)fsdir.GetINode(file.ToString());

            NUnit.Framework.Assert.AreEqual(Blocksize * 2, fileNode.ComputeFileSize());
            INodeDirectory dirNode = fsdir.GetINode(dir.ToString()).AsDirectory();

            DirectoryWithSnapshotFeature.DirectoryDiff last = dirNode.GetDiffs().GetLast();
            // 2. append without closing stream
            @out = AppendFileWithoutClosing(file, Blocksize);
            @out.Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag.UpdateLength));
            // re-check nodeInDeleted_S0
            dirNode = fsdir.GetINode(dir.ToString()).AsDirectory();
            NUnit.Framework.Assert.AreEqual(Blocksize * 2, fileNode.ComputeFileSize(last.GetSnapshotId
                                                                                        ()));
            // 3. take snapshot --> close stream
            hdfs.CreateSnapshot(dir, "s1");
            @out.Close();
            // check: an INodeFileUnderConstructionWithSnapshot with size BLOCKSIZE*3 should
            // have been stored in s1's deleted list
            fileNode = (INodeFile)fsdir.GetINode(file.ToString());
            dirNode  = fsdir.GetINode(dir.ToString()).AsDirectory();
            last     = dirNode.GetDiffs().GetLast();
            NUnit.Framework.Assert.IsTrue(fileNode.IsWithSnapshot());
            NUnit.Framework.Assert.AreEqual(Blocksize * 3, fileNode.ComputeFileSize(last.GetSnapshotId
                                                                                        ()));
            // 4. modify file --> append without closing stream --> take snapshot -->
            // close stream
            hdfs.SetReplication(file, (short)(Replication - 1));
            @out = AppendFileWithoutClosing(file, Blocksize);
            hdfs.CreateSnapshot(dir, "s2");
            @out.Close();
            // re-check the size of nodeInDeleted_S1
            NUnit.Framework.Assert.AreEqual(Blocksize * 3, fileNode.ComputeFileSize(last.GetSnapshotId
                                                                                        ()));
        }
        internal virtual bool ChangedBetweenSnapshots(INodeFile file, Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot
                                                      from, Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot to)
        {
            int[] diffIndexPair = diffs.ChangedBetweenSnapshots(from, to);
            if (diffIndexPair == null)
            {
                return(false);
            }
            int earlierDiffIndex           = diffIndexPair[0];
            int laterDiffIndex             = diffIndexPair[1];
            IList <FileDiff> diffList      = diffs.AsList();
            long             earlierLength = diffList[earlierDiffIndex].GetFileSize();
            long             laterLength   = laterDiffIndex == diffList.Count ? file.ComputeFileSize(true,
                                                                                                     false) : diffList[laterDiffIndex].GetFileSize();

            if (earlierLength != laterLength)
            {
                // file length has been changed
                return(true);
            }
            INodeFileAttributes earlierAttr = null;

            // check the metadata
            for (int i = earlierDiffIndex; i < laterDiffIndex; i++)
            {
                FileDiff diff = diffList[i];
                if (diff.snapshotINode != null)
                {
                    earlierAttr = diff.snapshotINode;
                    break;
                }
            }
            if (earlierAttr == null)
            {
                // no meta-change at all, return false
                return(false);
            }
            INodeFileAttributes laterAttr = diffs.GetSnapshotINode(
                Math.Max(Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.GetSnapshotId(from),
                         Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.GetSnapshotId(to)),
                file);

            return(!earlierAttr.MetadataEquals(laterAttr));
        }
Example #13
        /// <summary>Check the replication for both the current file and all its prior snapshots
        ///     </summary>
        /// <param name="currentFile">the Path of the current file</param>
        /// <param name="snapshotRepMap">
        /// A map maintaining all the snapshots of the current file, as well
        /// as their expected replication number stored in their corresponding
        /// INodes
        /// </param>
        /// <param name="expectedBlockRep">The expected replication number</param>
        /// <exception cref="System.Exception"/>
        private void CheckSnapshotFileReplication(Path currentFile, IDictionary <Path, short
                                                                                 > snapshotRepMap, short expectedBlockRep)
        {
            // First check the getBlockReplication for the INode of the currentFile
            INodeFile inodeOfCurrentFile = GetINodeFile(currentFile);

            NUnit.Framework.Assert.AreEqual(expectedBlockRep, inodeOfCurrentFile.GetBlockReplication
                                                ());
            // Then check replication for every snapshot
            foreach (Path ss in snapshotRepMap.Keys)
            {
                INodesInPath iip     = fsdir.GetINodesInPath(ss.ToString(), true);
                INodeFile    ssInode = iip.GetLastINode().AsFile();
                // The replication number derived from the
                // INodeFileWithLink#getBlockReplication should always == expectedBlockRep
                NUnit.Framework.Assert.AreEqual(expectedBlockRep, ssInode.GetBlockReplication());
                // Also check the number derived from INodeFile#getFileReplication
                NUnit.Framework.Assert.AreEqual(snapshotRepMap[ss], ssInode.GetFileReplication(iip
                                                                                               .GetPathSnapshotId()));
            }
        }
 public virtual QuotaCounts CleanFile(BlockStoragePolicySuite bsps, INodeFile file
                                      , int snapshotId, int priorSnapshotId, INode.BlocksMapUpdateInfo collectedBlocks
                                      , IList <INode> removedINodes)
 {
     if (snapshotId == Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId)
     {
         // delete the current file while the file has snapshot feature
         if (!IsCurrentFileDeleted())
         {
             file.RecordModification(priorSnapshotId);
             DeleteCurrentFile();
         }
         CollectBlocksAndClear(bsps, file, collectedBlocks, removedINodes);
         return(new QuotaCounts.Builder().Build());
     }
     else
     {
         // delete the snapshot
         priorSnapshotId = GetDiffs().UpdatePrior(snapshotId, priorSnapshotId);
         return(diffs.DeleteSnapshotDiff(bsps, snapshotId, priorSnapshotId, file, collectedBlocks
                                         , removedINodes));
     }
 }
        public virtual QuotaCounts UpdateQuotaAndCollectBlocks(BlockStoragePolicySuite bsps
                                                               , INodeFile file, FileDiff removed, INode.BlocksMapUpdateInfo collectedBlocks, IList
                                                               <INode> removedINodes)
        {
            long oldStoragespace                  = file.StoragespaceConsumed();
            byte storagePolicyID                  = file.GetStoragePolicyID();
            BlockStoragePolicy         bsp        = null;
            EnumCounters <StorageType> typeSpaces = new EnumCounters <StorageType>(typeof(StorageType));

            if (storagePolicyID != BlockStoragePolicySuite.IdUnspecified)
            {
                bsp = bsps.GetPolicy(file.GetStoragePolicyID());
            }
            if (removed.snapshotINode != null)
            {
                short replication = removed.snapshotINode.GetFileReplication();
                short currentRepl = file.GetBlockReplication();
                if (currentRepl == 0)
                {
                    long oldFileSizeNoRep = file.ComputeFileSize(true, true);
                    oldStoragespace = oldFileSizeNoRep * replication;
                    if (bsp != null)
                    {
                        IList <StorageType> oldTypeChosen = bsp.ChooseStorageTypes(replication);
                        foreach (StorageType t in oldTypeChosen)
                        {
                            if (t.SupportTypeQuota())
                            {
                                typeSpaces.Add(t, -oldFileSizeNoRep);
                            }
                        }
                    }
                }
                else
                {
                    if (replication > currentRepl)
                    {
                        long oldFileSizeNoRep = file.StoragespaceConsumedNoReplication();
                        oldStoragespace = oldFileSizeNoRep * replication;
                        if (bsp != null)
                        {
                            IList <StorageType> oldTypeChosen = bsp.ChooseStorageTypes(replication);
                            foreach (StorageType t in oldTypeChosen)
                            {
                                if (t.SupportTypeQuota())
                                {
                                    typeSpaces.Add(t, -oldFileSizeNoRep);
                                }
                            }
                            IList <StorageType> newTypeChosen = bsp.ChooseStorageTypes(currentRepl);
                            foreach (StorageType t_1 in newTypeChosen)
                            {
                                if (t_1.SupportTypeQuota())
                                {
                                    typeSpaces.Add(t_1, oldFileSizeNoRep);
                                }
                            }
                        }
                    }
                }
                AclFeature aclFeature = removed.GetSnapshotINode().GetAclFeature();
                if (aclFeature != null)
                {
                    AclStorage.RemoveAclFeature(aclFeature);
                }
            }
            GetDiffs().CombineAndCollectSnapshotBlocks(bsps, file, removed, collectedBlocks,
                                                       removedINodes);
            long ssDelta = oldStoragespace - file.StoragespaceConsumed();

            return(new QuotaCounts.Builder().StorageSpace(ssDelta).TypeSpaces(typeSpaces).Build());
        }
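        // A worked sketch of the storage-space delta computed above, with
        // hypothetical numbers: consumed space is the file size (without
        // replication) times the replication factor, and the returned delta
        // is old consumption minus new consumption.
        private static long StorageSpaceDeltaSketch()
        {
            long  fileSizeNoRep = 3 * 1024;  // 3 KB of user data (hypothetical)
            short oldRepl       = 3;         // replication recorded in the removed diff
            short newRepl       = 2;         // current replication of the file
            long  oldSpace      = fileSizeNoRep * oldRepl;  // 9216 bytes
            long  newSpace      = fileSizeNoRep * newRepl;  // 6144 bytes
            return oldSpace - newSpace;                     // 3072 bytes freed
        }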
Example #16
        /// <summary>
        /// Clean an inode while we move it from the deleted list of post to the
        /// deleted list of prior.
        /// </summary>
        /// <param name="bsps">The block storage policy suite.</param>
        /// <param name="inode">The inode to clean.</param>
        /// <param name="post">The post snapshot.</param>
        /// <param name="prior">The id of the prior snapshot.</param>
        /// <param name="collectedBlocks">Used to collect blocks for later deletion.</param>
        /// <returns>Quota usage update.</returns>
        private static QuotaCounts CleanDeletedINode(BlockStoragePolicySuite bsps, INode
                                                     inode, int post, int prior, INode.BlocksMapUpdateInfo collectedBlocks, IList <INode
                                                                                                                                   > removedINodes)
        {
            QuotaCounts   counts = new QuotaCounts.Builder().Build();
            Deque <INode> queue  = new ArrayDeque <INode>();

            queue.AddLast(inode);
            while (!queue.IsEmpty())
            {
                INode topNode = queue.PollFirst();
                if (topNode is INodeReference.WithName)
                {
                    INodeReference.WithName wn = (INodeReference.WithName)topNode;
                    if (wn.GetLastSnapshotId() >= post)
                    {
                        wn.CleanSubtree(bsps, post, prior, collectedBlocks, removedINodes);
                    }
                }
                else
                {
                    // For a DstReference node, since the node is not in the created
                    // list of prior, we should treat it as a regular file/dir
                    if (topNode.IsFile() && topNode.AsFile().IsWithSnapshot())
                    {
                        INodeFile file = topNode.AsFile();
                        counts.Add(file.GetDiffs().DeleteSnapshotDiff(bsps, post, prior, file, collectedBlocks
                                                                      , removedINodes));
                    }
                    else
                    {
                        if (topNode.IsDirectory())
                        {
                            INodeDirectory dir = topNode.AsDirectory();
                            DirectoryWithSnapshotFeature.ChildrenDiff priorChildrenDiff = null;
                            DirectoryWithSnapshotFeature sf = dir.GetDirectoryWithSnapshotFeature();
                            if (sf != null)
                            {
                                // delete files/dirs created after prior. Note that these
                                // files/dirs, along with inode, were deleted right after post.
                                DirectoryWithSnapshotFeature.DirectoryDiff priorDiff = sf.GetDiffs().GetDiffById(
                                    prior);
                                if (priorDiff != null && priorDiff.GetSnapshotId() == prior)
                                {
                                    priorChildrenDiff = priorDiff.GetChildrenDiff();
                                    counts.Add(priorChildrenDiff.DestroyCreatedList(bsps, dir, collectedBlocks, removedINodes
                                                                                    ));
                                }
                            }
                            foreach (INode child in dir.GetChildrenList(prior))
                            {
                                if (priorChildrenDiff != null && priorChildrenDiff.Search(Diff.ListType.Deleted,
                                                                                          child.GetLocalNameBytes()) != null)
                                {
                                    continue;
                                }
                                queue.AddLast(child);
                            }
                        }
                    }
                }
            }
            return(counts);
        }
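        // A minimal sketch (generic types, not HDFS API) of the traversal
        // pattern used above: an explicit FIFO queue replaces recursion, so
        // arbitrarily deep subtrees are handled without growing the call stack.
        private static void BreadthFirstSketch<T>(
            T root,
            System.Func<T, System.Collections.Generic.IEnumerable<T>> children,
            System.Action<T> visit)
        {
            var queue = new System.Collections.Generic.Queue<T>();
            queue.Enqueue(root);                 // queue.AddLast(inode) above
            while (queue.Count > 0)
            {
                T node = queue.Dequeue();        // queue.PollFirst() above
                visit(node);
                foreach (T child in children(node))
                {
                    queue.Enqueue(child);        // queue.AddLast(child) above
                }
            }
        }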
Example #17
        /// <summary>Test deleting a file with snapshots.</summary>
        /// <remarks>
        /// Test deleting a file with snapshots. Need to check the blocksMap to make
        /// sure the corresponding record is updated correctly.
        /// </remarks>
        /// <exception cref="System.Exception"/>
        public virtual void TestDeletionWithSnapshots()
        {
            Path file0 = new Path(sub1, "file0");
            Path file1 = new Path(sub1, "file1");
            Path sub2  = new Path(sub1, "sub2");
            Path file2 = new Path(sub2, "file2");
            Path file3 = new Path(sub1, "file3");
            Path file4 = new Path(sub1, "file4");
            Path file5 = new Path(sub1, "file5");

            // Create file under sub1
            DFSTestUtil.CreateFile(hdfs, file0, 4 * Blocksize, Replication, seed);
            DFSTestUtil.CreateFile(hdfs, file1, 2 * Blocksize, Replication, seed);
            DFSTestUtil.CreateFile(hdfs, file2, 3 * Blocksize, Replication, seed);
            {
                // Normal deletion
                INodeFile             f2     = AssertBlockCollection(file2.ToString(), 3, fsdir, blockmanager);
                BlockInfoContiguous[] blocks = f2.GetBlocks();
                hdfs.Delete(sub2, true);
                // The INode should have been removed from the blocksMap
                foreach (BlockInfoContiguous b in blocks)
                {
                    NUnit.Framework.Assert.IsNull(blockmanager.GetBlockCollection(b));
                }
            }
            // Create snapshots for sub1
            string[] snapshots = new string[] { "s0", "s1", "s2" };
            DFSTestUtil.CreateFile(hdfs, file3, 5 * Blocksize, Replication, seed);
            SnapshotTestHelper.CreateSnapshot(hdfs, sub1, snapshots[0]);
            DFSTestUtil.CreateFile(hdfs, file4, 1 * Blocksize, Replication, seed);
            SnapshotTestHelper.CreateSnapshot(hdfs, sub1, snapshots[1]);
            DFSTestUtil.CreateFile(hdfs, file5, 7 * Blocksize, Replication, seed);
            SnapshotTestHelper.CreateSnapshot(hdfs, sub1, snapshots[2]);
            {
                // set replication so that the inode should be replaced for snapshots
                INodeFile f1 = AssertBlockCollection(file1.ToString(), 2, fsdir, blockmanager);
                NUnit.Framework.Assert.AreSame(typeof(INodeFile), f1.GetType());
                hdfs.SetReplication(file1, (short)2);
                f1 = AssertBlockCollection(file1.ToString(), 2, fsdir, blockmanager);
                NUnit.Framework.Assert.IsTrue(f1.IsWithSnapshot());
                NUnit.Framework.Assert.IsFalse(f1.IsUnderConstruction());
            }
            // Check the block information for file0
            INodeFile f0 = AssertBlockCollection(file0.ToString(), 4, fsdir, blockmanager);

            BlockInfoContiguous[] blocks0 = f0.GetBlocks();
            // Also check the block information for snapshot of file0
            Path snapshotFile0 = SnapshotTestHelper.GetSnapshotPath(sub1, "s0", file0.GetName
                                                                        ());

            AssertBlockCollection(snapshotFile0.ToString(), 4, fsdir, blockmanager);
            // Delete file0
            hdfs.Delete(file0, true);
            // Make sure the blocks of file0 are still in the blocksMap
            foreach (BlockInfoContiguous b_1 in blocks0)
            {
                NUnit.Framework.Assert.IsNotNull(blockmanager.GetBlockCollection(b_1));
            }
            AssertBlockCollection(snapshotFile0.ToString(), 4, fsdir, blockmanager);
            // Compare the INode in the blocksMap with INodes for snapshots
            string s1f0 = SnapshotTestHelper.GetSnapshotPath(sub1, "s1", file0.GetName()).ToString
                              ();

            AssertBlockCollection(s1f0, 4, fsdir, blockmanager);
            // Delete snapshot s1
            hdfs.DeleteSnapshot(sub1, "s1");
            // Make sure the first block of file0 is still in blocksMap
            foreach (BlockInfoContiguous b_2 in blocks0)
            {
                NUnit.Framework.Assert.IsNotNull(blockmanager.GetBlockCollection(b_2));
            }
            AssertBlockCollection(snapshotFile0.ToString(), 4, fsdir, blockmanager);
            try
            {
                INodeFile.ValueOf(fsdir.GetINode(s1f0), s1f0);
                NUnit.Framework.Assert.Fail("Expect FileNotFoundException when identifying the INode in a deleted Snapshot"
                                            );
            }
            catch (IOException e)
            {
                GenericTestUtils.AssertExceptionContains("File does not exist: " + s1f0, e);
            }
        }
Example #18
        /// <exception cref="System.Exception"/>
        internal virtual INodeFile GetINodeFile(Path p)
        {
            string s = p.ToString();

            return(INodeFile.ValueOf(fsdir.GetINode(s), s));
        }
Example #19
 /// <summary>Add a modified file</summary>
 internal virtual void AddFileDiff(INodeFile file, byte[][] relativePath)
 {
     diffMap[file] = relativePath;
 }
Example #20
 /// <summary>
 /// Recursively compute the difference between snapshots under a given
 /// directory/file.
 /// </summary>
 /// <param name="snapshotRoot">The directory where snapshots were taken.</param>
 /// <param name="node">The directory/file under which the diff is computed.</param>
 /// <param name="parentPath">
 /// Relative path (corresponding to the snapshot root) of
 /// the node's parent.
 /// </param>
 /// <param name="diffReport">data structure used to store the diff.</param>
 private void ComputeDiffRecursively(INodeDirectory snapshotRoot, INode node, IList
                                     <byte[]> parentPath, SnapshotDiffInfo diffReport)
 {
     Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot earlierSnapshot =
         diffReport.IsFromEarlier() ? diffReport.GetFrom() : diffReport.GetTo();
     Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot laterSnapshot =
         diffReport.IsFromEarlier() ? diffReport.GetTo() : diffReport.GetFrom();
     byte[][] relativePath = Sharpen.Collections.ToArray(parentPath, new byte[parentPath.Count][]);
     if (node.IsDirectory())
     {
         DirectoryWithSnapshotFeature.ChildrenDiff diff = new DirectoryWithSnapshotFeature.ChildrenDiff();
         INodeDirectory dir = node.AsDirectory();
         DirectoryWithSnapshotFeature sf = dir.GetDirectoryWithSnapshotFeature();
         if (sf != null)
         {
             bool change = sf.ComputeDiffBetweenSnapshots(earlierSnapshot, laterSnapshot, diff
                                                          , dir);
             if (change)
             {
                 diffReport.AddDirDiff(dir, relativePath, diff);
             }
         }
         ReadOnlyList <INode> children = dir.GetChildrenList(earlierSnapshot.GetId());
         foreach (INode child in children)
         {
             byte[] name      = child.GetLocalNameBytes();
             bool   toProcess = diff.SearchIndex(Diff.ListType.Deleted, name) < 0;
             if (!toProcess && child is INodeReference.WithName)
             {
                  byte[][] renameTargetPath = FindRenameTargetPath(
                      snapshotRoot, (INodeReference.WithName)child,
                      laterSnapshot == null
                          ? Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId
                          : laterSnapshot.GetId());
                 if (renameTargetPath != null)
                 {
                     toProcess = true;
                     diffReport.SetRenameTarget(child.GetId(), renameTargetPath);
                 }
             }
             if (toProcess)
             {
                 parentPath.AddItem(name);
                 ComputeDiffRecursively(snapshotRoot, child, parentPath, diffReport);
                 parentPath.Remove(parentPath.Count - 1);
             }
         }
     }
     else
     {
         if (node.IsFile() && node.AsFile().IsWithSnapshot())
         {
             INodeFile file   = node.AsFile();
             bool      change = file.GetFileWithSnapshotFeature().ChangedBetweenSnapshots(file, earlierSnapshot
                                                                                          , laterSnapshot);
             if (change)
             {
                 diffReport.AddFileDiff(file, relativePath);
             }
         }
     }
 }
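 // A sketch (illustrative types, not HDFS API) of the path bookkeeping used
 // above: one shared list is pushed before recursing into a child and popped
 // right after, so a single buffer serves the whole traversal.
 internal sealed class NodeSketch
 {
     internal string Name;
     internal System.Collections.Generic.List<NodeSketch> Children =
         new System.Collections.Generic.List<NodeSketch>();
 }

 internal static void WalkSketch(NodeSketch node,
                                 System.Collections.Generic.List<string> path,
                                 System.Action<System.Collections.Generic.List<string>> visit)
 {
     visit(path);                          // diffReport.Add*Diff(..., relativePath)
     foreach (NodeSketch child in node.Children)
     {
         path.Add(child.Name);             // parentPath.AddItem(name)
         WalkSketch(child, path, visit);
         path.RemoveAt(path.Count - 1);    // parentPath.Remove(parentPath.Count - 1)
     }
 }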
        public virtual void TestBlockReceived()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, 1024);
            MiniDFSCluster cluster = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(DatanodeCount).Build();
                cluster.WaitActive();
                DistributedFileSystem hdfs       = cluster.GetFileSystem();
                FSNamesystem          fsn        = cluster.GetNamesystem();
                BlockManager          blkManager = fsn.GetBlockManager();
                string file       = "/tmp.txt";
                Path   filePath   = new Path(file);
                short  replFactor = 1;
                DFSTestUtil.CreateFile(hdfs, filePath, 1024L, replFactor, 0);
                // temporarily stop the heartbeat
                AList <DataNode> datanodes = cluster.GetDataNodes();
                for (int i = 0; i < DatanodeCount; i++)
                {
                    DataNodeTestUtils.SetHeartbeatsDisabledForTests(datanodes[i], true);
                }
                hdfs.SetReplication(filePath, (short)DatanodeCount);
                BlockManagerTestUtil.ComputeAllPendingWork(blkManager);
                NUnit.Framework.Assert.AreEqual(1, blkManager.pendingReplications.Size());
                INodeFile fileNode = fsn.GetFSDirectory().GetINode4Write(file).AsFile();
                Block[]   blocks   = fileNode.GetBlocks();
                NUnit.Framework.Assert.AreEqual(DatanodeCount - 1, blkManager.pendingReplications
                                                .GetNumReplicas(blocks[0]));
                LocatedBlock locatedBlock = hdfs.GetClient().GetLocatedBlocks(file, 0).Get(0);
                DatanodeInfo existingDn   = (locatedBlock.GetLocations())[0];
                int          reportDnNum  = 0;
                string       poolId       = cluster.GetNamesystem().GetBlockPoolId();
                // let two datanodes (other than the one that already has the data)
                // report to the NN
                for (int i_1 = 0; i_1 < DatanodeCount && reportDnNum < 2; i_1++)
                {
                    if (!datanodes[i_1].GetDatanodeId().Equals(existingDn))
                    {
                        DatanodeRegistration           dnR    = datanodes[i_1].GetDNRegistrationForBP(poolId);
                        StorageReceivedDeletedBlocks[] report = new StorageReceivedDeletedBlocks[]
                        {
                            new StorageReceivedDeletedBlocks("Fake-storage-ID-Ignored",
                                new ReceivedDeletedBlockInfo[]
                                {
                                    new ReceivedDeletedBlockInfo(blocks[0],
                                        ReceivedDeletedBlockInfo.BlockStatus.ReceivedBlock, string.Empty)
                                })
                        };
                        cluster.GetNameNodeRpc().BlockReceivedAndDeleted(dnR, poolId, report);
                        reportDnNum++;
                    }
                }
                NUnit.Framework.Assert.AreEqual(DatanodeCount - 3, blkManager.pendingReplications
                                                .GetNumReplicas(blocks[0]));
                // let the same datanodes report again
                for (int i_2 = 0; i_2 < DatanodeCount && reportDnNum < 2; i_2++)
                {
                    if (!datanodes[i_2].GetDatanodeId().Equals(existingDn))
                    {
                        DatanodeRegistration           dnR    = datanodes[i_2].GetDNRegistrationForBP(poolId);
                        StorageReceivedDeletedBlocks[] report = new StorageReceivedDeletedBlocks[]
                        {
                            new StorageReceivedDeletedBlocks("Fake-storage-ID-Ignored",
                                new ReceivedDeletedBlockInfo[]
                                {
                                    new ReceivedDeletedBlockInfo(blocks[0],
                                        ReceivedDeletedBlockInfo.BlockStatus.ReceivedBlock, string.Empty)
                                })
                        };
                        cluster.GetNameNodeRpc().BlockReceivedAndDeleted(dnR, poolId, report);
                        reportDnNum++;
                    }
                }
                NUnit.Framework.Assert.AreEqual(DatanodeCount - 3, blkManager.pendingReplications
                                                .GetNumReplicas(blocks[0]));
                // re-enable heartbeat for the datanode that has data
                for (int i_3 = 0; i_3 < DatanodeCount; i_3++)
                {
                    DataNodeTestUtils.SetHeartbeatsDisabledForTests(datanodes[i_3], false);
                    DataNodeTestUtils.TriggerHeartbeat(datanodes[i_3]);
                }
                Sharpen.Thread.Sleep(5000);
                NUnit.Framework.Assert.AreEqual(0, blkManager.pendingReplications.Size());
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #22
 /// <exception cref="System.IO.IOException"/>
 public static void SaveFileDiffList(INodeFile file, DataOutput @out)
 {
     SaveINodeDiffs(file.GetDiffs(), @out, null);
 }
Example #23
        /// <summary>
        /// Test that an append with no locations fails with an exception
        /// showing insufficient locations.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestAppendInsufficientLocations()
        {
            Configuration conf = new Configuration();

            // lower heartbeat interval for fast recognition of DN
            conf.SetInt(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 1000);
            conf.SetInt(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
            conf.SetInt(DFSConfigKeys.DfsClientSocketTimeoutKey, 3000);
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(4).Build();
            DistributedFileSystem fileSystem = null;

            try
            {
                // create a file with replication 3
                fileSystem = cluster.GetFileSystem();
                Path f = new Path("/testAppend");
                FSDataOutputStream create = fileSystem.Create(f, (short)2);
                create.Write(Sharpen.Runtime.GetBytesForString("/testAppend"));
                create.Close();
                // Check for replications
                DFSTestUtil.WaitReplication(fileSystem, f, (short)2);
                // Shut down all DNs that have the last block location for the file
                LocatedBlocks lbs = fileSystem.dfs.GetNamenode().GetBlockLocations("/testAppend",
                                                                                   0, long.MaxValue);
                IList <DataNode> dnsOfCluster     = cluster.GetDataNodes();
                DatanodeInfo[]   dnsWithLocations = lbs.GetLastLocatedBlock().GetLocations();
                foreach (DataNode dn in dnsOfCluster)
                {
                    foreach (DatanodeInfo loc in dnsWithLocations)
                    {
                        if (dn.GetDatanodeId().Equals(loc))
                        {
                            dn.Shutdown();
                            DFSTestUtil.WaitForDatanodeDeath(dn);
                        }
                    }
                }
                // Wait till 0 replication is recognized
                DFSTestUtil.WaitReplication(fileSystem, f, (short)0);
                // Append to the file. At this point there are 3 live DNs but none of
                // them has the block.
                try
                {
                    fileSystem.Append(f);
                    NUnit.Framework.Assert.Fail("Append should fail because insufficient locations");
                }
                catch (IOException e)
                {
                    Log.Info("Expected exception: ", e);
                }
                FSDirectory dir   = cluster.GetNamesystem().GetFSDirectory();
                INodeFile   inode = INodeFile.ValueOf(dir.GetINode("/testAppend"), "/testAppend");
                NUnit.Framework.Assert.IsTrue("File should remain closed", !inode.IsUnderConstruction
                                                  ());
            }
            finally
            {
                if (null != fileSystem)
                {
                    fileSystem.Close();
                }
                cluster.Shutdown();
            }
        }
        /// <summary>Apply a CacheDirective to a file.</summary>
        /// <param name="directive">The CacheDirective to apply.</param>
        /// <param name="file">The file.</param>
        private void RescanFile(CacheDirective directive, INodeFile file)
        {
            BlockInfoContiguous[] blockInfos = file.GetBlocks();
            // Increment the "needed" statistics
            directive.AddFilesNeeded(1);
            // We don't cache UC blocks, don't add them to the total here
            long neededTotal = file.ComputeFileSizeNotIncludingLastUcBlock() * directive.GetReplication
                                   ();

            directive.AddBytesNeeded(neededTotal);
            // The pool's bytesNeeded is incremented as we scan. If the demand
            // thus far plus the demand of this file would exceed the pool's limit,
            // do not cache this file.
            CachePool pool = directive.GetPool();

            if (pool.GetBytesNeeded() > pool.GetLimit())
            {
                Log.Debug("Directive {}: not scanning file {} because " + "bytesNeeded for pool {} is {}, but the pool's limit is {}"
                          , directive.GetId(), file.GetFullPathName(), pool.GetPoolName(), pool.GetBytesNeeded
                              (), pool.GetLimit());
                return;
            }
            long cachedTotal = 0;

            foreach (BlockInfoContiguous blockInfo in blockInfos)
            {
                if (!blockInfo.GetBlockUCState().Equals(HdfsServerConstants.BlockUCState.Complete
                                                        ))
                {
                    // We don't try to cache blocks that are under construction.
                    Log.Trace("Directive {}: can't cache block {} because it is in state " + "{}, not COMPLETE."
                              , directive.GetId(), blockInfo, blockInfo.GetBlockUCState());
                    continue;
                }
                Block       block   = new Block(blockInfo.GetBlockId());
                CachedBlock ncblock = new CachedBlock(block.GetBlockId(), directive.GetReplication
                                                          (), mark);
                CachedBlock ocblock = cachedBlocks.Get(ncblock);
                if (ocblock == null)
                {
                    cachedBlocks.Put(ncblock);
                    ocblock = ncblock;
                }
                else
                {
                    // Update bytesUsed using the current replication levels.
                    // Assumptions: we assume that all the blocks are the same length
                    // on each datanode.  We can assume this because we're only caching
                    // blocks in state COMPLETE.
                    // Note that if two directives are caching the same block(s), they will
                    // both get them added to their bytesCached.
                    IList <DatanodeDescriptor> cachedOn = ocblock.GetDatanodes(DatanodeDescriptor.CachedBlocksList.Type
                                                                               .Cached);
                    long cachedByBlock = Math.Min(cachedOn.Count, directive.GetReplication()) * blockInfo
                                         .GetNumBytes();
                    cachedTotal += cachedByBlock;
                    if ((mark != ocblock.GetMark()) || (ocblock.GetReplication() < directive.GetReplication
                                                            ()))
                    {
                        //
                        // Overwrite the block's replication and mark in two cases:
                        //
                        // 1. If the mark on the CachedBlock is different from the mark for
                        // this scan, that means the block hasn't been updated during this
                        // scan, and we should overwrite whatever is there, since it is no
                        // longer valid.
                        //
                        // 2. If the replication in the CachedBlock is less than what the
                        // directive asks for, we want to increase the block's replication
                        // field to what the directive asks for.
                        //
                        ocblock.SetReplicationAndMark(directive.GetReplication(), mark);
                    }
                }
                Log.Trace("Directive {}: setting replication for block {} to {}", directive.GetId
                              (), blockInfo, ocblock.GetReplication());
            }
            // Increment the "cached" statistics
            directive.AddBytesCached(cachedTotal);
            if (cachedTotal == neededTotal)
            {
                directive.AddFilesCached(1);
            }
            Log.Debug("Directive {}: caching {}: {}/{} bytes", directive.GetId(), file.GetFullPathName
                          (), cachedTotal, neededTotal);
        }
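        // A worked sketch of the cache accounting above, with hypothetical
        // numbers: neededTotal counts every byte at the directive's
        // replication, while a block only counts as cached up to that
        // replication, so over-cached replicas are not double-counted.
        private static bool IsFullyCachedSketch()
        {
            long  blockBytes  = 128L * 1024 * 1024;  // one 128 MB block
            short replication = 2;                   // directive.GetReplication()
            int   cachedOn    = 3;                   // datanodes caching the block
            long  needed      = blockBytes * replication;
            long  cached      = System.Math.Min(cachedOn, replication) * blockBytes;
            return cached == needed;  // true here, so filesCached would be bumped
        }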