        /// <summary>
        /// Test snapshot during file appending, before the corresponding
        /// <see cref="Org.Apache.Hadoop.FS.FSDataOutputStream"/>
        /// instance closes.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestSnapshotWhileAppending()
        {
            Path file = new Path(dir, "file");

            DFSTestUtil.CreateFile(hdfs, file, Blocksize, Replication, seed);
            // 1. append without closing stream --> create snapshot
            HdfsDataOutputStream @out = AppendFileWithoutClosing(file, Blocksize);

            @out.Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag.UpdateLength));
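            // hsync with the UPDATE_LENGTH flag persists the appended length on the
            // NameNode, so the snapshot taken next records the file at 2 * Blocksize.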
            SnapshotTestHelper.CreateSnapshot(hdfs, dir, "s0");
            @out.Close();
            // check: an INodeFileUnderConstructionWithSnapshot with size BLOCKSIZE * 2 should
            // have been stored in s0's deleted list
            INodeFile fileNode = (INodeFile)fsdir.GetINode(file.ToString());

            NUnit.Framework.Assert.AreEqual(Blocksize * 2, fileNode.ComputeFileSize());
            INodeDirectory dirNode = fsdir.GetINode(dir.ToString()).AsDirectory();

            DirectoryWithSnapshotFeature.DirectoryDiff last = dirNode.GetDiffs().GetLast();
            // 2. append without closing stream
            @out = AppendFileWithoutClosing(file, Blocksize);
            @out.Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag.UpdateLength));
            // re-check the file size recorded in snapshot s0
            dirNode = fsdir.GetINode(dir.ToString()).AsDirectory();
            NUnit.Framework.Assert.AreEqual(Blocksize * 2, fileNode.ComputeFileSize(last.GetSnapshotId()));
            // 3. take snapshot --> close stream
            hdfs.CreateSnapshot(dir, "s1");
            @out.Close();
            // check: an INodeFileUnderConstructionWithSnapshot with size BLOCKSIZE*3 should
            // have been stored in s1's deleted list
            fileNode = (INodeFile)fsdir.GetINode(file.ToString());
            dirNode  = fsdir.GetINode(dir.ToString()).AsDirectory();
            last     = dirNode.GetDiffs().GetLast();
            NUnit.Framework.Assert.IsTrue(fileNode.IsWithSnapshot());
            NUnit.Framework.Assert.AreEqual(Blocksize * 3, fileNode.ComputeFileSize(last.GetSnapshotId()));
            // 4. modify file --> append without closing stream --> take snapshot -->
            // close stream
            hdfs.SetReplication(file, (short)(Replication - 1));
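            // The replication change modifies the file's metadata before the next append,
            // so snapshot s2 records a modified file as well.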
            @out = AppendFileWithoutClosing(file, Blocksize);
            hdfs.CreateSnapshot(dir, "s2");
            @out.Close();
            // re-check the file size recorded in snapshot s1
            NUnit.Framework.Assert.AreEqual(Blocksize * 3, fileNode.ComputeFileSize(last.GetSnapshotId()));
        }
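
        // The helper AppendFileWithoutClosing is defined outside this snippet. Below is a
        // minimal sketch of what it presumably does (the body is an assumption, not the
        // original implementation): append `length` random bytes to `file` and return the
        // still-open stream, leaving the file in the under-construction state.
        private HdfsDataOutputStream AppendFileWithoutClosing(Path file, int length)
        {
            byte[] toAppend = new byte[length];
            new Random().NextBytes(toAppend);
            HdfsDataOutputStream @out = (HdfsDataOutputStream)hdfs.Append(file);
            @out.Write(toAppend);
            return @out;
        }
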
        /// <summary>Test deleting a file with snapshots.</summary>
        /// <remarks>
        /// Test deleting a file with snapshots. Need to check the blocksMap to make
        /// sure the corresponding record is updated correctly.
        /// </remarks>
        /// <exception cref="System.Exception"/>
        public virtual void TestDeletionWithSnapshots()
        {
            Path file0 = new Path(sub1, "file0");
            Path file1 = new Path(sub1, "file1");
            Path sub2  = new Path(sub1, "sub2");
            Path file2 = new Path(sub2, "file2");
            Path file3 = new Path(sub1, "file3");
            Path file4 = new Path(sub1, "file4");
            Path file5 = new Path(sub1, "file5");

            // Create file under sub1
            DFSTestUtil.CreateFile(hdfs, file0, 4 * Blocksize, Replication, seed);
            DFSTestUtil.CreateFile(hdfs, file1, 2 * Blocksize, Replication, seed);
            DFSTestUtil.CreateFile(hdfs, file2, 3 * Blocksize, Replication, seed);
            {
                // Normal deletion
                INodeFile             f2     = AssertBlockCollection(file2.ToString(), 3, fsdir, blockmanager);
                BlockInfoContiguous[] blocks = f2.GetBlocks();
                hdfs.Delete(sub2, true);
                // The INode should have been removed from the blocksMap
                foreach (BlockInfoContiguous b in blocks)
                {
                    NUnit.Framework.Assert.IsNull(blockmanager.GetBlockCollection(b));
                }
            }
            // Create snapshots for sub1
            string[] snapshots = new string[] { "s0", "s1", "s2" };
            DFSTestUtil.CreateFile(hdfs, file3, 5 * Blocksize, Replication, seed);
            SnapshotTestHelper.CreateSnapshot(hdfs, sub1, snapshots[0]);
            DFSTestUtil.CreateFile(hdfs, file4, 1 * Blocksize, Replication, seed);
            SnapshotTestHelper.CreateSnapshot(hdfs, sub1, snapshots[1]);
            DFSTestUtil.CreateFile(hdfs, file5, 7 * Blocksize, Replication, seed);
            SnapshotTestHelper.CreateSnapshot(hdfs, sub1, snapshots[2]);
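            // Snapshot contents at this point: s0 sees file0, file1 and file3; s1 additionally
            // sees file4; s2 additionally sees file5.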
            {
                // Set replication so that the file is modified under the snapshots; the inode
                // keeps its INodeFile type and gains the with-snapshot feature instead of
                // being replaced
                INodeFile f1 = AssertBlockCollection(file1.ToString(), 2, fsdir, blockmanager);
                NUnit.Framework.Assert.AreSame(typeof(INodeFile), f1.GetType());
                hdfs.SetReplication(file1, (short)2);
                f1 = AssertBlockCollection(file1.ToString(), 2, fsdir, blockmanager);
                NUnit.Framework.Assert.IsTrue(f1.IsWithSnapshot());
                NUnit.Framework.Assert.IsFalse(f1.IsUnderConstruction());
            }
            // Check the block information for file0
            INodeFile f0 = AssertBlockCollection(file0.ToString(), 4, fsdir, blockmanager);

            BlockInfoContiguous[] blocks0 = f0.GetBlocks();
            // Also check the block information for snapshot of file0
            Path snapshotFile0 = SnapshotTestHelper.GetSnapshotPath(sub1, "s0", file0.GetName());

            AssertBlockCollection(snapshotFile0.ToString(), 4, fsdir, blockmanager);
            // Delete file0
            hdfs.Delete(file0, true);
            // Make sure the blocks of file0 are still in the blocksMap (the snapshots still reference the file)
            foreach (BlockInfoContiguous b_1 in blocks0)
            {
                NUnit.Framework.Assert.IsNotNull(blockmanager.GetBlockCollection(b_1));
            }
            AssertBlockCollection(snapshotFile0.ToString(), 4, fsdir, blockmanager);
            // Compare the INode in the blocksMap with INodes for snapshots
            string s1f0 = SnapshotTestHelper.GetSnapshotPath(sub1, "s1", file0.GetName()).ToString();

            AssertBlockCollection(s1f0, 4, fsdir, blockmanager);
            // Delete snapshot s1
            hdfs.DeleteSnapshot(sub1, "s1");
            // Make sure the blocks of file0 are still in the blocksMap after deleting snapshot s1
            foreach (BlockInfoContiguous b_2 in blocks0)
            {
                NUnit.Framework.Assert.IsNotNull(blockmanager.GetBlockCollection(b_2));
            }
            AssertBlockCollection(snapshotFile0.ToString(), 4, fsdir, blockmanager);
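            // Snapshot s1 is gone, so resolving file0 through the s1 snapshot path must now fail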
            try
            {
                INodeFile.ValueOf(fsdir.GetINode(s1f0), s1f0);
                NUnit.Framework.Assert.Fail("Expect FileNotFoundException when identifying the INode in a deleted Snapshot"
                                            );
            }
            catch (IOException e)
            {
                GenericTestUtils.AssertExceptionContains("File does not exist: " + s1f0, e);
            }
        }
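
        // AssertBlockCollection is also defined outside this snippet. Below is a minimal
        // sketch of the checks it presumably performs (signature and body are assumptions):
        // resolve the path to an INodeFile, verify the expected block count, and verify
        // that each block registered in the blocksMap still points back to that file.
        internal static INodeFile AssertBlockCollection(string path, int numBlocks,
                                                        FSDirectory dir, BlockManager blkManager)
        {
            INodeFile file = INodeFile.ValueOf(dir.GetINode(path), path);
            NUnit.Framework.Assert.AreEqual(numBlocks, file.GetBlocks().Length);
            foreach (BlockInfoContiguous b in file.GetBlocks())
            {
                NUnit.Framework.Assert.AreSame(file, blkManager.GetBlockCollection(b));
            }
            return file;
        }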