Example #1
        /// <summary>Test FileStatus of snapshot file before/after rename</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestSnapshotRename()
        {
            DFSTestUtil.CreateFile(hdfs, file1, Blocksize, Replication, seed);
            // Create snapshot for sub1
            Path snapshotRoot = SnapshotTestHelper.CreateSnapshot(hdfs, sub1, "s1");
            Path ssPath       = new Path(snapshotRoot, file1.GetName());

            NUnit.Framework.Assert.IsTrue(hdfs.Exists(ssPath));
            FileStatus statusBeforeRename = hdfs.GetFileStatus(ssPath);

            // Rename the snapshot
            hdfs.RenameSnapshot(sub1, "s1", "s2");
            // <sub1>/.snapshot/s1/file1 should no longer exist
            NUnit.Framework.Assert.IsFalse(hdfs.Exists(ssPath));
            snapshotRoot = SnapshotTestHelper.GetSnapshotRoot(sub1, "s2");
            ssPath       = new Path(snapshotRoot, file1.GetName());
            // Instead, <sub1>/.snapshot/s2/file1 should exist
            NUnit.Framework.Assert.IsTrue(hdfs.Exists(ssPath));
            FileStatus statusAfterRename = hdfs.GetFileStatus(ssPath);

            // FileStatus of the snapshot should not change except the path
            NUnit.Framework.Assert.IsFalse(statusBeforeRename.Equals(statusAfterRename));
            statusBeforeRename.SetPath(statusAfterRename.GetPath());
            NUnit.Framework.Assert.AreEqual(statusBeforeRename.ToString(), statusAfterRename.ToString());
        }
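Note: this snippet relies on fixture state (hdfs, cluster, sub1, file1, Blocksize, Replication, seed) that the page does not show. A minimal sketch of what such a fixture might look like, with illustrative values only (the real test class may differ):

        // Hypothetical fixture assumed by Example #1 and several later snippets.
        private const int   Blocksize   = 1024;
        private const short Replication = 3;
        private const long  seed        = 0;
        private MiniDFSCluster        cluster;
        private DistributedFileSystem hdfs;
        private readonly Path sub1  = new Path("/TestSnapshot/sub1");
        private readonly Path file1 = new Path("/TestSnapshot/sub1/file1");

        [SetUp]
        public void SetUp()
        {
            // Start a small in-process cluster and grab its file system.
            Configuration conf = new Configuration();
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(Replication).Build();
            cluster.WaitActive();
            hdfs = cluster.GetFileSystem();
        }

        [TearDown]
        public void TearDown()
        {
            if (cluster != null)
            {
                cluster.Shutdown();
            }
        }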
Example #2
        public virtual void TestConcatInEditLog()
        {
            Path TestDir = new Path("/testConcatInEditLog");
            long FileLen = blockSize;

            // 1. Concat some files
            Path[] srcFiles = new Path[3];
            for (int i = 0; i < srcFiles.Length; i++)
            {
                Path path = new Path(TestDir, "src-" + i);
                DFSTestUtil.CreateFile(dfs, path, FileLen, ReplFactor, 1);
                srcFiles[i] = path;
            }
            Path targetFile = new Path(TestDir, "target");

            DFSTestUtil.CreateFile(dfs, targetFile, FileLen, ReplFactor, 1);
            dfs.Concat(targetFile, srcFiles);
            // 2. Verify the concat operation basically worked, and record
            // file status.
            NUnit.Framework.Assert.IsTrue(dfs.Exists(targetFile));
            FileStatus origStatus = dfs.GetFileStatus(targetFile);

            // 3. Restart NN to force replay from edit log
            cluster.RestartNameNode(true);
            // 4. Verify concat operation was replayed correctly and file status
            // did not change.
            NUnit.Framework.Assert.IsTrue(dfs.Exists(targetFile));
            NUnit.Framework.Assert.IsFalse(dfs.Exists(srcFiles[0]));
            FileStatus statusAfterRestart = dfs.GetFileStatus(targetFile);

            NUnit.Framework.Assert.AreEqual(origStatus.GetModificationTime(), statusAfterRestart.GetModificationTime());
        }
Example #3
        public virtual void HSyncEndBlock_00()
        {
            int           preferredBlockSize = 1024;
            Configuration conf = new HdfsConfiguration();

            conf.SetInt(DFSConfigKeys.DfsBlockSizeKey, preferredBlockSize);
            MiniDFSCluster        cluster    = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();
            DistributedFileSystem fileSystem = cluster.GetFileSystem();
            FSDataOutputStream    stm        = null;

            try
            {
                Path path = new Path("/" + fName);
                stm = fileSystem.Create(path, true, 4096, (short)2, AppendTestUtil.BlockSize);
                System.Console.Out.WriteLine("Created file " + path.ToString());
                ((DFSOutputStream)stm.GetWrappedStream()).Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag.EndBlock));
                long currentFileLength = fileSystem.GetFileStatus(path).GetLen();
                NUnit.Framework.Assert.AreEqual(0L, currentFileLength);
                LocatedBlocks blocks = fileSystem.dfs.GetLocatedBlocks(path.ToString(), 0);
                NUnit.Framework.Assert.AreEqual(0, blocks.GetLocatedBlocks().Count);
                // write a block and call hsync(end_block) at the block boundary
                stm.Write(new byte[preferredBlockSize]);
                ((DFSOutputStream)stm.GetWrappedStream()).Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag.EndBlock));
                currentFileLength = fileSystem.GetFileStatus(path).GetLen();
                NUnit.Framework.Assert.AreEqual(preferredBlockSize, currentFileLength);
                blocks = fileSystem.dfs.GetLocatedBlocks(path.ToString(), 0);
                NUnit.Framework.Assert.AreEqual(1, blocks.GetLocatedBlocks().Count);
                // call hsync then call hsync(end_block) immediately
                stm.Write(new byte[preferredBlockSize / 2]);
                stm.Hsync();
                ((DFSOutputStream)stm.GetWrappedStream()).Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag.EndBlock));
                currentFileLength = fileSystem.GetFileStatus(path).GetLen();
                NUnit.Framework.Assert.AreEqual(preferredBlockSize + preferredBlockSize / 2, currentFileLength);
                blocks = fileSystem.dfs.GetLocatedBlocks(path.ToString(), 0);
                NUnit.Framework.Assert.AreEqual(2, blocks.GetLocatedBlocks().Count);
                stm.Write(new byte[preferredBlockSize / 4]);
                stm.Hsync();
                currentFileLength = fileSystem.GetFileStatus(path).GetLen();
                NUnit.Framework.Assert.AreEqual(preferredBlockSize + preferredBlockSize / 2 + preferredBlockSize / 4, currentFileLength);
                blocks = fileSystem.dfs.GetLocatedBlocks(path.ToString(), 0);
                NUnit.Framework.Assert.AreEqual(3, blocks.GetLocatedBlocks().Count);
            }
            finally
            {
                IOUtils.Cleanup(null, stm, fileSystem);
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
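The cast through GetWrappedStream() recurs in several examples because the SyncFlag-taking Hsync overload lives on DFSOutputStream rather than on FSDataOutputStream. A tiny helper of our own (not part of the HDFS API) could tidy the call sites:

        // Hypothetical convenience wrapper; assumes the stream really wraps a
        // DFSOutputStream, as it does in the examples above.
        private static void HsyncWithFlag(FSDataOutputStream stm, HdfsDataOutputStream.SyncFlag flag)
        {
            ((DFSOutputStream)stm.GetWrappedStream()).Hsync(EnumSet.Of(flag));
        }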
Example #4
        public virtual void TestAppend()
        {
            Configuration  conf        = new HdfsConfiguration();
            short          Replication = (short)3;
            MiniDFSCluster cluster     = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();

            try
            {
                DistributedFileSystem fs = cluster.GetFileSystem();
                Path f = new Path(Dir, "testAppend");
                {
                    Log.Info("create an empty file " + f);
                    fs.Create(f, Replication).Close();
                    FileStatus status = fs.GetFileStatus(f);
                    NUnit.Framework.Assert.AreEqual(Replication, status.GetReplication());
                    NUnit.Framework.Assert.AreEqual(0L, status.GetLen());
                }
                byte[] bytes = new byte[1000];
                {
                    Log.Info("append " + bytes.Length + " bytes to " + f);
                    FSDataOutputStream @out = fs.Append(f);
                    @out.Write(bytes);
                    @out.Close();
                    FileStatus status = fs.GetFileStatus(f);
                    NUnit.Framework.Assert.AreEqual(Replication, status.GetReplication());
                    NUnit.Framework.Assert.AreEqual(bytes.Length, status.GetLen());
                }
                {
                    Log.Info("append another " + bytes.Length + " bytes to " + f);
                    try
                    {
                        FSDataOutputStream @out = fs.Append(f);
                        @out.Write(bytes);
                        @out.Close();
                        NUnit.Framework.Assert.Fail();
                    }
                    catch (IOException ioe)
                    {
                        Log.Info("This exception is expected", ioe);
                    }
                }
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #5
        /// <summary>
        /// Test fsimage loading when 1) there is an empty file loaded from fsimage,
        /// and 2) there is later an append operation to be applied from edit log.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestLoadImageWithEmptyFile()
        {
            // create an empty file
            Path file = new Path(dir, "file");
            FSDataOutputStream @out = hdfs.Create(file);

            @out.Close();
            // save namespace
            hdfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
            hdfs.SaveNamespace();
            hdfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
            // append to the empty file
            @out = hdfs.Append(file);
            @out.Write(1);
            @out.Close();
            // restart cluster
            cluster.Shutdown();
            cluster = new MiniDFSCluster.Builder(conf).Format(false).NumDataNodes(Replication).Build();
            cluster.WaitActive();
            hdfs = cluster.GetFileSystem();
            FileStatus status = hdfs.GetFileStatus(file);

            NUnit.Framework.Assert.AreEqual(1, status.GetLen());
        }
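The safe-mode bracketing above is needed because the NameNode only saves the namespace while in safe mode; the same idiom appears in several later examples. Sketched as a reusable helper (the helper itself is ours, not an HDFS API):

        private static void CheckpointNamespace(DistributedFileSystem fs)
        {
            // SaveNamespace is rejected unless the NameNode is in safe mode.
            fs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
            fs.SaveNamespace();
            fs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
        }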
Example #6
        /// <exception cref="System.IO.IOException"/>
        private void AssertPathEquals(Path p1, Path p2)
        {
            FileStatus p1Stat = fs.GetFileStatus(p1);
            FileStatus p2Stat = fs.GetFileStatus(p2);

            /*
             * Use accessTime and modificationTime as substitutes for INode to check
             * for resolution to the same underlying file.
             */
            NUnit.Framework.Assert.AreEqual("Access times not equal", p1Stat.GetAccessTime(),
                                            p2Stat.GetAccessTime());
            NUnit.Framework.Assert.AreEqual("Modification times not equal", p1Stat.GetModificationTime
                                                (), p2Stat.GetModificationTime());
            NUnit.Framework.Assert.AreEqual("pathname1 not equal", p1, Path.GetPathWithoutSchemeAndAuthority
                                                (p1Stat.GetPath()));
            NUnit.Framework.Assert.AreEqual("pathname1 not equal", p2, Path.GetPathWithoutSchemeAndAuthority
                                                (p2Stat.GetPath()));
        }
Example #7
        /// <summary>TC11: Racing rename</summary>
        /// <exception cref="System.Exception"/>
        private void TestTC11(bool appendToNewBlock)
        {
            Path p = new Path("/TC11/foo" + (appendToNewBlock ? "0" : "1"));

            System.Console.Out.WriteLine("p=" + p);
            //a. Create file and write one block of data. Close file.
            int len1 = (int)BlockSize;
            {
                FSDataOutputStream @out = fs.Create(p, false, buffersize, Replication, BlockSize);
                AppendTestUtil.Write(@out, 0, len1);
                @out.Close();
            }
            //b. Reopen file in "append" mode. Append half block of data.
            FSDataOutputStream out_1 = appendToNewBlock
                ? fs.Append(p, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock), 4096, null)
                : fs.Append(p);
            int len2 = (int)BlockSize / 2;

            AppendTestUtil.Write(out_1, len1, len2);
            out_1.Hflush();
            //c. Rename file to file.new.
            Path pnew = new Path(p + ".new");

            NUnit.Framework.Assert.IsTrue(fs.Rename(p, pnew));
            //d. Close file handle that was opened in (b).
            out_1.Close();
            //check block sizes
            long          len           = fs.GetFileStatus(pnew).GetLen();
            LocatedBlocks locatedblocks = fs.dfs.GetNamenode().GetBlockLocations(pnew.ToString(), 0L, len);
            int numblock = locatedblocks.LocatedBlockCount();

            for (int i = 0; i < numblock; i++)
            {
                LocatedBlock  lb   = locatedblocks.Get(i);
                ExtendedBlock blk  = lb.GetBlock();
                long          size = lb.GetBlockSize();
                if (i < numblock - 1)
                {
                    NUnit.Framework.Assert.AreEqual(BlockSize, size);
                }
                foreach (DatanodeInfo datanodeinfo in lb.GetLocations())
                {
                    DataNode dn       = cluster.GetDataNode(datanodeinfo.GetIpcPort());
                    Block    metainfo = DataNodeTestUtils.GetFSDataset(dn).GetStoredBlock(blk.GetBlockPoolId(), blk.GetBlockId());
                    NUnit.Framework.Assert.AreEqual(size, metainfo.GetNumBytes());
                }
            }
        }
Example #8
        public virtual void TestAppend2AfterSoftLimit()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetInt(DFSConfigKeys.DfsReplicationKey, 1);
            //Set small soft-limit for lease
            long           softLimit = 1L;
            long           hardLimit = 9999999L;
            MiniDFSCluster cluster   = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();

            cluster.SetLeasePeriod(softLimit, hardLimit);
            cluster.WaitActive();
            DistributedFileSystem fs  = cluster.GetFileSystem();
            DistributedFileSystem fs2 = new DistributedFileSystem();

            fs2.Initialize(fs.GetUri(), conf);
            Path testPath = new Path("/testAppendAfterSoftLimit");

            byte[] fileContents = AppendTestUtil.InitBuffer(32);
            // create a new file without closing
            FSDataOutputStream @out = fs.Create(testPath);

            @out.Write(fileContents);
            //Wait for > soft-limit
            Sharpen.Thread.Sleep(250);
            try
            {
                FSDataOutputStream appendStream2 = fs2.Append(testPath, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock), 4096, null);
                appendStream2.Write(fileContents);
                appendStream2.Close();
                NUnit.Framework.Assert.AreEqual(fileContents.Length, fs.GetFileStatus(testPath).GetLen());
                // make sure we now have 1 block since the first writer was revoked
                LocatedBlocks blks = fs.GetClient().GetLocatedBlocks(testPath.ToString(), 0L);
                NUnit.Framework.Assert.AreEqual(1, blks.GetLocatedBlocks().Count);
                foreach (LocatedBlock blk in blks.GetLocatedBlocks())
                {
                    NUnit.Framework.Assert.AreEqual(fileContents.Length, blk.GetBlockSize());
                }
            }
            finally
            {
                fs.Close();
                fs2.Close();
                cluster.Shutdown();
            }
        }
Example #9
        /// <summary>Check the replication of a given file.</summary>
        /// <remarks>
        /// Check the replication of a given file. We test both
        /// <see cref="Org.Apache.Hadoop.Hdfs.Server.Namenode.INodeFile.GetFileReplication()"
        ///     />
        /// and
        /// <see cref="Org.Apache.Hadoop.Hdfs.Server.Namenode.INodeFile.GetBlockReplication()
        ///     "/>
        /// .
        /// </remarks>
        /// <param name="file">The given file</param>
        /// <param name="replication">The expected replication number</param>
        /// <param name="blockReplication">The expected replication number for the block</param>
        /// <exception cref="System.Exception"/>
        private void CheckFileReplication(Path file, short replication, short blockReplication)
        {
            // Get FileStatus of file1, and identify the replication number of file1.
            // Note that the replication number in FileStatus was derived from
            // INodeFile#getFileReplication().
            short fileReplication = hdfs.GetFileStatus(file1).GetReplication();

            NUnit.Framework.Assert.AreEqual(replication, fileReplication);
            // Check the correctness of getBlockReplication()
            INode inode = fsdir.GetINode(file1.ToString());

            NUnit.Framework.Assert.IsTrue(inode is INodeFile);
            NUnit.Framework.Assert.AreEqual(blockReplication, ((INodeFile)inode).GetBlockReplication());
        }
Example #10
            /// <exception cref="System.Exception"/>
            public object Run()
            {
                DistributedFileSystem fs = this._enclosing.cluster.GetFileSystem();

                try
                {
                    fs.GetFileStatus(ezRawEncFile);
                    NUnit.Framework.Assert.Fail("access to /.reserved/raw is superuser-only operation"
                                                );
                }
                catch (AccessControlException e)
                {
                    GenericTestUtils.AssertExceptionContains("Superuser privilege is required", e);
                }
                return(null);
            }
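This snippet is only the Run() body of a privileged action, so it does not compile on its own. It is typically driven through UserGroupInformation.DoAs, roughly as below (the action class and user names are made up; Sharpen normally emits the Run() body as a nested PrivilegedExceptionAction class):

            UserGroupInformation user = UserGroupInformation.CreateUserForTesting(
                "nonSuperUser", new string[] { "someGroup" });
            user.DoAs(new _Action_ReadRawEncFile(this));  // executes the Run() shown above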
Example #11
        /// <summary>Test NN crash and client crash/stuck immediately after block allocation</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestOpenFileWhenNNAndClientCrashAfterAddBlock()
        {
            cluster.GetConfiguration(0).Set(DFSConfigKeys.DfsNamenodeSafemodeThresholdPctKey, "1.0f");
            string testData = "testData";

            // to make sure we write the full block before creating dummy block at NN.
            cluster.GetConfiguration(0).SetInt("io.bytes.per.checksum", testData.Length);
            cluster.RestartNameNode(0);
            try
            {
                cluster.WaitActive();
                cluster.TransitionToActive(0);
                cluster.TransitionToStandby(1);
                DistributedFileSystem dfs     = cluster.GetFileSystem(0);
                string             pathString = "/tmp1.txt";
                Path               filePath   = new Path(pathString);
                FSDataOutputStream create     = dfs.Create(filePath, FsPermission.GetDefault(), true,
                                                           1024, (short)3, testData.Length, null);
                create.Write(Sharpen.Runtime.GetBytesForString(testData));
                create.Hflush();
                long       fileId     = ((DFSOutputStream)create.GetWrappedStream()).GetFileId();
                FileStatus fileStatus = dfs.GetFileStatus(filePath);
                DFSClient  client     = DFSClientAdapter.GetClient(dfs);
                // add one dummy block at NN, but not write to DataNode
                ExtendedBlock previousBlock = DFSClientAdapter.GetPreviousBlock(client, fileId);
                DFSClientAdapter.GetNamenode(client).AddBlock(pathString, client.GetClientName(),
                                                              new ExtendedBlock(previousBlock), new DatanodeInfo[0],
                                                              DFSClientAdapter.GetFileId((DFSOutputStream)create.GetWrappedStream()), null);
                cluster.RestartNameNode(0, true);
                cluster.RestartDataNode(0);
                cluster.TransitionToActive(0);
                // let the block reports be processed.
                Sharpen.Thread.Sleep(2000);
                FSDataInputStream @is = dfs.Open(filePath);
                @is.Close();
                dfs.RecoverLease(filePath);  // initiate recovery
                NUnit.Framework.Assert.IsTrue("Recovery also should be success", dfs.RecoverLease(filePath));
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #12
        /// <summary>Ensure mtime and atime can be loaded from fsimage.</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestLoadMtimeAtime()
        {
            Configuration  conf    = new Configuration();
            MiniDFSCluster cluster = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
                cluster.WaitActive();
                DistributedFileSystem hdfs = cluster.GetFileSystem();
                string userDir             = hdfs.GetHomeDirectory().ToUri().GetPath().ToString();
                Path   file = new Path(userDir, "file");
                Path   dir  = new Path(userDir, "/dir");
                Path   link = new Path(userDir, "/link");
                hdfs.CreateNewFile(file);
                hdfs.Mkdirs(dir);
                hdfs.CreateSymlink(file, link, false);
                long mtimeFile = hdfs.GetFileStatus(file).GetModificationTime();
                long atimeFile = hdfs.GetFileStatus(file).GetAccessTime();
                long mtimeDir  = hdfs.GetFileStatus(dir).GetModificationTime();
                long mtimeLink = hdfs.GetFileLinkStatus(link).GetModificationTime();
                long atimeLink = hdfs.GetFileLinkStatus(link).GetAccessTime();
                // save namespace and restart cluster
                hdfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
                hdfs.SaveNamespace();
                hdfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
                cluster.Shutdown();
                cluster = new MiniDFSCluster.Builder(conf).Format(false).NumDataNodes(1).Build();
                cluster.WaitActive();
                hdfs = cluster.GetFileSystem();
                NUnit.Framework.Assert.AreEqual(mtimeFile, hdfs.GetFileStatus(file).GetModificationTime());
                NUnit.Framework.Assert.AreEqual(atimeFile, hdfs.GetFileStatus(file).GetAccessTime());
                NUnit.Framework.Assert.AreEqual(mtimeDir, hdfs.GetFileStatus(dir).GetModificationTime());
                NUnit.Framework.Assert.AreEqual(mtimeLink, hdfs.GetFileLinkStatus(link).GetModificationTime());
                NUnit.Framework.Assert.AreEqual(atimeLink, hdfs.GetFileLinkStatus(link).GetAccessTime());
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #13
        /// <exception cref="System.Exception"/>
        private void VerifyFilesUnreadablebyHDFS(MiniDFSCluster cluster, Path root)
        {
            DistributedFileSystem fs    = cluster.GetFileSystem();
            Queue<Path>           paths = new List<Path>();

            paths.AddItem(root);
            while (!paths.IsEmpty())
            {
                Path       p    = paths.Poll();
                FileStatus stat = fs.GetFileStatus(p);
                if (!stat.IsDirectory())
                {
                    try
                    {
                        Log.Warn("\n\n ##Testing path [" + p + "]\n\n");
                        fs.Open(p);
                        NUnit.Framework.Assert.Fail("Super user should not be able to read [" + UserGroupInformation
                                                    .GetCurrentUser() + "] [" + p.GetName() + "]");
                    }
                    catch (AccessControlException e)
                    {
                        NUnit.Framework.Assert.IsTrue(e.Message.Contains("superuser is not allowed to perform this operation"
                                                                         ));
                    }
                    catch (Exception)
                    {
                        NUnit.Framework.Assert.Fail("Should get an AccessControlException here");
                    }
                }
                if (stat.IsDirectory())
                {
                    FileStatus[] ls = fs.ListStatus(p);
                    foreach (FileStatus f in ls)
                    {
                        paths.AddItem(f.GetPath());
                    }
                }
            }
        }
Example #14
        public virtual void TestBestEffort()
        {
            Configuration conf = new HdfsConfiguration();

            //always replace a datanode but do not throw exception
            ReplaceDatanodeOnFailure.Write(ReplaceDatanodeOnFailure.Policy.Always, true, conf);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();

            try
            {
                DistributedFileSystem fs = cluster.GetFileSystem();
                Path   f     = new Path(Dir, "testIgnoreReplaceFailure");
                byte[] bytes = new byte[1000];
                {
                    Log.Info("write " + bytes.Length + " bytes to " + f);
                    FSDataOutputStream @out = fs.Create(f, Replication);
                    @out.Write(bytes);
                    @out.Close();
                    FileStatus status = fs.GetFileStatus(f);
                    NUnit.Framework.Assert.AreEqual(Replication, status.GetReplication());
                    NUnit.Framework.Assert.AreEqual(bytes.Length, status.GetLen());
                }
                {
                    Log.Info("append another " + bytes.Length + " bytes to " + f);
                    FSDataOutputStream @out = fs.Append(f);
                    @out.Write(bytes);
                    @out.Close();
                }
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #15
        public virtual void HSyncUpdateLength_00()
        {
            Configuration         conf       = new HdfsConfiguration();
            MiniDFSCluster        cluster    = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();
            DistributedFileSystem fileSystem = cluster.GetFileSystem();

            try
            {
                Path path = new Path(fName);
                FSDataOutputStream stm = fileSystem.Create(path, true, 4096, (short)2, AppendTestUtil.BlockSize);
                System.Console.Out.WriteLine("Created file " + path.ToString());
                ((DFSOutputStream)stm.GetWrappedStream()).Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag.UpdateLength));
                long currentFileLength = fileSystem.GetFileStatus(path).GetLen();
                NUnit.Framework.Assert.AreEqual(0L, currentFileLength);
                stm.Close();
            }
            finally
            {
                fileSystem.Close();
                cluster.Shutdown();
            }
        }
Example #16
 // create a bunch of files. Write to them and then verify.
 public override void Run()
 {
     System.Console.Out.WriteLine("Workload " + this.id + " starting... ");
     for (int i = 0; i < this._enclosing.numAppendsPerThread; i++)
     {
         // pick a file at random and remove it from pool
         Path testfile;
         lock (this._enclosing.testFiles)
         {
             if (this._enclosing.testFiles.Count == 0)
             {
                 System.Console.Out.WriteLine("Completed write to almost all files.");
                 return;
             }
             int index = AppendTestUtil.NextInt(this._enclosing.testFiles.Count);
             testfile = this._enclosing.testFiles.Remove(index);
         }
         long len          = 0;
         int  sizeToAppend = 0;
         try
         {
             DistributedFileSystem fs = this.cluster.GetFileSystem();
             // add a random number of bytes to file
             len = fs.GetFileStatus(testfile).GetLen();
             // if file is already full, then pick another file
             if (len >= AppendTestUtil.FileSize)
             {
                 System.Console.Out.WriteLine("File " + testfile + " is full.");
                 continue;
             }
             // do small size appends so that we can trigger multiple
             // appends to the same file.
             //
             int left = (int)(AppendTestUtil.FileSize - len) / 3;
             if (left <= 0)
             {
                 left = 1;
             }
             sizeToAppend = AppendTestUtil.NextInt(left);
             System.Console.Out.WriteLine("Workload thread " + this.id + " appending " + sizeToAppend
                                          + " bytes " + " to file " + testfile + " of size " + len);
              FSDataOutputStream stm = this.appendToNewBlock
                  ? fs.Append(testfile, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock), 4096, null)
                  : fs.Append(testfile);
             stm.Write(this._enclosing.fileContents, (int)len, sizeToAppend);
             stm.Close();
             // wait for the file size to be reflected in the namenode metadata
             while (fs.GetFileStatus(testfile).GetLen() != (len + sizeToAppend))
             {
                 try
                 {
                     System.Console.Out.WriteLine("Workload thread " + this.id + " file " + testfile +
                                                  " size " + fs.GetFileStatus(testfile).GetLen() + " expected size " + (len + sizeToAppend
                                                                                                                        ) + " waiting for namenode metadata update.");
                     Sharpen.Thread.Sleep(5000);
                 }
                 catch (Exception)
                 {
                 }
             }
             NUnit.Framework.Assert.IsTrue("File " + testfile + " size is " + fs.GetFileStatus
                                               (testfile).GetLen() + " but expected " + (len + sizeToAppend), fs.GetFileStatus(
                                               testfile).GetLen() == (len + sizeToAppend));
              AppendTestUtil.CheckFullFile(fs, testfile, (int)(len + sizeToAppend), this._enclosing.fileContents, "Read 2");
         }
         catch (Exception e)
         {
             TestFileAppend2.globalStatus = false;
             if (e.ToString() != null)
             {
                 System.Console.Out.WriteLine("Workload exception " + this.id + " testfile " + testfile
                                              + " " + e);
                 Sharpen.Runtime.PrintStackTrace(e);
             }
             NUnit.Framework.Assert.IsTrue("Workload exception " + this.id + " testfile " + testfile
                                           + " expected size " + (len + sizeToAppend), false);
         }
         // Add testfile back to the pool of files.
         lock (this._enclosing.testFiles)
         {
             this._enclosing.testFiles.AddItem(testfile);
         }
     }
 }
Example #17
        public static void CreateOriginalFSImage()
        {
            MiniDFSCluster cluster = null;

            try
            {
                Configuration conf = new Configuration();
                conf.SetLong(DFSConfigKeys.DfsNamenodeDelegationTokenMaxLifetimeKey, 10000);
                conf.SetLong(DFSConfigKeys.DfsNamenodeDelegationTokenRenewIntervalKey, 5000);
                conf.SetBoolean(DFSConfigKeys.DfsNamenodeDelegationTokenAlwaysUseKey, true);
                conf.Set(CommonConfigurationKeysPublic.HadoopSecurityAuthToLocal,
                         "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
                cluster.WaitActive();
                DistributedFileSystem hdfs = cluster.GetFileSystem();
                // Create a reasonable namespace
                for (int i = 0; i < NumDirs; i++)
                {
                    Path dir = new Path("/dir" + i);
                    hdfs.Mkdirs(dir);
                    writtenFiles[dir.ToString()] = PathToFileEntry(hdfs, dir.ToString());
                    for (int j = 0; j < FilesPerDir; j++)
                    {
                        Path file            = new Path(dir, "file" + j);
                        FSDataOutputStream o = hdfs.Create(file);
                        o.Write(23);
                        o.Close();
                        writtenFiles[file.ToString()] = PathToFileEntry(hdfs, file.ToString());
                    }
                }
                // Create an empty directory
                Path emptydir = new Path("/emptydir");
                hdfs.Mkdirs(emptydir);
                writtenFiles[emptydir.ToString()] = hdfs.GetFileStatus(emptydir);
                //Create a directory whose name should be escaped in XML
                Path invalidXMLDir = new Path("/dirContainingInvalidXMLChar\u0000here");
                hdfs.Mkdirs(invalidXMLDir);
                // Get delegation tokens so we log the delegation token op
                Org.Apache.Hadoop.Security.Token.Token<object>[] delegationTokens = hdfs.AddDelegationTokens(TestRenewer, null);
                foreach (Org.Apache.Hadoop.Security.Token.Token<object> t in delegationTokens)
                {
                    Log.Debug("got token " + t);
                }
                Path snapshot = new Path("/snapshot");
                hdfs.Mkdirs(snapshot);
                hdfs.AllowSnapshot(snapshot);
                hdfs.Mkdirs(new Path("/snapshot/1"));
                hdfs.Delete(snapshot, true);
                // Set XAttrs so the fsimage contains XAttr ops
                Path xattr = new Path("/xattr");
                hdfs.Mkdirs(xattr);
                hdfs.SetXAttr(xattr, "user.a1", new byte[] { unchecked ((int)(0x31)), unchecked ((int
                                                                                                  )(0x32)), unchecked ((int)(0x33)) });
                hdfs.SetXAttr(xattr, "user.a2", new byte[] { unchecked ((int)(0x37)), unchecked ((int
                                                                                                  )(0x38)), unchecked ((int)(0x39)) });
                // OIV should be able to handle empty value XAttrs
                hdfs.SetXAttr(xattr, "user.a3", null);
                writtenFiles[xattr.ToString()] = hdfs.GetFileStatus(xattr);
                // Write results to the fsimage file
                hdfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter, false);
                hdfs.SaveNamespace();
                // Determine location of fsimage file
                originalFsimage = FSImageTestUtil.FindLatestImageFile(
                    FSImageTestUtil.GetFSImage(cluster.GetNameNode()).GetStorage().GetStorageDir(0));
                if (originalFsimage == null)
                {
                    throw new RuntimeException("Didn't generate or can't find fsimage");
                }
                Log.Debug("original FS image file is " + originalFsimage);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #18
        public virtual void TestGetBlockLocations()
        {
            Path root = new Path("/");
            Path file = new Path("/file");

            DFSTestUtil.CreateFile(hdfs, file, Blocksize, Replication, seed);
            // take a snapshot on root
            SnapshotTestHelper.CreateSnapshot(hdfs, root, "s1");
            Path fileInSnapshot = SnapshotTestHelper.GetSnapshotPath(root, "s1", file.GetName
                                                                         ());
            FileStatus status = hdfs.GetFileStatus(fileInSnapshot);

            // make sure we record the size for the file
            NUnit.Framework.Assert.AreEqual(Blocksize, status.GetLen());
            // append data to file
            DFSTestUtil.AppendFile(hdfs, file, Blocksize - 1);
            status = hdfs.GetFileStatus(fileInSnapshot);
            // the size of snapshot file should still be BLOCKSIZE
            NUnit.Framework.Assert.AreEqual(Blocksize, status.GetLen());
            // the size of the file should be (2 * BLOCKSIZE - 1)
            status = hdfs.GetFileStatus(file);
            NUnit.Framework.Assert.AreEqual(Blocksize * 2 - 1, status.GetLen());
            // call DFSClient#callGetBlockLocations for the file in snapshot
            LocatedBlocks blocks = DFSClientAdapter.CallGetBlockLocations(cluster.GetNameNodeRpc(),
                                                                          fileInSnapshot.ToString(), 0, long.MaxValue);
            IList<LocatedBlock> blockList = blocks.GetLocatedBlocks();

            // should be only one block
            NUnit.Framework.Assert.AreEqual(Blocksize, blocks.GetFileLength());
            NUnit.Framework.Assert.AreEqual(1, blockList.Count);
            // check the last block
            LocatedBlock lastBlock = blocks.GetLastLocatedBlock();

            NUnit.Framework.Assert.AreEqual(0, lastBlock.GetStartOffset());
            NUnit.Framework.Assert.AreEqual(Blocksize, lastBlock.GetBlockSize());
            // take another snapshot
            SnapshotTestHelper.CreateSnapshot(hdfs, root, "s2");
            Path fileInSnapshot2 = SnapshotTestHelper.GetSnapshotPath(root, "s2", file.GetName
                                                                          ());
            // append data to file without closing
            HdfsDataOutputStream @out = AppendFileWithoutClosing(file, Blocksize);

            @out.Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag.UpdateLength));
            status = hdfs.GetFileStatus(fileInSnapshot2);
            // the size of snapshot file should be BLOCKSIZE*2-1
            NUnit.Framework.Assert.AreEqual(Blocksize * 2 - 1, status.GetLen());
            // the size of the file should be (3 * BLOCKSIZE - 1)
            status = hdfs.GetFileStatus(file);
            NUnit.Framework.Assert.AreEqual(Blocksize * 3 - 1, status.GetLen());
            blocks = DFSClientAdapter.CallGetBlockLocations(cluster.GetNameNodeRpc(),
                                                            fileInSnapshot2.ToString(), 0, long.MaxValue);
            NUnit.Framework.Assert.IsFalse(blocks.IsUnderConstruction());
            NUnit.Framework.Assert.IsTrue(blocks.IsLastBlockComplete());
            blockList = blocks.GetLocatedBlocks();
            // should be 2 blocks
            NUnit.Framework.Assert.AreEqual(Blocksize * 2 - 1, blocks.GetFileLength());
            NUnit.Framework.Assert.AreEqual(2, blockList.Count);
            // check the last block
            lastBlock = blocks.GetLastLocatedBlock();
            NUnit.Framework.Assert.AreEqual(Blocksize, lastBlock.GetStartOffset());
            NUnit.Framework.Assert.AreEqual(Blocksize, lastBlock.GetBlockSize());
            blocks = DFSClientAdapter.CallGetBlockLocations(cluster.GetNameNodeRpc(),
                                                            fileInSnapshot2.ToString(), Blocksize, 0);
            blockList = blocks.GetLocatedBlocks();
            NUnit.Framework.Assert.AreEqual(1, blockList.Count);
            // check blocks for file being written
            blocks = DFSClientAdapter.CallGetBlockLocations(cluster.GetNameNodeRpc(),
                                                            file.ToString(), 0, long.MaxValue);
            blockList = blocks.GetLocatedBlocks();
            NUnit.Framework.Assert.AreEqual(3, blockList.Count);
            NUnit.Framework.Assert.IsTrue(blocks.IsUnderConstruction());
            NUnit.Framework.Assert.IsFalse(blocks.IsLastBlockComplete());
            lastBlock = blocks.GetLastLocatedBlock();
            NUnit.Framework.Assert.AreEqual(Blocksize * 2, lastBlock.GetStartOffset());
            NUnit.Framework.Assert.AreEqual(Blocksize - 1, lastBlock.GetBlockSize());
            @out.Close();
        }
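TestGetBlockLocations calls a helper, AppendFileWithoutClosing, that the page does not show. A sketch of what it plausibly does, assuming the fixture's hdfs field: append the requested number of random bytes and hand back the still-open stream so the last block stays under construction:

        // Sketch only; the real helper in the Hadoop test may differ in detail.
        private HdfsDataOutputStream AppendFileWithoutClosing(Path file, int length)
        {
            byte[] toAppend = new byte[length];
            new Random().NextBytes(toAppend);
            // Deliberately not closed here: the caller closes it after the assertions.
            HdfsDataOutputStream @out = (HdfsDataOutputStream)hdfs.Append(file);
            @out.Write(toAppend);
            return @out;
        }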
Example #19
        /// <summary>
        /// The method starts a new cluster with the given Configuration, creates a file
        /// with the specified block_size, and writes 10 equal sections into it; it calls
        /// hflush/hsync after each write and throws an IOException on any error.
        /// </summary>
        /// <param name="conf">cluster configuration</param>
        /// <param name="fileName">of the file to be created and processed as required</param>
        /// <param name="block_size">value to be used for the file's creation</param>
        /// <param name="replicas">is the number of replicas</param>
        /// <param name="isSync">hsync or hflush</param>
        /// <param name="syncFlags">specify the semantic of the sync/flush</param>
        /// <exception cref="System.IO.IOException">in case of any errors</exception>
        public static void DoTheJob(Configuration conf, string fileName, long block_size,
                                    short replicas, bool isSync, EnumSet<HdfsDataOutputStream.SyncFlag> syncFlags)
        {
            byte[] fileContent;
            int    Sections = 10;

            fileContent = AppendTestUtil.InitBuffer(AppendTestUtil.FileSize);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(replicas).Build();
            // Make sure we work with DFS in order to utilize all its functionality
            DistributedFileSystem fileSystem = cluster.GetFileSystem();
            FSDataInputStream     @is;

            try
            {
                Path   path     = new Path(fileName);
                string pathName = new Path(fileSystem.GetWorkingDirectory(), path).ToUri().GetPath();
                FSDataOutputStream stm = fileSystem.Create(path, false, 4096, replicas, block_size);
                System.Console.Out.WriteLine("Created file " + fileName);
                int tenth    = AppendTestUtil.FileSize / Sections;
                int rounding = AppendTestUtil.FileSize - tenth * Sections;
                for (int i = 0; i < Sections; i++)
                {
                    System.Console.Out.WriteLine("Writing " + (tenth * i) + " to " + (tenth * (i + 1)
                                                                                      ) + " section to file " + fileName);
                    // write to the file
                    stm.Write(fileContent, tenth * i, tenth);
                    // Wait while hflush/hsync pushes all packets through built pipeline
                    if (isSync)
                    {
                        ((DFSOutputStream)stm.GetWrappedStream()).Hsync(syncFlags);
                    }
                    else
                    {
                        ((DFSOutputStream)stm.GetWrappedStream()).Hflush();
                    }
                    // Check file length if updatelength is required
                    if (isSync && syncFlags.Contains(HdfsDataOutputStream.SyncFlag.UpdateLength))
                    {
                        long currentFileLength = fileSystem.GetFileStatus(path).GetLen();
                        NUnit.Framework.Assert.AreEqual("File size doesn't match for hsync/hflush with updating the length"
                                                        , tenth * (i + 1), currentFileLength);
                    }
                    else if (isSync && syncFlags.Contains(HdfsDataOutputStream.SyncFlag.EndBlock))
                    {
                        LocatedBlocks blocks = fileSystem.dfs.GetLocatedBlocks(pathName, 0);
                        NUnit.Framework.Assert.AreEqual(i + 1, blocks.GetLocatedBlocks().Count);
                    }
                    byte[] toRead   = new byte[tenth];
                    byte[] expected = new byte[tenth];
                    System.Array.Copy(fileContent, tenth * i, expected, 0, tenth);
                    // Open the same file for read. Need to create new reader after every write operation(!)
                    @is = fileSystem.Open(path);
                    @is.Seek(tenth * i);
                    int readBytes = @is.Read(toRead, 0, tenth);
                    System.Console.Out.WriteLine("Has read " + readBytes);
                    NUnit.Framework.Assert.IsTrue("Should've get more bytes", (readBytes > 0) && (readBytes
                                                                                                  <= tenth));
                    @is.Close();
                    CheckData(toRead, 0, readBytes, expected, "Partial verification");
                }
                System.Console.Out.WriteLine("Writing " + (tenth * Sections) + " to " + (tenth *
                                                                                         Sections + rounding) + " section to file " + fileName);
                stm.Write(fileContent, tenth * Sections, rounding);
                stm.Close();
                NUnit.Framework.Assert.AreEqual("File size doesn't match ", AppendTestUtil.FileSize
                                                , fileSystem.GetFileStatus(path).GetLen());
                AppendTestUtil.CheckFullFile(fileSystem, path, fileContent.Length, fileContent, "hflush()"
                                             );
            }
            finally
            {
                fileSystem.Close();
                cluster.Shutdown();
            }
        }
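One plausible way to invoke DoTheJob, for illustration (the path, replica count, and flag choice here are arbitrary, not from the original test):

            Configuration conf = new HdfsConfiguration();
            DoTheJob(conf, "/hflush-updatelength", AppendTestUtil.BlockSize, (short)2, true,
                     EnumSet.Of(HdfsDataOutputStream.SyncFlag.UpdateLength));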
Example #20
        public virtual void TestSoftLeaseRecovery()
        {
            IDictionary<string, string[]> u2g_map = new Dictionary<string, string[]>(1);

            u2g_map[fakeUsername] = new string[] { fakeGroup };
            DFSTestUtil.UpdateConfWithFakeGroupMapping(conf, u2g_map);
            // Reset default lease periods
            cluster.SetLeasePeriod(HdfsConstants.LeaseSoftlimitPeriod, HdfsConstants.LeaseHardlimitPeriod);
            //create a file
            // create a random file name
            string filestr = "/foo" + AppendTestUtil.NextInt();

            AppendTestUtil.Log.Info("filestr=" + filestr);
            Path filepath          = new Path(filestr);
            FSDataOutputStream stm = dfs.Create(filepath, true, BufSize, ReplicationNum, BlockSize);

            NUnit.Framework.Assert.IsTrue(dfs.dfs.Exists(filestr));
            // write random number of bytes into it.
            int size = AppendTestUtil.NextInt(FileSize);

            AppendTestUtil.Log.Info("size=" + size);
            stm.Write(buffer, 0, size);
            // hflush file
            AppendTestUtil.Log.Info("hflush");
            stm.Hflush();
            AppendTestUtil.Log.Info("leasechecker.interruptAndJoin()");
            dfs.dfs.GetLeaseRenewer().InterruptAndJoin();
            // set the soft limit to be 1 second so that the
            // namenode triggers lease recovery on next attempt to write-for-open.
            cluster.SetLeasePeriod(ShortLeasePeriod, LongLeasePeriod);
            {
                // try to re-open the file before closing the previous handle. This
                // should fail but will trigger lease recovery.
                UserGroupInformation ugi = UserGroupInformation.CreateUserForTesting(fakeUsername,
                                                                                     new string[] { fakeGroup });
                FileSystem dfs2 = DFSTestUtil.GetFileSystemAs(ugi, conf);
                bool       done = false;
                for (int i = 0; i < 10 && !done; i++)
                {
                    AppendTestUtil.Log.Info("i=" + i);
                    try
                    {
                        dfs2.Create(filepath, false, BufSize, ReplicationNum, BlockSize);
                        NUnit.Framework.Assert.Fail("Creation of an existing file should never succeed.");
                    }
                    catch (FileAlreadyExistsException)
                    {
                        done = true;
                    }
                    catch (AlreadyBeingCreatedException ex)
                    {
                        AppendTestUtil.Log.Info("GOOD! got " + ex.Message);
                    }
                    catch (IOException ioe)
                    {
                        AppendTestUtil.Log.Warn("UNEXPECTED IOException", ioe);
                    }
                    if (!done)
                    {
                        AppendTestUtil.Log.Info("sleep " + 5000 + "ms");
                        try
                        {
                            Sharpen.Thread.Sleep(5000);
                        }
                        catch (Exception)
                        {
                        }
                    }
                }
                NUnit.Framework.Assert.IsTrue(done);
            }
            AppendTestUtil.Log.Info("Lease for file " + filepath + " is recovered. " + "Validating its contents now..."
                                    );
            // verify that file-size matches
            long fileSize = dfs.GetFileStatus(filepath).GetLen();

            NUnit.Framework.Assert.IsTrue("File should be " + size + " bytes, but is actually "
                                          + " found to be " + fileSize + " bytes", fileSize == size);
            // verify data
            AppendTestUtil.Log.Info("File size is good. " + "Now validating data and sizes from datanodes..."
                                    );
            AppendTestUtil.CheckFullFile(dfs, filepath, size, buffer, filestr);
        }
Example #21
        public virtual void TestReplaceDatanodeOnFailure()
        {
            Configuration conf = new HdfsConfiguration();

            //always replace a datanode
            ReplaceDatanodeOnFailure.Write(ReplaceDatanodeOnFailure.Policy.Always, true, conf);
            string[] racks = new string[Replication];
            Arrays.Fill(racks, Rack0);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Racks(racks).NumDataNodes(Replication).Build();

            try
            {
                DistributedFileSystem fs = cluster.GetFileSystem();
                Path dir = new Path(Dir);
                TestReplaceDatanodeOnFailure.SlowWriter[] slowwriters = new TestReplaceDatanodeOnFailure.SlowWriter[10];
                for (int i = 1; i <= slowwriters.Length; i++)
                {
                    //create slow writers in different speed
                    slowwriters[i - 1] = new TestReplaceDatanodeOnFailure.SlowWriter(fs, new Path(dir, "file" + i), i * 200L);
                }
                foreach (TestReplaceDatanodeOnFailure.SlowWriter s in slowwriters)
                {
                    s.Start();
                }
                // Let slow writers write something.
                // Some of them are too slow and will be not yet started.
                SleepSeconds(1);
                //start new datanodes
                cluster.StartDataNodes(conf, 2, true, null, new string[] { Rack1, Rack1 });
                //stop an old datanode
                cluster.StopDataNode(AppendTestUtil.NextInt(Replication));
                //Let the slow writer writes a few more seconds
                //Everyone should have written something.
                SleepSeconds(5);
                //check replication and interrupt.
                foreach (TestReplaceDatanodeOnFailure.SlowWriter s_1 in slowwriters)
                {
                    s_1.CheckReplication();
                    s_1.InterruptRunning();
                }
                //close files
                foreach (TestReplaceDatanodeOnFailure.SlowWriter s_2 in slowwriters)
                {
                    s_2.JoinAndClose();
                }
                //Verify the file
                Log.Info("Verify the file");
                for (int i_1 = 0; i_1 < slowwriters.Length; i_1++)
                {
                    Log.Info(slowwriters[i_1].filepath + ": length=" + fs.GetFileStatus(slowwriters[i_1
                                                                                        ].filepath).GetLen());
                    FSDataInputStream @in = null;
                    try
                    {
                        @in = fs.Open(slowwriters[i_1].filepath);
                        // Declare x before the loop; the original snippet used it undeclared.
                        int x;
                        for (int j = 0; (x = @in.Read()) != -1; j++)
                        {
                            NUnit.Framework.Assert.AreEqual(j, x);
                        }
                    }
                    finally
                    {
                        IOUtils.CloseStream(@in);
                    }
                }
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #22
        /// <summary>
        /// Test that we cannot read a file beyond its snapshot length
        /// when accessing it via a snapshot path.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestSnapshotfileLength()
        {
            hdfs.Mkdirs(sub);
            int bytesRead;

            byte[]            buffer     = new byte[Blocksize * 8];
            int               origLen    = Blocksize + 1;
            int               toAppend   = Blocksize;
            FSDataInputStream fis        = null;
            FileStatus        fileStatus = null;
            // Create and write a file.
            Path file1 = new Path(sub, file1Name);

            DFSTestUtil.CreateFile(hdfs, file1, Blocksize, 0, Blocksize, Replication, Seed);
            DFSTestUtil.AppendFile(hdfs, file1, origLen);
            // Create a snapshot on the parent directory.
            hdfs.AllowSnapshot(sub);
            hdfs.CreateSnapshot(sub, snapshot1);
            Path         file1snap1  = SnapshotTestHelper.GetSnapshotPath(sub, snapshot1, file1Name);
            FileChecksum snapChksum1 = hdfs.GetFileChecksum(file1snap1);

            Assert.AssertThat("file and snapshot file checksums are not equal", hdfs.GetFileChecksum
                                  (file1), CoreMatchers.Is(snapChksum1));
            // Append to the file.
            FSDataOutputStream @out = hdfs.Append(file1);

            // Nothing has been appended yet. All checksums should still be equal.
            Assert.AssertThat("file and snapshot checksums (open for append) are not equal",
                              hdfs.GetFileChecksum(file1), CoreMatchers.Is(snapChksum1));
            Assert.AssertThat("snapshot checksum (post-open for append) has changed", hdfs.GetFileChecksum
                                  (file1snap1), CoreMatchers.Is(snapChksum1));
            try
            {
                AppendTestUtil.Write(@out, 0, toAppend);
                // Test reading from snapshot of file that is open for append
                byte[] dataFromSnapshot = DFSTestUtil.ReadFileBuffer(hdfs, file1snap1);
                Assert.AssertThat("Wrong data size in snapshot.", dataFromSnapshot.Length, CoreMatchers.Is
                                      (origLen));
                // Verify that checksum didn't change
                Assert.AssertThat("snapshot file checksum (pre-close) has changed", hdfs.GetFileChecksum
                                      (file1), CoreMatchers.Is(snapChksum1));
                Assert.AssertThat("snapshot checksum (post-append) has changed", hdfs.GetFileChecksum
                                      (file1snap1), CoreMatchers.Is(snapChksum1));
            }
            finally
            {
                @out.Close();
            }
            Assert.AssertThat("file and snapshot file checksums (post-close) are equal", hdfs
                              .GetFileChecksum(file1), CoreMatchers.Not(snapChksum1));
            Assert.AssertThat("snapshot file checksum (post-close) has changed", hdfs.GetFileChecksum
                                  (file1snap1), CoreMatchers.Is(snapChksum1));
            // Make sure we can read the entire file via its non-snapshot path.
            fileStatus = hdfs.GetFileStatus(file1);
            Assert.AssertThat(fileStatus.GetLen(), CoreMatchers.Is((long)origLen + toAppend));
            fis       = hdfs.Open(file1);
            bytesRead = fis.Read(0, buffer, 0, buffer.Length);
            Assert.AssertThat(bytesRead, CoreMatchers.Is(origLen + toAppend));
            fis.Close();
            // Try to open the file via its snapshot path.
            fis        = hdfs.Open(file1snap1);
            fileStatus = hdfs.GetFileStatus(file1snap1);
            Assert.AssertThat(fileStatus.GetLen(), CoreMatchers.Is((long)origLen));
            // Make sure we can only read up to the snapshot length.
            bytesRead = fis.Read(0, buffer, 0, buffer.Length);
            Assert.AssertThat(bytesRead, CoreMatchers.Is(origLen));
            fis.Close();
            byte[] dataFromSnapshot_1 = DFSTestUtil.ReadFileBuffer(hdfs, file1snap1);
            Assert.AssertThat("Wrong data size in snapshot.", dataFromSnapshot_1.Length, CoreMatchers.Is
                                  (origLen));
        }
Example #23
        /// <summary>for snapshot file.</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestSnapshotPathINodes()
        {
            // Create a snapshot for the dir, and check the inodes for the path
            // pointing to a snapshot file
            hdfs.AllowSnapshot(sub1);
            hdfs.CreateSnapshot(sub1, "s1");
            // The path when accessing the snapshot file of file1 is
            // /TestSnapshot/sub1/.snapshot/s1/file1
            string snapshotPath = sub1.ToString() + "/.snapshot/s1/file1";

            string[]     names       = INode.GetPathNames(snapshotPath);
            byte[][]     components  = INode.GetPathComponents(names);
            INodesInPath nodesInPath = INodesInPath.Resolve(fsdir.rootDir, components, false);

            // Length of inodes should be (components.length - 1), since we will ignore
            // ".snapshot"
            NUnit.Framework.Assert.AreEqual(nodesInPath.Length(), components.Length - 1);
            // SnapshotRootIndex should be 3: {root, Testsnapshot, sub1, s1, file1}
            Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot snapshot = GetSnapshot(nodesInPath, "s1", 3);
            AssertSnapshot(nodesInPath, true, snapshot, 3);
            // Check the INode for file1 (snapshot file)
            INode snapshotFileNode = nodesInPath.GetLastINode();

            AssertINodeFile(snapshotFileNode, file1);
            NUnit.Framework.Assert.IsTrue(snapshotFileNode.GetParent().IsWithSnapshot());
            // Call getExistingPathINodes and request only one INode.
            nodesInPath = INodesInPath.Resolve(fsdir.rootDir, components, false);
            NUnit.Framework.Assert.AreEqual(nodesInPath.Length(), components.Length - 1);
            AssertSnapshot(nodesInPath, true, snapshot, 3);
            // Check the INode for file1 (snapshot file)
            AssertINodeFile(nodesInPath.GetLastINode(), file1);
            // Resolve the path "/TestSnapshot/sub1/.snapshot"
            string dotSnapshotPath = sub1.ToString() + "/.snapshot";

            names       = INode.GetPathNames(dotSnapshotPath);
            components  = INode.GetPathComponents(names);
            nodesInPath = INodesInPath.Resolve(fsdir.rootDir, components, false);
            // The number of INodes returned should still be components.length
            // since we put a null in the inode array for ".snapshot"
            NUnit.Framework.Assert.AreEqual(nodesInPath.Length(), components.Length);
            // No SnapshotRoot dir is included in the resolved inodes
            AssertSnapshot(nodesInPath, true, snapshot, -1);
            // The last INode should be null, the last but 1 should be sub1
            NUnit.Framework.Assert.IsNull(nodesInPath.GetLastINode());
            NUnit.Framework.Assert.AreEqual(nodesInPath.GetINode(-2).GetFullPathName(), sub1.ToString());
            NUnit.Framework.Assert.IsTrue(nodesInPath.GetINode(-2).IsDirectory());
            string[] invalidPathComponent = new string[] { "invalidDir", "foo", ".snapshot",
                                                           "bar" };
            Path invalidPath = new Path(invalidPathComponent[0]);

            for (int i = 1; i < invalidPathComponent.Length; i++)
            {
                invalidPath = new Path(invalidPath, invalidPathComponent[i]);
                try
                {
                    hdfs.GetFileStatus(invalidPath);
                    NUnit.Framework.Assert.Fail();
                }
                catch (FileNotFoundException fnfe)
                {
                    System.Console.Out.WriteLine("The exception is expected: " + fnfe);
                }
            }
            hdfs.DeleteSnapshot(sub1, "s1");
            hdfs.DisallowSnapshot(sub1);
        }