Example #1
        public virtual void TestDeletionWithZeroSizeBlock()
        {
            Path foo = new Path("/foo");
            Path bar = new Path(foo, "bar");

            DFSTestUtil.CreateFile(hdfs, bar, Blocksize, Replication, 0L);
            SnapshotTestHelper.CreateSnapshot(hdfs, foo, "s0");
            hdfs.Append(bar);
            INodeFile barNode = fsdir.GetINode4Write(bar.ToString()).AsFile();

            BlockInfoContiguous[] blks = barNode.GetBlocks();
            NUnit.Framework.Assert.AreEqual(1, blks.Length);
            NUnit.Framework.Assert.AreEqual(Blocksize, blks[0].GetNumBytes());
            ExtendedBlock previous = new ExtendedBlock(fsn.GetBlockPoolId(), blks[0]);

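            // ask the namenode directly for an additional block; the new last
            // block stays zero-length and the file remains under construction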
            cluster.GetNameNodeRpc().AddBlock(bar.ToString(), hdfs.GetClient().GetClientName(),
                                              previous, null, barNode.GetId(), null);
            SnapshotTestHelper.CreateSnapshot(hdfs, foo, "s1");
            barNode = fsdir.GetINode4Write(bar.ToString()).AsFile();
            blks    = barNode.GetBlocks();
            NUnit.Framework.Assert.AreEqual(2, blks.Length);
            NUnit.Framework.Assert.AreEqual(Blocksize, blks[0].GetNumBytes());
            NUnit.Framework.Assert.AreEqual(0, blks[1].GetNumBytes());
            hdfs.Delete(bar, true);
            Path sbar = SnapshotTestHelper.GetSnapshotPath(foo, "s1", bar.GetName());

            barNode = fsdir.GetINode(sbar.ToString()).AsFile();
            blks    = barNode.GetBlocks();
            NUnit.Framework.Assert.AreEqual(1, blks.Length);
            NUnit.Framework.Assert.AreEqual(Blocksize, blks[0].GetNumBytes());
        }
Example #2
        /// <summary>Append a file without closing the output stream</summary>
        /// <exception cref="System.IO.IOException"/>
        private HdfsDataOutputStream AppendFileWithoutClosing(Path file, int length)
        {
            byte[] toAppend = new byte[length];
            Random random   = new Random();

            random.NextBytes(toAppend);
            HdfsDataOutputStream @out = (HdfsDataOutputStream)hdfs.Append(file);

            @out.Write(toAppend);
            return(@out);
        }
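Since the helper deliberately returns the open stream, the file stays in the under-construction state until the caller closes it. A minimal usage sketch, assuming the same hdfs, Blocksize, and Replication members used in the surrounding examples (the path names here are hypothetical):

        // Hypothetical usage: keep the file open while taking a snapshot,
        // then close it afterwards.
        Path foo  = new Path("/foo");
        Path file = new Path(foo, "openFile");
        DFSTestUtil.CreateFile(hdfs, file, Blocksize, Replication, 0L);
        HdfsDataOutputStream @out = AppendFileWithoutClosing(file, Blocksize);
        // the appended bytes are not yet published to the namenode; an
        // hsync with SyncFlag.UpdateLength would be needed for that
        SnapshotTestHelper.CreateSnapshot(hdfs, foo, "s0");
        @out.Close();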
Example #3
        public virtual void TestFailedAppendBlockRejection()
        {
            Configuration conf = new HdfsConfiguration();

            conf.Set("dfs.client.block.write.replace-datanode-on-failure.enable", "false");
            MiniDFSCluster        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
            DistributedFileSystem fs      = null;

            try
            {
                fs = cluster.GetFileSystem();
                Path path = new Path("/test");
                FSDataOutputStream @out = fs.Create(path);
                @out.WriteBytes("hello\n");
                @out.Close();
                // stop one datanode
                MiniDFSCluster.DataNodeProperties dnProp = cluster.StopDataNode(0);
                string dnAddress = dnProp.datanode.GetXferAddress().ToString();
                if (dnAddress.StartsWith("/"))
                {
                    dnAddress = Sharpen.Runtime.Substring(dnAddress, 1);
                }
                // append again to bump genstamps
                for (int i = 0; i < 2; i++)
                {
                    @out = fs.Append(path);
                    @out.WriteBytes("helloagain\n");
                    @out.Close();
                }
                // re-open to make the last block under construction
                @out = fs.Append(path);
                cluster.RestartDataNode(dnProp, true);
                // wait till the block report comes
                Sharpen.Thread.Sleep(2000);
                // check the block locations; they should not contain the restarted datanode
                BlockLocation[] locations = fs.GetFileBlockLocations(path, 0, long.MaxValue);
                string[]        names     = locations[0].GetNames();
                foreach (string node in names)
                {
                    if (node.Equals(dnAddress))
                    {
                        NUnit.Framework.Assert.Fail("Failed append should not be present in latest block locations."
                                                    );
                    }
                }
                @out.Close();
            }
            finally
            {
                IOUtils.CloseStream(fs);
                cluster.Shutdown();
            }
        }
Example #4
        public virtual void TestAppend()
        {
            Configuration  conf        = new HdfsConfiguration();
            short          Replication = (short)3;
            MiniDFSCluster cluster     = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();

            try
            {
                DistributedFileSystem fs = cluster.GetFileSystem();
                Path f = new Path(Dir, "testAppend");
                {
                    Log.Info("create an empty file " + f);
                    fs.Create(f, Replication).Close();
                    FileStatus status = fs.GetFileStatus(f);
                    NUnit.Framework.Assert.AreEqual(Replication, status.GetReplication());
                    NUnit.Framework.Assert.AreEqual(0L, status.GetLen());
                }
                byte[] bytes = new byte[1000];
                {
                    Log.Info("append " + bytes.Length + " bytes to " + f);
                    FSDataOutputStream @out = fs.Append(f);
                    @out.Write(bytes);
                    @out.Close();
                    FileStatus status = fs.GetFileStatus(f);
                    NUnit.Framework.Assert.AreEqual(Replication, status.GetReplication());
                    NUnit.Framework.Assert.AreEqual(bytes.Length, status.GetLen());
                }
                {
                    Log.Info("append another " + bytes.Length + " bytes to " + f);
                    try
                    {
                        FSDataOutputStream @out = fs.Append(f);
                        @out.Write(bytes);
                        @out.Close();
                        NUnit.Framework.Assert.Fail();
                    }
                    catch (IOException ioe)
                    {
                        Log.Info("This exception is expected", ioe);
                    }
                }
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #5
        /// <summary>Test the update of neededReplications for the appended block</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestUpdateNeededReplicationsForAppendedFile()
        {
            Configuration         conf       = new Configuration();
            MiniDFSCluster        cluster    = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
            DistributedFileSystem fileSystem = null;

            try
            {
                // create a file.
                fileSystem = cluster.GetFileSystem();
                Path f = new Path("/testAppend");
                FSDataOutputStream create = fileSystem.Create(f, (short)2);
                create.Write(Sharpen.Runtime.GetBytesForString("/testAppend"));
                create.Close();
                // Append to the file.
                FSDataOutputStream append = fileSystem.Append(f);
                append.Write(Sharpen.Runtime.GetBytesForString("/testAppend"));
                append.Close();
                // Start a new datanode
                cluster.StartDataNodes(conf, 1, true, null, null);
                // Check for replications
                DFSTestUtil.WaitReplication(fileSystem, f, (short)2);
            }
            finally
            {
                if (null != fileSystem)
                {
                    fileSystem.Close();
                }
                cluster.Shutdown();
            }
        }
Example #6
 /// <summary>
 /// The idea for making sure that there is no more than one instance
 /// running in an HDFS cluster is to create a file in HDFS, write the
 /// hostname of the machine on which the instance is running to that file,
 /// and keep the file open until the instance exits.
 /// </summary>
 /// <remarks>
 /// The idea for making sure that there is no more than one instance
 /// running in an HDFS cluster is to create a file in HDFS, write the
 /// hostname of the machine on which the instance is running to that file,
 /// and keep the file open until the instance exits.
 /// This prevents a second instance from running because it cannot
 /// create the file while the first one is running.
 /// This method checks whether there is a running instance; if not, it
 /// marks this instance as running. Note that this is an atomic operation.
 /// </remarks>
 /// <returns>
 /// null if there is a running instance;
 /// otherwise, the output stream to the newly created file.
 /// </returns>
 /// <exception cref="System.IO.IOException"/>
 private OutputStream CheckAndMarkRunning()
 {
     try
     {
         if (fs.Exists(idPath))
         {
             // try appending to it so that it will fail fast if another balancer is
             // running.
             IOUtils.CloseStream(fs.Append(idPath));
             fs.Delete(idPath, true);
         }
         FSDataOutputStream fsout = fs.Create(idPath, false);
         // mark balancer idPath to be deleted during filesystem closure
         fs.DeleteOnExit(idPath);
         if (write2IdFile)
         {
             fsout.WriteBytes(Sharpen.Runtime.GetLocalHost().GetHostName());
             fsout.Hflush();
         }
         return(fsout);
     }
     catch (RemoteException e)
     {
         if (typeof(AlreadyBeingCreatedException).FullName.Equals(e.GetClassName()))
         {
             return(null);
         }
         else
         {
             throw;
         }
     }
 }
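For context, a caller of CheckAndMarkRunning would typically treat a null return as "another instance holds the marker" and give up. The sketch below is an assumption about the caller, not code taken from the Balancer itself:

     // Hypothetical caller-side sketch.
     OutputStream @out = CheckAndMarkRunning();
     if (@out == null)
     {
         // Another instance already holds the open marker file; give up.
         throw new IOException("Another instance is already running");
     }
     // ... do the real work, keeping @out open so other instances fail fast;
     // idPath is removed on filesystem close via DeleteOnExit ...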
Example #7
        public virtual void TestAppend()
        {
            int maxOldFileLen   = 2 * BlockSize + 1;
            int maxFlushedBytes = BlockSize;

            byte[] contents = AppendTestUtil.InitBuffer(maxOldFileLen + 2 * maxFlushedBytes);
            for (int oldFileLen = 0; oldFileLen <= maxOldFileLen; oldFileLen++)
            {
                for (int flushedBytes1 = 0; flushedBytes1 <= maxFlushedBytes; flushedBytes1++)
                {
                    for (int flushedBytes2 = 0; flushedBytes2 <= maxFlushedBytes; flushedBytes2++)
                    {
                        int fileLen = oldFileLen + flushedBytes1 + flushedBytes2;
                        // create the initial file of oldFileLen
                        Path p = new Path("foo" + oldFileLen + "_" + flushedBytes1 + "_" + flushedBytes2);
                        Log.Info("Creating file " + p);
                        FSDataOutputStream @out = fs.Create(p, false,
                                                            conf.GetInt(CommonConfigurationKeys.IoFileBufferSizeKey, 4096),
                                                            Replication, BlockSize);
                        @out.Write(contents, 0, oldFileLen);
                        @out.Close();
                        // append flushedBytes1 bytes to the file
                        @out = fs.Append(p);
                        @out.Write(contents, oldFileLen, flushedBytes1);
                        @out.Hflush();
                        // write another flushedBytes2 bytes to the file
                        @out.Write(contents, oldFileLen + flushedBytes1, flushedBytes2);
                        @out.Close();
                        // validate the file content
                        AppendTestUtil.CheckFullFile(fs, p, fileLen, contents, p.ToString());
                        fs.Delete(p, false);
                    }
                }
            }
        }
Example #8
        public virtual void TestAppendLessThanChecksumChunk()
        {
            byte[]         buf     = new byte[1024];
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).NumDataNodes(1).Build();

            cluster.WaitActive();
            try
            {
                using (DistributedFileSystem fs = cluster.GetFileSystem())
                {
                    int  len1 = 200;
                    int  len2 = 300;
                    Path p    = new Path("/foo");
                    FSDataOutputStream @out = fs.Create(p);
                    @out.Write(buf, 0, len1);
                    @out.Close();
                    @out = fs.Append(p);
                    @out.Write(buf, 0, len2);
                    // flush but leave open
                    @out.Hflush();
                    // read data to verify the replica's content and checksum are correct
                    FSDataInputStream @in = fs.Open(p);
                    int length            = @in.Read(0, buf, 0, len1 + len2);
                    NUnit.Framework.Assert.IsTrue(length > 0);
                    @in.Close();
                    @out.Close();
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
 /// <exception cref="System.Exception"/>
 public virtual void TestFavoredNodesEndToEndForAppend()
 {
     // create 10 files with random preferred nodes
     for (int i = 0; i < NumFiles; i++)
     {
         Random rand = new Random(Runtime.CurrentTimeMillis() + i);
          // pass a newly created rand so as to get a uniform distribution each time
          // without too many collisions (see the do-while loop in GetDatanodes)
         IPEndPoint[] datanode = GetDatanodes(rand);
         Path         p        = new Path("/filename" + i);
         // create and close the file.
         dfs.Create(p, FsPermission.GetDefault(), true, 4096, (short)3, 4096L, null, null)
         .Close();
         // re-open for append
          FSDataOutputStream @out = dfs.Append(p, EnumSet.Of(CreateFlag.Append), 4096, null, datanode);
         @out.Write(SomeBytes);
         @out.Close();
         BlockLocation[] locations = GetBlockLocations(p);
          // verify the file was created on the right nodes
         foreach (BlockLocation loc in locations)
         {
             string[] hosts  = loc.GetNames();
             string[] hosts1 = GetStringForInetSocketAddrs(datanode);
             NUnit.Framework.Assert.IsTrue(CompareNodes(hosts, hosts1));
         }
     }
 }
Example #10
        public virtual void TestAppendWithPipelineRecovery()
        {
            Configuration      conf    = new Configuration();
            MiniDFSCluster     cluster = null;
            FSDataOutputStream @out    = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).ManageDataDfsDirs(true).ManageNameDfsDirs(true)
                              .NumDataNodes(4).Racks(new string[] { "/rack1", "/rack1", "/rack2", "/rack2" }).Build();
                cluster.WaitActive();
                DistributedFileSystem fs = cluster.GetFileSystem();
                Path path = new Path("/test1");
                @out = fs.Create(path, true, BlockSize, (short)3, BlockSize);
                AppendTestUtil.Write(@out, 0, 1024);
                @out.Close();
                cluster.StopDataNode(3);
                @out = fs.Append(path);
                AppendTestUtil.Write(@out, 1024, 1024);
                @out.Close();
                cluster.RestartNameNode(true);
                AppendTestUtil.Check(fs, path, 2048);
            }
            finally
            {
                IOUtils.CloseStream(@out);
                if (null != cluster)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #11
        public virtual void Pipeline_01()
        {
            string MethodName = GenericTestUtils.GetMethodName();

            if (Log.IsDebugEnabled())
            {
                Log.Debug("Running " + MethodName);
            }
            Path filePath = new Path("/" + MethodName + ".dat");

            DFSTestUtil.CreateFile(fs, filePath, FileSize, ReplFactor, rand.NextLong());
            if (Log.IsDebugEnabled())
            {
                Log.Debug("Invoking append but doing nothing otherwise...");
            }
            FSDataOutputStream ofs = fs.Append(filePath);

            ofs.WriteBytes("Some more stuff to write");
            ((DFSOutputStream)ofs.GetWrappedStream()).Hflush();
            IList<LocatedBlock> lb = cluster.GetNameNodeRpc().GetBlockLocations(filePath.ToString(),
                                                                                FileSize - 1, FileSize).GetLocatedBlocks();
            string bpid = cluster.GetNamesystem().GetBlockPoolId();

            foreach (DataNode dn in cluster.GetDataNodes())
            {
                Replica r = DataNodeTestUtils.FetchReplicaInfo(dn, bpid, lb[0].GetBlock().GetBlockId());
                NUnit.Framework.Assert.IsTrue("Replica on DN " + dn + " shouldn't be null", r != null);
                NUnit.Framework.Assert.AreEqual("Should be RBW replica on " + dn + " after sequence of calls append()/write()/hflush()",
                                                HdfsServerConstants.ReplicaState.Rbw, r.GetState());
            }
            ofs.Close();
        }
Example #12
        public virtual void TestLeaseRecoveryAndAppend()
        {
            Configuration conf = new Configuration();

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
                Path file = new Path("/testLeaseRecovery");
                DistributedFileSystem dfs = cluster.GetFileSystem();
                // create a file with 0 bytes
                FSDataOutputStream @out = dfs.Create(file);
                @out.Hflush();
                @out.Hsync();
                // abort the original stream
                ((DFSOutputStream)@out.GetWrappedStream()).Abort();
                DistributedFileSystem newdfs = (DistributedFileSystem)FileSystem.NewInstance(cluster.GetConfiguration(0));
                // Appending to a file whose lease is held by another client should fail
                try
                {
                    newdfs.Append(file);
                    NUnit.Framework.Assert.Fail("Append to a file(lease is held by another client) should fail"
                                                );
                }
                catch (RemoteException e)
                {
                    NUnit.Framework.Assert.IsTrue(e.Message.Contains("file lease is currently owned"));
                }
                // Lease recovery on first try should be successful
                bool recoverLease = newdfs.RecoverLease(file);
                NUnit.Framework.Assert.IsTrue(recoverLease);
                FSDataOutputStream append = newdfs.Append(file);
                append.Write(Sharpen.Runtime.GetBytesForString("test"));
                append.Close();
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                    cluster = null;
                }
            }
        }
Example #13
        /// <exception cref="System.IO.IOException"/>
        private void TestHSyncOperation(bool testWithAppend)
        {
            Configuration         conf    = new HdfsConfiguration();
            MiniDFSCluster        cluster = new MiniDFSCluster.Builder(conf).Build();
            DistributedFileSystem fs      = cluster.GetFileSystem();
            Path p   = new Path("/testHSync/foo");
            int  len = 1 << 16;
            FSDataOutputStream @out = fs.Create(p, FsPermission.GetDefault(),
                                                EnumSet.Of(CreateFlag.Create, CreateFlag.Overwrite, CreateFlag.SyncBlock),
                                                4096, (short)1, len, null);

            if (testWithAppend)
            {
                // re-open the file with append call
                @out.Close();
                @out = fs.Append(p, EnumSet.Of(CreateFlag.Append, CreateFlag.SyncBlock), 4096, null);
            }
            @out.Hflush();
            // hflush does not sync
            CheckSyncMetric(cluster, 0);
            @out.Hsync();
            // hsync on empty file does nothing
            CheckSyncMetric(cluster, 0);
            @out.Write(1);
            CheckSyncMetric(cluster, 0);
            @out.Hsync();
            CheckSyncMetric(cluster, 1);
            // avoiding repeated hsyncs is a potential future optimization
            @out.Hsync();
            CheckSyncMetric(cluster, 2);
            @out.Hflush();
            // hflush still does not sync
            CheckSyncMetric(cluster, 2);
            @out.Close();
            // close is sync'ing
            CheckSyncMetric(cluster, 3);
            // same with a file created without SYNC_BLOCK
            @out = fs.Create(p, FsPermission.GetDefault(),
                             EnumSet.Of(CreateFlag.Create, CreateFlag.Overwrite), 4096, (short)1, len, null);
            @out.Hsync();
            CheckSyncMetric(cluster, 3);
            @out.Write(1);
            CheckSyncMetric(cluster, 3);
            @out.Hsync();
            CheckSyncMetric(cluster, 4);
            // repeated hsyncs
            @out.Hsync();
            CheckSyncMetric(cluster, 5);
            @out.Close();
            // close does not sync (not opened with SYNC_BLOCK)
            CheckSyncMetric(cluster, 5);
            cluster.Shutdown();
        }
Example #14
        /// <exception cref="System.Exception"/>
        public virtual void TestBlockReaderLocalLegacyWithAppend()
        {
            short             ReplFactor = 1;
            HdfsConfiguration conf       = GetConfiguration(null);

            conf.SetBoolean(DFSConfigKeys.DfsClientUseLegacyBlockreaderlocal, true);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();

            cluster.WaitActive();
            DistributedFileSystem dfs = cluster.GetFileSystem();
            Path path = new Path("/testBlockReaderLocalLegacy");

            DFSTestUtil.CreateFile(dfs, path, 10, ReplFactor, 0);
            DFSTestUtil.WaitReplication(dfs, path, ReplFactor);
            ClientDatanodeProtocol proxy;

            Org.Apache.Hadoop.Security.Token.Token <BlockTokenIdentifier> token;
            ExtendedBlock originalBlock;
            long          originalGS;

            {
                LocatedBlock lb = cluster.GetNameNode().GetRpcServer().GetBlockLocations(path.ToString(), 0, 1).Get(0);
                proxy = DFSUtil.CreateClientDatanodeProtocolProxy(lb.GetLocations()[0], conf, 60000, false);
                token = lb.GetBlockToken();
                // get block and generation stamp
                ExtendedBlock blk = new ExtendedBlock(lb.GetBlock());
                originalBlock = new ExtendedBlock(blk);
                originalGS    = originalBlock.GetGenerationStamp();
                // test getBlockLocalPathInfo
                BlockLocalPathInfo info = proxy.GetBlockLocalPathInfo(blk, token);
                NUnit.Framework.Assert.AreEqual(originalGS, info.GetBlock().GetGenerationStamp());
            }
            {
                // append one byte
                FSDataOutputStream @out = dfs.Append(path);
                @out.Write(1);
                @out.Close();
            }
            {
                // get new generation stamp
                LocatedBlock lb = cluster.GetNameNode().GetRpcServer().GetBlockLocations(path.ToString(), 0, 1).Get(0);
                long newGS = lb.GetBlock().GetGenerationStamp();
                NUnit.Framework.Assert.IsTrue(newGS > originalGS);
                // getBlockLocalPathInfo using the original block.
                NUnit.Framework.Assert.AreEqual(originalGS, originalBlock.GetGenerationStamp());
                BlockLocalPathInfo info = proxy.GetBlockLocalPathInfo(originalBlock, token);
                NUnit.Framework.Assert.AreEqual(newGS, info.GetBlock().GetGenerationStamp());
            }
            cluster.Shutdown();
        }
Example #15
        public virtual void TestAppend2AfterSoftLimit()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetInt(DFSConfigKeys.DfsReplicationKey, 1);
            //Set small soft-limit for lease
            long           softLimit = 1L;
            long           hardLimit = 9999999L;
            MiniDFSCluster cluster   = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();

            cluster.SetLeasePeriod(softLimit, hardLimit);
            cluster.WaitActive();
            DistributedFileSystem fs  = cluster.GetFileSystem();
            DistributedFileSystem fs2 = new DistributedFileSystem();

            fs2.Initialize(fs.GetUri(), conf);
            Path testPath = new Path("/testAppendAfterSoftLimit");

            byte[] fileContents = AppendTestUtil.InitBuffer(32);
            // create a new file without closing
            FSDataOutputStream @out = fs.Create(testPath);

            @out.Write(fileContents);
            //Wait for > soft-limit
            Sharpen.Thread.Sleep(250);
            try
            {
                FSDataOutputStream appendStream2 = fs2.Append(testPath,
                                                              EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock), 4096, null);
                appendStream2.Write(fileContents);
                appendStream2.Close();
                NUnit.Framework.Assert.AreEqual(fileContents.Length, fs.GetFileStatus(testPath).GetLen());
                // make sure we now have 1 block since the first writer was revoked
                LocatedBlocks blks = fs.GetClient().GetLocatedBlocks(testPath.ToString(), 0L);
                NUnit.Framework.Assert.AreEqual(1, blks.GetLocatedBlocks().Count);
                foreach (LocatedBlock blk in blks.GetLocatedBlocks())
                {
                    NUnit.Framework.Assert.AreEqual(fileContents.Length, blk.GetBlockSize());
                }
            }
            finally
            {
                fs.Close();
                fs2.Close();
                cluster.Shutdown();
            }
        }
Example #16
 // Do small appends.
 /// <exception cref="System.IO.IOException"/>
 internal virtual void DoSmallAppends(Path file, DistributedFileSystem fs, int iterations
                                      )
 {
     for (int i = 0; i < iterations; i++)
     {
         FSDataOutputStream stm;
         try
         {
             stm = fs.Append(file);
         }
         catch (IOException)
         {
             // If another thread is already appending, skip this time.
             continue;
         }
         // Failure in write or close will be terminal.
         AppendTestUtil.Write(stm, 0, 123);
         stm.Close();
     }
 }
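Because a concurrent append by another client makes fs.Append throw while the lease is held, the helper above simply skips that iteration. Below is a sketch of driving it from several concurrent tasks, assuming a DistributedFileSystem fs is in scope; the TPL-based task setup is illustrative, not part of the original test (the originals use Sharpen.Thread):

  // Hypothetical concurrent driver for DoSmallAppends.
  Path file = new Path("/smallAppends");
  fs.Create(file).Close();
  var tasks = new System.Threading.Tasks.Task[4];
  for (int t = 0; t < tasks.Length; t++)
  {
      // each task does 10 small appends; lease conflicts are skipped
      tasks[t] = System.Threading.Tasks.Task.Run(() => DoSmallAppends(file, fs, 10));
  }
  System.Threading.Tasks.Task.WaitAll(tasks);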
Example #17
        /// <exception cref="System.Exception"/>
        private void RecoverLeaseUsingCreate2(Path filepath)
        {
            FileSystem            dfs2 = GetFSAsAnotherUser(conf);
            int                   size = AppendTestUtil.NextInt(FileSize);
            DistributedFileSystem dfsx = (DistributedFileSystem)dfs2;
            //create file using dfsx
            Path filepath2         = new Path("/immediateRecoverLease-x2");
            FSDataOutputStream stm = dfsx.Create(filepath2, true, BufSize, ReplicationNum, BlockSize);

            NUnit.Framework.Assert.IsTrue(dfsx.dfs.Exists("/immediateRecoverLease-x2"));
            try
            {
                Sharpen.Thread.Sleep(10000);
            }
            catch (Exception)
            {
            }
            dfsx.Append(filepath);
        }
Example #18
        public virtual void TestAppendAfterSoftLimit()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetInt(DFSConfigKeys.DfsReplicationKey, 1);
            conf.SetBoolean(DFSConfigKeys.DfsSupportAppendKey, true);
            //Set small soft-limit for lease
            long           softLimit = 1L;
            long           hardLimit = 9999999L;
            MiniDFSCluster cluster   = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();

            cluster.SetLeasePeriod(softLimit, hardLimit);
            cluster.WaitActive();
            FileSystem fs  = cluster.GetFileSystem();
            FileSystem fs2 = new DistributedFileSystem();

            fs2.Initialize(fs.GetUri(), conf);
            Path testPath = new Path("/testAppendAfterSoftLimit");

            byte[] fileContents = AppendTestUtil.InitBuffer(32);
            // create a new file without closing
            FSDataOutputStream @out = fs.Create(testPath);

            @out.Write(fileContents);
            //Wait for > soft-limit
            Sharpen.Thread.Sleep(250);
            try
            {
                FSDataOutputStream appendStream2 = fs2.Append(testPath);
                appendStream2.Write(fileContents);
                appendStream2.Close();
                NUnit.Framework.Assert.AreEqual(fileContents.Length, fs.GetFileStatus(testPath).GetLen());
            }
            finally
            {
                fs.Close();
                fs2.Close();
                cluster.Shutdown();
            }
        }
Example #19
        public virtual void TestAppend2Twice()
        {
            Configuration         conf    = new HdfsConfiguration();
            MiniDFSCluster        cluster = new MiniDFSCluster.Builder(conf).Build();
            DistributedFileSystem fs1     = cluster.GetFileSystem();
            FileSystem            fs2     = AppendTestUtil.CreateHdfsWithDifferentUsername(conf);

            try
            {
                Path   p            = new Path("/testAppendTwice/foo");
                int    len          = 1 << 16;
                byte[] fileContents = AppendTestUtil.InitBuffer(len);
                {
                    // create a new file with a full block.
                    FSDataOutputStream @out = fs2.Create(p, true, 4096, (short)1, len);
                    @out.Write(fileContents, 0, len);
                    @out.Close();
                }
                //1st append does not add any data so that the last block remains full
                //and the last block in INodeFileUnderConstruction is a BlockInfo
                //but not BlockInfoUnderConstruction.
                ((DistributedFileSystem)fs2).Append(p, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock), 4096, null);
                // 2nd append should get AlreadyBeingCreatedException
                fs1.Append(p);
                NUnit.Framework.Assert.Fail();
            }
            catch (RemoteException re)
            {
                AppendTestUtil.Log.Info("Got an exception:", re);
                NUnit.Framework.Assert.AreEqual(typeof(AlreadyBeingCreatedException).FullName, re.GetClassName());
            }
            finally
            {
                fs2.Close();
                fs1.Close();
                cluster.Shutdown();
            }
        }
Example #20
        /// <summary>
        /// Test if the quota can be correctly updated when file length is updated
        /// through fsync
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestUpdateQuotaForFSync()
        {
            Path foo = new Path("/foo");
            Path bar = new Path(foo, "bar");

            DFSTestUtil.CreateFile(dfs, bar, Blocksize, Replication, 0L);
            dfs.SetQuota(foo, long.MaxValue - 1, long.MaxValue - 1);
            FSDataOutputStream @out = dfs.Append(bar);

            @out.Write(new byte[Blocksize / 4]);
            ((DFSOutputStream)@out.GetWrappedStream()).Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag.UpdateLength));
            INodeDirectory fooNode = fsdir.GetINode4Write(foo.ToString()).AsDirectory();
            QuotaCounts    quota   = fooNode.GetDirectoryWithQuotaFeature().GetSpaceConsumed();
            long           ns      = quota.GetNameSpace();
            long           ds      = quota.GetStorageSpace();

            NUnit.Framework.Assert.AreEqual(2, ns);
            // foo and bar
            NUnit.Framework.Assert.AreEqual(Blocksize * 2 * Replication, ds);
            // file is under construction
            @out.Write(new byte[Blocksize / 4]);
            @out.Close();
            fooNode = fsdir.GetINode4Write(foo.ToString()).AsDirectory();
            quota   = fooNode.GetDirectoryWithQuotaFeature().GetSpaceConsumed();
            ns      = quota.GetNameSpace();
            ds      = quota.GetStorageSpace();
            NUnit.Framework.Assert.AreEqual(2, ns);
            NUnit.Framework.Assert.AreEqual((Blocksize + Blocksize / 2) * Replication, ds);
            // append another block
            DFSTestUtil.AppendFile(dfs, bar, Blocksize);
            quota = fooNode.GetDirectoryWithQuotaFeature().GetSpaceConsumed();
            ns    = quota.GetNameSpace();
            ds    = quota.GetStorageSpace();
            NUnit.Framework.Assert.AreEqual(2, ns);
            // foo and bar
            NUnit.Framework.Assert.AreEqual((Blocksize * 2 + Blocksize / 2) * Replication, ds
                                            );
        }
Example #21
        public virtual void TestAddBlockUC()
        {
            DistributedFileSystem fs = cluster.GetFileSystem();
            Path file1 = new Path("/file1");

            DFSTestUtil.CreateFile(fs, file1, Blocksize - 1, Replication, 0L);
            FSDataOutputStream @out = null;

            try
            {
                // append files without closing the streams
                @out = fs.Append(file1);
                string appendContent = "appending-content";
                @out.WriteBytes(appendContent);
                ((DFSOutputStream)@out.GetWrappedStream()).Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag.UpdateLength));
                // restart NN
                cluster.RestartNameNode(true);
                FSDirectory           fsdir      = cluster.GetNamesystem().GetFSDirectory();
                INodeFile             fileNode   = fsdir.GetINode4Write(file1.ToString()).AsFile();
                BlockInfoContiguous[] fileBlocks = fileNode.GetBlocks();
                NUnit.Framework.Assert.AreEqual(2, fileBlocks.Length);
                NUnit.Framework.Assert.AreEqual(Blocksize, fileBlocks[0].GetNumBytes());
                NUnit.Framework.Assert.AreEqual(HdfsServerConstants.BlockUCState.Complete, fileBlocks[0].GetBlockUCState());
                NUnit.Framework.Assert.AreEqual(appendContent.Length - 1, fileBlocks[1].GetNumBytes());
                NUnit.Framework.Assert.AreEqual(HdfsServerConstants.BlockUCState.UnderConstruction, fileBlocks[1].GetBlockUCState());
            }
            finally
            {
                if (@out != null)
                {
                    @out.Close();
                }
            }
        }
Example #22
        public virtual void TestBestEffort()
        {
            Configuration conf = new HdfsConfiguration();

            //always replace a datanode but do not throw exception
            ReplaceDatanodeOnFailure.Write(ReplaceDatanodeOnFailure.Policy.Always, true, conf);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();

            try
            {
                DistributedFileSystem fs = cluster.GetFileSystem();
                Path   f     = new Path(Dir, "testIgnoreReplaceFailure");
                byte[] bytes = new byte[1000];
                {
                    Log.Info("write " + bytes.Length + " bytes to " + f);
                    FSDataOutputStream @out = fs.Create(f, Replication);
                    @out.Write(bytes);
                    @out.Close();
                    FileStatus status = fs.GetFileStatus(f);
                    NUnit.Framework.Assert.AreEqual(Replication, status.GetReplication());
                    NUnit.Framework.Assert.AreEqual(bytes.Length, status.GetLen());
                }
                {
                    Log.Info("append another " + bytes.Length + " bytes to " + f);
                    FSDataOutputStream @out = fs.Append(f);
                    @out.Write(bytes);
                    @out.Close();
                }
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #23
        public virtual void TestSpaceCommands()
        {
            Configuration conf = new HdfsConfiguration();

            // set a smaller block size so that we can test with smaller
            // diskspace quotas
            conf.Set(DFSConfigKeys.DfsBlockSizeKey, "512");
            // Make it relinquish locks. When run serially, the result should
            // be identical.
            conf.SetInt(DFSConfigKeys.DfsContentSummaryLimitKey, 2);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();
            FileSystem     fs      = cluster.GetFileSystem();

            NUnit.Framework.Assert.IsTrue("Not a HDFS: " + fs.GetUri(), fs is DistributedFileSystem
                                          );
            DistributedFileSystem dfs = (DistributedFileSystem)fs;

            try
            {
                int   fileLen     = 1024;
                short replication = 3;
                int   fileSpace   = fileLen * replication;
                // create directory /nqdir0/qdir1/qdir20/nqdir30
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(new Path("/nqdir0/qdir1/qdir20/nqdir30")
                                                         ));
                // set the quota of /nqdir0/qdir1 to 4 * fileSpace
                Path quotaDir1 = new Path("/nqdir0/qdir1");
                dfs.SetQuota(quotaDir1, HdfsConstants.QuotaDontSet, 4 * fileSpace);
                ContentSummary c = dfs.GetContentSummary(quotaDir1);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), 4 * fileSpace);
                // set the quota of /nqdir0/qdir1/qdir20 to 6 * fileSpace
                Path quotaDir20 = new Path("/nqdir0/qdir1/qdir20");
                dfs.SetQuota(quotaDir20, HdfsConstants.QuotaDontSet, 6 * fileSpace);
                c = dfs.GetContentSummary(quotaDir20);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), 6 * fileSpace);
                // Create /nqdir0/qdir1/qdir21 and set its space quota to 2 * fileSpace
                Path quotaDir21 = new Path("/nqdir0/qdir1/qdir21");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(quotaDir21));
                dfs.SetQuota(quotaDir21, HdfsConstants.QuotaDontSet, 2 * fileSpace);
                c = dfs.GetContentSummary(quotaDir21);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), 2 * fileSpace);
                // 5: Create directory /nqdir0/qdir1/qdir21/nqdir32
                Path tempPath = new Path(quotaDir21, "nqdir32");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(tempPath));
                // create a file under nqdir32/fileDir
                DFSTestUtil.CreateFile(dfs, new Path(tempPath, "fileDir/file1"), fileLen, replication, 0);
                c = dfs.GetContentSummary(quotaDir21);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), fileSpace);
                // Create a larger file /nqdir0/qdir1/qdir21/nqdir33/
                bool hasException = false;
                try
                {
                    DFSTestUtil.CreateFile(dfs, new Path(quotaDir21, "nqdir33/file2"), 2 * fileLen, replication, 0);
                }
                catch (DSQuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                // delete nqdir33
                NUnit.Framework.Assert.IsTrue(dfs.Delete(new Path(quotaDir21, "nqdir33"), true));
                c = dfs.GetContentSummary(quotaDir21);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), fileSpace);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), 2 * fileSpace);
                // Verify space before the move:
                c = dfs.GetContentSummary(quotaDir20);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 0);
                // Move /nqdir0/qdir1/qdir21/nqdir32 /nqdir0/qdir1/qdir20/nqdir30
                Path dstPath = new Path(quotaDir20, "nqdir30");
                Path srcPath = new Path(quotaDir21, "nqdir32");
                NUnit.Framework.Assert.IsTrue(dfs.Rename(srcPath, dstPath));
                // verify space after the move
                c = dfs.GetContentSummary(quotaDir20);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), fileSpace);
                // verify space for its parent
                c = dfs.GetContentSummary(quotaDir1);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), fileSpace);
                // verify space for source for the move
                c = dfs.GetContentSummary(quotaDir21);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 0);
                Path file2    = new Path(dstPath, "fileDir/file2");
                int  file2Len = 2 * fileLen;
                // create a larger file under /nqdir0/qdir1/qdir20/nqdir30
                DFSTestUtil.CreateFile(dfs, file2, file2Len, replication, 0);
                c = dfs.GetContentSummary(quotaDir20);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 3 * fileSpace);
                c = dfs.GetContentSummary(quotaDir21);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 0);
                // Reverse: Move /nqdir0/qdir1/qdir20/nqdir30 to /nqdir0/qdir1/qdir21/
                hasException = false;
                try
                {
                    NUnit.Framework.Assert.IsFalse(dfs.Rename(dstPath, srcPath));
                }
                catch (DSQuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                // make sure no intermediate directories left by failed rename
                NUnit.Framework.Assert.IsFalse(dfs.Exists(srcPath));
                // directory should exist
                NUnit.Framework.Assert.IsTrue(dfs.Exists(dstPath));
                // verify space after the failed move
                c = dfs.GetContentSummary(quotaDir20);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 3 * fileSpace);
                c = dfs.GetContentSummary(quotaDir21);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 0);
                // Test Append :
                // verify space quota
                c = dfs.GetContentSummary(quotaDir1);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), 4 * fileSpace);
                // verify space before append;
                c = dfs.GetContentSummary(dstPath);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 3 * fileSpace);
                OutputStream @out = dfs.Append(file2);
                // appending 1 fileLen should succeed
                @out.Write(new byte[fileLen]);
                @out.Close();
                file2Len += fileLen;
                // after append
                // verify space after append;
                c = dfs.GetContentSummary(dstPath);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 4 * fileSpace);
                // now increase the quota for quotaDir1
                dfs.SetQuota(quotaDir1, HdfsConstants.QuotaDontSet, 5 * fileSpace);
                // Now, appending more than 1 fileLen should result in an error
                @out         = dfs.Append(file2);
                hasException = false;
                try
                {
                    @out.Write(new byte[fileLen + 1024]);
                    @out.Flush();
                    @out.Close();
                }
                catch (DSQuotaExceededException)
                {
                    hasException = true;
                    IOUtils.CloseStream(@out);
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                file2Len += fileLen;
                // after partial append
                // verify space after partial append
                c = dfs.GetContentSummary(dstPath);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 5 * fileSpace);
                // Test set replication :
                // first reduce the replication
                dfs.SetReplication(file2, (short)(replication - 1));
                // verify that space is reduced by file2Len
                c = dfs.GetContentSummary(dstPath);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 5 * fileSpace - file2Len);
                // now try to increase the replication and expect an error.
                hasException = false;
                try
                {
                    dfs.SetReplication(file2, (short)(replication + 1));
                }
                catch (DSQuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                // verify space consumed remains unchanged.
                c = dfs.GetContentSummary(dstPath);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 5 * fileSpace - file2Len);
                // now increase the quota for quotaDir1 and quotaDir20
                dfs.SetQuota(quotaDir1, HdfsConstants.QuotaDontSet, 10 * fileSpace);
                dfs.SetQuota(quotaDir20, HdfsConstants.QuotaDontSet, 10 * fileSpace);
                // then increasing replication should be ok.
                dfs.SetReplication(file2, (short)(replication + 1));
                // verify increase in space
                c = dfs.GetContentSummary(dstPath);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 5 * fileSpace + file2Len);
                // Test HDFS-2053 :
                // Create directory /hdfs-2053
                Path quotaDir2053 = new Path("/hdfs-2053");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(quotaDir2053));
                // Create subdirectories /hdfs-2053/{A,B,C}
                Path quotaDir2053_A = new Path(quotaDir2053, "A");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(quotaDir2053_A));
                Path quotaDir2053_B = new Path(quotaDir2053, "B");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(quotaDir2053_B));
                Path quotaDir2053_C = new Path(quotaDir2053, "C");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(quotaDir2053_C));
                // Factors to vary the sizes of test files created in each subdir.
                // The actual factors are not really important but they allow us to create
                // identifiable file sizes per subdir, which helps during debugging.
                int sizeFactorA = 1;
                int sizeFactorB = 2;
                int sizeFactorC = 4;
                // Set space quota for subdirectory C
                dfs.SetQuota(quotaDir2053_C, HdfsConstants.QuotaDontSet, (sizeFactorC + 1) * fileSpace);
                c = dfs.GetContentSummary(quotaDir2053_C);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), (sizeFactorC + 1) * fileSpace);
                // Create a file under subdirectory A
                DFSTestUtil.CreateFile(dfs, new Path(quotaDir2053_A, "fileA"), sizeFactorA * fileLen, replication, 0);
                c = dfs.GetContentSummary(quotaDir2053_A);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), sizeFactorA * fileSpace);
                // Create a file under subdirectory B
                DFSTestUtil.CreateFile(dfs, new Path(quotaDir2053_B, "fileB"), sizeFactorB * fileLen, replication, 0);
                c = dfs.GetContentSummary(quotaDir2053_B);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), sizeFactorB * fileSpace);
                // Create a file under subdirectory C (which has a space quota)
                DFSTestUtil.CreateFile(dfs, new Path(quotaDir2053_C, "fileC"), sizeFactorC * fileLen, replication, 0);
                c = dfs.GetContentSummary(quotaDir2053_C);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), sizeFactorC * fileSpace);
                // Check space consumed for /hdfs-2053
                c = dfs.GetContentSummary(quotaDir2053);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(),
                                                (sizeFactorA + sizeFactorB + sizeFactorC) * fileSpace);
                NUnit.Framework.Assert.AreEqual(20, cluster.GetNamesystem().GetFSDirectory().GetYieldCount());
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #24
        public virtual void TestMultiAppend2()
        {
            Configuration conf = new HdfsConfiguration();

            conf.Set("dfs.client.block.write.replace-datanode-on-failure.enable", "false");
            MiniDFSCluster        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
            DistributedFileSystem fs      = null;
            string hello = "hello\n";

            try
            {
                fs = cluster.GetFileSystem();
                Path path = new Path("/test");
                FSDataOutputStream @out = fs.Create(path);
                @out.WriteBytes(hello);
                @out.Close();
                // stop one datanode
                MiniDFSCluster.DataNodeProperties dnProp = cluster.StopDataNode(0);
                string dnAddress = dnProp.datanode.GetXferAddress().ToString();
                if (dnAddress.StartsWith("/"))
                {
                    dnAddress = Sharpen.Runtime.Substring(dnAddress, 1);
                }
                // append again to bump genstamps
                for (int i = 0; i < 2; i++)
                {
                    @out = fs.Append(path, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock), 4096, null);
                    @out.WriteBytes(hello);
                    @out.Close();
                }
                // re-open to make the last block under construction
                @out = fs.Append(path, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock), 4096, null);
                cluster.RestartDataNode(dnProp, true);
                // wait till the block report comes
                Sharpen.Thread.Sleep(2000);
                @out.WriteBytes(hello);
                @out.Close();
                // check the block locations
                LocatedBlocks blocks = fs.GetClient().GetLocatedBlocks(path.ToString(), 0L);
                // since we appended to the file 3 times, there should be 4 blocks
                NUnit.Framework.Assert.AreEqual(4, blocks.GetLocatedBlocks().Count);
                foreach (LocatedBlock block in blocks.GetLocatedBlocks())
                {
                    NUnit.Framework.Assert.AreEqual(hello.Length, block.GetBlockSize());
                }
                StringBuilder sb = new StringBuilder();
                for (int i_1 = 0; i_1 < 4; i_1++)
                {
                    sb.Append(hello);
                }
                byte[] content = Sharpen.Runtime.GetBytesForString(sb.ToString());
                AppendTestUtil.CheckFullFile(fs, path, content.Length, content, "Read /test");
                // restart namenode to make sure the editlog can be properly applied
                cluster.RestartNameNode(true);
                cluster.WaitActive();
                AppendTestUtil.CheckFullFile(fs, path, content.Length, content, "Read /test");
                blocks = fs.GetClient().GetLocatedBlocks(path.ToString(), 0L);
                // since we appended to the file 3 times, there should be 4 blocks
                NUnit.Framework.Assert.AreEqual(4, blocks.GetLocatedBlocks().Count);
                foreach (LocatedBlock block_1 in blocks.GetLocatedBlocks())
                {
                    NUnit.Framework.Assert.AreEqual(hello.Length, block_1.GetBlockSize());
                }
            }
            finally
            {
                IOUtils.CloseStream(fs);
                cluster.Shutdown();
            }
        }
Example #25
        public virtual void TestTC1()
        {
            Path p = new Path("/TC1/foo");

            System.Console.Out.WriteLine("p=" + p);
            //a. Create file and write one block of data. Close file.
            int len1 = (int)BlockSize;
            {
                FSDataOutputStream @out = fs.Create(p, false, buffersize, Replication, BlockSize);
                AppendTestUtil.Write(@out, 0, len1);
                @out.Close();
            }
            //   Reopen file to append. Append half block of data. Close file.
            int len2 = (int)BlockSize / 2;

            {
                FSDataOutputStream @out = fs.Append(p);
                AppendTestUtil.Write(@out, len1, len2);
                @out.Close();
            }
            //b. Reopen file and read 1.5 blocks worth of data. Close file.
            AppendTestUtil.Check(fs, p, len1 + len2);
        }
Example #26
 // create a bunch of files. Write to them and then verify.
 public override void Run()
 {
     System.Console.Out.WriteLine("Workload " + this.id + " starting... ");
     for (int i = 0; i < this._enclosing.numAppendsPerThread; i++)
     {
         // pick a file at random and remove it from pool
         Path testfile;
         lock (this._enclosing.testFiles)
         {
             if (this._enclosing.testFiles.Count == 0)
             {
                 System.Console.Out.WriteLine("Completed write to almost all files.");
                 return;
             }
             int index = AppendTestUtil.NextInt(this._enclosing.testFiles.Count);
             testfile = this._enclosing.testFiles.Remove(index);
         }
         long len          = 0;
         int  sizeToAppend = 0;
         try
         {
             DistributedFileSystem fs = this.cluster.GetFileSystem();
             // add a random number of bytes to file
             len = fs.GetFileStatus(testfile).GetLen();
             // if file is already full, then pick another file
             if (len >= AppendTestUtil.FileSize)
             {
                 System.Console.Out.WriteLine("File " + testfile + " is full.");
                 continue;
             }
             // do small size appends so that we can trigger multiple
             // appends to the same file.
             //
             int left = (int)(AppendTestUtil.FileSize - len) / 3;
             if (left <= 0)
             {
                 left = 1;
             }
             sizeToAppend = AppendTestUtil.NextInt(left);
             System.Console.Out.WriteLine("Workload thread " + this.id + " appending " + sizeToAppend
                                          + " bytes " + " to file " + testfile + " of size " + len);
             FSDataOutputStream stm = this.appendToNewBlock ? fs.Append(testfile, EnumSet.Of(CreateFlag
                                                                                             .Append, CreateFlag.NewBlock), 4096, null) : fs.Append(testfile);
             stm.Write(this._enclosing.fileContents, (int)len, sizeToAppend);
             stm.Close();
             // wait for the file size to be reflected in the namenode metadata
             while (fs.GetFileStatus(testfile).GetLen() != (len + sizeToAppend))
             {
                 try
                 {
                     System.Console.Out.WriteLine("Workload thread " + this.id + " file " + testfile +
                                                  " size " + fs.GetFileStatus(testfile).GetLen() + " expected size " + (len + sizeToAppend
                                                                                                                        ) + " waiting for namenode metadata update.");
                     Sharpen.Thread.Sleep(5000);
                 }
                 catch (Exception)
                 {
                 }
             }
             NUnit.Framework.Assert.IsTrue("File " + testfile + " size is " + fs.GetFileStatus
                                               (testfile).GetLen() + " but expected " + (len + sizeToAppend), fs.GetFileStatus(
                                               testfile).GetLen() == (len + sizeToAppend));
             AppendTestUtil.CheckFullFile(fs, testfile, (int)(len + sizeToAppend), this._enclosing
                                          .fileContents, "Read 2");
         }
         catch (Exception e)
         {
             TestFileAppend2.globalStatus = false;
             if (e.ToString() != null)
             {
                 System.Console.Out.WriteLine("Workload exception " + this.id + " testfile " + testfile
                                              + " " + e);
                 Sharpen.Runtime.PrintStackTrace(e);
             }
             NUnit.Framework.Assert.IsTrue("Workload exception " + this.id + " testfile " + testfile
                                           + " expected size " + (len + sizeToAppend), false);
         }
         // Add testfile back to the pool of files.
         lock (this._enclosing.testFiles)
         {
             this._enclosing.testFiles.AddItem(testfile);
         }
     }
 }
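The busy-wait on namenode metadata above never gives up; a bounded variant as a sketch, using only calls already present in the snippet (GetFileStatus, Sharpen.Thread.Sleep) — the helper name and attempt limit are assumptions:

     // Hypothetical bounded wait: poll until the namenode reports the
     // expected length, retrying at most maxAttempts times.
     private static bool WaitForLength(DistributedFileSystem fs, Path file,
                                       long expected, int maxAttempts)
     {
         for (int attempt = 0; attempt < maxAttempts; attempt++)
         {
             if (fs.GetFileStatus(file).GetLen() == expected)
             {
                 return true;
             }
             try
             {
                 Sharpen.Thread.Sleep(500);
             }
             catch (Exception)
             {
                 // ignore interrupts, matching the loop above
             }
         }
         return false;  // metadata never caught up
     }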
Esempio n. 27
0
        public virtual void TestSimpleAppend2()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetInt(DFSConfigKeys.DfsDatanodeHandlerCountKey, 50);
            fileContents = AppendTestUtil.InitBuffer(AppendTestUtil.FileSize);
            MiniDFSCluster        cluster = new MiniDFSCluster.Builder(conf).Build();
            DistributedFileSystem fs      = cluster.GetFileSystem();

            try
            {
                {
                    // test appending to a file.
                    // create a new file.
                    Path file1             = new Path("/simpleAppend.dat");
                    FSDataOutputStream stm = AppendTestUtil.CreateFile(fs, file1, 1);
                    System.Console.Out.WriteLine("Created file simpleAppend.dat");
                    // write to file
                    int mid = 186;
                    // io.bytes.per.checksum bytes
                    System.Console.Out.WriteLine("Writing " + mid + " bytes to file " + file1);
                    stm.Write(fileContents, 0, mid);
                    stm.Close();
                    System.Console.Out.WriteLine("Wrote and Closed first part of file.");
                    // write to file
                    int mid2 = 607;
                    // io.bytes.per.checksum bytes
                    System.Console.Out.WriteLine("Writing " + mid + " bytes to file " + file1);
                    stm = fs.Append(file1, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock), 4096,
                                    null);
                    stm.Write(fileContents, mid, mid2 - mid);
                    stm.Close();
                    System.Console.Out.WriteLine("Wrote and Closed second part of file.");
                    // write the remainder of the file
                    stm = fs.Append(file1, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock), 4096,
                                    null);
                    // ensure getPos is set to reflect existing size of the file
                    NUnit.Framework.Assert.IsTrue(stm.GetPos() > 0);
                    System.Console.Out.WriteLine("Writing " + (AppendTestUtil.FileSize - mid2) + " bytes to file "
                                                 + file1);
                    stm.Write(fileContents, mid2, AppendTestUtil.FileSize - mid2);
                    System.Console.Out.WriteLine("Written second part of file");
                    stm.Close();
                    System.Console.Out.WriteLine("Wrote and Closed second part of file.");
                    // verify that entire file is good
                    AppendTestUtil.CheckFullFile(fs, file1, AppendTestUtil.FileSize, fileContents, "Read 2"
                                                 );
                    // also make sure the file is made up of 12 blocks, since
                    // each NewBlock append starts a fresh block
                    IList <LocatedBlock> blocks = fs.GetClient().GetLocatedBlocks(file1.ToString(), 0L
                                                                                  ).GetLocatedBlocks();
                    NUnit.Framework.Assert.AreEqual(12, blocks.Count);
                    // the block size is 1024
                    NUnit.Framework.Assert.AreEqual(mid, blocks[0].GetBlockSize());
                    NUnit.Framework.Assert.AreEqual(mid2 - mid, blocks[1].GetBlockSize());
                    for (int i = 2; i < 11; i++)
                    {
                        NUnit.Framework.Assert.AreEqual(AppendTestUtil.BlockSize, blocks[i].GetBlockSize(
                                                            ));
                    }
                    NUnit.Framework.Assert.AreEqual((AppendTestUtil.FileSize - mid2) % AppendTestUtil
                                                    .BlockSize, blocks[11].GetBlockSize());
                }
                {
                    // test appending to a non-existing file.
                    FSDataOutputStream @out = null;
                    try
                    {
                        @out = fs.Append(new Path("/non-existing.dat"), EnumSet.Of(CreateFlag.Append, CreateFlag
                                                                                   .NewBlock), 4096, null);
                        NUnit.Framework.Assert.Fail("Expected to have FileNotFoundException");
                    }
                    catch (FileNotFoundException fnfe)
                    {
                        System.Console.Out.WriteLine("Good: got " + fnfe);
                        Sharpen.Runtime.PrintStackTrace(fnfe, System.Console.Out);
                    }
                    finally
                    {
                        IOUtils.CloseStream(@out);
                    }
                }
                {
                    // test append permission.
                    // set root to all writable
                    Path root = new Path("/");
                    fs.SetPermission(root, new FsPermission((short)0x1ff));
                    fs.Close();
                    // login as a different user
                    UserGroupInformation superuser = UserGroupInformation.GetCurrentUser();
                    string username = "******";
                    string group    = "testappendgroup";
                    NUnit.Framework.Assert.IsFalse(superuser.GetShortUserName().Equals(username));
                    NUnit.Framework.Assert.IsFalse(Arrays.AsList(superuser.GetGroupNames()).Contains(
                                                       group));
                    UserGroupInformation appenduser = UserGroupInformation.CreateUserForTesting(username
                                                                                                , new string[] { group });
                    fs = (DistributedFileSystem)DFSTestUtil.GetFileSystemAs(appenduser, conf);
                    // create a file
                    Path dir = new Path(root, GetType().Name);
                    Path foo = new Path(dir, "foo.dat");
                    FSDataOutputStream @out = null;
                    int offset = 0;
                    try
                    {
                        @out = fs.Create(foo);
                        int len = 10 + AppendTestUtil.NextInt(100);
                        @out.Write(fileContents, offset, len);
                        offset += len;
                    }
                    finally
                    {
                        IOUtils.CloseStream(@out);
                    }
                    // change dir and foo to minimal permissions.
                    fs.SetPermission(dir, new FsPermission((short)0x40));
                    fs.SetPermission(foo, new FsPermission((short)0x80));
                    // try append, should succeed
                    @out = null;
                    try
                    {
                        @out = fs.Append(foo, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock), 4096, null
                                         );
                        int len = 10 + AppendTestUtil.NextInt(100);
                        @out.Write(fileContents, offset, len);
                        offset += len;
                    }
                    finally
                    {
                        IOUtils.CloseStream(@out);
                    }
                    // open dir and foo up fully, except remove the owner's write bit on foo.
                    fs.SetPermission(foo, new FsPermission((short)0x17f));
                    fs.SetPermission(dir, new FsPermission((short)0x1ff));
                    // try append, should fail
                    @out = null;
                    try
                    {
                        @out = fs.Append(foo, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock), 4096, null
                                         );
                        NUnit.Framework.Assert.Fail("Expected to have AccessControlException");
                    }
                    catch (AccessControlException ace)
                    {
                        System.Console.Out.WriteLine("Good: got " + ace);
                        Sharpen.Runtime.PrintStackTrace(ace, System.Console.Out);
                    }
                    finally
                    {
                        IOUtils.CloseStream(@out);
                    }
                }
            }
            finally
            {
                fs.Close();
                cluster.Shutdown();
            }
        }
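The snippet exercises both append variants; side by side, as a sketch assuming an existing file p, an open fs, and a hypothetical data buffer:

            byte[] data = AppendTestUtil.InitBuffer(1024);  // hypothetical buffer
            // Variant 1: continue writing into the file's last, possibly
            // partial, block.
            FSDataOutputStream stm = fs.Append(p);
            stm.Write(data, 0, data.Length);
            stm.Close();
            // Variant 2: force a fresh block for the appended bytes, which is
            // what produces the 12-block layout asserted above (4096 is the
            // buffer size, null means no progress callback).
            stm = fs.Append(p, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock), 4096, null);
            stm.Write(data, 0, data.Length);
            stm.Close();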
Esempio n. 28
0
        /// <summary>
        /// Test that we cannot read a file beyond its snapshot length
        /// when accessing it via a snapshot path.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestSnapshotfileLength()
        {
            hdfs.Mkdirs(sub);
            int bytesRead;

            byte[]            buffer     = new byte[Blocksize * 8];
            int               origLen    = Blocksize + 1;
            int               toAppend   = Blocksize;
            FSDataInputStream fis        = null;
            FileStatus        fileStatus = null;
            // Create and write a file.
            Path file1 = new Path(sub, file1Name);

            DFSTestUtil.CreateFile(hdfs, file1, Blocksize, 0, Blocksize, Replication, Seed);
            DFSTestUtil.AppendFile(hdfs, file1, origLen);
            // Create a snapshot on the parent directory.
            hdfs.AllowSnapshot(sub);
            hdfs.CreateSnapshot(sub, snapshot1);
            Path         file1snap1  = SnapshotTestHelper.GetSnapshotPath(sub, snapshot1, file1Name);
            FileChecksum snapChksum1 = hdfs.GetFileChecksum(file1snap1);

            Assert.AssertThat("file and snapshot file checksums are not equal", hdfs.GetFileChecksum
                                  (file1), CoreMatchers.Is(snapChksum1));
            // Append to the file.
            FSDataOutputStream @out = hdfs.Append(file1);

            // Nothing has been appended yet. All checksums should still be equal.
            Assert.AssertThat("file and snapshot checksums (open for append) are not equal",
                              hdfs.GetFileChecksum(file1), CoreMatchers.Is(snapChksum1));
            Assert.AssertThat("snapshot checksum (post-open for append) has changed", hdfs.GetFileChecksum
                                  (file1snap1), CoreMatchers.Is(snapChksum1));
            try
            {
                AppendTestUtil.Write(@out, 0, toAppend);
                // Test reading from snapshot of file that is open for append
                byte[] dataFromSnapshot = DFSTestUtil.ReadFileBuffer(hdfs, file1snap1);
                Assert.AssertThat("Wrong data size in snapshot.", dataFromSnapshot.Length, CoreMatchers.Is
                                      (origLen));
                // Verify that checksum didn't change
                Assert.AssertThat("snapshot file checksum (pre-close) has changed", hdfs.GetFileChecksum
                                      (file1), CoreMatchers.Is(snapChksum1));
                Assert.AssertThat("snapshot checksum (post-append) has changed", hdfs.GetFileChecksum
                                      (file1snap1), CoreMatchers.Is(snapChksum1));
            }
            finally
            {
                @out.Close();
            }
            Assert.AssertThat("file and snapshot file checksums (post-close) are equal", hdfs
                              .GetFileChecksum(file1), CoreMatchers.Not(snapChksum1));
            Assert.AssertThat("snapshot file checksum (post-close) has changed", hdfs.GetFileChecksum
                                  (file1snap1), CoreMatchers.Is(snapChksum1));
            // Make sure we can read the entire file via its non-snapshot path.
            fileStatus = hdfs.GetFileStatus(file1);
            Assert.AssertThat(fileStatus.GetLen(), CoreMatchers.Is((long)origLen + toAppend));
            fis       = hdfs.Open(file1);
            bytesRead = fis.Read(0, buffer, 0, buffer.Length);
            Assert.AssertThat(bytesRead, CoreMatchers.Is(origLen + toAppend));
            fis.Close();
            // Try to open the file via its snapshot path.
            fis        = hdfs.Open(file1snap1);
            fileStatus = hdfs.GetFileStatus(file1snap1);
            Assert.AssertThat(fileStatus.GetLen(), CoreMatchers.Is((long)origLen));
            // Make sure we can only read up to the snapshot length.
            bytesRead = fis.Read(0, buffer, 0, buffer.Length);
            Assert.AssertThat(bytesRead, CoreMatchers.Is(origLen));
            fis.Close();
            byte[] dataFromSnapshot_1 = DFSTestUtil.ReadFileBuffer(hdfs, file1snap1);
            Assert.AssertThat("Wrong data size in snapshot.", dataFromSnapshot_1.Length, CoreMatchers.Is
                                  (origLen));
        }
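The property under test reduces to this: a snapshot path freezes both the reported length and the readable bytes. A condensed sketch, assuming the fixture's hdfs, sub, snapshot1, and file1Name:

            // Reads through the snapshot path stop at the snapshot-time
            // length, no matter how much was appended afterwards.
            Path snap = SnapshotTestHelper.GetSnapshotPath(sub, snapshot1, file1Name);
            long snapLen = hdfs.GetFileStatus(snap).GetLen();  // frozen length
            byte[] buf = new byte[(int)snapLen + 1024];        // deliberately oversized
            FSDataInputStream @in = hdfs.Open(snap);
            int n = @in.Read(0, buf, 0, buf.Length);           // n == snapLen
            @in.Close();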
Esempio n. 29
0
        /// <summary>
        /// Test that an append with no locations fails with an exception
        /// showing insufficient locations.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestAppendInsufficientLocations()
        {
            Configuration conf = new Configuration();

            // lower heartbeat interval for fast recognition of DN
            conf.SetInt(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 1000);
            conf.SetInt(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
            conf.SetInt(DFSConfigKeys.DfsClientSocketTimeoutKey, 3000);
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(4).Build();
            DistributedFileSystem fileSystem = null;

            try
            {
                // create a file with replication 2
                fileSystem = cluster.GetFileSystem();
                Path f = new Path("/testAppend");
                FSDataOutputStream create = fileSystem.Create(f, (short)2);
                create.Write(Sharpen.Runtime.GetBytesForString("/testAppend"));
                create.Close();
                // Check for replications
                DFSTestUtil.WaitReplication(fileSystem, f, (short)2);
                // Shut down all DNs that have the last block location for the file
                LocatedBlocks lbs = fileSystem.dfs.GetNamenode().GetBlockLocations("/testAppend",
                                                                                   0, long.MaxValue);
                IList <DataNode> dnsOfCluster     = cluster.GetDataNodes();
                DatanodeInfo[]   dnsWithLocations = lbs.GetLastLocatedBlock().GetLocations();
                foreach (DataNode dn in dnsOfCluster)
                {
                    foreach (DatanodeInfo loc in dnsWithLocations)
                    {
                        if (dn.GetDatanodeId().Equals(loc))
                        {
                            dn.Shutdown();
                            DFSTestUtil.WaitForDatanodeDeath(dn);
                        }
                    }
                }
                // Wait till 0 replication is recognized
                DFSTestUtil.WaitReplication(fileSystem, f, (short)0);
                // Append to the file. At this point 2 live DNs remain, but
                // neither of them has the block.
                try
                {
                    fileSystem.Append(f);
                    NUnit.Framework.Assert.Fail("Append should fail because insufficient locations");
                }
                catch (IOException e)
                {
                    Log.Info("Expected exception: ", e);
                }
                FSDirectory dir   = cluster.GetNamesystem().GetFSDirectory();
                INodeFile   inode = INodeFile.ValueOf(dir.GetINode("/testAppend"), "/testAppend");
                NUnit.Framework.Assert.IsTrue("File should remain closed", !inode.IsUnderConstruction
                                                  ());
            }
            finally
            {
                if (null != fileSystem)
                {
                    fileSystem.Close();
                }
                cluster.Shutdown();
            }
        }
 /// <exception cref="System.Exception"/>
 public virtual void TestAppend()
 {
     fs.Append(objInSnapshot, 65535, null);
 }
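The one-line body above presumably relies on the caller expecting a failure, since .snapshot paths are read-only. A hedged sketch of that expectation; SnapshotAccessControlException as the failure type is an assumption:

     try
     {
         // Appending through a snapshot path should be rejected, since
         // snapshots are immutable (65535 is the buffer size, null the
         // progress callback).
         fs.Append(objInSnapshot, 65535, null);
         NUnit.Framework.Assert.Fail("Expected append on a snapshot path to fail");
     }
     catch (SnapshotAccessControlException e)
     {
         System.Console.Out.WriteLine("Good: got " + e);
     }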