Example 1
        public virtual void TestPipelineRecoveryOnRestartFailure()
        {
            Configuration conf = new HdfsConfiguration();

            conf.Set(DFSConfigKeys.DfsClientDatanodeRestartTimeoutKey, "5");
            MiniDFSCluster cluster = null;

            try
            {
                int numDataNodes = 2;
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDataNodes).Build();
                cluster.WaitActive();
                FileSystem fileSys = cluster.GetFileSystem();
                Path       file    = new Path("dataprotocol3.dat");
                DFSTestUtil.CreateFile(fileSys, file, 10240L, (short)2, 0L);
                DFSOutputStream @out = (DFSOutputStream)(fileSys.Append(file).GetWrappedStream());
                @out.Write(1);
                @out.Hflush();
                DFSAdmin dfsadmin = new DFSAdmin(conf);
                DataNode dn       = cluster.GetDataNodes()[0];
                string   dnAddr1  = dn.GetDatanodeId().GetIpcAddr(false);
                // issue shutdown to the datanode.
                string[] args1 = new string[] { "-shutdownDatanode", dnAddr1, "upgrade" };
                NUnit.Framework.Assert.AreEqual(0, dfsadmin.Run(args1));
                Sharpen.Thread.Sleep(4000);
                // This should succeed without restarting the node. The restart will
                // expire and regular pipeline recovery will kick in.
                @out.Close();
                // At this point there is only one node in the cluster.
                @out = (DFSOutputStream)(fileSys.Append(file).GetWrappedStream());
                @out.Write(1);
                @out.Hflush();
                dn = cluster.GetDataNodes()[1];
                string dnAddr2 = dn.GetDatanodeId().GetIpcAddr(false);
                // issue shutdown to the datanode.
                string[] args2 = new string[] { "-shutdownDatanode", dnAddr2, "upgrade" };
                NUnit.Framework.Assert.AreEqual(0, dfsadmin.Run(args2));
                Sharpen.Thread.Sleep(4000);
                try
                {
                    // close should fail
                    @out.Close();
                    NUnit.Framework.Assert.Fail("close should have thrown an IOException");
                }
                catch (IOException)
                {
                }
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example 2
 /// <exception cref="System.Exception"/>
 private void TestAppend()
 {
     if (!IsLocalFS())
     {
         FileSystem fs = FileSystem.Get(GetProxiedFSConf());
         fs.Mkdirs(GetProxiedFSTestDir());
         Path         path = new Path(GetProxiedFSTestDir(), "foo.txt");
         OutputStream os   = fs.Create(path);
         os.Write(1);
         os.Close();
         fs.Close();
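         // Re-open the same path through the HttpFS file system and append a second byte.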
         fs = GetHttpFSFileSystem();
         os = fs.Append(new Path(path.ToUri().GetPath()));
         os.Write(2);
         os.Close();
         fs.Close();
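         // Read the file back through the proxied file system and verify both bytes, then EOF.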
         fs = FileSystem.Get(GetProxiedFSConf());
         InputStream @is = fs.Open(path);
         NUnit.Framework.Assert.AreEqual(@is.Read(), 1);
         NUnit.Framework.Assert.AreEqual(@is.Read(), 2);
         NUnit.Framework.Assert.AreEqual(@is.Read(), -1);
         @is.Close();
         fs.Close();
     }
 }
Example 3
        /// <summary>
        /// Test which randomly alternates between appending with
        /// CRC32 and with CRC32C, crossing several block boundaries.
        /// </summary>
        /// <remarks>
        /// Test which randomly alternates between appending with
        /// CRC32 and with CRC32C, crossing several block boundaries.
        /// Then, checks that all of the data can be read back correctly.
        /// </remarks>
        /// <exception cref="System.IO.IOException"/>
        public virtual void TestAlgoSwitchRandomized()
        {
            FileSystem fsWithCrc32  = CreateFsWithChecksum("CRC32", 512);
            FileSystem fsWithCrc32C = CreateFsWithChecksum("CRC32C", 512);
            Path       p            = new Path("/testAlgoSwitchRandomized");
            long       seed         = Time.Now();

            System.Console.Out.WriteLine("seed: " + seed);
            Random r = new Random(seed);

            // Create empty to start
            IOUtils.CloseStream(fsWithCrc32.Create(p));
            long st  = Time.Now();
            int  len = 0;

            while (Time.Now() - st < RandomTestRuntime)
            {
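                // Pick CRC32 or CRC32C at random and append a small chunk through the corresponding FileSystem.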
                int                thisLen = r.Next(500);
                FileSystem         fs      = (r.NextBoolean() ? fsWithCrc32 : fsWithCrc32C);
                FSDataOutputStream stm     = fs.Append(p);
                try
                {
                    AppendTestUtil.Write(stm, len, thisLen);
                }
                finally
                {
                    stm.Close();
                }
                len += thisLen;
            }
            AppendTestUtil.Check(fsWithCrc32, p, len);
            AppendTestUtil.Check(fsWithCrc32C, p, len);
        }
Example 4
 /// <summary>
 /// Test case that stops a writer after finalizing a block but
 /// before calling completeFile, recovers a file from another writer,
 /// starts writing from that writer, and then has the old lease holder
 /// call completeFile
 /// </summary>
 /// <exception cref="System.Exception"/>
 public virtual void TestCompleteOtherLeaseHoldersFile()
 {
     cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(5).Build();
     try
     {
         cluster.WaitActive();
         NamenodeProtocols preSpyNN = cluster.GetNameNodeRpc();
         NamenodeProtocols spyNN    = Org.Mockito.Mockito.Spy(preSpyNN);
         // Delay completeFile
         GenericTestUtils.DelayAnswer delayer = new GenericTestUtils.DelayAnswer(Log);
         Org.Mockito.Mockito.DoAnswer(delayer).When(spyNN).Complete(Matchers.AnyString(),
                                                                    Matchers.AnyString(), (ExtendedBlock)Matchers.AnyObject(), Matchers.AnyLong());
         DFSClient client = new DFSClient(null, spyNN, conf, null);
         file1 = new Path("/testCompleteOtherLease");
         OutputStream stm = client.Create("/testCompleteOtherLease", true);
         // write 1/2 block
         AppendTestUtil.Write(stm, 0, 4096);
         AtomicReference <Exception> err = new AtomicReference <Exception>();
         Sharpen.Thread t = new _Thread_242(stm, err);
         t.Start();
         Log.Info("Waiting for close to get to latch...");
         delayer.WaitForCall();
         // At this point, the block is finalized on the DNs, but the file
         // has not been completed in the NN.
         // Lose the leases
         Log.Info("Killing lease checker");
         client.GetLeaseRenewer().InterruptAndJoin();
         FileSystem fs1 = cluster.GetFileSystem();
         FileSystem fs2 = AppendTestUtil.CreateHdfsWithDifferentUsername(fs1.GetConf());
         Log.Info("Recovering file");
         RecoverFile(fs2);
         Log.Info("Opening file for append from new fs");
         FSDataOutputStream appenderStream = fs2.Append(file1);
         Log.Info("Writing some data from new appender");
         AppendTestUtil.Write(appenderStream, 0, 4096);
         Log.Info("Telling old close to proceed.");
         delayer.Proceed();
         Log.Info("Waiting for close to finish.");
         t.Join();
         Log.Info("Close finished.");
         // We expect that close will get a "Lease mismatch"
         // error.
         Exception thrownByClose = err.Get();
         NUnit.Framework.Assert.IsNotNull(thrownByClose);
         NUnit.Framework.Assert.IsTrue(thrownByClose is IOException);
         if (!thrownByClose.Message.Contains("Lease mismatch"))
         {
             throw thrownByClose;
         }
         // The appender should be able to close properly
         appenderStream.Close();
     }
     finally
     {
         cluster.Shutdown();
     }
 }
Example 5
            /// <summary>Executes the filesystem operation.</summary>
            /// <param name="fs">filesystem instance to use.</param>
            /// <returns>void.</returns>
            /// <exception cref="System.IO.IOException">thrown if an IO error occurred.</exception>
            public virtual Void Execute(FileSystem fs)
            {
                int          bufferSize = fs.GetConf().GetInt("httpfs.buffer.size", 4096);
                OutputStream os         = fs.Append(path, bufferSize);
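                // Copy the source stream into the appended file; passing true tells CopyBytes to close both streams when the copy finishes.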

                IOUtils.CopyBytes(@is, os, bufferSize, true);
                os.Close();
                return(null);
            }
Example 6
        /// <exception cref="System.IO.IOException"/>
        public override void AppendToFile(Path path, int numBlocks, params Options.CreateOpts
                                          [] options)
        {
            Options.CreateOpts.BlockSize blockSizeOpt = Options.CreateOpts.GetOpt<Options.CreateOpts.BlockSize>(options);
            long blockSize = blockSizeOpt != null ? blockSizeOpt.GetValue() : DefaultBlockSize;
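            // Re-open the existing file in append mode and add numBlocks blocks of generated test data.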

            FSDataOutputStream @out;

            @out = fs.Append(path);
            byte[] data = GetFileData(numBlocks, blockSize);
            @out.Write(data, 0, data.Length);
            @out.Close();
        }
Example 7
        public virtual void TestEarlierVersionEditLog()
        {
            Configuration conf    = new HdfsConfiguration();
            string        tarFile = Runtime.GetProperty("test.cache.data", "build/test/cache") + "/"
                                    + Hadoop10MultiblockTgz;
            string   testDir = PathUtils.GetTestDirName(GetType());
            FilePath dfsDir  = new FilePath(testDir, "image-1.0");

            if (dfsDir.Exists() && !FileUtil.FullyDelete(dfsDir))
            {
                throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
            }
            FileUtil.UnTar(new FilePath(tarFile), new FilePath(testDir));
            FilePath nameDir = new FilePath(dfsDir, "name");

            GenericTestUtils.AssertExists(nameDir);
            FilePath dataDir = new FilePath(dfsDir, "data");

            GenericTestUtils.AssertExists(dataDir);
            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameDir.GetAbsolutePath());
            conf.Set(DFSConfigKeys.DfsDatanodeDataDirKey, dataDir.GetAbsolutePath());
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Format(
                false).ManageDataDfsDirs(false).ManageNameDfsDirs(false).NumDataNodes(1).StartupOption
                                         (HdfsServerConstants.StartupOption.Upgrade).Build();

            try
            {
                FileSystem fs       = cluster.GetFileSystem();
                Path       testPath = new Path("/user/todd/4blocks");
                // Read it without caring about the actual data within - we just need
                // to make sure that the block states and locations are OK.
                DFSTestUtil.ReadFile(fs, testPath);
                // Ensure that we can append to it - if the blocks were in some funny
                // state we'd get some kind of issue here.
                FSDataOutputStream stm = fs.Append(testPath);
                try
                {
                    stm.Write(1);
                }
                finally
                {
                    IOUtils.CloseStream(stm);
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example 8
        /// <exception cref="System.IO.IOException"/>
        private static void WriteTestDataToFile(FileSystem fs)
        {
            OutputStream @out = null;

            if (!fs.Exists(TestPath))
            {
                @out = fs.Create(TestPath);
            }
            else
            {
                @out = fs.Append(TestPath);
            }
            @out.Write(Sharpen.Runtime.GetBytesForString(PlainText));
            @out.Close();
        }
Example 9
        public virtual void TestAppendWhileInSafeMode()
        {
            Banner("Starting with NN0 active and NN1 standby, creating some blocks");
            // Make 4.5 blocks so that append() will re-open an existing block
            // instead of just adding a new one
            DFSTestUtil.CreateFile(fs, new Path("/test"), 4 * BlockSize + BlockSize / 2, (short
                                                                                          )3, 1L);
            // Roll edit log so that, when the SBN restarts, it will load
            // the namespace during startup.
            nn0.GetRpcServer().RollEditLog();
            Banner("Restarting standby");
            RestartStandby();
            // It will initially have all of the blocks necessary.
            AssertSafeMode(nn1, 5, 5, 3, 0);
            // Append to a block while SBN is in safe mode. This should
            // not affect safemode initially, since the DN message
            // will get queued.
            FSDataOutputStream stm = fs.Append(new Path("/test"));

            try
            {
                AssertSafeMode(nn1, 5, 5, 3, 0);
                // if we roll edits now, the SBN should see that it's under construction
                // and change its total count and safe count down by one, since UC
                // blocks are not counted by safe mode.
                HATestUtil.WaitForStandbyToCatchUp(nn0, nn1);
                AssertSafeMode(nn1, 4, 4, 3, 0);
            }
            finally
            {
                IOUtils.CloseStream(stm);
            }
            // Delete those blocks while the SBN is in safe mode.
            // This will not ACK the deletions to the SBN, so it won't
            // notice until we roll the edit log.
            Banner("Removing the blocks without rolling the edit log");
            fs.Delete(new Path("/test"), true);
            BlockManagerTestUtil.ComputeAllPendingWork(nn0.GetNamesystem().GetBlockManager());
            Banner("Triggering deletions on DNs and Deletion Reports");
            cluster.TriggerHeartbeats();
            HATestUtil.WaitForDNDeletions(cluster);
            cluster.TriggerDeletionReports();
            AssertSafeMode(nn1, 4, 4, 3, 0);
            // When we roll the edit log, the deletions will go through.
            Banner("Waiting for standby to catch up to active namespace");
            HATestUtil.WaitForStandbyToCatchUp(nn0, nn1);
            AssertSafeMode(nn1, 0, 0, 3, 0);
        }
Example 10
        public virtual void TestRestartWithAppend()
        {
            Configuration conf = new HdfsConfiguration();

            // Turn off persistent IPC, so that the DFSClient can survive NN restart
            conf.SetInt(CommonConfigurationKeysPublic.IpcClientConnectionMaxidletimeKey, 0);
            MiniDFSCluster     cluster = null;
            FSDataOutputStream stream;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
                FileSystem fs = cluster.GetFileSystem();
                int nnPort = NameNode.GetAddress(conf).Port; // look up the NameNode port; the value is not used further in this test
                // Creating a file with 4096 blockSize to write multiple blocks
                stream = fs.Create(FilePath, true, BlockSize, (short)1, BlockSize);
                stream.Write(DataBeforeRestart, 0, DataBeforeRestart.Length / 2);
                stream.Close();
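                // Re-open the file in append mode and write the second half of the data.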
                stream = fs.Append(FilePath, BlockSize);
                stream.Write(DataBeforeRestart, DataBeforeRestart.Length / 2, DataBeforeRestart.Length
                             / 2);
                stream.Close();
                NUnit.Framework.Assert.AreEqual(DataBeforeRestart.Length, fs.GetFileStatus(FilePath
                                                                                           ).GetLen());
                cluster.RestartNameNode();
                NUnit.Framework.Assert.AreEqual(DataBeforeRestart.Length, fs.GetFileStatus(FilePath
                                                                                           ).GetLen());
                FSDataInputStream readStream = fs.Open(FilePath);
                try
                {
                    byte[] verifyBuf = new byte[DataBeforeRestart.Length];
                    IOUtils.ReadFully(readStream, verifyBuf, 0, verifyBuf.Length);
                    Assert.AssertArrayEquals(DataBeforeRestart, verifyBuf);
                }
                finally
                {
                    IOUtils.CloseStream(readStream);
                }
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example 11
        /// <summary>
        /// Test that quotas are properly tracked by the standby through
        /// create, append, delete.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestQuotasTrackedOnStandby()
        {
            fs.Mkdirs(TestDir);
            DistributedFileSystem dfs = (DistributedFileSystem)fs;

            dfs.SetQuota(TestDir, NsQuota, DsQuota);
            long expectedSize = 3 * BlockSize + BlockSize / 2;

            DFSTestUtil.CreateFile(fs, TestFile, expectedSize, (short)1, 1L);
            HATestUtil.WaitForStandbyToCatchUp(nn0, nn1);
            ContentSummary cs = nn1.GetRpcServer().GetContentSummary(TestDirStr);

            NUnit.Framework.Assert.AreEqual(NsQuota, cs.GetQuota());
            NUnit.Framework.Assert.AreEqual(DsQuota, cs.GetSpaceQuota());
            NUnit.Framework.Assert.AreEqual(expectedSize, cs.GetSpaceConsumed());
            NUnit.Framework.Assert.AreEqual(1, cs.GetDirectoryCount());
            NUnit.Framework.Assert.AreEqual(1, cs.GetFileCount());
            // Append to the file and make sure quota is updated correctly.
            FSDataOutputStream stm = fs.Append(TestFile);

            try
            {
                byte[] data = new byte[(int)(BlockSize * 3 / 2)];
                stm.Write(data);
                expectedSize += data.Length;
            }
            finally
            {
                IOUtils.CloseStream(stm);
            }
            HATestUtil.WaitForStandbyToCatchUp(nn0, nn1);
            cs = nn1.GetRpcServer().GetContentSummary(TestDirStr);
            NUnit.Framework.Assert.AreEqual(NsQuota, cs.GetQuota());
            NUnit.Framework.Assert.AreEqual(DsQuota, cs.GetSpaceQuota());
            NUnit.Framework.Assert.AreEqual(expectedSize, cs.GetSpaceConsumed());
            NUnit.Framework.Assert.AreEqual(1, cs.GetDirectoryCount());
            NUnit.Framework.Assert.AreEqual(1, cs.GetFileCount());
            fs.Delete(TestFile, true);
            expectedSize = 0;
            HATestUtil.WaitForStandbyToCatchUp(nn0, nn1);
            cs = nn1.GetRpcServer().GetContentSummary(TestDirStr);
            NUnit.Framework.Assert.AreEqual(NsQuota, cs.GetQuota());
            NUnit.Framework.Assert.AreEqual(DsQuota, cs.GetSpaceQuota());
            NUnit.Framework.Assert.AreEqual(expectedSize, cs.GetSpaceConsumed());
            NUnit.Framework.Assert.AreEqual(1, cs.GetDirectoryCount());
            NUnit.Framework.Assert.AreEqual(0, cs.GetFileCount());
        }
Example 12
            public async Task WhenFileAndPathValid_ExpectFileWritten()
            {
                //---------------Arrange-------------------
                var path     = Path.GetTempPath();
                var fileName = Guid.NewGuid() + ".csv";
                var document = Create_CsvFile(fileName);

                var sut = new FileSystem();
                //---------------Act----------------------
                var result = await sut.Append(path, document);

                //---------------Assert-----------------------
                var fileWritten = File.Exists(result.FullFilePath);

                result.HadError.Should().BeFalse();
                fileWritten.Should().BeTrue();
            }
Example 13
        /// <summary>FileNotFoundException is expected for appending to a non-existing file</summary>
        /// <exception cref="System.IO.FileNotFoundException">as the result</exception>
        /// <exception cref="System.IO.IOException"/>
        public virtual void TestFileNotFound()
        {
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
            FileSystem     fs      = cluster.GetFileSystem();

            try
            {
                Path file1 = new Path("/nonexistingfile.dat");
                fs.Append(file1);
            }
            finally
            {
                fs.Close();
                cluster.Shutdown();
            }
        }
Example 14
 /// <exception cref="System.IO.IOException"/>
 public static void TestAppend(FileSystem fs, Path p)
 {
     byte[] bytes = new byte[1000];
     {
         //create file
         FSDataOutputStream @out = fs.Create(p, (short)1);
         @out.Write(bytes);
         @out.Close();
         NUnit.Framework.Assert.AreEqual(bytes.Length, fs.GetFileStatus(p).GetLen());
     }
     for (int i = 2; i < 500; i++)
     {
         //append
         FSDataOutputStream @out = fs.Append(p);
         @out.Write(bytes);
         @out.Close();
         NUnit.Framework.Assert.AreEqual(i * bytes.Length, fs.GetFileStatus(p).GetLen());
     }
 }
Example 15
        /// <summary>
        /// Internal method to actually write to the logs
        /// </summary>
        /// <param name="lines">Text lines to be written</param>
        /// <returns>Awaitable task</returns>
        private async Task Log(IEnumerable <string> lines)
        {
            try
            {
                await Semaphore.WaitAsync();

                if (!SessionId.HasValue)
                {
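                    // First write of this session: record the session id and create the log file with a "Created" entry.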
                    SessionId = Time;
                    await FileSystem.Create(SessionId.Value, FormattedLine("Created"));
                }

                await FileSystem.Append(SessionId.Value, lines.Select(x => FormattedLine(x)));
            }
            finally
            {
                Semaphore.Release();
            }
        }
Example 16
        /*
         * Recover file.
         * Try to open the file in append mode.
         * Doing this, we get a hold of the file that the crashed writer
         * was writing to.  Once we have it, close it.  This will
         * allow a subsequent reader to see data up to the last sync.
         * NOTE: This is the same algorithm that HBase uses for file recovery
         * @param fs
         * @throws Exception
         */
        /// <exception cref="System.Exception"/>
        private void RecoverFile(FileSystem fs)
        {
            Log.Info("Recovering File Lease");
            // set the soft limit to be 1 second so that the
            // namenode triggers lease recovery upon append request
            cluster.SetLeasePeriod(1000, HdfsConstants.LeaseHardlimitPeriod);
            // Trying recovery
            int  tries              = 60;
            bool recovered          = false;
            FSDataOutputStream @out = null;

            while (!recovered && tries-- > 0)
            {
                try
                {
                    @out = fs.Append(file1);
                    Log.Info("Successfully opened for append");
                    recovered = true;
                }
                catch (IOException)
                {
                    Log.Info("Failed open for append, waiting on lease recovery");
                    try
                    {
                        Sharpen.Thread.Sleep(1000);
                    }
                    catch (Exception)
                    {
                        // ignore it and try again
                    }
                }
            }
            if (@out != null)
            {
                @out.Close();
            }
            if (!recovered)
            {
                NUnit.Framework.Assert.Fail("Recovery should take < 1 min");
            }
            Log.Info("Past out lease recovery");
        }
Example 17
        /// <exception cref="System.IO.IOException"/>
        private void AppendWithTwoFs(Path p, FileSystem fs1, FileSystem fs2)
        {
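            // Write the first segment with fs1, then re-open the file with fs2 and append the second segment.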
            FSDataOutputStream stm = fs1.Create(p);

            try
            {
                AppendTestUtil.Write(stm, 0, SegmentLength);
            }
            finally
            {
                stm.Close();
            }
            stm = fs2.Append(p);
            try
            {
                AppendTestUtil.Write(stm, SegmentLength, SegmentLength);
            }
            finally
            {
                stm.Close();
            }
        }
Example 18
        public virtual void TestAppendTwice()
        {
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
            FileSystem     fs1     = cluster.GetFileSystem();
            FileSystem     fs2     = AppendTestUtil.CreateHdfsWithDifferentUsername(conf);

            try
            {
                Path   p            = new Path("/testAppendTwice/foo");
                int    len          = 1 << 16;
                byte[] fileContents = AppendTestUtil.InitBuffer(len);
                {
                    // create a new file with a full block.
                    FSDataOutputStream @out = fs2.Create(p, true, 4096, (short)1, len);
                    @out.Write(fileContents, 0, len);
                    @out.Close();
                }
                //1st append does not add any data so that the last block remains full
                //and the last block in INodeFileUnderConstruction is a BlockInfo
                //but not BlockInfoUnderConstruction.
                fs2.Append(p);
                //2nd append should get AlreadyBeingCreatedException
                fs1.Append(p);
                NUnit.Framework.Assert.Fail();
            }
            catch (RemoteException re)
            {
                AppendTestUtil.Log.Info("Got an exception:", re);
                NUnit.Framework.Assert.AreEqual(typeof(AlreadyBeingCreatedException).FullName, re
                                                .GetClassName());
            }
            finally
            {
                fs2.Close();
                fs1.Close();
                cluster.Shutdown();
            }
        }
Example 19
        /// <exception cref="System.IO.IOException"/>
        private void WriteAndAppend(FileSystem fs, Path p, int lengthForCreate, int lengthForAppend
                                    )
        {
            // Creating a file with 4096 blockSize to write multiple blocks
            FSDataOutputStream stream = fs.Create(p, true, BlockSize, (short)1, BlockSize);

            try
            {
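                // Write lengthForCreate bytes on create, then re-open the file with Append and write lengthForAppend more.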
                AppendTestUtil.Write(stream, 0, lengthForCreate);
                stream.Close();
                stream = fs.Append(p);
                AppendTestUtil.Write(stream, lengthForCreate, lengthForAppend);
                stream.Close();
            }
            finally
            {
                IOUtils.CloseStream(stream);
            }
            int totalLength = lengthForCreate + lengthForAppend;

            NUnit.Framework.Assert.AreEqual(totalLength, fs.GetFileStatus(p).GetLen());
        }
Example 20
        public virtual void TestPipelineRecoveryOnOOB()
        {
            Configuration conf = new HdfsConfiguration();

            conf.Set(DFSConfigKeys.DfsClientDatanodeRestartTimeoutKey, "15");
            MiniDFSCluster cluster = null;

            try
            {
                int numDataNodes = 1;
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDataNodes).Build();
                cluster.WaitActive();
                FileSystem fileSys = cluster.GetFileSystem();
                Path       file    = new Path("dataprotocol2.dat");
                DFSTestUtil.CreateFile(fileSys, file, 10240L, (short)1, 0L);
                DFSOutputStream @out = (DFSOutputStream)(fileSys.Append(file).GetWrappedStream());
                @out.Write(1);
                @out.Hflush();
                DFSAdmin dfsadmin = new DFSAdmin(conf);
                DataNode dn       = cluster.GetDataNodes()[0];
                string   dnAddr   = dn.GetDatanodeId().GetIpcAddr(false);
                // issue shutdown to the datanode.
                string[] args1 = new string[] { "-shutdownDatanode", dnAddr, "upgrade" };
                NUnit.Framework.Assert.AreEqual(0, dfsadmin.Run(args1));
                // Wait long enough to receive an OOB ack before closing the file.
                Sharpen.Thread.Sleep(4000);
                // Restart the datanode
                cluster.RestartDataNode(0, true);
                // The following forces a data packet and end of block packets to be sent.
                @out.Close();
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example 21
        /// <summary>
        /// Ensure that even if a file is in a directory with the sticky bit on,
        /// another user can write to that file (assuming correct permissions).
        /// </summary>
        /// <exception cref="System.Exception"/>
        private void ConfirmCanAppend(Configuration conf, Path p)
        {
            // Write a file to the new tmp directory as a regular user
            Path file = new Path(p, "foo");

            WriteFile(hdfsAsUser1, file);
            hdfsAsUser1.SetPermission(file, new FsPermission((short)0x1ff));
            // Log onto cluster as another user and attempt to append to file
            Path file2           = new Path(p, "foo");
            FSDataOutputStream h = null;

            try
            {
                h = hdfsAsUser2.Append(file2);
                h.Write(Sharpen.Runtime.GetBytesForString("Some more data"));
                h.Close();
                h = null;
            }
            finally
            {
                IOUtils.Cleanup(null, h);
            }
        }
Example 22
 /// <summary>Try opening a file for append.</summary>
 /// <exception cref="System.Exception"/>
 private static FSDataOutputStream Append(FileSystem fs, Path p)
 {
     for (int i = 0; i < 10; i++)
     {
         try
         {
             return(fs.Append(p));
         }
         catch (RemoteException re)
         {
             if (re.GetClassName().Equals(typeof(RecoveryInProgressException).FullName))
             {
                 AppendTestUtil.Log.Info("Will sleep and retry, i=" + i + ", p=" + p, re);
                 Sharpen.Thread.Sleep(1000);
             }
             else
             {
                 throw;
             }
         }
     }
     throw new IOException("Cannot append to " + p);
 }
Example 23
        /// <summary>Common routine to do a position read while the file is open for write.</summary>
        /// <remarks>
        /// Common routine to do a position read while the file is open for write.
        /// After each iteration of write, read the file from beginning to end.
        /// Return 0 on success, else the number of failures.
        /// </remarks>
        /// <exception cref="System.IO.IOException"/>
        private int TestWriteAndRead(string fname, int loopN, int chunkSize, long readBeginPosition
                                     )
        {
            int  countOfFailures    = 0;
            long byteVisibleToRead  = 0;
            FSDataOutputStream @out = null;

            byte[] outBuffer = new byte[BufferSize];
            byte[] inBuffer  = new byte[BufferSize];
            for (int i = 0; i < BufferSize; i++)
            {
                outBuffer[i] = unchecked ((byte)(i & unchecked ((int)(0x00ff))));
            }
            try
            {
                Path path = GetFullyQualifiedPath(fname);
                long fileLengthBeforeOpen = 0;
                if (IfExists(path))
                {
                    if (truncateOption)
                    {
                        @out = useFCOption ? mfc.Create(path, EnumSet.Of(CreateFlag.Overwrite)) : mfs.Create
                                   (path, truncateOption);
                        Log.Info("File already exists. File open with Truncate mode: " + path);
                    }
                    else
                    {
                        @out = useFCOption ? mfc.Create(path, EnumSet.Of(CreateFlag.Append)) : mfs.Append
                                   (path);
                        fileLengthBeforeOpen = GetFileLengthFromNN(path);
                        Log.Info("File already exists of size " + fileLengthBeforeOpen + " File open for Append mode: "
                                 + path);
                    }
                }
                else
                {
                    @out = useFCOption ? mfc.Create(path, EnumSet.Of(CreateFlag.Create)) : mfs.Create
                               (path);
                }
                long totalByteWritten = fileLengthBeforeOpen;
                long totalByteVisible = fileLengthBeforeOpen;
                long totalByteWrittenButNotVisible = 0;
                bool toFlush;
                for (int i_1 = 0; i_1 < loopN; i_1++)
                {
                    toFlush = (i_1 % 2) == 0;
                    WriteData(@out, outBuffer, chunkSize);
                    totalByteWritten += chunkSize;
                    if (toFlush)
                    {
                        @out.Hflush();
                        totalByteVisible += chunkSize + totalByteWrittenButNotVisible;
                        totalByteWrittenButNotVisible = 0;
                    }
                    else
                    {
                        totalByteWrittenButNotVisible += chunkSize;
                    }
                    if (verboseOption)
                    {
                        Log.Info("TestReadWrite - Written " + chunkSize + ". Total written = " + totalByteWritten
                                 + ". TotalByteVisible = " + totalByteVisible + " to file " + fname);
                    }
                    byteVisibleToRead = ReadData(fname, inBuffer, totalByteVisible, readBeginPosition
                                                 );
                    string readmsg = "Written=" + totalByteWritten + " ; Expected Visible=" + totalByteVisible
                                     + " ; Got Visible=" + byteVisibleToRead + " of file " + fname;
                    if (byteVisibleToRead >= totalByteVisible && byteVisibleToRead <= totalByteWritten)
                    {
                        readmsg = "pass: reader sees expected number of visible byte. " + readmsg + " [pass]";
                    }
                    else
                    {
                        countOfFailures++;
                        readmsg = "fail: reader see different number of visible byte. " + readmsg + " [fail]";
                        throw new IOException(readmsg);
                    }
                    Log.Info(readmsg);
                }
                // test the automatic flush after close
                WriteData(@out, outBuffer, chunkSize);
                totalByteWritten += chunkSize;
                totalByteVisible += chunkSize + totalByteWrittenButNotVisible;
                totalByteWrittenButNotVisible += 0;
                @out.Close();
                byteVisibleToRead = ReadData(fname, inBuffer, totalByteVisible, readBeginPosition
                                             );
                string readmsg2 = "Written=" + totalByteWritten + " ; Expected Visible=" + totalByteVisible
                                  + " ; Got Visible=" + byteVisibleToRead + " of file " + fname;
                string readmsg_1;
                if (byteVisibleToRead >= totalByteVisible && byteVisibleToRead <= totalByteWritten)
                {
                    readmsg_1 = "pass: reader sees expected number of visible byte on close. " + readmsg2
                                + " [pass]";
                }
                else
                {
                    countOfFailures++;
                    readmsg_1 = "fail: reader sees different number of visible byte on close. " + readmsg2
                                + " [fail]";
                    Log.Info(readmsg_1);
                    throw new IOException(readmsg_1);
                }
                // now check if NN got the same length
                long lenFromFc = GetFileLengthFromNN(path);
                if (lenFromFc != byteVisibleToRead)
                {
                    readmsg_1 = "fail: reader sees different number of visible byte from NN " + readmsg2
                                + " [fail]";
                    throw new IOException(readmsg_1);
                }
            }
            catch (IOException e)
            {
                throw new IOException("##### Caught Exception in testAppendWriteAndRead. Close file. "
                                      + "Total Byte Read so far = " + byteVisibleToRead, e);
            }
            finally
            {
                if (@out != null)
                {
                    @out.Close();
                }
            }
            return(-countOfFailures);
        }
Example 24
        public virtual void TestGetNewStamp()
        {
            int            numDataNodes = 1;
            Configuration  conf         = new HdfsConfiguration();
            MiniDFSCluster cluster      = new MiniDFSCluster.Builder(conf).NumDataNodes(numDataNodes
                                                                                        ).Build();

            try
            {
                cluster.WaitActive();
                FileSystem        fileSys  = cluster.GetFileSystem();
                NamenodeProtocols namenode = cluster.GetNameNodeRpc();
                /* Test writing to finalized replicas */
                Path file = new Path("dataprotocol.dat");
                DFSTestUtil.CreateFile(fileSys, file, 1L, (short)numDataNodes, 0L);
                // get the first blockid for the file
                ExtendedBlock firstBlock = DFSTestUtil.GetFirstBlock(fileSys, file);
                // test getNewStampAndToken on a finalized block
                try
                {
                    namenode.UpdateBlockForPipeline(firstBlock, string.Empty);
                    NUnit.Framework.Assert.Fail("Can not get a new GS from a finalized block");
                }
                catch (IOException e)
                {
                    NUnit.Framework.Assert.IsTrue(e.Message.Contains("is not under Construction"));
                }
                // test getNewStampAndToken on a non-existent block
                try
                {
                    long          newBlockId = firstBlock.GetBlockId() + 1;
                    ExtendedBlock newBlock   = new ExtendedBlock(firstBlock.GetBlockPoolId(), newBlockId
                                                                 , 0, firstBlock.GetGenerationStamp());
                    namenode.UpdateBlockForPipeline(newBlock, string.Empty);
                    NUnit.Framework.Assert.Fail("Cannot get a new GS from a non-existent block");
                }
                catch (IOException e)
                {
                    NUnit.Framework.Assert.IsTrue(e.Message.Contains("does not exist"));
                }
                /* Test RBW replicas */
                // change first block to a RBW
                DFSOutputStream @out = null;
                try
                {
                    @out = (DFSOutputStream)(fileSys.Append(file).GetWrappedStream());
                    @out.Write(1);
                    @out.Hflush();
                    FSDataInputStream @in = null;
                    try
                    {
                        @in        = fileSys.Open(file);
                        firstBlock = DFSTestUtil.GetAllBlocks(@in)[0].GetBlock();
                    }
                    finally
                    {
                        IOUtils.CloseStream(@in);
                    }
                    // test non-lease holder
                    DFSClient dfs = ((DistributedFileSystem)fileSys).dfs;
                    try
                    {
                        namenode.UpdateBlockForPipeline(firstBlock, "test" + dfs.clientName);
                        NUnit.Framework.Assert.Fail("Cannot get a new GS for a non lease holder");
                    }
                    catch (LeaseExpiredException e)
                    {
                        NUnit.Framework.Assert.IsTrue(e.Message.StartsWith("Lease mismatch"));
                    }
                    // test null lease holder
                    try
                    {
                        namenode.UpdateBlockForPipeline(firstBlock, null);
                        NUnit.Framework.Assert.Fail("Cannot get a new GS for a null lease holder");
                    }
                    catch (LeaseExpiredException e)
                    {
                        NUnit.Framework.Assert.IsTrue(e.Message.StartsWith("Lease mismatch"));
                    }
                    // test getNewStampAndToken on a rbw block
                    namenode.UpdateBlockForPipeline(firstBlock, dfs.clientName);
                }
                finally
                {
                    IOUtils.CloseStream(@out);
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example 25
 /// <exception cref="System.IO.IOException"/>
 public override FSDataOutputStream Append(Path f, int bufferSize, Progressable progress
                                           )
 {
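     // Delegate the append call to the wrapped file system instance.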
     return(fs.Append(f, bufferSize, progress));
 }
Example 26
        public virtual void TestOpWrite()
        {
            int            numDataNodes = 1;
            long           BlockIdFudge = 128;
            Configuration  conf         = new HdfsConfiguration();
            MiniDFSCluster cluster      = new MiniDFSCluster.Builder(conf).NumDataNodes(numDataNodes
                                                                                        ).Build();

            try
            {
                cluster.WaitActive();
                string poolId = cluster.GetNamesystem().GetBlockPoolId();
                datanode = DataNodeTestUtils.GetDNRegistrationForBP(cluster.GetDataNodes()[0], poolId
                                                                    );
                dnAddr = NetUtils.CreateSocketAddr(datanode.GetXferAddr());
                FileSystem fileSys = cluster.GetFileSystem();
                /* Test writing to finalized replicas */
                Path file = new Path("dataprotocol.dat");
                DFSTestUtil.CreateFile(fileSys, file, 1L, (short)numDataNodes, 0L);
                // get the first blockid for the file
                ExtendedBlock firstBlock = DFSTestUtil.GetFirstBlock(fileSys, file);
                // test PIPELINE_SETUP_CREATE on a finalized block
                TestWrite(firstBlock, BlockConstructionStage.PipelineSetupCreate, 0L, "Cannot create an existing block"
                          , true);
                // test PIPELINE_DATA_STREAMING on a finalized block
                TestWrite(firstBlock, BlockConstructionStage.DataStreaming, 0L, "Unexpected stage"
                          , true);
                // test PIPELINE_SETUP_STREAMING_RECOVERY on an existing block
                long newGS = firstBlock.GetGenerationStamp() + 1;
                TestWrite(firstBlock, BlockConstructionStage.PipelineSetupStreamingRecovery, newGS
                          , "Cannot recover data streaming to a finalized replica", true);
                // test PIPELINE_SETUP_APPEND on an existing block
                newGS = firstBlock.GetGenerationStamp() + 1;
                TestWrite(firstBlock, BlockConstructionStage.PipelineSetupAppend, newGS, "Append to a finalized replica"
                          , false);
                firstBlock.SetGenerationStamp(newGS);
                // test PIPELINE_SETUP_APPEND_RECOVERY on an existing block
                file = new Path("dataprotocol1.dat");
                DFSTestUtil.CreateFile(fileSys, file, 1L, (short)numDataNodes, 0L);
                firstBlock = DFSTestUtil.GetFirstBlock(fileSys, file);
                newGS      = firstBlock.GetGenerationStamp() + 1;
                TestWrite(firstBlock, BlockConstructionStage.PipelineSetupAppendRecovery, newGS,
                          "Recover appending to a finalized replica", false);
                // test PIPELINE_CLOSE_RECOVERY on an existing block
                file = new Path("dataprotocol2.dat");
                DFSTestUtil.CreateFile(fileSys, file, 1L, (short)numDataNodes, 0L);
                firstBlock = DFSTestUtil.GetFirstBlock(fileSys, file);
                newGS      = firstBlock.GetGenerationStamp() + 1;
                TestWrite(firstBlock, BlockConstructionStage.PipelineCloseRecovery, newGS, "Recover failed close to a finalized replica"
                          , false);
                firstBlock.SetGenerationStamp(newGS);
                // Test writing to a new block. Don't choose the next sequential
                // block ID to avoid conflicting with IDs chosen by the NN.
                long          newBlockId = firstBlock.GetBlockId() + BlockIdFudge;
                ExtendedBlock newBlock   = new ExtendedBlock(firstBlock.GetBlockPoolId(), newBlockId
                                                             , 0, firstBlock.GetGenerationStamp());
                // test PIPELINE_SETUP_CREATE on a new block
                TestWrite(newBlock, BlockConstructionStage.PipelineSetupCreate, 0L, "Create a new block"
                          , false);
                // test PIPELINE_SETUP_STREAMING_RECOVERY on a new block
                newGS = newBlock.GetGenerationStamp() + 1;
                newBlock.SetBlockId(newBlock.GetBlockId() + 1);
                TestWrite(newBlock, BlockConstructionStage.PipelineSetupStreamingRecovery, newGS,
                          "Recover a new block", true);
                // test PIPELINE_SETUP_APPEND on a new block
                newGS = newBlock.GetGenerationStamp() + 1;
                TestWrite(newBlock, BlockConstructionStage.PipelineSetupAppend, newGS, "Cannot append to a new block"
                          , true);
                // test PIPELINE_SETUP_APPEND_RECOVERY on a new block
                newBlock.SetBlockId(newBlock.GetBlockId() + 1);
                newGS = newBlock.GetGenerationStamp() + 1;
                TestWrite(newBlock, BlockConstructionStage.PipelineSetupAppendRecovery, newGS, "Cannot append to a new block"
                          , true);
                /* Test writing to RBW replicas */
                Path file1 = new Path("dataprotocol1.dat");
                DFSTestUtil.CreateFile(fileSys, file1, 1L, (short)numDataNodes, 0L);
                DFSOutputStream @out = (DFSOutputStream)(fileSys.Append(file1).GetWrappedStream()
                                                         );
                @out.Write(1);
                @out.Hflush();
                FSDataInputStream @in = fileSys.Open(file1);
                firstBlock = DFSTestUtil.GetAllBlocks(@in)[0].GetBlock();
                firstBlock.SetNumBytes(2L);
                try
                {
                    // test PIPELINE_SETUP_CREATE on a RBW block
                    TestWrite(firstBlock, BlockConstructionStage.PipelineSetupCreate, 0L, "Cannot create a RBW block"
                              , true);
                    // test PIPELINE_SETUP_APPEND on an existing block
                    newGS = firstBlock.GetGenerationStamp() + 1;
                    TestWrite(firstBlock, BlockConstructionStage.PipelineSetupAppend, newGS, "Cannot append to a RBW replica"
                              , true);
                    // test PIPELINE_SETUP_APPEND on an existing block
                    TestWrite(firstBlock, BlockConstructionStage.PipelineSetupAppendRecovery, newGS,
                              "Recover append to a RBW replica", false);
                    firstBlock.SetGenerationStamp(newGS);
                    // test PIPELINE_SETUP_STREAMING_RECOVERY on a RBW block
                    file = new Path("dataprotocol2.dat");
                    DFSTestUtil.CreateFile(fileSys, file, 1L, (short)numDataNodes, 0L);
                    @out = (DFSOutputStream)(fileSys.Append(file).GetWrappedStream());
                    @out.Write(1);
                    @out.Hflush();
                    @in        = fileSys.Open(file);
                    firstBlock = DFSTestUtil.GetAllBlocks(@in)[0].GetBlock();
                    firstBlock.SetNumBytes(2L);
                    newGS = firstBlock.GetGenerationStamp() + 1;
                    TestWrite(firstBlock, BlockConstructionStage.PipelineSetupStreamingRecovery, newGS
                              , "Recover a RBW replica", false);
                }
                finally
                {
                    IOUtils.CloseStream(@in);
                    IOUtils.CloseStream(@out);
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example 27
        internal override IList <OperationOutput> Run(FileSystem fs)
        {
            // Operation
            IList <OperationOutput> @out = base.Run(fs);
            OutputStream            os   = null;

            try
            {
                Path fn = GetAppendFile();
                // determine file status for the file length requirement,
                // to know whether we should fill in partial bytes
                Range <long> appendSizeRange = GetConfig().GetAppendSize();
                if (GetConfig().ShouldAppendUseBlockSize())
                {
                    appendSizeRange = GetConfig().GetBlockSize();
                }
                long       appendSize    = Range.BetweenPositive(GetRandom(), appendSizeRange);
                long       timeTaken     = 0;
                long       bytesAppended = 0;
                DataWriter writer        = new DataWriter(GetRandom());
                Log.Info("Attempting to append to file at " + fn + " of size " + Helper.ToByteInfo
                             (appendSize));
                {
                    // open
                    long startTime = Timer.Now();
                    os         = fs.Append(fn);
                    timeTaken += Timer.Elapsed(startTime);
                    // append given length
                    DataWriter.GenerateOutput stats = writer.WriteSegment(appendSize, os);
                    timeTaken     += stats.GetTimeTaken();
                    bytesAppended += stats.GetBytesWritten();
                    // capture close time
                    startTime = Timer.Now();
                    os.Close();
                    os         = null;
                    timeTaken += Timer.Elapsed(startTime);
                }
                @out.AddItem(new OperationOutput(OperationOutput.OutputType.Long, GetType(), ReportWriter
                                                 .BytesWritten, bytesAppended));
                @out.AddItem(new OperationOutput(OperationOutput.OutputType.Long, GetType(), ReportWriter
                                                 .OkTimeTaken, timeTaken));
                @out.AddItem(new OperationOutput(OperationOutput.OutputType.Long, GetType(), ReportWriter
                                                 .Successes, 1L));
                Log.Info("Appended " + Helper.ToByteInfo(bytesAppended) + " to file " + fn + " in "
                         + timeTaken + " milliseconds");
            }
            catch (FileNotFoundException e)
            {
                @out.AddItem(new OperationOutput(OperationOutput.OutputType.Long, GetType(), ReportWriter
                                                 .NotFound, 1L));
                Log.Warn("Error with appending", e);
            }
            catch (IOException e)
            {
                @out.AddItem(new OperationOutput(OperationOutput.OutputType.Long, GetType(), ReportWriter
                                                 .Failures, 1L));
                Log.Warn("Error with appending", e);
            }
            finally
            {
                if (os != null)
                {
                    try
                    {
                        os.Close();
                    }
                    catch (IOException e)
                    {
                        Log.Warn("Error with closing append stream", e);
                    }
                }
            }
            return(@out);
        }
Example 28
        public virtual void TestSimpleAppend()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetInt(DFSConfigKeys.DfsDatanodeHandlerCountKey, 50);
            fileContents = AppendTestUtil.InitBuffer(AppendTestUtil.FileSize);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
            FileSystem     fs      = cluster.GetFileSystem();

            try
            {
                {
                    // test appending to a file.
                    // create a new file.
                    Path file1             = new Path("/simpleAppend.dat");
                    FSDataOutputStream stm = AppendTestUtil.CreateFile(fs, file1, 1);
                    System.Console.Out.WriteLine("Created file simpleAppend.dat");
                    // write to file
                    int mid = 186;
                    // io.bytes.per.checksum bytes
                    System.Console.Out.WriteLine("Writing " + mid + " bytes to file " + file1);
                    stm.Write(fileContents, 0, mid);
                    stm.Close();
                    System.Console.Out.WriteLine("Wrote and Closed first part of file.");
                    // write to file
                    int mid2 = 607;
                    // io.bytes.per.checksum bytes
                    System.Console.Out.WriteLine("Writing " + mid + " bytes to file " + file1);
                    stm = fs.Append(file1);
                    stm.Write(fileContents, mid, mid2 - mid);
                    stm.Close();
                    System.Console.Out.WriteLine("Wrote and Closed second part of file.");
                    // write the remainder of the file
                    stm = fs.Append(file1);
                    // ensure getPos is set to reflect existing size of the file
                    NUnit.Framework.Assert.IsTrue(stm.GetPos() > 0);
                    System.Console.Out.WriteLine("Writing " + (AppendTestUtil.FileSize - mid2) + " bytes to file "
                                                 + file1);
                    stm.Write(fileContents, mid2, AppendTestUtil.FileSize - mid2);
                    System.Console.Out.WriteLine("Written second part of file");
                    stm.Close();
                    System.Console.Out.WriteLine("Wrote and Closed second part of file.");
                    // verify that entire file is good
                    AppendTestUtil.CheckFullFile(fs, file1, AppendTestUtil.FileSize, fileContents, "Read 2"
                                                 );
                }
                {
                    // test appending to a non-existing file.
                    FSDataOutputStream @out = null;
                    try
                    {
                        @out = fs.Append(new Path("/non-existing.dat"));
                        NUnit.Framework.Assert.Fail("Expected to have FileNotFoundException");
                    }
                    catch (FileNotFoundException fnfe)
                    {
                        System.Console.Out.WriteLine("Good: got " + fnfe);
                        Sharpen.Runtime.PrintStackTrace(fnfe, System.Console.Out);
                    }
                    finally
                    {
                        IOUtils.CloseStream(@out);
                    }
                }
                {
                    // test append permission.
                    //set root to all writable
                    Path root = new Path("/");
                    fs.SetPermission(root, new FsPermission((short)0x1ff));
                    fs.Close();
                    // login as a different user
                    UserGroupInformation superuser = UserGroupInformation.GetCurrentUser();
                    string username = "******";
                    string group    = "testappendgroup";
                    NUnit.Framework.Assert.IsFalse(superuser.GetShortUserName().Equals(username));
                    NUnit.Framework.Assert.IsFalse(Arrays.AsList(superuser.GetGroupNames()).Contains(
                                                       group));
                    UserGroupInformation appenduser = UserGroupInformation.CreateUserForTesting(username
                                                                                                , new string[] { group });
                    fs = DFSTestUtil.GetFileSystemAs(appenduser, conf);
                    // create a file
                    Path dir = new Path(root, GetType().Name);
                    Path foo = new Path(dir, "foo.dat");
                    FSDataOutputStream @out = null;
                    int offset = 0;
                    try
                    {
                        @out = fs.Create(foo);
                        int len = 10 + AppendTestUtil.NextInt(100);
                        @out.Write(fileContents, offset, len);
                        offset += len;
                    }
                    finally
                    {
                        IOUtils.CloseStream(@out);
                    }
                    // change dir and foo to minimal permissions:
                    // dir gets 0x40 (octal 0100, owner execute only), foo gets 0x80 (octal 0200, owner write only).
                    fs.SetPermission(dir, new FsPermission((short)0x40));
                    fs.SetPermission(foo, new FsPermission((short)0x80));
                    // try append, should succeed
                    @out = null;
                    try
                    {
                        @out = fs.Append(foo);
                        int len = 10 + AppendTestUtil.NextInt(100);
                        @out.Write(fileContents, offset, len);
                        offset += len;
                    }
                    finally
                    {
                        IOUtils.CloseStream(@out);
                    }
                    // open up dir fully but drop the owner's write bit on foo:
                    // foo gets 0x17f (octal 0577, r-xrwxrwx), dir gets 0x1ff (octal 0777, rwxrwxrwx).
                    fs.SetPermission(foo, new FsPermission((short)0x17f));
                    fs.SetPermission(dir, new FsPermission((short)0x1ff));
                    // try append, should fail
                    @out = null;
                    try
                    {
                        @out = fs.Append(foo);
                        NUnit.Framework.Assert.Fail("Expected to have AccessControlException");
                    }
                    catch (AccessControlException ace)
                    {
                        System.Console.Out.WriteLine("Good: got " + ace);
                        Sharpen.Runtime.PrintStackTrace(ace, System.Console.Out);
                    }
                    finally
                    {
                        IOUtils.CloseStream(@out);
                    }
                }
            }
            catch (IOException e)
            {
                System.Console.Out.WriteLine("Exception :" + e);
                throw;
            }
            catch (Exception e)
            {
                System.Console.Out.WriteLine("Throwable :" + e);
                Sharpen.Runtime.PrintStackTrace(e);
                throw new IOException("Throwable : " + e);
            }
            finally
            {
                fs.Close();
                cluster.Shutdown();
            }
        }
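The GetPos() assertion in the test above only checks that the position is non-zero after reopening for append. A tighter, hypothetical check is sketched below; it assumes the fs and file1 variables from the test and that this Sharpen-converted port exposes the usual Hadoop FileSystem.GetFileStatus()/FileStatus.GetLen() calls, which are not shown in this excerpt.

        // Hypothetical sketch, not part of the test above: after reopening a file
        // for append, the stream position should equal the file's current length.
        FSDataOutputStream appendStm = fs.Append(file1);
        try
        {
            long existingLen = fs.GetFileStatus(file1).GetLen();   // assumed API in this port
            NUnit.Framework.Assert.AreEqual(existingLen, appendStm.GetPos());
        }
        finally
        {
            IOUtils.CloseStream(appendStm);
        }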
Esempio n. 29
0
 /// <exception cref="System.IO.IOException"/>
 private void EnsureAppendEditlogFile()
 {
     editlogOs = fs.Append(editLogPath);
 }
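Here editLogPath, editlogOs and fs are fields of the enclosing test class and are not shown in this excerpt. A minimal, hypothetical sketch of how such a helper is typically driven:

 // Hypothetical caller; relies on the editlogOs, editLogPath and fs fields
 // implied by EnsureAppendEditlogFile above.
 EnsureAppendEditlogFile();   // reopen the edit-log file for append
 editlogOs.Write(1);          // append a single byte through the reopened stream
 editlogOs.Close();           // close so a later call can append again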
Esempio n. 30
0
        public virtual void TestWaitForRegistrationOnRestart()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsDatanodeBpReadyTimeoutKey, 5);
            conf.SetInt(DFSConfigKeys.DfsClientSocketTimeoutKey, 5000);
            // This makes the datanode appear registered to the NN, but it won't be
            // able to get to the saved dn reg internally.
            DataNodeFaultInjector dnFaultInjector = new _DataNodeFaultInjector_224();
            DataNodeFaultInjector oldDnInjector   = DataNodeFaultInjector.Get();

            DataNodeFaultInjector.Set(dnFaultInjector);
            MiniDFSCluster cluster = null;
            long           start   = 0;
            Path           file    = new Path("/reg");

            try
            {
                int numDNs = 1;
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDNs).Build();
                cluster.WaitActive();
                start = Runtime.CurrentTimeMillis();
                FileSystem fileSys = cluster.GetFileSystem();
                try
                {
                    DFSTestUtil.CreateFile(fileSys, file, 10240L, (short)1, 0L);
                    // It is a bug if this does not fail.
                    throw new IOException("Did not fail!");
                }
                catch (RemoteException e)
                {
                    long elapsed = Runtime.CurrentTimeMillis() - start;
                    // timers have at-least semantics, so it should be at least 5 seconds.
                    if (elapsed < 5000 || elapsed > 10000)
                    {
                        throw new IOException(elapsed + " ms passed.", e);
                    }
                }
                DataNodeFaultInjector.Set(oldDnInjector);
                // this should succeed now.
                DFSTestUtil.CreateFile(fileSys, file, 10240L, (short)1, 0L);
                // turn it back to under-construction, so that the client calls
                // getReplicaVisibleLength() rpc method against the datanode.
                fileSys.Append(file);
                // back to simulating unregistered node.
                DataNodeFaultInjector.Set(dnFaultInjector);
                byte[] buffer = new byte[8];
                start = Runtime.CurrentTimeMillis();
                try
                {
                    fileSys.Open(file).Read(0L, buffer, 0, 1);
                    throw new IOException("Did not fail!");
                }
                catch (IOException e)
                {
                    long elapsed = Runtime.CurrentTimeMillis() - start;
                    if (e.Message.Contains("readBlockLength"))
                    {
                        throw new IOException("Failed, but with unexpected exception:", e);
                    }
                    // timers have at-least semantics, so it should be at least 5 seconds.
                    if (elapsed < 5000 || elapsed > 10000)
                    {
                        throw new IOException(elapsed + " ms passed.", e);
                    }
                }
                DataNodeFaultInjector.Set(oldDnInjector);
                fileSys.Open(file).Read(0L, buffer, 0, 1);
            }
            finally
            {
                DataNodeFaultInjector.Set(oldDnInjector);
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
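The injector handling above follows a save/set/restore pattern around the code that is expected to fail. A minimal sketch of that pattern in isolation (faultInjector stands in for any DataNodeFaultInjector subclass and is illustrative only):

            // Save the default injector, install the faulty one, and always restore.
            DataNodeFaultInjector oldInjector = DataNodeFaultInjector.Get();
            DataNodeFaultInjector.Set(faultInjector);   // start simulating the fault
            try
            {
                // exercise the client path that should fail while the fault is active
            }
            finally
            {
                DataNodeFaultInjector.Set(oldInjector); // restore even if the test throws
            }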