Example #1
        /// <summary>TC12: Append to partial CRC chunk</summary>
        /// <exception cref="System.Exception"/>
        private void TestTC12(bool appendToNewBlock)
        {
            Path p = new Path("/TC12/foo" + (appendToNewBlock ? "0" : "1"));

            System.Console.Out.WriteLine("p=" + p);
            //a. Create file with a block size of 64KB
            //   and a default io.bytes.per.checksum of 512 bytes.
            //   Write 25687 bytes of data. Close file.
            int len1 = 25687;
            {
                FSDataOutputStream @out = fs.Create(p, false, buffersize, Replication, BlockSize);
                AppendTestUtil.Write(@out, 0, len1);
                @out.Close();
            }
            //b. Reopen file in "append" mode. Append another 5877 bytes of data. Close file.
            int len2 = 5877;

            {
                FSDataOutputStream @out = appendToNewBlock ? fs.Append(p, EnumSet.Of(CreateFlag.Append
                                                                                     , CreateFlag.NewBlock), 4096, null) : fs.Append(p);
                AppendTestUtil.Write(@out, len1, len2);
                @out.Close();
            }
            //c. Reopen file and read 25687+5877 bytes of data from file. Close file.
            AppendTestUtil.Check(fs, p, len1 + len2);
            if (appendToNewBlock)
            {
                LocatedBlocks blks = fs.dfs.GetLocatedBlocks(p.ToString(), 0);
                NUnit.Framework.Assert.AreEqual(2, blks.GetLocatedBlocks().Count);
                NUnit.Framework.Assert.AreEqual(len1, blks.GetLocatedBlocks()[0].GetBlockSize());
                NUnit.Framework.Assert.AreEqual(len2, blks.GetLocatedBlocks()[1].GetBlockSize());
                AppendTestUtil.Check(fs, p, 0, len1);
                AppendTestUtil.Check(fs, p, len1, len2);
            }
        }
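
Many of the examples in this listing toggle between the plain Append(p) overload and the flag-based overload that forces appended bytes into a fresh block. As a reading aid, a minimal hedged helper capturing that choice might look like the following; the helper name is illustrative and not part of the original tests.

        /// <summary>
        /// Illustrative helper (not from the original tests): open a file for append,
        /// optionally forcing the appended bytes to start in a new block, mirroring the
        /// two Append overloads used throughout these examples.
        /// </summary>
        private static FSDataOutputStream OpenForAppend(DistributedFileSystem fs, Path p, bool appendToNewBlock)
        {
            return appendToNewBlock
                ? fs.Append(p, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock), 4096, null)
                : fs.Append(p);
        }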
Example #2
        public virtual void TestSmallAppendRace()
        {
            Path   file  = new Path("/testSmallAppendRace");
            string fName = file.ToUri().GetPath();
            // Create the file and write a small amount of data.
            FSDataOutputStream stm = fs.Create(file);

            AppendTestUtil.Write(stm, 0, 123);
            stm.Close();
            // Introduce a delay between getFileInfo and calling append() against NN.
            DFSClient client    = DFSClientAdapter.GetDFSClient(fs);
            DFSClient spyClient = Org.Mockito.Mockito.Spy(client);

            Org.Mockito.Mockito.When(spyClient.GetFileInfo(fName))
                .ThenAnswer(new _Answer_548(client, fName));
            DFSClientAdapter.SetDFSClient(fs, spyClient);
            // Create two threads for doing appends to the same file.
            Sharpen.Thread worker1 = new _Thread_564(this, file);
            Sharpen.Thread worker2 = new _Thread_574(this, file);
            worker1.Start();
            worker2.Start();
            // append will fail when the file size crosses the checksum chunk boundary,
            // if append was called with a stale file stat.
            DoSmallAppends(file, fs, 20);
        }
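
The bodies of the _Thread_564 and _Thread_574 workers are not shown in this listing. A hypothetical reconstruction, based on the surrounding comments and on the DoSmallAppends helper in Example #25, could look like the sketch below; the class and field names are illustrative, not copied source.

        // Hypothetical reconstruction of a small-append worker (the original
        // _Thread_564/_Thread_574 bodies are elided above). Each worker repeatedly
        // reopens the shared file for append and writes a small record, tolerating
        // IOExceptions caused by the competing appender.
        internal sealed class SmallAppendWorker : Sharpen.Thread
        {
            private readonly DistributedFileSystem fs;
            private readonly Path file;

            internal SmallAppendWorker(DistributedFileSystem fs, Path file)
            {
                this.fs = fs;
                this.file = file;
            }

            public override void Run()
            {
                for (int i = 0; i < 20; i++)
                {
                    FSDataOutputStream stm;
                    try
                    {
                        stm = this.fs.Append(this.file);
                    }
                    catch (IOException)
                    {
                        // Another appender currently holds the lease; skip this round.
                        continue;
                    }
                    AppendTestUtil.Write(stm, 0, 123);
                    stm.Close();
                }
            }
        }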
Example #3
        public virtual void TestAppendWithPipelineRecovery()
        {
            Configuration      conf    = new Configuration();
            MiniDFSCluster     cluster = null;
            FSDataOutputStream @out    = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).ManageDataDfsDirs(true).ManageNameDfsDirs(true)
                              .NumDataNodes(4).Racks(new string[] { "/rack1", "/rack1", "/rack2", "/rack2" }).Build();
                cluster.WaitActive();
                DistributedFileSystem fs = cluster.GetFileSystem();
                Path path = new Path("/test1");
                @out = fs.Create(path, true, BlockSize, (short)3, BlockSize);
                AppendTestUtil.Write(@out, 0, 1024);
                @out.Close();
                cluster.StopDataNode(3);
                @out = fs.Append(path);
                AppendTestUtil.Write(@out, 1024, 1024);
                @out.Close();
                cluster.RestartNameNode(true);
                AppendTestUtil.Check(fs, path, 2048);
            }
            finally
            {
                IOUtils.CloseStream(@out);
                if (null != cluster)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #4
        public virtual void TestAppend()
        {
            int maxOldFileLen   = 2 * BlockSize + 1;
            int maxFlushedBytes = BlockSize;

            byte[] contents = AppendTestUtil.InitBuffer(maxOldFileLen + 2 * maxFlushedBytes);
            for (int oldFileLen = 0; oldFileLen <= maxOldFileLen; oldFileLen++)
            {
                for (int flushedBytes1 = 0; flushedBytes1 <= maxFlushedBytes; flushedBytes1++)
                {
                    for (int flushedBytes2 = 0; flushedBytes2 <= maxFlushedBytes; flushedBytes2++)
                    {
                        int fileLen = oldFileLen + flushedBytes1 + flushedBytes2;
                        // create the initial file of oldFileLen
                        Path p = new Path("foo" + oldFileLen + "_" + flushedBytes1 + "_" + flushedBytes2);
                        Log.Info("Creating file " + p);
                        FSDataOutputStream @out = fs.Create(p, false,
                            conf.GetInt(CommonConfigurationKeys.IoFileBufferSizeKey, 4096), Replication, BlockSize);
                        @out.Write(contents, 0, oldFileLen);
                        @out.Close();
                        // append flushedBytes bytes to the file
                        @out = fs.Append(p);
                        @out.Write(contents, oldFileLen, flushedBytes1);
                        @out.Hflush();
                        // write another flushedBytes2 bytes to the file
                        @out.Write(contents, oldFileLen + flushedBytes1, flushedBytes2);
                        @out.Close();
                        // validate the file content
                        AppendTestUtil.CheckFullFile(fs, p, fileLen, contents, p.ToString());
                        fs.Delete(p, false);
                    }
                }
            }
        }
Example #5
        public virtual void TestTC2()
        {
            Path p = new Path("/TC2/foo");

            System.Console.Out.WriteLine("p=" + p);
            //a. Create file with one and a half block of data. Close file.
            int len1 = (int)(BlockSize + BlockSize / 2);

            {
                FSDataOutputStream @out = fs.Create(p, false, buffersize, Replication, BlockSize);
                AppendTestUtil.Write(@out, 0, len1);
                @out.Close();
            }
            AppendTestUtil.Check(fs, p, len1);
            //   Reopen file to append quarter block of data. Close file.
            int len2 = (int)BlockSize / 4;

            {
                FSDataOutputStream @out = fs.Append(p);
                AppendTestUtil.Write(@out, len1, len2);
                @out.Close();
            }
            //b. Reopen file and read 1.75 blocks of data. Close file.
            AppendTestUtil.Check(fs, p, len1 + len2);
        }
Example #6
        public virtual void TestTC2ForAppend2()
        {
            Path p = new Path("/TC2/foo2");
            //a. Create file with one and a half block of data. Close file.
            int len1 = (int)(BlockSize + BlockSize / 2);

            {
                FSDataOutputStream @out = fs.Create(p, false, buffersize, Replication, BlockSize);
                AppendTestUtil.Write(@out, 0, len1);
                @out.Close();
            }
            AppendTestUtil.Check(fs, p, len1);
            //   Reopen file to append quarter block of data. Close file.
            int len2 = (int)BlockSize / 4;

            {
                FSDataOutputStream @out = fs.Append(p, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock
                                                                  ), 4096, null);
                AppendTestUtil.Write(@out, len1, len2);
                @out.Close();
            }
            // b. Reopen file and read 1.75 blocks of data. Close file.
            AppendTestUtil.Check(fs, p, len1 + len2);
            IList<LocatedBlock> blocks = fs.GetClient().GetLocatedBlocks(p.ToString(), 0L).GetLocatedBlocks();

            NUnit.Framework.Assert.AreEqual(3, blocks.Count);
            NUnit.Framework.Assert.AreEqual(BlockSize, blocks[0].GetBlockSize());
            NUnit.Framework.Assert.AreEqual(BlockSize / 2, blocks[1].GetBlockSize());
            NUnit.Framework.Assert.AreEqual(BlockSize / 4, blocks[2].GetBlockSize());
        }
Example #7
        /// <summary>
        /// Test which randomly alternates between appending with
        /// CRC32 and with CRC32C, crossing several block boundaries.
        /// </summary>
        /// <remarks>
        /// Test which randomly alternates between appending with
        /// CRC32 and with CRC32C, crossing several block boundaries.
        /// Then, checks that all of the data can be read back correctly.
        /// </remarks>
        /// <exception cref="System.IO.IOException"/>
        public virtual void TestAlgoSwitchRandomized()
        {
            FileSystem fsWithCrc32  = CreateFsWithChecksum("CRC32", 512);
            FileSystem fsWithCrc32C = CreateFsWithChecksum("CRC32C", 512);
            Path       p            = new Path("/testAlgoSwitchRandomized");
            long       seed         = Time.Now();

            System.Console.Out.WriteLine("seed: " + seed);
            Random r = new Random(seed);

            // Create empty to start
            IOUtils.CloseStream(fsWithCrc32.Create(p));
            long st  = Time.Now();
            int  len = 0;

            while (Time.Now() - st < RandomTestRuntime)
            {
                int                thisLen = r.Next(500);
                FileSystem         fs      = (r.NextBoolean() ? fsWithCrc32 : fsWithCrc32C);
                FSDataOutputStream stm     = fs.Append(p);
                try
                {
                    AppendTestUtil.Write(stm, len, thisLen);
                }
                finally
                {
                    stm.Close();
                }
                len += thisLen;
            }
            AppendTestUtil.Check(fsWithCrc32, p, len);
            AppendTestUtil.Check(fsWithCrc32C, p, len);
        }
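
CreateFsWithChecksum is referenced here and in Examples #11 and #14, but its body is not part of this listing. A plausible sketch follows; the DFSConfigKeys constant names, the Configuration copy constructor, the FileSystem.Get overload, and the conf field are assumptions based on the Sharpen-style naming seen elsewhere in these examples, not copied source.

        /// <summary>
        /// Hypothetical sketch of the CreateFsWithChecksum helper used above: clone the
        /// cluster configuration, set the checksum algorithm and the bytes-per-checksum
        /// chunk size, and return a FileSystem bound to that configuration.
        /// </summary>
        private FileSystem CreateFsWithChecksum(string type, int bytesPerChecksum)
        {
            Configuration newConf = new Configuration(conf);
            newConf.Set(DFSConfigKeys.DfsChecksumTypeKey, type);
            newConf.SetInt(DFSConfigKeys.DfsBytesPerChecksumKey, bytesPerChecksum);
            return FileSystem.Get(newConf);
        }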
Example #8
        //
        // writes to file
        //
        /// <exception cref="System.IO.IOException"/>
        private static void WriteFile(FSDataOutputStream stm, long seed)
        {
            byte[] buffer = AppendTestUtil.RandomBytes(seed, fileSize);
            int    mid    = fileSize / 2;

            stm.Write(buffer, 0, mid);
            stm.Write(buffer, mid, fileSize - mid);
        }
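
Because WriteFile derives its contents from a seed, a verification pass can regenerate the same bytes and compare them with what is read back. The sketch below is illustrative and assumes the same fileSize field; the original tests perform this check via CheckData/CheckFullFile instead.

        /// <exception cref="System.IO.IOException"/>
        private static void VerifyFile(FSDataInputStream stm, long seed)
        {
            // Regenerate the seeded buffer that WriteFile wrote, then compare it
            // byte-for-byte against the data read back from the stream.
            byte[] expected = AppendTestUtil.RandomBytes(seed, fileSize);
            byte[] actual   = new byte[fileSize];
            stm.ReadFully(0, actual);
            for (int i = 0; i < fileSize; i++)
            {
                NUnit.Framework.Assert.AreEqual(expected[i], actual[i]);
            }
        }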
Example #9
 /// <summary>
 /// Test case that stops a writer after finalizing a block but
 /// before calling completeFile, recovers a file from another writer,
 /// starts writing from that writer, and then has the old lease holder
 /// call completeFile
 /// </summary>
 /// <exception cref="System.Exception"/>
 public virtual void TestCompleteOtherLeaseHoldersFile()
 {
     cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(5).Build();
     try
     {
         cluster.WaitActive();
         NamenodeProtocols preSpyNN = cluster.GetNameNodeRpc();
         NamenodeProtocols spyNN    = Org.Mockito.Mockito.Spy(preSpyNN);
         // Delay completeFile
         GenericTestUtils.DelayAnswer delayer = new GenericTestUtils.DelayAnswer(Log);
         Org.Mockito.Mockito.DoAnswer(delayer).When(spyNN).Complete(Matchers.AnyString(),
                                                                    Matchers.AnyString(), (ExtendedBlock)Matchers.AnyObject(), Matchers.AnyLong());
         DFSClient client = new DFSClient(null, spyNN, conf, null);
         file1 = new Path("/testCompleteOtherLease");
         OutputStream stm = client.Create("/testCompleteOtherLease", true);
         // write 1/2 block
         AppendTestUtil.Write(stm, 0, 4096);
         AtomicReference <Exception> err = new AtomicReference <Exception>();
         Sharpen.Thread t = new _Thread_242(stm, err);
         t.Start();
         Log.Info("Waiting for close to get to latch...");
         delayer.WaitForCall();
         // At this point, the block is finalized on the DNs, but the file
         // has not been completed in the NN.
         // Lose the leases
         Log.Info("Killing lease checker");
         client.GetLeaseRenewer().InterruptAndJoin();
         FileSystem fs1 = cluster.GetFileSystem();
         FileSystem fs2 = AppendTestUtil.CreateHdfsWithDifferentUsername(fs1.GetConf());
         Log.Info("Recovering file");
         RecoverFile(fs2);
         Log.Info("Opening file for append from new fs");
         FSDataOutputStream appenderStream = fs2.Append(file1);
         Log.Info("Writing some data from new appender");
         AppendTestUtil.Write(appenderStream, 0, 4096);
         Log.Info("Telling old close to proceed.");
         delayer.Proceed();
         Log.Info("Waiting for close to finish.");
         t.Join();
         Log.Info("Close finished.");
         // We expect that close will get a "Lease mismatch"
         // error.
         Exception thrownByClose = err.Get();
         NUnit.Framework.Assert.IsNotNull(thrownByClose);
         NUnit.Framework.Assert.IsTrue(thrownByClose is IOException);
         if (!thrownByClose.Message.Contains("Lease mismatch"))
         {
             throw thrownByClose;
         }
         // The appender should be able to close properly
         appenderStream.Close();
     }
     finally
     {
         cluster.Shutdown();
     }
 }
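
The _Thread_242 body is elided from this listing. Based on the comments and the err/thrownByClose handling above, a hypothetical reconstruction would simply close the half-written stream and record any failure; the class name below is illustrative and the AtomicReference.Set call is assumed to exist in the Sharpen runtime.

     // Hypothetical reconstruction of the elided _Thread_242: close the stream that
     // wrote the half block and stash any exception so the main test can assert on
     // the expected "Lease mismatch" error after delayer.Proceed().
     internal sealed class CloseWorker : Sharpen.Thread
     {
         private readonly OutputStream stm;
         private readonly AtomicReference<Exception> err;

         internal CloseWorker(OutputStream stm, AtomicReference<Exception> err)
         {
             this.stm = stm;
             this.err = err;
         }

         public override void Run()
         {
             try
             {
                 this.stm.Close();   // blocks in complete() until the delayer proceeds
             }
             catch (Exception e)
             {
                 this.err.Set(e);    // read back via err.Get() in the main test body
             }
         }
     }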
Example #10
        //
        // verify that the data written are sane
        //
        /// <exception cref="System.IO.IOException"/>
        private static void CheckFile(FileSystem fileSys, Path name, int repl, int numblocks
                                      , int filesize, long seed)
        {
            bool done    = false;
            int  attempt = 0;
            long len     = fileSys.GetFileStatus(name).GetLen();

            NUnit.Framework.Assert.IsTrue(name + " should be of size " + filesize + " but found to be of size "
                                          + len, len == filesize);
            // wait till all full blocks are confirmed by the datanodes.
            while (!done)
            {
                attempt++;
                try
                {
                    Sharpen.Thread.Sleep(1000);
                }
                catch (Exception)
                {
                }
                done = true;
                BlockLocation[] locations = fileSys.GetFileBlockLocations(fileSys.GetFileStatus(name
                                                                                                ), 0, filesize);
                if (locations.Length < numblocks)
                {
                    if (attempt > 100)
                    {
                        System.Console.Out.WriteLine("File " + name + " has only " + locations.Length + " blocks, "
                                                     + " but is expected to have " + numblocks + " blocks.");
                    }
                    done = false;
                    continue;
                }
                for (int idx = 0; idx < locations.Length; idx++)
                {
                    if (locations[idx].GetHosts().Length < repl)
                    {
                        if (attempt > 100)
                        {
                            System.Console.Out.WriteLine("File " + name + " has " + locations.Length + " blocks: "
                                                         + " The " + idx + " block has only " + locations[idx].GetHosts().Length + " replicas but is expected to have "
                                                         + repl + " replicas.");
                        }
                        done = false;
                        break;
                    }
                }
            }
            FSDataInputStream stm = fileSys.Open(name);

            byte[] expected = AppendTestUtil.RandomBytes(seed, fileSize);
            // do a sanity check. Read the file
            byte[] actual = new byte[filesize];
            stm.ReadFully(0, actual);
            CheckData(actual, 0, expected, "Read 1");
        }
Example #11
        public virtual void TestSwitchChunkSize()
        {
            FileSystem fsWithSmallChunk = CreateFsWithChecksum("CRC32", 512);
            FileSystem fsWithBigChunk   = CreateFsWithChecksum("CRC32", 1024);
            Path       p = new Path("/testSwitchChunkSize");

            AppendWithTwoFs(p, fsWithSmallChunk, fsWithBigChunk);
            AppendTestUtil.Check(fsWithSmallChunk, p, SegmentLength * 2);
            AppendTestUtil.Check(fsWithBigChunk, p, SegmentLength * 2);
        }
Example #12
 public override void Run()
 {
     while (this.running)
     {
         try
         {
             Sharpen.Thread.Sleep(1000);
         }
         catch (Exception)
         {
             continue;
         }
         // check if all threads have a new stamp.
         // If so, then all workers have finished at least one file
         // since the last stamp.
         bool loop = false;
         for (int i = 0; i < this._enclosing.numThreads; i++)
         {
             if (this._enclosing.workload[i].GetStamp() == 0)
             {
                 loop = true;
                 break;
             }
         }
         if (loop)
         {
             continue;
         }
         // Now it is guaranteed that there will be at least one valid
         // replica of a file.
         for (int i_1 = 0; i_1 < TestDatanodeDeath.replication - 1; i_1++)
         {
             // pick a random datanode to shutdown
             int victim = AppendTestUtil.NextInt(TestDatanodeDeath.numDatanodes);
             try
             {
                 System.Console.Out.WriteLine("Stopping datanode " + victim);
                 this.cluster.RestartDataNode(victim);
             }
             catch (IOException e)
             {
                 // cluster.startDataNodes(conf, 1, true, null, null);
                 System.Console.Out.WriteLine("TestDatanodeDeath Modify exception " + e);
                 NUnit.Framework.Assert.IsTrue("TestDatanodeDeath Modify exception " + e, false);
                 this.running = false;
             }
         }
         // set a new stamp for all workers
         for (int i_2 = 0; i_2 < this._enclosing.numThreads; i_2++)
         {
             this._enclosing.workload[i_2].ResetStamp();
         }
     }
 }
Example #13
        /// <exception cref="System.IO.IOException"/>
        private static void RollbackRollingUpgrade(Path foo, Path bar, Path file, byte[]
                                                   data, MiniDFSCluster cluster)
        {
            MiniDFSCluster.DataNodeProperties dnprop = cluster.StopDataNode(0);
            cluster.RestartNameNode("-rollingUpgrade", "rollback");
            cluster.RestartDataNode(dnprop, true);
            DistributedFileSystem dfs = cluster.GetFileSystem();

            NUnit.Framework.Assert.IsTrue(dfs.Exists(foo));
            NUnit.Framework.Assert.IsFalse(dfs.Exists(bar));
            AppendTestUtil.CheckFullFile(dfs, file, data.Length, data);
        }
Example #14
        public virtual void TestSwitchAlgorithms()
        {
            FileSystem fsWithCrc32  = CreateFsWithChecksum("CRC32", 512);
            FileSystem fsWithCrc32C = CreateFsWithChecksum("CRC32C", 512);
            Path       p            = new Path("/testSwitchAlgorithms");

            AppendWithTwoFs(p, fsWithCrc32, fsWithCrc32C);
            // Regardless of which FS is used to read, it should pick up
            // the on-disk checksum!
            AppendTestUtil.Check(fsWithCrc32C, p, SegmentLength * 2);
            AppendTestUtil.Check(fsWithCrc32, p, SegmentLength * 2);
        }
Example #15
        public virtual void TestSimpleFlush()
        {
            Configuration conf = new HdfsConfiguration();

            fileContents = AppendTestUtil.InitBuffer(AppendTestUtil.FileSize);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
            FileSystem     fs      = cluster.GetFileSystem();

            try
            {
                // create a new file.
                Path file1             = new Path("/simpleFlush.dat");
                FSDataOutputStream stm = AppendTestUtil.CreateFile(fs, file1, 1);
                System.Console.Out.WriteLine("Created file simpleFlush.dat");
                // write to file
                int mid = AppendTestUtil.FileSize / 2;
                stm.Write(fileContents, 0, mid);
                stm.Hflush();
                System.Console.Out.WriteLine("Wrote and Flushed first part of file.");
                // write the remainder of the file
                stm.Write(fileContents, mid, AppendTestUtil.FileSize - mid);
                System.Console.Out.WriteLine("Written second part of file");
                stm.Hflush();
                stm.Hflush();
                System.Console.Out.WriteLine("Wrote and Flushed second part of file.");
                // verify that full blocks are sane
                CheckFile(fs, file1, 1);
                stm.Close();
                System.Console.Out.WriteLine("Closed file.");
                // verify that entire file is good
                AppendTestUtil.CheckFullFile(fs, file1, AppendTestUtil.FileSize, fileContents, "Read 2"
                                             );
            }
            catch (IOException e)
            {
                System.Console.Out.WriteLine("Exception :" + e);
                throw;
            }
            catch (Exception e)
            {
                System.Console.Out.WriteLine("Throwable :" + e);
                Sharpen.Runtime.PrintStackTrace(e);
                throw new IOException("Throwable : " + e);
            }
            finally
            {
                fs.Close();
                cluster.Shutdown();
            }
        }
Example #16
        /// <summary>TC7: Corrupted replicas are present.</summary>
        /// <exception cref="System.IO.IOException">an exception might be thrown</exception>
        /// <exception cref="System.Exception"/>
        private void TestTC7(bool appendToNewBlock)
        {
            short repl = 2;
            Path  p    = new Path("/TC7/foo" + (appendToNewBlock ? "0" : "1"));

            System.Console.Out.WriteLine("p=" + p);
            //a. Create file with replication factor of 2. Write half block of data. Close file.
            int len1 = (int)(BlockSize / 2);

            {
                FSDataOutputStream @out = fs.Create(p, false, buffersize, repl, BlockSize);
                AppendTestUtil.Write(@out, 0, len1);
                @out.Close();
            }
            DFSTestUtil.WaitReplication(fs, p, repl);
            //b. Log into one datanode that has one replica of this block.
            //   Find the block file on this datanode and truncate it to zero size.
            LocatedBlocks locatedblocks = fs.dfs.GetNamenode().GetBlockLocations(p.ToString()
                                                                                 , 0L, len1);

            NUnit.Framework.Assert.AreEqual(1, locatedblocks.LocatedBlockCount());
            LocatedBlock  lb  = locatedblocks.Get(0);
            ExtendedBlock blk = lb.GetBlock();

            NUnit.Framework.Assert.AreEqual(len1, lb.GetBlockSize());
            DatanodeInfo[] datanodeinfos = lb.GetLocations();
            NUnit.Framework.Assert.AreEqual(repl, datanodeinfos.Length);
            DataNode dn = cluster.GetDataNode(datanodeinfos[0].GetIpcPort());
            FilePath f  = DataNodeTestUtils.GetBlockFile(dn, blk.GetBlockPoolId(), blk.GetLocalBlock());
            RandomAccessFile raf = new RandomAccessFile(f, "rw");

            AppendTestUtil.Log.Info("dn=" + dn + ", blk=" + blk + " (length=" + blk.GetNumBytes() + ")");
            NUnit.Framework.Assert.AreEqual(len1, raf.Length());
            raf.SetLength(0);
            raf.Close();
            //c. Open file in "append mode".  Append a new block worth of data. Close file.
            int len2 = (int)BlockSize;

            {
                FSDataOutputStream @out = appendToNewBlock ? fs.Append(p, EnumSet.Of(CreateFlag.Append
                                                                                     , CreateFlag.NewBlock), 4096, null) : fs.Append(p);
                AppendTestUtil.Write(@out, len1, len2);
                @out.Close();
            }
            //d. Reopen file and read two blocks worth of data.
            AppendTestUtil.Check(fs, p, len1 + len2);
        }
Example #17
        public virtual void TestComplexFlush()
        {
            Configuration conf = new HdfsConfiguration();

            fileContents = AppendTestUtil.InitBuffer(AppendTestUtil.FileSize);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
            FileSystem     fs      = cluster.GetFileSystem();

            try
            {
                // create a new file.
                Path file1             = new Path("/complexFlush.dat");
                FSDataOutputStream stm = AppendTestUtil.CreateFile(fs, file1, 1);
                System.Console.Out.WriteLine("Created file complexFlush.dat");
                int start = 0;
                for (start = 0; (start + 29) < AppendTestUtil.FileSize;)
                {
                    stm.Write(fileContents, start, 29);
                    stm.Hflush();
                    start += 29;
                }
                stm.Write(fileContents, start, AppendTestUtil.FileSize - start);
                // need to make sure we completely write out all full blocks before
                // the checkFile() call (see FSOutputSummer#flush)
                stm.Flush();
                // verify that full blocks are sane
                CheckFile(fs, file1, 1);
                stm.Close();
                // verify that entire file is good
                AppendTestUtil.CheckFullFile(fs, file1, AppendTestUtil.FileSize, fileContents, "Read 2"
                                             );
            }
            catch (IOException e)
            {
                System.Console.Out.WriteLine("Exception :" + e);
                throw;
            }
            catch (Exception e)
            {
                System.Console.Out.WriteLine("Throwable :" + e);
                Sharpen.Runtime.PrintStackTrace(e);
                throw new IOException("Throwable : " + e);
            }
            finally
            {
                fs.Close();
                cluster.Shutdown();
            }
        }
Example #18
        public virtual void TestHardLeaseRecovery()
        {
            //create a file
            string filestr = "/hardLeaseRecovery";

            AppendTestUtil.Log.Info("filestr=" + filestr);
            Path filepath          = new Path(filestr);
            FSDataOutputStream stm = dfs.Create(filepath, true, BufSize, ReplicationNum, BlockSize
                                                );

            NUnit.Framework.Assert.IsTrue(dfs.dfs.Exists(filestr));
            // write bytes into the file.
            int size = AppendTestUtil.NextInt(FileSize);

            AppendTestUtil.Log.Info("size=" + size);
            stm.Write(buffer, 0, size);
            // hflush file
            AppendTestUtil.Log.Info("hflush");
            stm.Hflush();
            // kill the lease renewal thread
            AppendTestUtil.Log.Info("leasechecker.interruptAndJoin()");
            dfs.dfs.GetLeaseRenewer().InterruptAndJoin();
            // set the hard limit to be 1 second
            cluster.SetLeasePeriod(LongLeasePeriod, ShortLeasePeriod);
            // wait for lease recovery to complete
            LocatedBlocks locatedBlocks;

            do
            {
                Sharpen.Thread.Sleep(ShortLeasePeriod);
                locatedBlocks = dfs.dfs.GetLocatedBlocks(filestr, 0L, size);
            }while (locatedBlocks.IsUnderConstruction());
            NUnit.Framework.Assert.AreEqual(size, locatedBlocks.GetFileLength());
            // make sure that the writer thread gets killed
            try
            {
                stm.Write('b');
                stm.Close();
                NUnit.Framework.Assert.Fail("Writer thread should have been killed");
            }
            catch (IOException e)
            {
                Sharpen.Runtime.PrintStackTrace(e);
            }
            // verify data
            AppendTestUtil.Log.Info("File size is good. Now validating sizes from datanodes..."
                                    );
            AppendTestUtil.CheckFullFile(dfs, filepath, size, buffer, filestr);
        }
Example #19
        /// <summary>TC11: Racing rename</summary>
        /// <exception cref="System.Exception"/>
        private void TestTC11(bool appendToNewBlock)
        {
            Path p = new Path("/TC11/foo" + (appendToNewBlock ? "0" : "1"));

            System.Console.Out.WriteLine("p=" + p);
            //a. Create file and write one block of data. Close file.
            int len1 = (int)BlockSize;
            {
                FSDataOutputStream @out = fs.Create(p, false, buffersize, Replication, BlockSize);
                AppendTestUtil.Write(@out, 0, len1);
                @out.Close();
            }
            //b. Reopen file in "append" mode. Append half block of data.
            FSDataOutputStream out_1 = appendToNewBlock ? fs.Append(p, EnumSet.Of(CreateFlag.
                                                                                  Append, CreateFlag.NewBlock), 4096, null) : fs.Append(p);
            int len2 = (int)BlockSize / 2;

            AppendTestUtil.Write(out_1, len1, len2);
            out_1.Hflush();
            //c. Rename file to file.new.
            Path pnew = new Path(p + ".new");

            NUnit.Framework.Assert.IsTrue(fs.Rename(p, pnew));
            //d. Close file handle that was opened in (b).
            out_1.Close();
            //check block sizes
            long          len           = fs.GetFileStatus(pnew).GetLen();
            LocatedBlocks locatedblocks = fs.dfs.GetNamenode().GetBlockLocations(pnew.ToString(), 0L, len);
            int numblock = locatedblocks.LocatedBlockCount();

            for (int i = 0; i < numblock; i++)
            {
                LocatedBlock  lb   = locatedblocks.Get(i);
                ExtendedBlock blk  = lb.GetBlock();
                long          size = lb.GetBlockSize();
                if (i < numblock - 1)
                {
                    NUnit.Framework.Assert.AreEqual(BlockSize, size);
                }
                foreach (DatanodeInfo datanodeinfo in lb.GetLocations())
                {
                    DataNode dn       = cluster.GetDataNode(datanodeinfo.GetIpcPort());
                    Block    metainfo = DataNodeTestUtils.GetFSDataset(dn)
                                            .GetStoredBlock(blk.GetBlockPoolId(), blk.GetBlockId());
                    NUnit.Framework.Assert.AreEqual(size, metainfo.GetNumBytes());
                }
            }
        }
Example #20
        public virtual void TestAppend2AfterSoftLimit()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetInt(DFSConfigKeys.DfsReplicationKey, 1);
            //Set small soft-limit for lease
            long           softLimit = 1L;
            long           hardLimit = 9999999L;
            MiniDFSCluster cluster   = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();

            cluster.SetLeasePeriod(softLimit, hardLimit);
            cluster.WaitActive();
            DistributedFileSystem fs  = cluster.GetFileSystem();
            DistributedFileSystem fs2 = new DistributedFileSystem();

            fs2.Initialize(fs.GetUri(), conf);
            Path testPath = new Path("/testAppendAfterSoftLimit");

            byte[] fileContents = AppendTestUtil.InitBuffer(32);
            // create a new file without closing
            FSDataOutputStream @out = fs.Create(testPath);

            @out.Write(fileContents);
            //Wait for > soft-limit
            Sharpen.Thread.Sleep(250);
            try
            {
                FSDataOutputStream appendStream2 = fs2.Append(testPath, EnumSet.Of(CreateFlag.Append
                                                                                   , CreateFlag.NewBlock), 4096, null);
                appendStream2.Write(fileContents);
                appendStream2.Close();
                NUnit.Framework.Assert.AreEqual(fileContents.Length, fs.GetFileStatus(testPath).GetLen());
                // make sure we now have 1 block since the first writer was revoked
                LocatedBlocks blks = fs.GetClient().GetLocatedBlocks(testPath.ToString(), 0L);
                NUnit.Framework.Assert.AreEqual(1, blks.GetLocatedBlocks().Count);
                foreach (LocatedBlock blk in blks.GetLocatedBlocks())
                {
                    NUnit.Framework.Assert.AreEqual(fileContents.Length, blk.GetBlockSize());
                }
            }
            finally
            {
                fs.Close();
                fs2.Close();
                cluster.Shutdown();
            }
        }
Example #21
        /// <summary>
        /// Append to a partial CRC chunk where the first write does not fill up the
        /// partial CRC chunk.
        /// </summary>
        /// <exception cref="System.IO.IOException"/>
        private void TestAppendToPartialChunk(bool appendToNewBlock)
        {
            Path p       = new Path("/partialChunk/foo" + (appendToNewBlock ? "0" : "1"));
            int  fileLen = 513;

            System.Console.Out.WriteLine("p=" + p);
            byte[] fileContents = AppendTestUtil.InitBuffer(fileLen);
            // create a new file.
            FSDataOutputStream stm = AppendTestUtil.CreateFile(fs, p, 1);

            // create 1 byte file
            stm.Write(fileContents, 0, 1);
            stm.Close();
            System.Console.Out.WriteLine("Wrote 1 byte and closed the file " + p);
            // append to file
            stm = appendToNewBlock ? fs.Append(p, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock
                                                             ), 4096, null) : fs.Append(p);
            // Append to a partial CRC chunk
            stm.Write(fileContents, 1, 1);
            stm.Hflush();
            // The partial CRC chunk is not full yet; close the file
            stm.Close();
            System.Console.Out.WriteLine("Append 1 byte and closed the file " + p);
            // write the remainder of the file
            stm = appendToNewBlock ? fs.Append(p, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock
                                                             ), 4096, null) : fs.Append(p);
            // ensure getPos is set to reflect existing size of the file
            NUnit.Framework.Assert.AreEqual(2, stm.GetPos());
            // append to a partial CRC chunk
            stm.Write(fileContents, 2, 1);
            // The partial chunk is still not full; force a packet to be sent to the DN
            stm.Hflush();
            System.Console.Out.WriteLine("Append and flush 1 byte");
            // The partial chunk is still not full; force another packet to be sent to the DN
            stm.Write(fileContents, 3, 2);
            stm.Hflush();
            System.Console.Out.WriteLine("Append and flush 2 byte");
            // fill up the partial chunk and close the file
            stm.Write(fileContents, 5, fileLen - 5);
            stm.Close();
            System.Console.Out.WriteLine("Flush 508 byte and closed the file " + p);
            // verify that entire file is good
            AppendTestUtil.CheckFullFile(fs, p, fileLen, fileContents, "Failed to append to a partial chunk"
                                         );
        }
Example #22
            /// <exception cref="System.IO.IOException"/>
            internal static string CheckFullFile(Path file, FilePath localFile)
            {
                StringBuilder b = new StringBuilder("checkFullFile: ").Append(file.GetName())
                                      .Append(" vs ").Append(localFile);

                byte[] bytes = new byte[CheckLength(file, localFile)];
                b.Append(", length=").Append(bytes.Length);
                FileInputStream @in = new FileInputStream(localFile);

                for (int n = 0; n < bytes.Length;)
                {
                    n += @in.Read(bytes, n, bytes.Length - n);
                }
                @in.Close();
                AppendTestUtil.CheckFullFile(dfs, file, bytes.Length, bytes, "File content mismatch: "
                                             + b, false);
                return(b.ToString());
            }
Example #23
        /// <exception cref="System.IO.IOException"/>
        private static void StartRollingUpgrade(Path foo, Path bar, Path file, byte[] data
                                                , MiniDFSCluster cluster)
        {
            DistributedFileSystem dfs = cluster.GetFileSystem();

            //start rolling upgrade
            dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
            dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Prepare);
            dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
            dfs.Mkdirs(bar);
            NUnit.Framework.Assert.IsTrue(dfs.Exists(foo));
            NUnit.Framework.Assert.IsTrue(dfs.Exists(bar));
            //truncate a file
            int newLength = DFSUtil.GetRandom().Next(data.Length - 1) + 1;

            dfs.Truncate(file, newLength);
            TestFileTruncate.CheckBlockRecovery(file, dfs);
            AppendTestUtil.CheckFullFile(dfs, file, newLength, data);
        }
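
StartRollingUpgrade and RollbackRollingUpgrade (Example #13) are designed to be used as a pair. A minimal illustrative driver, using only calls that appear in those two examples, might look like this; it is a sketch, not a copy of any original test body.

        /// <exception cref="System.IO.IOException"/>
        private static void ExerciseRollingUpgradeRollback(Path foo, Path bar, Path file,
                                                           byte[] data, MiniDFSCluster cluster)
        {
            // Prepare the rolling upgrade, create bar and truncate the file...
            StartRollingUpgrade(foo, bar, file, data, cluster);
            // ...then roll back: bar disappears and the file is restored to data.Length bytes.
            RollbackRollingUpgrade(foo, bar, file, data, cluster);
        }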
Example #24
        /// <exception cref="System.Exception"/>
        private void RecoverLeaseUsingCreate2(Path filepath)
        {
            FileSystem            dfs2 = GetFSAsAnotherUser(conf);
            int                   size = AppendTestUtil.NextInt(FileSize);
            DistributedFileSystem dfsx = (DistributedFileSystem)dfs2;
            //create file using dfsx
            Path filepath2         = new Path("/immediateRecoverLease-x2");
            FSDataOutputStream stm = dfsx.Create(filepath2, true, BufSize, ReplicationNum, BlockSize
                                                 );

            NUnit.Framework.Assert.IsTrue(dfsx.dfs.Exists("/immediateRecoverLease-x2"));
            try
            {
                Sharpen.Thread.Sleep(10000);
            }
            catch (Exception)
            {
            }
            dfsx.Append(filepath);
        }
Example #25
 // Do small appends.
 /// <exception cref="System.IO.IOException"/>
 internal virtual void DoSmallAppends(Path file, DistributedFileSystem fs, int iterations
                                      )
 {
     for (int i = 0; i < iterations; i++)
     {
         FSDataOutputStream stm;
         try
         {
             stm = fs.Append(file);
         }
         catch (IOException)
         {
             // If another thread is already appending, skip this time.
             continue;
         }
         // Failure in write or close will be terminal.
         AppendTestUtil.Write(stm, 0, 123);
         stm.Close();
     }
 }
Example #26
        public virtual void TestAppendAfterSoftLimit()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetInt(DFSConfigKeys.DfsReplicationKey, 1);
            conf.SetBoolean(DFSConfigKeys.DfsSupportAppendKey, true);
            //Set small soft-limit for lease
            long           softLimit = 1L;
            long           hardLimit = 9999999L;
            MiniDFSCluster cluster   = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();

            cluster.SetLeasePeriod(softLimit, hardLimit);
            cluster.WaitActive();
            FileSystem fs  = cluster.GetFileSystem();
            FileSystem fs2 = new DistributedFileSystem();

            fs2.Initialize(fs.GetUri(), conf);
            Path testPath = new Path("/testAppendAfterSoftLimit");

            byte[] fileContents = AppendTestUtil.InitBuffer(32);
            // create a new file without closing
            FSDataOutputStream @out = fs.Create(testPath);

            @out.Write(fileContents);
            //Wait for > soft-limit
            Sharpen.Thread.Sleep(250);
            try
            {
                FSDataOutputStream appendStream2 = fs2.Append(testPath);
                appendStream2.Write(fileContents);
                appendStream2.Close();
                NUnit.Framework.Assert.AreEqual(fileContents.Length, fs.GetFileStatus(testPath).GetLen());
            }
            finally
            {
                fs.Close();
                fs2.Close();
                cluster.Shutdown();
            }
        }
Example #27
        public virtual void TestAppend2Twice()
        {
            Configuration         conf    = new HdfsConfiguration();
            MiniDFSCluster        cluster = new MiniDFSCluster.Builder(conf).Build();
            DistributedFileSystem fs1     = cluster.GetFileSystem();
            FileSystem            fs2     = AppendTestUtil.CreateHdfsWithDifferentUsername(conf);

            try
            {
                Path   p            = new Path("/testAppendTwice/foo");
                int    len          = 1 << 16;
                byte[] fileContents = AppendTestUtil.InitBuffer(len);
                {
                    // create a new file with a full block.
                    FSDataOutputStream @out = fs2.Create(p, true, 4096, (short)1, len);
                    @out.Write(fileContents, 0, len);
                    @out.Close();
                }
                //1st append does not add any data so that the last block remains full
                //and the last block in INodeFileUnderConstruction is a BlockInfo
                //but not BlockInfoUnderConstruction.
                ((DistributedFileSystem)fs2).Append(p, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock
                                                                  ), 4096, null);
                // 2nd append should get AlreadyBeingCreatedException
                fs1.Append(p);
                NUnit.Framework.Assert.Fail();
            }
            catch (RemoteException re)
            {
                AppendTestUtil.Log.Info("Got an exception:", re);
                NUnit.Framework.Assert.AreEqual(typeof(AlreadyBeingCreatedException).FullName, re.GetClassName());
            }
            finally
            {
                fs2.Close();
                fs1.Close();
                cluster.Shutdown();
            }
        }
Example #28
        //
        // verify that the data written to the full blocks are sane
        //
        /// <exception cref="System.IO.IOException"/>
        private void CheckFile(FileSystem fileSys, Path name, int repl)
        {
            bool done = false;

            // wait till all full blocks are confirmed by the datanodes.
            while (!done)
            {
                try
                {
                    Sharpen.Thread.Sleep(1000);
                }
                catch (Exception)
                {
                }
                done = true;
                BlockLocation[] locations = fileSys.GetFileBlockLocations(fileSys.GetFileStatus(name
                                                                                                ), 0, AppendTestUtil.FileSize);
                if (locations.Length < AppendTestUtil.NumBlocks)
                {
                    System.Console.Out.WriteLine("Number of blocks found " + locations.Length);
                    done = false;
                    continue;
                }
                for (int idx = 0; idx < AppendTestUtil.NumBlocks; idx++)
                {
                    if (locations[idx].GetHosts().Length < repl)
                    {
                        System.Console.Out.WriteLine("Block index " + idx + " not yet replicated.");
                        done = false;
                        break;
                    }
                }
            }
            byte[] expected = new byte[AppendTestUtil.NumBlocks * AppendTestUtil.BlockSize];
            System.Array.Copy(fileContents, 0, expected, 0, expected.Length);
            // do a sanity check. Read the file
            // do not check file status since the file is not yet closed.
            AppendTestUtil.CheckFullFile(fileSys, name, AppendTestUtil.NumBlocks * AppendTestUtil.BlockSize,
                                         expected, "Read 1", false);
        }
Example #29
        public virtual void TestTC1ForAppend2()
        {
            Path p = new Path("/TC1/foo2");
            //a. Create file and write one block of data. Close file.
            int len1 = (int)BlockSize;
            {
                FSDataOutputStream @out = fs.Create(p, false, buffersize, Replication, BlockSize);
                AppendTestUtil.Write(@out, 0, len1);
                @out.Close();
            }
            // Reopen file to append. Append half block of data. Close file.
            int len2 = (int)BlockSize / 2;

            {
                FSDataOutputStream @out = fs.Append(p, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock
                                                                  ), 4096, null);
                AppendTestUtil.Write(@out, len1, len2);
                @out.Close();
            }
            // b. Reopen file and read 1.5 blocks worth of data. Close file.
            AppendTestUtil.Check(fs, p, len1 + len2);
        }