Example #1
 /// <summary>Create an empty edits log file.</summary>
 /// <exception cref="System.IO.IOException"/>
 public override void Create(int layoutVersion)
 {
     // Drop any existing contents and rewind to the start of the file.
     fc.Truncate(0);
     fc.Position(0);
     // Write the layout-version header into the current buffer, then flush it to disk.
     WriteHeader(layoutVersion, doubleBuf.GetCurrentBuf());
     SetReadyToFlush();
     Flush();
 }
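For context, the same "truncate, rewind, rewrite the header" sequence can be expressed with plain .NET System.IO instead of the Sharpen FileChannel wrapper used above. A minimal sketch, assuming a hypothetical helper with placeholder names (editsPath, headerBytes) rather than the real HDFS layout-version header:

// Sketch (assumption: plain System.IO, not the Sharpen wrapper; names are placeholders).
using System.IO;

static class EditLogSketch
{
    // Reset a log file to empty and write a fresh header, mirroring the
    // Truncate(0) / Position(0) / WriteHeader / Flush sequence above.
    public static void ResetLogFile(string editsPath, byte[] headerBytes)
    {
        using (var fs = new FileStream(editsPath, FileMode.OpenOrCreate, FileAccess.ReadWrite))
        {
            fs.SetLength(0);                              // drop existing contents (fc.Truncate(0))
            fs.Seek(0, SeekOrigin.Begin);                 // rewind (fc.Position(0))
            fs.Write(headerBytes, 0, headerBytes.Length); // write the header bytes
            fs.Flush(true);                               // push data to disk (Flush())
        }
    }
}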
Example #2
        public virtual void TestRecoveryMode()
        {
            // edits generated by nnHelper (MiniDFSCluster), should have all op codes
            // binary, XML, reparsed binary
            string           edits = nnHelper.GenerateEdits();
            FileOutputStream os    = new FileOutputStream(edits, true);
            // Corrupt the file by truncating the end
            FileChannel editsFile = os.GetChannel();

            editsFile.Truncate(editsFile.Size() - 5);
            string editsParsedXml  = folder.NewFile("editsRecoveredParsed.xml").GetAbsolutePath();
            string editsReparsed   = folder.NewFile("editsRecoveredReparsed").GetAbsolutePath();
            string editsParsedXml2 = folder.NewFile("editsRecoveredParsed2.xml").GetAbsolutePath();

            // Can't read the corrupted file without recovery mode
            NUnit.Framework.Assert.AreEqual(-1, RunOev(edits, editsParsedXml, "xml", false));
            // parse to XML then back to binary
            NUnit.Framework.Assert.AreEqual(0, RunOev(edits, editsParsedXml, "xml", true));
            NUnit.Framework.Assert.AreEqual(0, RunOev(editsParsedXml, editsReparsed, "binary", false));
            NUnit.Framework.Assert.AreEqual(0, RunOev(editsReparsed, editsParsedXml2, "xml", false));
            // judgment time
            NUnit.Framework.Assert.IsTrue("Test round trip",
                FileUtils.ContentEqualsIgnoreEOL(new FilePath(editsParsedXml), new FilePath(editsParsedXml2), "UTF-8"));
            os.Close();
        }
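The corruption step above chops the last five bytes off the edits file via the channel. The same idea in plain .NET, as a hedged sketch with a hypothetical helper name (not part of the Hadoop test):

// Sketch (assumption: plain System.IO): drop the last few bytes of a file to
// simulate a torn/incomplete write, like editsFile.Truncate(editsFile.Size() - 5).
using System;
using System.IO;

static class CorruptTailSketch
{
    public static void TruncateTail(string path, int bytesToDrop)
    {
        using (var fs = new FileStream(path, FileMode.Open, FileAccess.ReadWrite))
        {
            long newLength = Math.Max(0, fs.Length - bytesToDrop);
            fs.SetLength(newLength);   // shrink the file in place
        }
    }
}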
Example #3
 /// <summary>Truncate a block file</summary>
 /// <exception cref="System.IO.IOException"/>
 private long TruncateBlockFile()
 {
     lock (fds)
     {
         foreach (ReplicaInfo b in FsDatasetTestUtil.GetReplicas(fds, bpid))
         {
             FilePath f  = b.GetBlockFile();
             FilePath mf = b.GetMetaFile();
             // Truncate a block file that has a corresponding metadata file
             if (f.Exists() && f.Length() != 0 && mf.Exists())
             {
                 FileOutputStream s       = null;
                 FileChannel      channel = null;
                 try
                 {
                     s       = new FileOutputStream(f);
                     channel = s.GetChannel();
                     channel.Truncate(0);
                     Log.Info("Truncated block file " + f.GetAbsolutePath());
                     return b.GetBlockId();
                 }
                 finally
                 {
                     IOUtils.Cleanup(Log, channel, s);
                 }
             }
         }
     }
     return 0;
 }
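The try/finally plus IOUtils.Cleanup pattern above comes straight from the Java original. In idiomatic C#, the same "open, truncate to zero, always release the handle" step would normally use a using block; a minimal sketch assuming only plain System.IO (no Hadoop types):

// Sketch (assumption: plain System.IO, hypothetical helper name).
using System.IO;

static class TruncateSketch
{
    public static void TruncateToZero(string blockFilePath)
    {
        using (var fs = new FileStream(blockFilePath, FileMode.Open, FileAccess.ReadWrite))
        {
            fs.SetLength(0);   // same effect as channel.Truncate(0)
        }                      // Dispose() closes the stream even if SetLength throws,
                               // playing the role of IOUtils.Cleanup(Log, channel, s)
    }
}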
Example #4
        /// <summary>Check if DFS can handle corrupted CRC blocks.</summary>
        /// <exception cref="System.Exception"/>
        private void Thistest(Configuration conf, DFSTestUtil util)
        {
            MiniDFSCluster cluster      = null;
            int            numDataNodes = 2;
            short          replFactor   = 2;
            Random         random       = new Random();

            // Set short retry timeouts so this test runs faster
            conf.SetInt(DFSConfigKeys.DfsClientRetryWindowBase, 10);
            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDataNodes).Build();
                cluster.WaitActive();
                FileSystem fs = cluster.GetFileSystem();
                util.CreateFiles(fs, "/srcdat", replFactor);
                util.WaitReplication(fs, "/srcdat", (short)2);
                // Now deliberately remove/truncate meta blocks from the first
                // directory of the first datanode. The complete absence of a meta
                // file prevents this Datanode from sending data to another datanode.
                // However, a client is still allowed access to this block.
                //
                FilePath storageDir = cluster.GetInstanceStorageDir(0, 1);
                string   bpid       = cluster.GetNamesystem().GetBlockPoolId();
                FilePath data_dir   = MiniDFSCluster.GetFinalizedDir(storageDir, bpid);
                NUnit.Framework.Assert.IsTrue("data directory does not exist", data_dir.Exists());
                FilePath[] blocks = data_dir.ListFiles();
                NUnit.Framework.Assert.IsTrue("Blocks do not exist in data-dir", (blocks != null) &&
                                              (blocks.Length > 0));
                int num = 0;
                for (int idx = 0; idx < blocks.Length; idx++)
                {
                    if (blocks[idx].GetName().StartsWith(Block.BlockFilePrefix) && blocks[idx].GetName().EndsWith(".meta"))
                    {
                        num++;
                        if (num % 3 == 0)
                        {
                            //
                            // remove .meta file
                            //
                            System.Console.Out.WriteLine("Deliberately removing file " + blocks[idx].GetName(
                                                             ));
                            NUnit.Framework.Assert.IsTrue("Cannot remove file.", blocks[idx].Delete());
                        }
                        else
                        {
                            if (num % 3 == 1)
                            {
                                //
                                // shorten .meta file
                                //
                                RandomAccessFile file    = new RandomAccessFile(blocks[idx], "rw");
                                FileChannel      channel = file.GetChannel();
                                int newsize = random.Next((int)channel.Size() / 2);
                                System.Console.Out.WriteLine("Deliberately truncating file " + blocks[idx].GetName
                                                                 () + " to size " + newsize + " bytes.");
                                channel.Truncate(newsize);
                                file.Close();
                            }
                            else
                            {
                                //
                                // corrupt a few bytes of the metafile
                                //
                                RandomAccessFile file     = new RandomAccessFile(blocks[idx], "rw");
                                FileChannel      channel  = file.GetChannel();
                                long             position = 0;
                                //
                                // The very first time, corrupt the meta header at offset 0
                                //
                                if (num != 2)
                                {
                                    position = (long)random.Next((int)channel.Size());
                                }
                                int    length = random.Next((int)(channel.Size() - position + 1));
                                byte[] buffer = new byte[length];
                                random.NextBytes(buffer);
                                channel.Write(ByteBuffer.Wrap(buffer), position);
                                System.Console.Out.WriteLine("Deliberately corrupting file " + blocks[idx].GetName
                                                                 () + " at offset " + position + " length " + length);
                                file.Close();
                            }
                        }
                    }
                }
                //
                // Now deliberately corrupt all meta blocks from the second
                // directory of the first datanode
                //
                storageDir = cluster.GetInstanceStorageDir(0, 1);
                data_dir   = MiniDFSCluster.GetFinalizedDir(storageDir, bpid);
                NUnit.Framework.Assert.IsTrue("data directory does not exist", data_dir.Exists());
                blocks = data_dir.ListFiles();
                NUnit.Framework.Assert.IsTrue("Blocks do not exist in data-dir", (blocks != null) &&
                                              (blocks.Length > 0));
                int      count    = 0;
                FilePath previous = null;
                for (int idx_1 = 0; idx_1 < blocks.Length; idx_1++)
                {
                    if (blocks[idx_1].GetName().StartsWith("blk_") && blocks[idx_1].GetName().EndsWith
                            (".meta"))
                    {
                        //
                        // Move the previous metafile into the current one.
                        //
                        count++;
                        if (count % 2 == 0)
                        {
                            System.Console.Out.WriteLine("Deliberately insertimg bad crc into files " + blocks
                                                         [idx_1].GetName() + " " + previous.GetName());
                            NUnit.Framework.Assert.IsTrue("Cannot remove file.", blocks[idx_1].Delete());
                            NUnit.Framework.Assert.IsTrue("Cannot corrupt meta file.", previous.RenameTo(blocks
                                                                                                         [idx_1]));
                            NUnit.Framework.Assert.IsTrue("Cannot recreate empty meta file.", previous.CreateNewFile
                                                              ());
                            previous = null;
                        }
                        else
                        {
                            previous = blocks[idx_1];
                        }
                    }
                }
                //
                // Only one replica is possibly corrupted. The other replica should still
                // be good. Verify.
                //
                NUnit.Framework.Assert.IsTrue("Corrupted replicas not handled properly.", util.CheckFiles
                                                  (fs, "/srcdat"));
                System.Console.Out.WriteLine("All File still have a valid replica");
                //
                // set replication factor back to 1. This causes only one replica
                // of each block to remain in HDFS. The check is to make sure that
                // the corrupted replica generated above is the one that gets deleted.
                // This test is currently disabled until HADOOP-1557 is solved.
                //
                util.SetReplication(fs, "/srcdat", (short)1);
                //util.waitReplication(fs, "/srcdat", (short)1);
                //System.out.println("All Files done with removing replicas");
                //assertTrue("Excess replicas deleted. Corrupted replicas found.",
                //           util.checkFiles(fs, "/srcdat"));
                System.Console.Out.WriteLine("The excess-corrupted-replica test is disabled " + " pending HADOOP-1557"
                                             );
                util.Cleanup(fs, "/srcdat");
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
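The test above relies on two corruption primitives: shorten a meta file to a random size (channel.Truncate) and overwrite a random span with random bytes (channel.Write at a position). A hedged sketch of both in plain .NET, with hypothetical helper names, separated from the Hadoop test types:

// Sketch (assumption: plain System.IO, hypothetical helper names).
using System;
using System.IO;

static class MetaCorruptionSketch
{
    // Shorten a file to a random size below half its current length,
    // mirroring channel.Truncate(newsize) above.
    public static void ShortenFile(string path, Random random)
    {
        using (var fs = new FileStream(path, FileMode.Open, FileAccess.ReadWrite))
        {
            int newSize = random.Next((int)(fs.Length / 2));
            fs.SetLength(newSize);
        }
    }

    // Overwrite `length` bytes at `position` with random data,
    // mirroring channel.Write(ByteBuffer.Wrap(buffer), position) above.
    public static void CorruptBytes(string path, long position, int length, Random random)
    {
        using (var fs = new FileStream(path, FileMode.Open, FileAccess.ReadWrite))
        {
            var garbage = new byte[length];
            random.NextBytes(garbage);
            fs.Seek(position, SeekOrigin.Begin);
            fs.Write(garbage, 0, length);
        }
    }
}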