Example #1
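 /// <summary>Maps a read-only window of the pack file; if the first mapping attempt fails, forces garbage collection to release stale mappings and retries once.</summary>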
 /// <exception cref="System.IO.IOException"></exception>
 internal virtual ByteWindow Mmap(long pos, int size)
 {
     lock (readLock)
     {
         if (length < pos + size)
         {
             size = (int)(length - pos);
         }
         MappedByteBuffer map;
         try
         {
             map = fd.GetChannel().Map(FileChannel.MapMode.READ_ONLY, pos, size);
         }
         catch (IOException)
         {
             // The most likely reason this failed is the JVM has run out
             // of virtual memory. We need to discard quickly, and try to
             // force the GC to finalize and release any existing mappings.
             //
             System.GC.Collect();
             System.GC.WaitForPendingFinalizers();
             map = fd.GetChannel().Map(FileChannel.MapMode.READ_ONLY, pos, size);
         }
         if (map.HasArray())
         {
             return(new ByteArrayWindow(this, pos, ((byte[])map.Array())));
         }
         return(new ByteBufferWindow(this, pos, map));
     }
 }
Example #2
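 /// <summary>Wraps the region [position, position + count) of the given file and records the shuffle and readahead settings for later transfers.</summary>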
 /// <exception cref="System.IO.IOException"/>
 public FadvisedFileRegion(RandomAccessFile file, long position, long count,
                           bool manageOsCache, int readaheadLength, ReadaheadPool readaheadPool,
                           string identifier, int shuffleBufferSize, bool shuffleTransferToAllowed)
     : base(file.GetChannel(), position, count)
 {
     this.manageOsCache            = manageOsCache;
     this.readaheadLength          = readaheadLength;
     this.readaheadPool            = readaheadPool;
     this.fd                       = file.GetFD();
     this.identifier               = identifier;
     this.fileChannel              = file.GetChannel();
     this.count                    = count;
     this.position                 = position;
     this.shuffleBufferSize        = shuffleBufferSize;
     this.shuffleTransferToAllowed = shuffleTransferToAllowed;
 }
Example #3
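 /// <summary>Receives the incoming pack into a temporary ".pack" file, appends the trailing hash, writes the index, and renames both into place; temporary files are cleaned up on failure.</summary>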
 /// <exception cref="System.IO.IOException"></exception>
 public override PackLock Parse(ProgressMonitor receiving, ProgressMonitor resolving)
 {
     tmpPack = FilePath.CreateTempFile("incoming_", ".pack", db.GetDirectory());
     tmpIdx  = new FilePath(db.GetDirectory(), BaseName(tmpPack) + ".idx");
     try
     {
         @out = new RandomAccessFile(tmpPack, "rw");
         base.Parse(receiving, resolving);
         @out.Seek(packEnd);
         @out.Write(packHash);
         @out.GetChannel().Force(true);
         @out.Close();
         WriteIdx();
         tmpPack.SetReadOnly();
         tmpIdx.SetReadOnly();
         return(RenameAndOpenPack(GetLockMessage()));
     }
     finally
     {
         if (def != null)
         {
             def.Finish();
         }
         try
         {
             if (@out != null && @out.GetChannel().IsOpen())
             {
                 @out.Close();
             }
         }
          catch (IOException)
          {
              // Ignored. We want to delete the file.
          }
          CleanupTemporaryFiles();
     }
 }
Example #4
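        /// <summary>Writes a buffer to a FileChannel with IOUtils.WriteFully, both from the current position and from an explicit offset, and verifies the file contents after each write.</summary>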
        public virtual void TestWriteFully()
        {
            int InputBufferLen = 10000;
            int Halfway        = 1 + (InputBufferLen / 2);

            byte[] input = new byte[InputBufferLen];
            for (int i = 0; i < input.Length; i++)
            {
                input[i] = unchecked ((byte)(i & unchecked ((int)(0xff))));
            }
            byte[] output = new byte[input.Length];
            try
            {
                RandomAccessFile raf = new RandomAccessFile(TestFileName, "rw");
                FileChannel      fc  = raf.GetChannel();
                ByteBuffer       buf = ByteBuffer.Wrap(input);
                IOUtils.WriteFully(fc, buf);
                raf.Seek(0);
                raf.Read(output);
                for (int i_1 = 0; i_1 < input.Length; i_1++)
                {
                    Assert.Equal(input[i_1], output[i_1]);
                }
                buf.Rewind();
                IOUtils.WriteFully(fc, buf, Halfway);
                for (int i_2 = 0; i_2 < Halfway; i_2++)
                {
                    Assert.Equal(input[i_2], output[i_2]);
                }
                raf.Seek(0);
                raf.Read(output);
                for (int i_3 = Halfway; i_3 < input.Length; i_3++)
                {
                    Assert.Equal(input[i_3 - Halfway], output[i_3]);
                }
            }
            finally
            {
                FilePath f = new FilePath(TestFileName);
                if (f.Exists())
                {
                    f.Delete();
                }
            }
        }
Example #5
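        /// <summary>Picks a unique port and guards it with a ".lock" file under Basetest so concurrent tests cannot claim the same port; returns the corresponding "127.0.0.1:port" string.</summary>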
        private string InitHostPort()
        {
            Basetest.Mkdirs();
            int port;

            for (; ;)
            {
                port = PortAssignment.Unique();
                FileLock Lock = null;
                portNumLockFile = null;
                try
                {
                    try
                    {
                        portNumFile     = new FilePath(Basetest, port + ".lock");
                        portNumLockFile = new RandomAccessFile(portNumFile, "rw");
                        try
                        {
                            Lock = portNumLockFile.GetChannel().TryLock();
                        }
                        catch (OverlappingFileLockException)
                        {
                            continue;
                        }
                    }
                    finally
                    {
                        if (Lock != null)
                        {
                            break;
                        }
                        if (portNumLockFile != null)
                        {
                            portNumLockFile.Close();
                        }
                    }
                }
                catch (IOException e)
                {
                    throw new RuntimeException(e);
                }
            }
            return("127.0.0.1:" + port);
        }
Example #6
 /// <summary>Read the cache file into memory.</summary>
 /// <remarks>Read the cache file into memory.</remarks>
 /// <exception cref="System.IO.IOException">System.IO.IOException</exception>
 public virtual void Read()
 {
     changed   = false;
     statDirty = false;
     if (!cacheFile.Exists())
     {
         header = null;
         entries.Clear();
         lastCacheTime = 0;
         return;
     }
     cache = new RandomAccessFile(cacheFile, "r");
     try
     {
         FileChannel channel = cache.GetChannel();
         ByteBuffer  buffer  = ByteBuffer.AllocateDirect((int)cacheFile.Length());
         buffer.Order(ByteOrder.BIG_ENDIAN);
         int j = channel.Read(buffer);
         if (j != buffer.Capacity())
         {
             throw new IOException(MessageFormat.Format(JGitText.Get().couldNotReadIndexInOneGo
                                                        , j, buffer.Capacity()));
         }
         buffer.Flip();
         header = new GitIndex.Header(buffer);
         entries.Clear();
         for (int i = 0; i < header.entries; ++i)
         {
             GitIndex.Entry entry    = new GitIndex.Entry(this, buffer);
             GitIndex.Entry existing = entries.Get(entry.name);
             entries.Put(entry.name, entry);
             if (existing != null)
             {
                 entry.stages |= existing.stages;
             }
         }
         lastCacheTime = cacheFile.LastModified();
     }
     finally
     {
         cache.Close();
     }
 }
Example #7
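        /// <summary>Opens the backing file on first use: loads the stored long value (falling back to the default if the file is missing or empty) and keeps the channel open for later writes.</summary>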
        /// <exception cref="System.IO.IOException"/>
        private void LazyOpen()
        {
            if (ch != null)
            {
                return;
            }
            // Load current value.
            byte[] data = null;
            try
            {
                data = Files.ToByteArray(file);
            }
            catch (FileNotFoundException)
            {
                // Expected - this will use default value.
            }
            if (data != null && data.Length != 0)
            {
                if (data.Length != Longs.Bytes)
                {
                    throw new IOException("File " + file + " had invalid length: " + data.Length);
                }
                value = Longs.FromByteArray(data);
            }
            else
            {
                value = defaultVal;
            }
            // Now open file for future writes.
            RandomAccessFile raf = new RandomAccessFile(file, "rw");

            try
            {
                ch = raf.GetChannel();
            }
            finally
            {
                if (ch == null)
                {
                    IOUtils.CloseStream(raf);
                }
            }
        }
Example #8
        /// <summary>
        /// Asserts that the storage lock file in the given directory has been
        /// released.
        /// </summary>
        /// <remarks>
        /// Asserts that the storage lock file in the given directory has been
        /// released.  This method works by trying to acquire the lock file itself.  If
        /// locking fails here, then the main code must have failed to release it.
        /// </remarks>
        /// <param name="dir">the storage directory to check</param>
        /// <exception cref="System.IO.IOException">if there is an unexpected I/O error</exception>
        public static void AssertFileLockReleased(string dir)
        {
            StorageLocation sl       = StorageLocation.Parse(dir);
            FilePath        lockFile = new FilePath(sl.GetFile(), Storage.StorageFileLock);

            try
            {
                using (RandomAccessFile raf = new RandomAccessFile(lockFile, "rws"))
                {
                    using (FileChannel channel = raf.GetChannel())
                    {
                        FileLock Lock = channel.TryLock();
                            NUnit.Framework.Assert.IsNotNull(string.Format("Lock file at {0} appears to be held by a different process.",
                                                                           lockFile.GetAbsolutePath()), Lock);
                        if (Lock != null)
                        {
                            try
                            {
                                Lock.Release();
                            }
                            catch (IOException e)
                            {
                                FsDatasetImpl.Log.Warn(string.Format("I/O error releasing file lock {0}.",
                                                                     lockFile.GetAbsolutePath()), e);
                                throw;
                            }
                        }
                    }
                }
            }
            catch (OverlappingFileLockException)
            {
                NUnit.Framework.Assert.Fail(string.Format("Must release lock file at {0}.",
                                                          lockFile.GetAbsolutePath()));
            }
        }
Example #9
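        /// <summary>Fills a 128 MB source file through a memory-mapped buffer, copies it with NativeIO.CopyFileUnbuffered, and checks that source and destination lengths match.</summary>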
        /// <exception cref="System.Exception"/>
        public virtual void TestCopyFileUnbuffered()
        {
            string   MethodName = GenericTestUtils.GetMethodName();
            FilePath srcFile    = new FilePath(TestDir, MethodName + ".src.dat");
            FilePath dstFile    = new FilePath(TestDir, MethodName + ".dst.dat");
            int              fileSize   = unchecked ((int)(0x8000000)); // 128 MB
            int              Seed       = unchecked ((int)(0xBEEF));
            int              batchSize  = 4096;
            int              numBatches = fileSize / batchSize;
            Random           rb         = new Random(Seed);
            FileChannel      channel    = null;
            RandomAccessFile raSrcFile  = null;

            try
            {
                raSrcFile = new RandomAccessFile(srcFile, "rw");
                channel   = raSrcFile.GetChannel();
                byte[]           bytesToWrite = new byte[batchSize];
                MappedByteBuffer mapBuf;
                mapBuf = channel.Map(FileChannel.MapMode.ReadWrite, 0, fileSize);
                for (int i = 0; i < numBatches; i++)
                {
                    rb.NextBytes(bytesToWrite);
                    mapBuf.Put(bytesToWrite);
                }
                NativeIO.CopyFileUnbuffered(srcFile, dstFile);
                Assert.Equal(srcFile.Length(), dstFile.Length());
            }
            finally
            {
                IOUtils.Cleanup(Log, channel);
                IOUtils.Cleanup(Log, raSrcFile);
                FileUtils.DeleteQuietly(TestDir);
            }
        }
Example #10
        /// <summary>check if DFS can handle corrupted CRC blocks</summary>
        /// <exception cref="System.Exception"/>
        private void Thistest(Configuration conf, DFSTestUtil util)
        {
            MiniDFSCluster cluster      = null;
            int            numDataNodes = 2;
            short          replFactor   = 2;
            Random         random       = new Random();

            // Set short retry timeouts so this test runs faster
            conf.SetInt(DFSConfigKeys.DfsClientRetryWindowBase, 10);
            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDataNodes).Build();
                cluster.WaitActive();
                FileSystem fs = cluster.GetFileSystem();
                util.CreateFiles(fs, "/srcdat", replFactor);
                util.WaitReplication(fs, "/srcdat", (short)2);
                // Now deliberately remove/truncate meta blocks from the first
                // directory of the first datanode. The complete absence of a meta
                // file prevents this Datanode from sending data to another datanode.
                // However, a client is allowed access to this block.
                //
                FilePath storageDir = cluster.GetInstanceStorageDir(0, 1);
                string   bpid       = cluster.GetNamesystem().GetBlockPoolId();
                FilePath data_dir   = MiniDFSCluster.GetFinalizedDir(storageDir, bpid);
                NUnit.Framework.Assert.IsTrue("data directory does not exist", data_dir.Exists());
                FilePath[] blocks = data_dir.ListFiles();
                NUnit.Framework.Assert.IsTrue("Blocks do not exist in data-dir", (blocks != null) &&
                                              (blocks.Length > 0));
                int num = 0;
                for (int idx = 0; idx < blocks.Length; idx++)
                {
                    if (blocks[idx].GetName().StartsWith(Block.BlockFilePrefix) && blocks[idx].GetName
                            ().EndsWith(".meta"))
                    {
                        num++;
                        if (num % 3 == 0)
                        {
                            //
                            // remove .meta file
                            //
                            System.Console.Out.WriteLine("Deliberately removing file " + blocks[idx].GetName(
                                                             ));
                            NUnit.Framework.Assert.IsTrue("Cannot remove file.", blocks[idx].Delete());
                        }
                        else
                        {
                            if (num % 3 == 1)
                            {
                                //
                                // shorten .meta file
                                //
                                RandomAccessFile file    = new RandomAccessFile(blocks[idx], "rw");
                                FileChannel      channel = file.GetChannel();
                                int newsize = random.Next((int)channel.Size() / 2);
                                System.Console.Out.WriteLine("Deliberately truncating file " + blocks[idx].GetName
                                                                 () + " to size " + newsize + " bytes.");
                                channel.Truncate(newsize);
                                file.Close();
                            }
                            else
                            {
                                //
                                // corrupt a few bytes of the metafile
                                //
                                RandomAccessFile file     = new RandomAccessFile(blocks[idx], "rw");
                                FileChannel      channel  = file.GetChannel();
                                long             position = 0;
                                //
                                // The very first time, corrupt the meta header at offset 0
                                //
                                if (num != 2)
                                {
                                    position = (long)random.Next((int)channel.Size());
                                }
                                int    length = random.Next((int)(channel.Size() - position + 1));
                                byte[] buffer = new byte[length];
                                random.NextBytes(buffer);
                                channel.Write(ByteBuffer.Wrap(buffer), position);
                                System.Console.Out.WriteLine("Deliberately corrupting file " + blocks[idx].GetName
                                                                 () + " at offset " + position + " length " + length);
                                file.Close();
                            }
                        }
                    }
                }
                //
                // Now deliberately corrupt all meta blocks from the second
                // directory of the first datanode
                //
                storageDir = cluster.GetInstanceStorageDir(0, 1);
                data_dir   = MiniDFSCluster.GetFinalizedDir(storageDir, bpid);
                NUnit.Framework.Assert.IsTrue("data directory does not exist", data_dir.Exists());
                blocks = data_dir.ListFiles();
                NUnit.Framework.Assert.IsTrue("Blocks do not exist in data-dir", (blocks != null) &&
                                              (blocks.Length > 0));
                int      count    = 0;
                FilePath previous = null;
                for (int idx_1 = 0; idx_1 < blocks.Length; idx_1++)
                {
                    if (blocks[idx_1].GetName().StartsWith("blk_") && blocks[idx_1].GetName().EndsWith
                            (".meta"))
                    {
                        //
                        // Move the previous metafile into the current one.
                        //
                        count++;
                        if (count % 2 == 0)
                        {
                            System.Console.Out.WriteLine("Deliberately insertimg bad crc into files " + blocks
                                                         [idx_1].GetName() + " " + previous.GetName());
                            NUnit.Framework.Assert.IsTrue("Cannot remove file.", blocks[idx_1].Delete());
                            NUnit.Framework.Assert.IsTrue("Cannot corrupt meta file.", previous.RenameTo(blocks
                                                                                                         [idx_1]));
                            NUnit.Framework.Assert.IsTrue("Cannot recreate empty meta file.", previous.CreateNewFile
                                                              ());
                            previous = null;
                        }
                        else
                        {
                            previous = blocks[idx_1];
                        }
                    }
                }
                //
                // Only one replica is possibly corrupted. The other replica should still
                // be good. Verify.
                //
                NUnit.Framework.Assert.IsTrue("Corrupted replicas not handled properly.", util.CheckFiles
                                                  (fs, "/srcdat"));
                System.Console.Out.WriteLine("All File still have a valid replica");
                //
                // set replication factor back to 1. This causes only one replica of
                // each block to remain in HDFS. The check is to make sure that
                // the corrupted replica generated above is the one that gets deleted.
                // This test is currently disabled until HADOOP-1557 is solved.
                //
                util.SetReplication(fs, "/srcdat", (short)1);
                //util.waitReplication(fs, "/srcdat", (short)1);
                //System.out.println("All Files done with removing replicas");
                //assertTrue("Excess replicas deleted. Corrupted replicas found.",
                //           util.checkFiles(fs, "/srcdat"));
                System.Console.Out.WriteLine("The excess-corrupted-replica test is disabled " + " pending HADOOP-1557"
                                             );
                util.Cleanup(fs, "/srcdat");
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #11
        /// <summary>check if nn.getCorruptFiles() returns a file that has corrupted blocks</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestListCorruptFilesCorruptedBlock()
        {
            MiniDFSCluster cluster = null;
            Random         random  = new Random();

            try
            {
                Configuration conf = new HdfsConfiguration();
                // datanode scans directories
                conf.SetInt(DFSConfigKeys.DfsDatanodeDirectoryscanIntervalKey, 1);
                // datanode sends block reports
                conf.SetInt(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 3 * 1000);
                // Set short retry timeouts so this test runs faster
                conf.SetInt(DFSConfigKeys.DfsClientRetryWindowBase, 10);
                cluster = new MiniDFSCluster.Builder(conf).Build();
                FileSystem fs = cluster.GetFileSystem();
                // create two files with one block each
                DFSTestUtil util = new DFSTestUtil.Builder().SetName("testCorruptFilesCorruptedBlock"
                                                                     ).SetNumFiles(2).SetMaxLevels(1).SetMaxSize(512).Build();
                util.CreateFiles(fs, "/srcdat10");
                // fetch bad file list from namenode. There should be none.
                NameNode namenode = cluster.GetNameNode();
                ICollection <FSNamesystem.CorruptFileBlockInfo> badFiles = namenode.GetNamesystem(
                    ).ListCorruptFileBlocks("/", null);
                NUnit.Framework.Assert.IsTrue("Namenode has " + badFiles.Count + " corrupt files. Expecting None."
                                              , badFiles.Count == 0);
                // Now deliberately corrupt one block
                string   bpid       = cluster.GetNamesystem().GetBlockPoolId();
                FilePath storageDir = cluster.GetInstanceStorageDir(0, 1);
                FilePath data_dir   = MiniDFSCluster.GetFinalizedDir(storageDir, bpid);
                NUnit.Framework.Assert.IsTrue("data directory does not exist", data_dir.Exists());
                IList <FilePath> metaFiles = MiniDFSCluster.GetAllBlockMetadataFiles(data_dir);
                NUnit.Framework.Assert.IsTrue("Data directory does not contain any blocks or there was an "
                                              + "IO error", metaFiles != null && !metaFiles.IsEmpty());
                FilePath         metaFile = metaFiles[0];
                RandomAccessFile file     = new RandomAccessFile(metaFile, "rw");
                FileChannel      channel  = file.GetChannel();
                long             position = channel.Size() - 2;
                int    length             = 2;
                byte[] buffer             = new byte[length];
                random.NextBytes(buffer);
                channel.Write(ByteBuffer.Wrap(buffer), position);
                file.Close();
                Log.Info("Deliberately corrupting file " + metaFile.GetName() + " at offset " + position
                         + " length " + length);
                // read all files to trigger detection of corrupted replica
                try
                {
                    util.CheckFiles(fs, "/srcdat10");
                }
                catch (BlockMissingException)
                {
                    System.Console.Out.WriteLine("Received BlockMissingException as expected.");
                }
                catch (IOException e)
                {
                    NUnit.Framework.Assert.IsTrue("Corrupted replicas not handled properly. Expecting BlockMissingException "
                                                  + " but received IOException " + e, false);
                }
                // fetch bad file list from namenode. There should be one file.
                badFiles = namenode.GetNamesystem().ListCorruptFileBlocks("/", null);
                Log.Info("Namenode has bad files. " + badFiles.Count);
                NUnit.Framework.Assert.IsTrue("Namenode has " + badFiles.Count + " bad files. Expecting 1."
                                              , badFiles.Count == 1);
                util.Cleanup(fs, "/srcdat10");
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #12
        /// <summary>Check that listCorruptFileBlocks works while the namenode is still in safemode.</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestListCorruptFileBlocksInSafeMode()
        {
            MiniDFSCluster cluster = null;
            Random         random  = new Random();

            try
            {
                Configuration conf = new HdfsConfiguration();
                // datanode scans directories
                conf.SetInt(DFSConfigKeys.DfsDatanodeDirectoryscanIntervalKey, 1);
                // datanode sends block reports
                conf.SetInt(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 3 * 1000);
                // never leave safemode automatically
                conf.SetFloat(DFSConfigKeys.DfsNamenodeSafemodeThresholdPctKey, 1.5f);
                // start populating repl queues immediately
                conf.SetFloat(DFSConfigKeys.DfsNamenodeReplQueueThresholdPctKey, 0f);
                // Set short retry timeouts so this test runs faster
                conf.SetInt(DFSConfigKeys.DfsClientRetryWindowBase, 10);
                cluster = new MiniDFSCluster.Builder(conf).WaitSafeMode(false).Build();
                cluster.GetNameNodeRpc().SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave,
                                                     false);
                FileSystem fs = cluster.GetFileSystem();
                // create two files with one block each
                DFSTestUtil util = new DFSTestUtil.Builder().SetName("testListCorruptFileBlocksInSafeMode"
                                                                     ).SetNumFiles(2).SetMaxLevels(1).SetMaxSize(512).Build();
                util.CreateFiles(fs, "/srcdat10");
                // fetch bad file list from namenode. There should be none.
                ICollection <FSNamesystem.CorruptFileBlockInfo> badFiles = cluster.GetNameNode().GetNamesystem
                                                                               ().ListCorruptFileBlocks("/", null);
                NUnit.Framework.Assert.IsTrue("Namenode has " + badFiles.Count + " corrupt files. Expecting None."
                                              , badFiles.Count == 0);
                // Now deliberately corrupt one block
                FilePath storageDir = cluster.GetInstanceStorageDir(0, 0);
                FilePath data_dir   = MiniDFSCluster.GetFinalizedDir(storageDir, cluster.GetNamesystem
                                                                         ().GetBlockPoolId());
                NUnit.Framework.Assert.IsTrue("data directory does not exist", data_dir.Exists());
                IList <FilePath> metaFiles = MiniDFSCluster.GetAllBlockMetadataFiles(data_dir);
                NUnit.Framework.Assert.IsTrue("Data directory does not contain any blocks or there was an "
                                              + "IO error", metaFiles != null && !metaFiles.IsEmpty());
                FilePath         metaFile = metaFiles[0];
                RandomAccessFile file     = new RandomAccessFile(metaFile, "rw");
                FileChannel      channel  = file.GetChannel();
                long             position = channel.Size() - 2;
                int    length             = 2;
                byte[] buffer             = new byte[length];
                random.NextBytes(buffer);
                channel.Write(ByteBuffer.Wrap(buffer), position);
                file.Close();
                Log.Info("Deliberately corrupting file " + metaFile.GetName() + " at offset " + position
                         + " length " + length);
                // read all files to trigger detection of corrupted replica
                try
                {
                    util.CheckFiles(fs, "/srcdat10");
                }
                catch (BlockMissingException)
                {
                    System.Console.Out.WriteLine("Received BlockMissingException as expected.");
                }
                catch (IOException e)
                {
                    NUnit.Framework.Assert.IsTrue("Corrupted replicas not handled properly. " + "Expecting BlockMissingException "
                                                  + " but received IOException " + e, false);
                }
                // fetch bad file list from namenode. There should be one file.
                badFiles = cluster.GetNameNode().GetNamesystem().ListCorruptFileBlocks("/", null);
                Log.Info("Namenode has bad files. " + badFiles.Count);
                NUnit.Framework.Assert.IsTrue("Namenode has " + badFiles.Count + " bad files. Expecting 1."
                                              , badFiles.Count == 1);
                // restart namenode
                cluster.RestartNameNode(0);
                fs = cluster.GetFileSystem();
                // wait until replication queues have been initialized
                while (!cluster.GetNameNode().namesystem.IsPopulatingReplQueues())
                {
                    try
                    {
                        Log.Info("waiting for replication queues");
                        Sharpen.Thread.Sleep(1000);
                    }
                    catch (Exception)
                    {
                    }
                }
                // read all files to trigger detection of corrupted replica
                try
                {
                    util.CheckFiles(fs, "/srcdat10");
                }
                catch (BlockMissingException)
                {
                    System.Console.Out.WriteLine("Received BlockMissingException as expected.");
                }
                catch (IOException e)
                {
                    NUnit.Framework.Assert.IsTrue("Corrupted replicas not handled properly. " + "Expecting BlockMissingException "
                                                  + " but received IOException " + e, false);
                }
                // fetch bad file list from namenode. There should be one file.
                badFiles = cluster.GetNameNode().GetNamesystem().ListCorruptFileBlocks("/", null);
                Log.Info("Namenode has bad files. " + badFiles.Count);
                NUnit.Framework.Assert.IsTrue("Namenode has " + badFiles.Count + " bad files. Expecting 1."
                                              , badFiles.Count == 1);
                // check that we are still in safe mode
                NUnit.Framework.Assert.IsTrue("Namenode is not in safe mode", cluster.GetNameNode
                                                  ().IsInSafeMode());
                // now leave safe mode so that we can clean up
                cluster.GetNameNodeRpc().SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave,
                                                     false);
                util.Cleanup(fs, "/srcdat10");
            }
            catch (Exception e)
            {
                Log.Error(StringUtils.StringifyException(e));
                throw;
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #13
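        /// <summary>Writes a random input file, transfers a region of it through FadvisedFileRegion.CustomShuffleTransfer, and verifies the copied bytes match the source.</summary>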
        /// <exception cref="System.IO.IOException"/>
        public virtual void TestCustomShuffleTransfer()
        {
            FilePath absLogDir = new FilePath("target", typeof(TestFadvisedFileRegion).Name +
                                              "LocDir").GetAbsoluteFile();
            string testDirPath = StringUtils.Join(Path.Separator, new string[] { absLogDir.GetAbsolutePath
                                                                                     (), "testCustomShuffleTransfer" });
            FilePath testDir = new FilePath(testDirPath);

            testDir.Mkdirs();
            System.Console.Out.WriteLine(testDir.GetAbsolutePath());
            FilePath inFile  = new FilePath(testDir, "fileIn.out");
            FilePath outFile = new FilePath(testDir, "fileOut.out");

            //Initialize input file
            byte[] initBuff = new byte[FileSize];
            Random rand     = new Random();

            rand.NextBytes(initBuff);
            FileOutputStream @out = new FileOutputStream(inFile);

            try
            {
                @out.Write(initBuff);
            }
            finally
            {
                IOUtils.Cleanup(Log, @out);
            }
            //define position and count to read from a file region.
            int position = 2 * 1024 * 1024;
            int count    = 4 * 1024 * 1024 - 1;
            RandomAccessFile    inputFile  = null;
            RandomAccessFile    targetFile = null;
            WritableByteChannel target     = null;
            FadvisedFileRegion  fileRegion = null;

            try
            {
                inputFile  = new RandomAccessFile(inFile.GetAbsolutePath(), "r");
                targetFile = new RandomAccessFile(outFile.GetAbsolutePath(), "rw");
                target     = targetFile.GetChannel();
                NUnit.Framework.Assert.AreEqual(FileSize, inputFile.Length());
                //create FadvisedFileRegion
                fileRegion = new FadvisedFileRegion(inputFile, position, count, false, 0, null, null
                                                    , 1024, false);
                //test corner cases
                CustomShuffleTransferCornerCases(fileRegion, target, count);
                long pos = 0;
                long size;
                while ((size = fileRegion.CustomShuffleTransfer(target, pos)) > 0)
                {
                    pos += size;
                }
                //assert size
                NUnit.Framework.Assert.AreEqual(count, (int)pos);
                NUnit.Framework.Assert.AreEqual(count, targetFile.Length());
            }
            finally
            {
                if (fileRegion != null)
                {
                    fileRegion.ReleaseExternalResources();
                }
                IOUtils.Cleanup(Log, target);
                IOUtils.Cleanup(Log, targetFile);
                IOUtils.Cleanup(Log, inputFile);
            }
            //Read the target file and verify that copy is done correctly
            byte[]          buff = new byte[FileSize];
            FileInputStream @in  = new FileInputStream(outFile);

            try
            {
                int total = @in.Read(buff, 0, count);
                NUnit.Framework.Assert.AreEqual(count, total);
                for (int i = 0; i < count; i++)
                {
                    NUnit.Framework.Assert.AreEqual(initBuff[position + i], buff[i]);
                }
            }
            finally
            {
                IOUtils.Cleanup(Log, @in);
            }
            //delete files and folders
            inFile.Delete();
            outFile.Delete();
            testDir.Delete();
            absLogDir.Delete();
        }