/// <summary>
/// Verify that a file whose replica lives on RAM_DISK can be read through the
/// short-circuit read (SCR) path, and that the SCR read-statistics counters
/// reflect the bytes read.
/// </summary>
public virtual void TestRamDiskShortCircuitRead()
{
    // Capacity for 1 replica + delta; short-circuit reads enabled.
    StartUpCluster(ReplFactor, new StorageType[] { StorageType.RamDisk, StorageType.Default }, 2 * BlockSize - 1, true);
    string MethodName = GenericTestUtils.GetMethodName();
    int Seed = unchecked((int)(0xFADED));
    Path path = new Path("/" + MethodName + ".dat");
    MakeRandomTestFile(path, BlockSize, true, Seed);
    EnsureFileReplicasOnStorageType(path, StorageType.RamDisk);
    // Sleep for a short time to allow the lazy writer thread to do its job
    Sharpen.Thread.Sleep(3 * LazyWriterIntervalSec * 1000);
    //assertThat(verifyReadRandomFile(path, BLOCK_SIZE, SEED), is(true));
    // BUGFIX: the stream was previously opened once before the try block and
    // again inside it, leaking the first handle. Open it exactly once here so
    // the finally clause closes the only stream ever created.
    FSDataInputStream fis = fs.Open(path);
    // Verify SCR read counters
    try
    {
        byte[] buf = new byte[BufferLength];
        fis.Read(0, buf, 0, BufferLength);
        HdfsDataInputStream dfsis = (HdfsDataInputStream)fis;
        NUnit.Framework.Assert.AreEqual(BufferLength, dfsis.GetReadStatistics().GetTotalBytesRead());
        NUnit.Framework.Assert.AreEqual(BufferLength, dfsis.GetReadStatistics().GetTotalShortCircuitBytesRead());
    }
    finally
    {
        fis.Close();
        fis = null;
    }
}
/// <summary>
/// Verify that a RAM_DISK replica remains readable through an already-open
/// short-circuit read handle while a second file forces eviction, and that
/// the replica ends up on DISK once the handle is closed.
/// </summary>
public virtual void TestRamDiskEvictionWithShortCircuitReadHandle()
{
    // Capacity for 5 replicas + delta; short-circuit reads enabled.
    StartUpCluster(ReplFactor, new StorageType[] { StorageType.RamDisk, StorageType.Default }, (6 * BlockSize - 1), true);
    string MethodName = GenericTestUtils.GetMethodName();
    int Seed = unchecked((int)(0xFADED));
    Path firstFile = new Path("/" + MethodName + ".01.dat");
    Path secondFile = new Path("/" + MethodName + ".02.dat");
    MakeRandomTestFile(firstFile, BlockSize, true, Seed);
    EnsureFileReplicasOnStorageType(firstFile, StorageType.RamDisk);
    // Give the lazy writer time to persist the block. The replica must not
    // have been evicted from RAM_DISK at this point.
    Sharpen.Thread.Sleep(3 * LazyWriterIntervalSec * 1000);
    // No eviction should happen as the free ratio is below the threshold
    FSDataInputStream scrStream = fs.Open(firstFile);
    try
    {
        // Hold the open read handle on the first file while creating the second.
        byte[] readBuffer = new byte[BufferLength];
        scrStream.Read(0, readBuffer, 0, BufferLength);
        // Creating the second file triggers RAM_DISK eviction.
        MakeTestFile(secondFile, BlockSize * 2, true);
        EnsureFileReplicasOnStorageType(secondFile, StorageType.RamDisk);
        // The first file must still be readable via the open SCR handle.
        scrStream.Read(scrStream.GetPos(), readBuffer, 0, BufferLength);
        HdfsDataInputStream statsStream = (HdfsDataInputStream)scrStream;
        NUnit.Framework.Assert.AreEqual(2 * BufferLength, statsStream.GetReadStatistics().GetTotalBytesRead());
        NUnit.Framework.Assert.AreEqual(2 * BufferLength, statsStream.GetReadStatistics().GetTotalShortCircuitBytesRead());
    }
    finally
    {
        IOUtils.CloseQuietly(scrStream);
    }
    // After the open handle is closed, the first file should be evicted to DISK.
    TriggerBlockReport();
    EnsureFileReplicasOnStorageType(firstFile, StorageType.Default);
}
/// <summary>
/// Read a one-block test file through either the short-circuit or the normal
/// TCP path and verify the client read statistics (total bytes, local bytes,
/// and short-circuit bytes) match the configured mode.
/// </summary>
/// <param name="isShortCircuit">true to enable short-circuit local reads.</param>
/// <exception cref="System.Exception"/>
private void TestStatistics(bool isShortCircuit)
{
    // Skip when the native domain-socket support cannot be loaded.
    Assume.AssumeTrue(DomainSocket.GetLoadingFailureReason() == null);
    HdfsConfiguration conf = new HdfsConfiguration();
    TemporarySocketDirectory sockDir = null;
    if (isShortCircuit)
    {
        DFSInputStream.tcpReadsDisabledForTesting = true;
        sockDir = new TemporarySocketDirectory();
        conf.Set(DFSConfigKeys.DfsDomainSocketPathKey, new FilePath(sockDir.GetDir(), "TestStatisticsForLocalRead.%d.sock").GetAbsolutePath());
        conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitKey, true);
        DomainSocket.DisableBindPathValidation();
    }
    else
    {
        conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitKey, false);
    }
    MiniDFSCluster cluster = null;
    Path TestPath = new Path("/a");
    long RandomSeed = 4567L;
    FSDataInputStream fsIn = null;
    byte[] original = new byte[TestBlockReaderLocal.BlockReaderLocalTest.TestLength];
    FileSystem fs = null;
    try
    {
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
        cluster.WaitActive();
        fs = cluster.GetFileSystem();
        DFSTestUtil.CreateFile(fs, TestPath, TestBlockReaderLocal.BlockReaderLocalTest.TestLength, (short)1, RandomSeed);
        try
        {
            DFSTestUtil.WaitReplication(fs, TestPath, (short)1);
        }
        // BUGFIX: the more specific TimeoutException clause must come before
        // the general Exception clause; the original ordering made the
        // TimeoutException handler unreachable (compile error CS0160).
        catch (TimeoutException e)
        {
            NUnit.Framework.Assert.Fail("unexpected TimeoutException during " + "waitReplication: " + e);
        }
        catch (Exception e)
        {
            NUnit.Framework.Assert.Fail("unexpected InterruptedException during " + "waitReplication: " + e);
        }
        fsIn = fs.Open(TestPath);
        IOUtils.ReadFully(fsIn, original, 0, TestBlockReaderLocal.BlockReaderLocalTest.TestLength);
        HdfsDataInputStream dfsIn = (HdfsDataInputStream)fsIn;
        NUnit.Framework.Assert.AreEqual(TestBlockReaderLocal.BlockReaderLocalTest.TestLength, dfsIn.GetReadStatistics().GetTotalBytesRead());
        NUnit.Framework.Assert.AreEqual(TestBlockReaderLocal.BlockReaderLocalTest.TestLength, dfsIn.GetReadStatistics().GetTotalLocalBytesRead());
        if (isShortCircuit)
        {
            NUnit.Framework.Assert.AreEqual(TestBlockReaderLocal.BlockReaderLocalTest.TestLength, dfsIn.GetReadStatistics().GetTotalShortCircuitBytesRead());
        }
        else
        {
            NUnit.Framework.Assert.AreEqual(0, dfsIn.GetReadStatistics().GetTotalShortCircuitBytesRead());
        }
        fsIn.Close();
        fsIn = null;
    }
    finally
    {
        // Restore the test-only global flag and release every resource that
        // may have been created, in reverse order of acquisition.
        DFSInputStream.tcpReadsDisabledForTesting = false;
        if (fsIn != null)
        {
            fsIn.Close();
        }
        if (fs != null)
        {
            fs.Close();
        }
        if (cluster != null)
        {
            cluster.Shutdown();
        }
        if (sockDir != null)
        {
            sockDir.Close();
        }
    }
}
/// <summary>
/// Verify that a zero-copy (mmap) read of one block returns the correct data
/// and that the zero-copy read-statistics counters are updated.
/// </summary>
public virtual void TestZeroCopyReads()
{
    HdfsConfiguration conf = InitZeroCopyTest();
    MiniDFSCluster cluster = null;
    Path TestPath = new Path("/a");
    FSDataInputStream fsIn = null;
    int TestFileLength = 3 * BlockSize;
    FileSystem fs = null;
    try
    {
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
        cluster.WaitActive();
        fs = cluster.GetFileSystem();
        DFSTestUtil.CreateFile(fs, TestPath, TestFileLength, (short)1, 7567L);
        try
        {
            DFSTestUtil.WaitReplication(fs, TestPath, (short)1);
        }
        // BUGFIX: the more specific TimeoutException clause must come before
        // the general Exception clause; the original ordering made the
        // TimeoutException handler unreachable (compile error CS0160).
        catch (TimeoutException e)
        {
            NUnit.Framework.Assert.Fail("unexpected TimeoutException during " + "waitReplication: " + e);
        }
        catch (Exception e)
        {
            NUnit.Framework.Assert.Fail("unexpected InterruptedException during " + "waitReplication: " + e);
        }
        // First read the whole file normally to capture the expected bytes.
        fsIn = fs.Open(TestPath);
        byte[] original = new byte[TestFileLength];
        IOUtils.ReadFully(fsIn, original, 0, TestFileLength);
        fsIn.Close();
        // Re-open and perform a zero-copy read of one block.
        fsIn = fs.Open(TestPath);
        ByteBuffer result = fsIn.Read(null, BlockSize, EnumSet.Of(ReadOption.SkipChecksums));
        NUnit.Framework.Assert.AreEqual(BlockSize, result.Remaining());
        HdfsDataInputStream dfsIn = (HdfsDataInputStream)fsIn;
        NUnit.Framework.Assert.AreEqual(BlockSize, dfsIn.GetReadStatistics().GetTotalBytesRead());
        NUnit.Framework.Assert.AreEqual(BlockSize, dfsIn.GetReadStatistics().GetTotalZeroCopyBytesRead());
        Assert.AssertArrayEquals(Arrays.CopyOfRange(original, 0, BlockSize), ByteBufferToArray(result));
        fsIn.ReleaseBuffer(result);
    }
    finally
    {
        if (fsIn != null)
        {
            fsIn.Close();
        }
        if (fs != null)
        {
            fs.Close();
        }
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}