/// <summary>
/// Verifies that a hedged read gives up after looping over the available
/// datanodes a bounded number of times, instead of retrying forever, when
/// every fetch attempt is forced to fail by the fault injector.
/// </summary>
public virtual void TestHedgedReadLoopTooManyTimes()
{
    Configuration conf = new Configuration();
    int numHedgedReadPoolThreads = 5;
    int hedgedReadTimeoutMillis = 50;
    conf.SetInt(DFSConfigKeys.DfsDfsclientHedgedReadThreadpoolSize, numHedgedReadPoolThreads);
    conf.SetLong(DFSConfigKeys.DfsDfsclientHedgedReadThresholdMillis, hedgedReadTimeoutMillis);
    // Disable the retry back-off window so the loop count is deterministic.
    conf.SetInt(DFSConfigKeys.DfsClientRetryWindowBase, 0);
    // Set up the InjectionHandler
    DFSClientFaultInjector.instance = Org.Mockito.Mockito.Mock<DFSClientFaultInjector>();
    DFSClientFaultInjector injector = DFSClientFaultInjector.instance;
    int sleepMs = 100;
    // Make every datanode fetch throw, and delay reads past the hedge threshold.
    Org.Mockito.Mockito.DoAnswer(new _Answer_296(hedgedReadTimeoutMillis, sleepMs)).When(injector).FetchFromDatanodeException();
    Org.Mockito.Mockito.DoAnswer(new _Answer_309(sleepMs)).When(injector).ReadFromDatanodeDelay();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Format(true).Build();
    DistributedFileSystem fileSys = cluster.GetFileSystem();
    DFSClient dfsClient = fileSys.GetClient();
    FSDataOutputStream output = null;
    DFSInputStream input = null;
    string filename = "/hedgedReadMaxOut.dat";
    try
    {
        Path file = new Path(filename);
        output = fileSys.Create(file, (short)2);
        byte[] data = new byte[64 * 1024];
        output.Write(data);
        output.Flush();
        output.Write(data);
        output.Flush();
        output.Write(data);
        output.Flush();
        output.Close();
        byte[] buffer = new byte[64 * 1024];
        input = dfsClient.Open(filename);
        input.Read(0, buffer, 0, 1024);
        input.Close();
        // With 2 datanodes and both fetches failing, the hedged-read loop is
        // expected to run exactly 3 times before completing.
        NUnit.Framework.Assert.AreEqual(3, input.GetHedgedReadOpsLoopNumForTesting());
    }
    catch (BlockMissingException)
    {
        // Fix: was Assert.IsTrue(false), which fails with no diagnostic.
        NUnit.Framework.Assert.Fail("Read failed with BlockMissingException instead of completing via hedged reads.");
    }
    finally
    {
        Org.Mockito.Mockito.Reset(injector);
        IOUtils.Cleanup(null, input);
        IOUtils.Cleanup(null, output);
        fileSys.Close();
        cluster.Shutdown();
    }
}
/// <summary>
/// Verifies that after a forced packet failure during the write pipeline,
/// the NameNode only accepts valid replicas, so a subsequent read of the
/// file succeeds rather than throwing BlockMissingException.
/// </summary>
public virtual void TestPipelineRecoveryForLastBlock()
{
    DFSClientFaultInjector faultInjector = Org.Mockito.Mockito.Mock<DFSClientFaultInjector>();
    DFSClientFaultInjector oldInjector = DFSClientFaultInjector.instance;
    DFSClientFaultInjector.instance = faultInjector;
    Configuration conf = new HdfsConfiguration();
    conf.SetInt(DFSConfigKeys.DfsClientBlockWriteLocatefollowingblockRetriesKey, 3);
    MiniDFSCluster cluster = null;
    try
    {
        int numDataNodes = 3;
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDataNodes).Build();
        cluster.WaitActive();
        FileSystem fileSys = cluster.GetFileSystem();
        Path file = new Path("dataprotocol1.dat");
        // Force a packet failure so the pipeline has to recover mid-write.
        Org.Mockito.Mockito.When(faultInjector.FailPacket()).ThenReturn(true);
        DFSTestUtil.CreateFile(fileSys, file, 68000000L, (short)numDataNodes, 0L);
        // At this point, NN should have accepted only valid replicas.
        // Read should succeed.
        FSDataInputStream @in = fileSys.Open(file);
        try
        {
            // Fix: result was stored in an unused local; the call alone suffices.
            @in.Read();
        }
        catch (BlockMissingException)
        {
            // Test will fail with BlockMissingException if NN does not update the
            // replica state based on the latest report.
            NUnit.Framework.Assert.Fail("Block is missing because the file was closed with" + " corrupt replicas.");
        }
        finally
        {
            // Fix: the input stream was previously leaked.
            @in.Close();
        }
    }
    finally
    {
        DFSClientFaultInjector.instance = oldInjector;
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/// <summary>Write the full packet, including the header, to the given output stream.
/// </summary>
/// <remarks>
/// On entry the buffer holds checksums followed (possibly after a gap) by the
/// data. This method closes any gap, splices the serialized PacketHeader
/// immediately before the checksums, then writes one contiguous region.
/// Synchronized on this packet since it mutates the shared buffer in place.
/// </remarks>
/// <param name="stm"/>
/// <exception cref="System.IO.IOException"/>
internal virtual void WriteTo(DataOutputStream stm)
{
    lock (this)
    {
        CheckBuffer();
        int dataLen = dataPos - dataStart;
        int checksumLen = checksumPos - checksumStart;
        // Packet length counts the payload plus the 4-byte length field itself.
        int pktLen = HdfsConstants.BytesInInteger + dataLen + checksumLen;
        PacketHeader header = new PacketHeader(pktLen, offsetInBlock, seqno, lastPacketInBlock, dataLen, syncBlock);
        if (checksumPos != dataStart)
        {
            // Move the checksum to cover the gap. This can happen for the last
            // packet or during an hflush/hsync call.
            System.Array.Copy(buf, checksumStart, buf, dataStart - checksumLen, checksumLen);
            checksumPos = dataStart;
            checksumStart = checksumPos - checksumLen;
        }
        // The header occupies the bytes immediately before the checksums.
        int headerStart = checksumStart - header.GetSerializedSize();
        System.Diagnostics.Debug.Assert(checksumStart + 1 >= header.GetSerializedSize());
        System.Diagnostics.Debug.Assert(headerStart >= 0);
        System.Diagnostics.Debug.Assert(headerStart + header.GetSerializedSize() == checksumStart);
        // Copy the header data into the buffer immediately preceding the checksum
        // data.
        System.Array.Copy(header.GetBytes(), 0, buf, headerStart, header.GetSerializedSize());
        // corrupt the data for testing: flip all bits of the last data byte so
        // the receiver's checksum verification fails.
        if (DFSClientFaultInjector.Get().CorruptPacket())
        {
            buf[headerStart + header.GetSerializedSize() + checksumLen + dataLen - 1] ^= unchecked((int)(0xff));
        }
        // Write the now contiguous full packet to the output stream.
        stm.Write(buf, headerStart, header.GetSerializedSize() + checksumLen + dataLen);
        // undo corruption, since the buffer may be retransmitted on pipeline
        // recovery — NOTE(review): presumed rationale; confirm against callers.
        if (DFSClientFaultInjector.Get().UncorruptPacket())
        {
            buf[headerStart + header.GetSerializedSize() + checksumLen + dataLen - 1] ^= unchecked((int)(0xff));
        }
    }
}
/// <summary>
/// Installs a fresh mock <see cref="DFSClientFaultInjector"/> as the global
/// injector instance before each test, keeping a reference in the
/// <c>faultInjector</c> field for per-test stubbing.
/// </summary>
public virtual void SetUp()
{
    DFSClientFaultInjector mockInjector =
        Org.Mockito.Mockito.Mock<DFSClientFaultInjector>();
    faultInjector = mockInjector;
    DFSClientFaultInjector.instance = mockInjector;
}
/// <summary>
/// Exercises the hedged read pool under three conditions: reads faster than
/// the hedge timeout (no hedged reads), reads slower than the timeout with a
/// single reader (hedged reads but none in the calling thread), and many
/// concurrent readers that max out the pool (hedged reads forced to run in
/// the calling thread).
/// </summary>
public virtual void TestMaxOutHedgedReadPool()
{
    isHedgedRead = true;
    Configuration conf = new Configuration();
    int numHedgedReadPoolThreads = 5;
    int initialHedgedReadTimeoutMillis = 50000;
    int fixedSleepIntervalMillis = 50;
    conf.SetInt(DFSConfigKeys.DfsDfsclientHedgedReadThreadpoolSize, numHedgedReadPoolThreads);
    conf.SetLong(DFSConfigKeys.DfsDfsclientHedgedReadThresholdMillis, initialHedgedReadTimeoutMillis);
    // Set up the InjectionHandler
    DFSClientFaultInjector.instance = Org.Mockito.Mockito.Mock<DFSClientFaultInjector>();
    DFSClientFaultInjector injector = DFSClientFaultInjector.instance;
    // make preads sleep for 50ms
    Org.Mockito.Mockito.DoAnswer(new _Answer_372(fixedSleepIntervalMillis)).When(injector).StartFetchFromDatanode();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Format(true).Build();
    DistributedFileSystem fileSys = cluster.GetFileSystem();
    DFSClient dfsClient = fileSys.GetClient();
    DFSHedgedReadMetrics metrics = dfsClient.GetHedgedReadMetrics();
    // Metrics instance is static, so we need to reset counts from prior tests.
    metrics.hedgedReadOps.Set(0);
    metrics.hedgedReadOpsWin.Set(0);
    metrics.hedgedReadOpsInCurThread.Set(0);
    // Fix: declared outside the try so it can be shut down in the finally;
    // previously the pool leaked whenever an assertion failed.
    ExecutorService executor = null;
    try
    {
        Path file1 = new Path("hedgedReadMaxOut.dat");
        WriteFile(fileSys, file1);
        // Basic test. Reads complete within timeout. Assert that there were no
        // hedged reads.
        PReadFile(fileSys, file1);
        // assert that there were no hedged reads. 50ms + delta < 500ms
        // (AreEqual instead of IsTrue(x == 0) for a useful failure message)
        NUnit.Framework.Assert.AreEqual(0, metrics.GetHedgedReadOps());
        NUnit.Framework.Assert.AreEqual(0, metrics.GetHedgedReadOpsInCurThread());
        /*
         * Reads take longer than timeout. But, only one thread reading. Assert
         * that there were hedged reads. But, none of the reads had to run in the
         * current thread.
         */
        dfsClient.SetHedgedReadTimeout(50);
        // 50ms
        PReadFile(fileSys, file1);
        // assert that there were hedged reads
        NUnit.Framework.Assert.IsTrue(metrics.GetHedgedReadOps() > 0);
        NUnit.Framework.Assert.AreEqual(0, metrics.GetHedgedReadOpsInCurThread());
        /*
         * Multiple threads reading. Reads take longer than timeout. Assert that
         * there were hedged reads. And that reads had to run in the current
         * thread.
         */
        int factor = 10;
        int numHedgedReads = numHedgedReadPoolThreads * factor;
        long initialReadOpsValue = metrics.GetHedgedReadOps();
        executor = Executors.NewFixedThreadPool(numHedgedReads);
        AList<Future<Void>> futures = new AList<Future<Void>>();
        for (int i = 0; i < numHedgedReads; i++)
        {
            futures.AddItem(executor.Submit(GetPReadFileCallable(fileSys, file1)));
        }
        // Wait for every concurrent pread to finish before asserting.
        for (int i = 0; i < numHedgedReads; i++)
        {
            futures[i].Get();
        }
        NUnit.Framework.Assert.IsTrue(metrics.GetHedgedReadOps() > initialReadOpsValue);
        NUnit.Framework.Assert.IsTrue(metrics.GetHedgedReadOpsInCurThread() > 0);
        CleanupFile(fileSys, file1);
    }
    finally
    {
        if (executor != null)
        {
            executor.Shutdown();
        }
        fileSys.Close();
        cluster.Shutdown();
        Org.Mockito.Mockito.Reset(injector);
    }
}