public virtual void TestReadFromOneDN()
{
    HdfsConfiguration configuration = new HdfsConfiguration();
    // One of the goals of this test is to verify that we don't open more
    // than one socket. So use a different client context, so that we
    // get our own socket cache, rather than sharing with the other test
    // instances. Also use a really long socket timeout so that nothing
    // gets closed before we get around to checking the cache size at the end.
    string contextName = "testReadFromOneDNContext";
    configuration.Set(DFSConfigKeys.DfsClientContext, contextName);
    configuration.SetLong(DFSConfigKeys.DfsClientSocketTimeoutKey, 100000000L);
    BlockReaderTestUtil util = new BlockReaderTestUtil(1, configuration);
    Path testFile = new Path("/testConnCache.dat");
    byte[] authenticData = util.WriteFile(testFile, FileSize / 1024);
    DFSClient client = new DFSClient(
        new IPEndPoint("localhost", util.GetCluster().GetNameNodePort()),
        util.GetConf());
    ClientContext cacheContext = ClientContext.Get(contextName, client.GetConf());
    DFSInputStream @in = client.Open(testFile.ToString());
    Log.Info("opened " + testFile.ToString());
    byte[] dataBuf = new byte[BlockSize];
    // Initial read
    Pread(@in, 0, dataBuf, 0, dataBuf.Length, authenticData);
    // Read again and verify that the socket is the same
    Pread(@in, FileSize - dataBuf.Length, dataBuf, 0, dataBuf.Length, authenticData);
    Pread(@in, 1024, dataBuf, 0, dataBuf.Length, authenticData);
    // No seek; just read
    Pread(@in, -1, dataBuf, 0, dataBuf.Length, authenticData);
    Pread(@in, 64, dataBuf, 0, dataBuf.Length / 2, authenticData);
    @in.Close();
    client.Close();
    NUnit.Framework.Assert.AreEqual(
        1, ClientContext.GetFromConf(configuration).GetPeerCache().Size());
}
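// The Pread helper used above is defined elsewhere in this test class. Below is a
// minimal sketch of the contract TestReadFromOneDN relies on; the name PreadSketch
// and the exact assertions are assumptions, not the real helper. It seeks only when
// pos is non-negative (pos == -1 means "no seek; just read"), reads the full range,
// and verifies each byte against the authentic data that BlockReaderTestUtil.WriteFile
// produced.
private static void PreadSketch(DFSInputStream @in, long pos, byte[] buffer,
    int offset, int length, byte[] authenticData)
{
    NUnit.Framework.Assert.IsTrue(buffer.Length >= offset + length, "Test buffer too small");
    if (pos >= 0)
    {
        @in.Seek(pos);
    }
    long filePos = @in.GetPos();
    int done = 0;
    // Short reads are legal, so loop until the requested range is complete.
    while (done < length)
    {
        int cnt = @in.Read(buffer, offset + done, length - done);
        NUnit.Framework.Assert.IsTrue(cnt > 0, "Error in read");
        done += cnt;
    }
    // Every byte must match the data written when the file was created.
    for (int i = 0; i < length; ++i)
    {
        NUnit.Framework.Assert.AreEqual(authenticData[(int)filePos + i], buffer[offset + i],
            "Read data mismatch at file offset " + (filePos + i));
    }
}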
/// <summary>Start the parallel read with the given parameters.</summary>
/// <exception cref="System.IO.IOException"/>
internal virtual bool RunParallelRead(int nFiles, int nWorkerEach,
    TestParallelReadUtil.ReadWorkerHelper helper)
{
    TestParallelReadUtil.ReadWorker[] workers =
        new TestParallelReadUtil.ReadWorker[nFiles * nWorkerEach];
    TestParallelReadUtil.TestFileInfo[] testInfoArr =
        new TestParallelReadUtil.TestFileInfo[nFiles];
    // Prepare the files and workers
    int nWorkers = 0;
    for (int i = 0; i < nFiles; ++i)
    {
        TestParallelReadUtil.TestFileInfo testInfo =
            new TestParallelReadUtil.TestFileInfo(this);
        testInfoArr[i] = testInfo;
        testInfo.filepath = new Path("/TestParallelRead.dat." + i);
        testInfo.authenticData = util.WriteFile(testInfo.filepath, FileSizeK);
        testInfo.dis = dfsClient.Open(testInfo.filepath.ToString(),
            dfsClient.GetConf().ioBufferSize, verifyChecksums);
        for (int j = 0; j < nWorkerEach; ++j)
        {
            workers[nWorkers++] =
                new TestParallelReadUtil.ReadWorker(testInfo, nWorkers, helper);
        }
    }
    // Start the workers and wait
    long starttime = Time.MonotonicNow();
    foreach (TestParallelReadUtil.ReadWorker worker in workers)
    {
        worker.Start();
    }
    foreach (TestParallelReadUtil.ReadWorker worker_1 in workers)
    {
        try
        {
            worker_1.Join();
        }
        catch (Exception)
        {
            // Ignore interruption and keep waiting for the remaining workers.
        }
    }
    long endtime = Time.MonotonicNow();
    // Cleanup
    foreach (TestParallelReadUtil.TestFileInfo testInfo_1 in testInfoArr)
    {
        testInfo_1.dis.Close();
    }
    // Report
    bool res = true;
    long totalRead = 0;
    foreach (TestParallelReadUtil.ReadWorker worker_2 in workers)
    {
        long nread = worker_2.GetBytesRead();
        Log.Info("--- Report: " + worker_2.GetName() + " read " + nread + " B; " +
            "average " + nread / TestParallelReadUtil.ReadWorker.NIterations +
            " B per read");
        totalRead += nread;
        if (worker_2.HasError())
        {
            res = false;
        }
    }
    double timeTakenSec = (endtime - starttime) / 1000.0;
    long totalReadKB = totalRead / 1024;
    Log.Info("=== Report: " + nWorkers + " threads read " + totalReadKB +
        " KB (across " + nFiles + " file(s)) in " + timeTakenSec + "s; average " +
        totalReadKB / timeTakenSec + " KB/s");
    return res;
}
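// A hedged sketch of how RunParallelRead might be driven from a test; the method
// name and the specific (nFiles, nWorkerEach) combinations are assumptions, not
// part of this class. It runs the same ReadWorkerHelper strategy against a
// single-file layout and a multi-file layout, failing as soon as any worker
// reports an error.
internal virtual void RunTestWorkloadSketch(TestParallelReadUtil.ReadWorkerHelper helper)
{
    // Several workers sharing one file.
    if (!RunParallelRead(1, 4, helper))
    {
        NUnit.Framework.Assert.Fail("Check log for errors");
    }
    // Several files, each with its own set of workers.
    if (!RunParallelRead(2, 4, helper))
    {
        NUnit.Framework.Assert.Fail("Check log for errors");
    }
}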