/// <summary>Ask dfs client to read the file</summary>
/// <remarks>
/// Drains the whole file through the DFS client, ignoring checksum and
/// missing-block errors — the test only needs the read attempt to happen.
/// </remarks>
/// <exception cref="System.IO.IOException"/>
/// <exception cref="Org.Apache.Hadoop.FS.UnresolvedLinkException"/>
private void DfsClientReadFile(Path corruptedFile)
{
    DFSInputStream stream = dfs.dfs.Open(corruptedFile.ToUri().GetPath());
    byte[] readBuffer = new byte[buffersize];
    try
    {
        // Keep reading until the stream reports no more data (<= 0).
        int bytesRead = stream.Read(readBuffer, 0, readBuffer.Length);
        while (bytesRead > 0)
        {
            bytesRead = stream.Read(readBuffer, 0, readBuffer.Length);
        }
    }
    catch (ChecksumException)
    {
        // All replicas are bad: deliberately swallow and continue.
        Log.Debug("DfsClientReadFile caught ChecksumException.");
    }
    catch (BlockMissingException)
    {
        // Block is missing entirely: likewise ignored by design.
        Log.Debug("DfsClientReadFile caught BlockMissingException.");
    }
}
/// <summary>
/// Verifies that a hedged positional read which keeps hitting injected
/// datanode faults loops exactly 3 times before succeeding.
/// </summary>
public virtual void TestHedgedReadLoopTooManyTimes()
{
    Configuration conf = new Configuration();
    int numHedgedReadPoolThreads = 5;
    int hedgedReadTimeoutMillis = 50;
    conf.SetInt(DFSConfigKeys.DfsDfsclientHedgedReadThreadpoolSize, numHedgedReadPoolThreads);
    conf.SetLong(DFSConfigKeys.DfsDfsclientHedgedReadThresholdMillis, hedgedReadTimeoutMillis);
    // No retry-window backoff, so the loop count is deterministic.
    conf.SetInt(DFSConfigKeys.DfsClientRetryWindowBase, 0);
    // Set up the InjectionHandler: exceptions on fetch, delays on read.
    DFSClientFaultInjector.instance = Org.Mockito.Mockito.Mock<DFSClientFaultInjector>();
    DFSClientFaultInjector injector = DFSClientFaultInjector.instance;
    int sleepMs = 100;
    Org.Mockito.Mockito.DoAnswer(new _Answer_296(hedgedReadTimeoutMillis, sleepMs)).When(injector).FetchFromDatanodeException();
    Org.Mockito.Mockito.DoAnswer(new _Answer_309(sleepMs)).When(injector).ReadFromDatanodeDelay();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Format(true).Build();
    DistributedFileSystem fileSys = cluster.GetFileSystem();
    DFSClient dfsClient = fileSys.GetClient();
    FSDataOutputStream output = null;
    DFSInputStream input = null;
    string filename = "/hedgedReadMaxOut.dat";
    try
    {
        // Write three 64KB chunks with an hflush between each.
        Path file = new Path(filename);
        output = fileSys.Create(file, (short)2);
        byte[] data = new byte[64 * 1024];
        output.Write(data);
        output.Flush();
        output.Write(data);
        output.Flush();
        output.Write(data);
        output.Flush();
        output.Close();
        byte[] buffer = new byte[64 * 1024];
        input = dfsClient.Open(filename);
        input.Read(0, buffer, 0, 1024);
        // Query the loop counter before closing the stream; the original
        // read it after Close(), relying on post-close stream state.
        NUnit.Framework.Assert.AreEqual(3, input.GetHedgedReadOpsLoopNumForTesting());
        input.Close();
    }
    catch (BlockMissingException)
    {
        // Assert.IsTrue(false) carried no diagnostic; fail with a message.
        NUnit.Framework.Assert.Fail("Read unexpectedly failed with BlockMissingException");
    }
    finally
    {
        Org.Mockito.Mockito.Reset(injector);
        IOUtils.Cleanup(null, input);
        IOUtils.Cleanup(null, output);
        fileSys.Close();
        cluster.Shutdown();
    }
}
/// <summary>(Optionally) seek to position, read and verify data.</summary>
/// <remarks>
/// (Optionally) seek to position, read and verify data.
/// Seek to specified position if pos is non-negative.
/// </remarks>
/// <exception cref="System.IO.IOException"/>
private void Pread(DFSInputStream @in, long pos, byte[] buffer, int offset, int length, byte[] authenticData)
{
    NUnit.Framework.Assert.IsTrue("Test buffer too small", buffer.Length >= offset + length);
    if (pos >= 0)
    {
        @in.Seek(pos);
    }
    Log.Info("Reading from file of size " + @in.GetFileLength() + " at offset " + @in.GetPos());
    // Remember where and how much we read: the loop below mutates
    // offset and length, and the original verification loop iterated
    // over the already-zeroed length — so it never checked a single byte.
    long verifyPos = @in.GetPos();
    int verifyOffset = offset;
    int verifyLength = length;
    while (length > 0)
    {
        int cnt = @in.Read(buffer, offset, length);
        NUnit.Framework.Assert.IsTrue("Error in read", cnt > 0);
        offset += cnt;
        length -= cnt;
    }
    // Verify every byte actually read, at the buffer region it was read
    // into (the original indexed buffer[i], ignoring the starting offset).
    for (int i = 0; i < verifyLength; ++i)
    {
        byte actual = buffer[verifyOffset + i];
        byte expect = authenticData[(int)verifyPos + i];
        NUnit.Framework.Assert.AreEqual("Read data mismatch at file offset " + (verifyPos + i)
            + ". Expects " + expect + "; got " + actual, actual, expect);
    }
}
/// <summary>
/// Positionally reads exactly <paramref name="len"/> bytes starting at file
/// offset <paramref name="startOff"/> into <paramref name="target"/>.
/// </summary>
/// <returns>-1 on premature EOF, otherwise the number of bytes read (len).</returns>
/// <exception cref="System.IO.IOException"/>
public virtual int PRead(DFSInputStream dis, byte[] target, int startOff, int len)
{
    int cnt = 0;
    while (cnt < len)
    {
        // Advance the file position together with the buffer offset; the
        // original re-read from startOff after every partial read, filling
        // the buffer with duplicated data instead of consecutive bytes.
        int read = dis.Read(startOff + cnt, target, cnt, len - cnt);
        if (read == -1)
        {
            return read;
        }
        cnt += read;
    }
    return cnt;
}
/// <summary>
/// Seeks to <paramref name="startOff"/> and sequentially reads exactly
/// <paramref name="len"/> bytes into <paramref name="target"/>, holding the
/// stream's monitor for the whole seek-and-read sequence.
/// </summary>
/// <returns>-1 on premature EOF, otherwise the number of bytes read (len).</returns>
/// <exception cref="System.IO.IOException"/>
public virtual int Read(DFSInputStream dis, byte[] target, int startOff, int len)
{
    int total = 0;
    lock (dis)
    {
        dis.Seek(startOff);
        // Loop because a single Read call may return fewer bytes than asked.
        while (total < len)
        {
            int n = dis.Read(target, total, len - total);
            if (n == -1)
            {
                return n;
            }
            total += n;
        }
    }
    return total;
}
/// <summary>
/// Seeks to startOff and reads len bytes through a direct ByteBuffer,
/// then copies the buffer's contents back into target.
/// </summary>
/// <returns>-1 on premature EOF, otherwise the number of bytes read.</returns>
/// <exception cref="System.IO.IOException"/>
public virtual int Read(DFSInputStream dis, byte[] target, int startOff, int len)
{
    // Direct buffer sized exactly to target.Length — the copy-out below
    // depends on this equality.
    ByteBuffer bb = ByteBuffer.AllocateDirect(target.Length);
    int cnt = 0;
    lock (dis)
    {
        dis.Seek(startOff);
        while (cnt < len)
        {
            int read = dis.Read(bb);
            if (read == -1)
            {
                return read;
            }
            cnt += read;
        }
    }
    // NOTE(review): Clear() (not Flip()) resets position to 0 and limit to
    // capacity; Get(target) then copies capacity == target.Length bytes.
    // This only works because the buffer was allocated with target.Length;
    // if len < target.Length the tail bytes copied are unwritten buffer
    // content — presumably acceptable for this test, but confirm.
    bb.Clear();
    bb.Get(target);
    return cnt;
}
/// <summary>DFS client read bytes starting from the specified position.</summary>
/// <remarks>
/// Drains the file via positional reads starting at offset 2, ignoring
/// missing-block errors — the test only needs the read attempts to happen.
/// </remarks>
/// <exception cref="Org.Apache.Hadoop.FS.UnresolvedLinkException"/>
/// <exception cref="System.IO.IOException"/>
private void DfsClientReadFileFromPosition(Path corruptedFile)
{
    DFSInputStream @in = dfs.dfs.Open(corruptedFile.ToUri().GetPath());
    byte[] buf = new byte[buffersize];
    int startPosition = 2;
    int nRead = 0;
    // total number of bytes read
    try
    {
        do
        {
            nRead = @in.Read(startPosition, buf, 0, buf.Length);
            // Advance by the bytes actually read; the original always added
            // buf.Length, skipping unread bytes whenever a positional read
            // returned a partial count.
            if (nRead > 0)
            {
                startPosition += nRead;
            }
        }
        while (nRead > 0);
    }
    catch (BlockMissingException)
    {
        // Expected when the block is gone; deliberately ignored.
        Log.Debug("DfsClientReadFile caught BlockMissingException.");
    }
}
/// <summary>
/// Verifies lease-renewal failure handling: writes keep working past the
/// soft limit, fail after the hard limit aborts the lease, and both reads
/// on existing streams and brand-new writes recover once renewal succeeds
/// again.
/// </summary>
public virtual void TestLeaseAbort()
{
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();
    try
    {
        cluster.WaitActive();
        // Spy on the NN RPC so RenewLease can be stubbed to fail on demand.
        NamenodeProtocols preSpyNN = cluster.GetNameNodeRpc();
        NamenodeProtocols spyNN = Org.Mockito.Mockito.Spy(preSpyNN);
        DFSClient dfs = new DFSClient(null, spyNN, conf, null);
        byte[] buf = new byte[1024];
        // File "c": written and closed, then reopened for reading later.
        FSDataOutputStream c_out = CreateFsOut(dfs, dirString + "c");
        c_out.Write(buf, 0, 1024);
        c_out.Close();
        DFSInputStream c_in = dfs.Open(dirString + "c");
        // File "d": left open so its lease is in play.
        FSDataOutputStream d_out = CreateFsOut(dfs, dirString + "d");
        // stub the renew method.
        Org.Mockito.Mockito.DoThrow(new RemoteException(typeof(SecretManager.InvalidToken).FullName,
            "Your token is worthless")).When(spyNN).RenewLease(Matchers.AnyString());
        // We don't need to wait the lease renewer thread to act.
        // call renewLease() manually.
        // make it look like the soft limit has been exceeded.
        LeaseRenewer originalRenewer = dfs.GetLeaseRenewer();
        dfs.lastLeaseRenewal = Time.MonotonicNow() - HdfsConstants.LeaseSoftlimitPeriod - 1000;
        try
        {
            dfs.RenewLease();
        }
        catch (IOException)
        {
            // Renewal failure past the soft limit is tolerated; swallow it.
        }
        // Things should continue to work it passes hard limit without
        // renewing.
        try
        {
            d_out.Write(buf, 0, 1024);
            Log.Info("Write worked beyond the soft limit as expected.");
        }
        catch (IOException)
        {
            NUnit.Framework.Assert.Fail("Write failed.");
        }
        // make it look like the hard limit has been exceeded.
        dfs.lastLeaseRenewal = Time.MonotonicNow() - HdfsConstants.LeaseHardlimitPeriod - 1000;
        dfs.RenewLease();
        // this should not work.
        try
        {
            d_out.Write(buf, 0, 1024);
            d_out.Close();
            NUnit.Framework.Assert.Fail("Write did not fail even after the fatal lease renewal failure");
        }
        catch (IOException e)
        {
            Log.Info("Write failed as expected. ", e);
        }
        // If aborted, the renewer should be empty (no reference to clients).
        Sharpen.Thread.Sleep(1000);
        NUnit.Framework.Assert.IsTrue(originalRenewer.IsEmpty());
        // unstub
        Org.Mockito.Mockito.DoNothing().When(spyNN).RenewLease(Matchers.AnyString());
        // existing input streams should work
        try
        {
            int num = c_in.Read(buf, 0, 1);
            if (num != 1)
            {
                NUnit.Framework.Assert.Fail("Failed to read 1 byte");
            }
            c_in.Close();
        }
        catch (IOException e)
        {
            Log.Error("Read failed with ", e);
            NUnit.Framework.Assert.Fail("Read after lease renewal failure failed");
        }
        // new file writes should work.
        try
        {
            c_out = CreateFsOut(dfs, dirString + "c");
            c_out.Write(buf, 0, 1024);
            c_out.Close();
        }
        catch (IOException e)
        {
            Log.Error("Write failed with ", e);
            NUnit.Framework.Assert.Fail("Write failed");
        }
    }
    finally
    {
        cluster.Shutdown();
    }
}