/// <exception cref="System.IO.IOException"/>
private void Parse()
{
    FSDataInputStream @in = null;
    try
    {
        @in = GetPreviousJobHistoryFileStream(GetConfig(), applicationAttemptId);
    }
    catch (IOException e)
    {
        Log.Warn("error trying to open previous history file. No history data " +
            "will be copied over.", e);
        return;
    }
    JobHistoryParser parser = new JobHistoryParser(@in);
    parser.Parse(this);
    Exception parseException = parser.GetParseException();
    if (parseException != null)
    {
        Log.Info("Got an error parsing job-history file" +
            ", ignoring incomplete events.", parseException);
    }
}
/// <summary>Constructor</summary>
/// <param name="fin">FS input stream.</param>
/// <param name="fileLength">Length of the corresponding file</param>
/// <exception cref="System.IO.IOException"/>
public Reader(FSDataInputStream fin, long fileLength, Configuration conf)
{
    this.@in = fin;
    this.conf = conf;
    // Move the cursor to the beginning of the tail, which contains: the offset
    // of the meta block index, the version, and the magic.
    fin.Seek(fileLength - BCFile.Magic.Size() - Utils.Version.Size() - long.Size / byte.Size);
    long offsetIndexMeta = fin.ReadLong();
    version = new Utils.Version(fin);
    BCFile.Magic.ReadAndVerify(fin);
    if (!version.CompatibleWith(BCFile.ApiVersion))
    {
        throw new RuntimeException("Incompatible BCFile fileBCFileVersion.");
    }
    // Read the meta index.
    fin.Seek(offsetIndexMeta);
    metaIndex = new BCFile.MetaIndex(fin);
    // Read data:BCFile.index, the data block index.
    BCFile.Reader.BlockReader blockR = GetMetaBlock(BCFile.DataIndex.BlockName);
    try
    {
        dataIndex = new BCFile.DataIndex(blockR);
    }
    finally
    {
        blockR.Close();
    }
}
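// A minimal sketch of the tail layout the constructor above seeks to. The
// concrete sizes come from BCFile.Magic.Size() and Utils.Version.Size(); the
// widths drawn here are illustrative assumptions, not values from the format
// spec.
//
//   | ... blocks ... | offsetIndexMeta (long) | version | magic |
//   ^ 0                                                         ^ fileLength
//
// So the seek target is fileLength - magicSize - versionSize - sizeof(long).
internal static long TailStart(long fileLength, int magicSize, int versionSize)
{
    // Hypothetical helper, not part of BCFile: computes where the tail begins.
    return fileLength - magicSize - versionSize - sizeof(long);
}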
public override void Close()
{
    // Invalidate the state of the stream.
    @in = null;
    pos = end;
    mark = -1;
}
/// <summary>test seek</summary>
/// <exception cref="System.IO.IOException"/>
internal static void VerifySeek(FileSystem fs, Path p, long offset, long length,
    byte[] buf, byte[] expected)
{
    long remaining = length - offset;
    long @checked = 0;
    Log.Info("XXX SEEK: offset=" + offset + ", remaining=" + remaining);
    TestWebHDFS.Ticker t = new TestWebHDFS.Ticker("SEEK", "offset=%d, remaining=%d",
        offset, remaining);
    FSDataInputStream @in = fs.Open(p, 64 << 10);
    @in.Seek(offset);
    while (remaining > 0)
    {
        t.Tick(@checked, "offset=%d, remaining=%d", offset, remaining);
        int n = (int)Math.Min(remaining, buf.Length);
        @in.ReadFully(buf, 0, n);
        CheckData(offset, remaining, n, buf, expected);
        offset += n;
        remaining -= n;
        @checked += n;
    }
    @in.Close();
    t.End(@checked);
}
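// Hedged usage sketch for VerifySeek: read the tail of a file of known
// contents starting from a mid-file offset, with a 4 KB scratch buffer. The
// path and sizes are illustrative assumptions; Dataset() and CreateFile() are
// the contract-test helpers used elsewhere in this file.
/// <exception cref="System.IO.IOException"/>
internal static void VerifySeekExample(FileSystem fs)
{
    byte[] expected = ContractTestUtils.Dataset(1 << 20, 0, 255); // 1 MB of predictable data
    Path p = new Path("/tmp/seektest.dat"); // hypothetical test path
    ContractTestUtils.CreateFile(fs, p, true, expected);
    // Seek to the half-way point and verify every remaining byte.
    VerifySeek(fs, p, 512 * 1024, 1 << 20, new byte[4096], expected);
}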
// test pread can survive datanode restarts
/// <exception cref="System.IO.IOException"/>
private void DatanodeRestartTest(MiniDFSCluster cluster, FileSystem fileSys, Path name)
{
    // Skip this test if using simulated storage, since simulated blocks
    // don't survive datanode restarts.
    if (simulatedStorage)
    {
        return;
    }
    int numBlocks = 1;
    NUnit.Framework.Assert.IsTrue(numBlocks <= DFSConfigKeys.DfsClientMaxBlockAcquireFailuresDefault);
    byte[] expected = new byte[numBlocks * blockSize];
    Random rand = new Random(seed);
    rand.NextBytes(expected);
    byte[] actual = new byte[numBlocks * blockSize];
    FSDataInputStream stm = fileSys.Open(name);
    // Read a block and get block locations cached as a result.
    stm.ReadFully(0, actual);
    CheckAndEraseData(actual, 0, expected, "Pread Datanode Restart Setup");
    // Restart all datanodes. It is expected that they will
    // restart on different ports, hence cached block locations
    // will no longer work.
    NUnit.Framework.Assert.IsTrue(cluster.RestartDataNodes());
    cluster.WaitActive();
    // Verify the block can be read again using the same InputStream
    // (via re-fetching of block locations from the namenode). There is a
    // 3 sec sleep in chooseDataNode(), which could be shortened for
    // this test if it were configurable.
    stm.ReadFully(0, actual);
    CheckAndEraseData(actual, 0, expected, "Pread Datanode Restart Test");
}
public virtual void TestOpenFileTwice()
{
    Describe("verify that two opened file streams are independent");
    Path path = Path("testopenfiletwice.txt");
    byte[] block = ContractTestUtils.Dataset(TestFileLen, 0, 255);
    // This file now has a simple rule: offset => value.
    ContractTestUtils.CreateFile(GetFileSystem(), path, false, block);
    // Open the first stream.
    FSDataInputStream instream1 = GetFileSystem().Open(path);
    int c = instream1.Read();
    Assert.Equal(0, c);
    FSDataInputStream instream2 = null;
    try
    {
        instream2 = GetFileSystem().Open(path);
        Assert.Equal("first read of instream 2", 0, instream2.Read());
        Assert.Equal("second read of instream 1", 1, instream1.Read());
        instream1.Close();
        Assert.Equal("second read of instream 2", 1, instream2.Read());
        // Close instream1 again.
        instream1.Close();
    }
    finally
    {
        IOUtils.CloseStream(instream1);
        IOUtils.CloseStream(instream2);
    }
}
public virtual void TestGetPos()
{
    Path testFile = new Path("/testfile+1");
    // Write a test file.
    FSDataOutputStream @out = hdfs.Create(testFile, true);
    @out.WriteBytes("0123456789");
    @out.Close();
    FSDataInputStream @in = hftpFs.Open(testFile);
    // Test Read().
    for (int i = 0; i < 5; ++i)
    {
        NUnit.Framework.Assert.AreEqual(i, @in.GetPos());
        @in.Read();
    }
    // Test Read(b, off, len).
    NUnit.Framework.Assert.AreEqual(5, @in.GetPos());
    byte[] buffer = new byte[10];
    NUnit.Framework.Assert.AreEqual(2, @in.Read(buffer, 0, 2));
    NUnit.Framework.Assert.AreEqual(7, @in.GetPos());
    // Test Read(b).
    int bytesRead = @in.Read(buffer);
    NUnit.Framework.Assert.AreEqual(7 + bytesRead, @in.GetPos());
    // Test EOF.
    for (int i = 0; i < 100; ++i)
    {
        @in.Read();
    }
    NUnit.Framework.Assert.AreEqual(10, @in.GetPos());
    @in.Close();
}
public virtual void TestReadClosedStream()
{
    Path testFile = new Path("/testfile+2");
    FSDataOutputStream os = hdfs.Create(testFile, true);
    os.WriteBytes("0123456789");
    os.Close();
    // ByteRangeInputStream delays opens until reads. Make sure it doesn't
    // open a closed stream that has never been opened.
    FSDataInputStream @in = hftpFs.Open(testFile);
    @in.Close();
    CheckClosedStream(@in);
    CheckClosedStream(@in.GetWrappedStream());
    // Force the stream to connect, then close it.
    @in = hftpFs.Open(testFile);
    int ch = @in.Read();
    NUnit.Framework.Assert.AreEqual('0', ch);
    @in.Close();
    CheckClosedStream(@in);
    CheckClosedStream(@in.GetWrappedStream());
    // Make sure seeking doesn't automagically reopen the stream.
    @in.Seek(4);
    CheckClosedStream(@in);
    CheckClosedStream(@in.GetWrappedStream());
}
public virtual void TestOpenReadDirWithChild()
{
    Describe("create & read a directory which has a child");
    Path path = Path("zero.dir");
    Mkdirs(path);
    Path path2 = new Path(path, "child");
    Mkdirs(path2);
    try
    {
        instream = GetFileSystem().Open(path);
        // At this point we've opened a directory.
        NUnit.Framework.Assert.Fail("A directory has been opened for reading");
    }
    catch (FileNotFoundException e)
    {
        HandleExpectedException(e);
    }
    catch (IOException e)
    {
        HandleRelaxedException("opening a directory for reading", "FileNotFoundException", e);
    }
}
/*
 * Read some data, skip a few bytes and read more. HADOOP-922.
 */
/// <exception cref="System.IO.IOException"/>
private void SmallReadSeek(FileSystem fileSys, Path name)
{
    if (fileSys is ChecksumFileSystem)
    {
        fileSys = ((ChecksumFileSystem)fileSys).GetRawFileSystem();
    }
    // Make the buffer size small to trigger the code path for HADOOP-922.
    FSDataInputStream stmRaw = fileSys.Open(name, 1);
    byte[] expected = new byte[Onemb];
    Random rand = new Random(seed);
    rand.NextBytes(expected);
    // Issue a simple read first.
    byte[] actual = new byte[128];
    stmRaw.Seek(100000);
    stmRaw.Read(actual, 0, actual.Length);
    CheckAndEraseData(actual, 100000, expected, "First Small Read Test");
    // Now do a small seek of 4 bytes, within the same block.
    int newpos1 = 100000 + 128 + 4;
    stmRaw.Seek(newpos1);
    stmRaw.Read(actual, 0, actual.Length);
    CheckAndEraseData(actual, newpos1, expected, "Small Seek Bug 1");
    // Seek another 256 bytes this time.
    int newpos2 = newpos1 + 256;
    stmRaw.Seek(newpos2);
    stmRaw.Read(actual, 0, actual.Length);
    CheckAndEraseData(actual, newpos2, expected, "Small Seek Bug 2");
    // All done.
    stmRaw.Close();
}
/// <exception cref="System.IO.IOException"/>
private long DumpFromOffset(PathData item, long offset)
{
    long fileSize = item.RefreshStatus().GetLen();
    if (offset > fileSize)
    {
        return fileSize;
    }
    // Treat a negative offset as relative to the end of the file, with a floor of 0.
    if (offset < 0)
    {
        offset = Math.Max(fileSize + offset, 0);
    }
    FSDataInputStream @in = item.fs.Open(item.path);
    try
    {
        @in.Seek(offset);
        // Use conf so the system-configured IO block size is used.
        IOUtils.CopyBytes(@in, System.Console.Out, GetConf(), false);
        offset = @in.GetPos();
    }
    finally
    {
        @in.Close();
    }
    return offset;
}
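// A minimal, self-contained sketch of the offset normalization above, reduced
// to pure arithmetic with no filesystem involved. NormalizeOffset is a
// hypothetical helper name, not part of the shell command.
internal static long NormalizeOffset(long offset, long fileSize)
{
    if (offset > fileSize)
    {
        return fileSize;                       // past EOF: clamp to the end
    }
    if (offset < 0)
    {
        return Math.Max(fileSize + offset, 0); // e.g. -10 on a 100-byte file => 90
    }
    return offset;
}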
/// <exception cref="System.IO.IOException"/>
public UncompressedSplitLineReader(FSDataInputStream @in, Configuration conf,
    byte[] recordDelimiterBytes, long splitLength)
    : base(@in, conf, recordDelimiterBytes)
{
    this.splitLength = splitLength;
    usingCRLF = (recordDelimiterBytes == null);
}
public virtual void TestNegativeSeek()
{
    instream = GetFileSystem().Open(smallSeekFile);
    Assert.Equal(0, instream.GetPos());
    try
    {
        instream.Seek(-1);
        long p = instream.GetPos();
        Log.Warn("Seek to -1 returned a position of " + p);
        int result = instream.Read();
        NUnit.Framework.Assert.Fail("expected an exception, got data " + result +
            " at a position of " + p);
    }
    catch (EOFException e)
    {
        // Bad seek: expected.
        HandleExpectedException(e);
    }
    catch (IOException e)
    {
        // Bad seek: expected, but not as preferred as an EOFException.
        HandleRelaxedException("a negative seek", "EOFException", e);
    }
    Assert.Equal(0, instream.GetPos());
}
/// <exception cref="System.IO.IOException"/>
private void CheckFile(FileSystem fileSys, Path name)
{
    BlockLocation[] locations = fileSys.GetFileBlockLocations(
        fileSys.GetFileStatus(name), 0, fileSize);
    NUnit.Framework.Assert.AreEqual("Number of blocks", fileSize, locations.Length);
    FSDataInputStream stm = fileSys.Open(name);
    byte[] expected = new byte[fileSize];
    if (simulatedStorage)
    {
        for (int i = 0; i < expected.Length; ++i)
        {
            expected[i] = SimulatedFSDataset.DefaultDatabyte;
        }
    }
    else
    {
        Random rand = new Random(seed);
        rand.NextBytes(expected);
    }
    // Do a sanity check: read the file.
    byte[] actual = new byte[fileSize];
    stm.ReadFully(0, actual);
    CheckAndEraseData(actual, 0, expected, "Read Sanity Test");
    stm.Close();
}
public virtual void TestRamDiskShortCircuitRead()
{
    StartUpCluster(ReplFactor,
        new StorageType[] { StorageType.RamDisk, StorageType.Default },
        2 * BlockSize - 1, true); // 1 replica + delta, SCR read
    string MethodName = GenericTestUtils.GetMethodName();
    int Seed = unchecked((int)(0xFADED));
    Path path = new Path("/" + MethodName + ".dat");
    MakeRandomTestFile(path, BlockSize, true, Seed);
    EnsureFileReplicasOnStorageType(path, StorageType.RamDisk);
    // Sleep for a short time to allow the lazy writer thread to do its job.
    Sharpen.Thread.Sleep(3 * LazyWriterIntervalSec * 1000);
    //assertThat(verifyReadRandomFile(path, BLOCK_SIZE, SEED), is(true));
    // Open the file once (the original opened it twice, leaking the first
    // handle) and verify the short-circuit read counters.
    FSDataInputStream fis = fs.Open(path);
    try
    {
        byte[] buf = new byte[BufferLength];
        fis.Read(0, buf, 0, BufferLength);
        HdfsDataInputStream dfsis = (HdfsDataInputStream)fis;
        NUnit.Framework.Assert.AreEqual(BufferLength,
            dfsis.GetReadStatistics().GetTotalBytesRead());
        NUnit.Framework.Assert.AreEqual(BufferLength,
            dfsis.GetReadStatistics().GetTotalShortCircuitBytesRead());
    }
    finally
    {
        fis.Close();
        fis = null;
    }
}
/// <summary>
/// Tally up the values and ensure that we got as much data
/// out as we put in.
/// </summary>
/// <remarks>
/// Tally up the values and ensure that we got as much data
/// out as we put in.
/// Each mapper generated 'NUMBER_FILE_VAL' values (0..NUMBER_FILE_VAL-1).
/// Verify that across all our reducers we got exactly this much
/// data back.
/// </remarks>
/// <exception cref="System.Exception"/>
private void VerifyNumberJob(int numMaps)
{
    Path outputDir = GetOutputPath();
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.GetLocal(conf);
    FileStatus[] stats = fs.ListStatus(outputDir);
    int valueSum = 0;
    foreach (FileStatus f in stats)
    {
        FSDataInputStream istream = fs.Open(f.GetPath());
        BufferedReader r = new BufferedReader(new InputStreamReader(istream));
        string line = null;
        while ((line = r.ReadLine()) != null)
        {
            valueSum += Sharpen.Extensions.ValueOf(line.Trim());
        }
        r.Close();
    }
    int maxVal = NumberFileVal - 1;
    int expectedPerMapper = maxVal * (maxVal + 1) / 2;
    int expectedSum = expectedPerMapper * numMaps;
    Log.Info("expected sum: " + expectedSum + ", got " + valueSum);
    NUnit.Framework.Assert.AreEqual("Didn't get all our results back", expectedSum, valueSum);
}
/// <exception cref="System.Exception"/>
public virtual void TestShmBasedStaleness()
{
    BlockReaderTestUtil.EnableShortCircuitShmTracing();
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration conf = CreateShortCircuitConf("testShmBasedStaleness", sockDir);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
    cluster.WaitActive();
    DistributedFileSystem fs = cluster.GetFileSystem();
    ShortCircuitCache cache = fs.GetClient().GetClientContext().GetShortCircuitCache();
    string TestFile = "/test_file";
    int TestFileLen = 8193;
    int Seed = unchecked((int)(0xFADED));
    DFSTestUtil.CreateFile(fs, new Path(TestFile), TestFileLen, (short)1, Seed);
    FSDataInputStream fis = fs.Open(new Path(TestFile));
    int first = fis.Read();
    ExtendedBlock block = DFSTestUtil.GetFirstBlock(fs, new Path(TestFile));
    NUnit.Framework.Assert.IsTrue(first != -1);
    cache.Accept(new _CacheVisitor_502(block));
    // Stop the datanode. This will close the socket keeping the client's
    // shared memory segment alive, and make it stale.
    cluster.GetDataNodes()[0].Shutdown();
    cache.Accept(new _CacheVisitor_518(block));
    cluster.Shutdown();
    sockDir.Close();
}
/// <exception cref="System.IO.IOException"/>
public SpillRecord(Path indexFileName, JobConf job, Checksum crc, string expectedIndexOwner)
{
    FileSystem rfs = FileSystem.GetLocal(job).GetRaw();
    FSDataInputStream @in = SecureIOUtils.OpenFSDataInputStream(
        new FilePath(indexFileName.ToUri().GetRawPath()), expectedIndexOwner, null);
    try
    {
        long length = rfs.GetFileStatus(indexFileName).GetLen();
        int partitions = (int)length / MapTask.MapOutputIndexRecordLength;
        int size = partitions * MapTask.MapOutputIndexRecordLength;
        buf = ByteBuffer.Allocate(size);
        if (crc != null)
        {
            // Verify the index records against the checksum stored after them.
            crc.Reset();
            CheckedInputStream chk = new CheckedInputStream(@in, crc);
            IOUtils.ReadFully(chk, ((byte[])buf.Array()), 0, size);
            if (chk.GetChecksum().GetValue() != @in.ReadLong())
            {
                throw new ChecksumException("Checksum error reading spill index: " + indexFileName, -1);
            }
        }
        else
        {
            IOUtils.ReadFully(@in, ((byte[])buf.Array()), 0, size);
        }
        entries = buf.AsLongBuffer();
    }
    finally
    {
        @in.Close();
    }
}
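// Hedged sketch of the check above, reduced to its essentials: the spill
// index file is laid out as [records][8-byte checksum], so the reader pulls
// the records through a checksumming wrapper and compares the running value
// against the stored trailer. The stream and checksum types mirror the
// Sharpen-style classes used above; treat the exact signatures as assumptions.
/// <exception cref="System.IO.IOException"/>
internal static void VerifyTrailingChecksum(FSDataInputStream src, byte[] payload, Checksum crc)
{
    crc.Reset();
    CheckedInputStream chk = new CheckedInputStream(src, crc);
    IOUtils.ReadFully(chk, payload, 0, payload.Length); // checksum accumulates as we read
    if (chk.GetChecksum().GetValue() != src.ReadLong()) // 8-byte trailer follows the records
    {
        throw new ChecksumException("trailing checksum mismatch", -1);
    }
}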
/// <exception cref="System.IO.IOException"/>
protected internal override void ProcessArguments(List<PathData> items)
{
    base.ProcessArguments(items);
    if (exitCode != 0)
    {
        // Check for an error collecting paths.
        return;
    }
    FSDataOutputStream @out = dst.fs.Create(dst.path);
    try
    {
        foreach (PathData src in srcs)
        {
            FSDataInputStream @in = src.fs.Open(src.path);
            try
            {
                IOUtils.CopyBytes(@in, @out, GetConf(), false);
                if (delimiter != null)
                {
                    @out.Write(Runtime.GetBytesForString(delimiter, "UTF-8"));
                }
            }
            finally
            {
                @in.Close();
            }
        }
    }
    finally
    {
        @out.Close();
    }
}
public virtual void TestAppendLessThanChecksumChunk()
{
    byte[] buf = new byte[1024];
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
        .NumDataNodes(1).Build();
    cluster.WaitActive();
    try
    {
        using (DistributedFileSystem fs = cluster.GetFileSystem())
        {
            int len1 = 200;
            int len2 = 300;
            Path p = new Path("/foo");
            FSDataOutputStream @out = fs.Create(p);
            @out.Write(buf, 0, len1);
            @out.Close();
            @out = fs.Append(p);
            @out.Write(buf, 0, len2);
            // Flush but leave open.
            @out.Hflush();
            // Read data to verify the replica's content and checksum are correct.
            FSDataInputStream @in = fs.Open(p);
            int length = @in.Read(0, buf, 0, len1 + len2);
            NUnit.Framework.Assert.IsTrue(length > 0);
            @in.Close();
            @out.Close();
        }
    }
    finally
    {
        cluster.Shutdown();
    }
}
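// Hedged note on the pattern above: Hflush() makes appended bytes visible to
// new readers without closing the writer, which is why the pread of
// len1 + len2 bytes can succeed while @out is still open. A minimal
// reader-side sketch, reusing the "/foo" path from the test:
//
//   FSDataInputStream r = fs.Open(new Path("/foo"));
//   byte[] b = new byte[500];
//   int n = r.Read(0, b, 0, b.Length); // sees both the closed and the hflushed data
//   r.Close();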
public virtual void TestSeekBigFile()
{
    Describe("Seek round a large file and verify the bytes are what is expected");
    Path testSeekFile = Path("bigseekfile.txt");
    byte[] block = ContractTestUtils.Dataset(65536, 0, 255);
    ContractTestUtils.CreateFile(GetFileSystem(), testSeekFile, false, block);
    instream = GetFileSystem().Open(testSeekFile);
    Assert.Equal(0, instream.GetPos());
    // Expect that seek to 0 works.
    instream.Seek(0);
    int result = instream.Read();
    Assert.Equal(0, result);
    Assert.Equal(1, instream.Read());
    Assert.Equal(2, instream.Read());
    // Do a seek 32KB ahead.
    instream.Seek(32768);
    Assert.Equal("@32768", block[32768], unchecked((byte)instream.Read()));
    instream.Seek(40000);
    Assert.Equal("@40000", block[40000], unchecked((byte)instream.Read()));
    instream.Seek(8191);
    Assert.Equal("@8191", block[8191], unchecked((byte)instream.Read()));
    instream.Seek(0);
    Assert.Equal("@0", 0, unchecked((byte)instream.Read()));
}
/// <summary>Tests read/seek/getPos/skip operations for an input stream.</summary>
/// <exception cref="System.Exception"/>
private void TestChecker(FileSystem fileSys, bool readCS)
{
    Path file = new Path("try.dat");
    WriteFile(fileSys, file);
    try
    {
        if (!readCS)
        {
            fileSys.SetVerifyChecksum(false);
        }
        stm = fileSys.Open(file);
        CheckReadAndGetPos();
        CheckSeek();
        CheckSkip();
        // checkMark
        NUnit.Framework.Assert.IsFalse(stm.MarkSupported());
        stm.Close();
    }
    finally
    {
        if (!readCS)
        {
            fileSys.SetVerifyChecksum(true);
        }
        CleanupFile(fileSys, file);
    }
}
/// <exception cref="System.IO.IOException"/>
public static void Check(FileSystem fs, Path p, long length)
{
    int i = -1;
    try
    {
        FileStatus status = fs.GetFileStatus(p);
        FSDataInputStream @in = fs.Open(p);
        if (@in.GetWrappedStream() is DFSInputStream)
        {
            long len = ((DFSInputStream)@in.GetWrappedStream()).GetFileLength();
            NUnit.Framework.Assert.AreEqual(length, len);
        }
        else
        {
            NUnit.Framework.Assert.AreEqual(length, status.GetLen());
        }
        for (i++; i < length; i++)
        {
            NUnit.Framework.Assert.AreEqual(unchecked((byte)i), unchecked((byte)@in.Read()));
        }
        i = -(int)length;
        NUnit.Framework.Assert.AreEqual(-1, @in.Read()); // EOF
        @in.Close();
    }
    catch (IOException ioe)
    {
        throw new IOException("p=" + p + ", length=" + length + ", i=" + i, ioe);
    }
}
/// <exception cref="System.IO.IOException"/>
internal virtual void ReadFile(FileSystem fs, Path path, int fileLen)
{
    byte[] arr = new byte[fileLen];
    FSDataInputStream @in = fs.Open(path);
    try
    {
        @in.ReadFully(arr);
    }
    finally
    {
        // Close the stream so the handle isn't leaked.
        @in.Close();
    }
}
/// <exception cref="System.IO.IOException"/>
private void DoPread(FSDataInputStream stm, long position, byte[] buffer, int offset, int length)
{
    int nread = 0;
    long totalRead = 0;
    DFSInputStream dfstm = null;
    if (stm.GetWrappedStream() is DFSInputStream)
    {
        dfstm = (DFSInputStream)stm.GetWrappedStream();
        totalRead = dfstm.GetReadStatistics().GetTotalBytesRead();
    }
    while (nread < length)
    {
        int nbytes = stm.Read(position + nread, buffer, offset + nread, length - nread);
        NUnit.Framework.Assert.IsTrue("Error in pread", nbytes > 0);
        nread += nbytes;
    }
    if (dfstm != null)
    {
        if (isHedgedRead)
        {
            // A hedged read may fetch the same range from more than one datanode,
            // so the byte counter can exceed the requested length.
            NUnit.Framework.Assert.IsTrue("Expected read statistic to be incremented",
                length <= dfstm.GetReadStatistics().GetTotalBytesRead() - totalRead);
        }
        else
        {
            NUnit.Framework.Assert.AreEqual("Expected read statistic to be incremented",
                length, dfstm.GetReadStatistics().GetTotalBytesRead() - totalRead);
        }
    }
}
/// <summary>Read the file and convert to a byte dataset.</summary>
/// <remarks>
/// Read the file and convert to a byte dataset.
/// This implements readFully internally, so that it will read
/// in the file without ever having to seek().
/// </remarks>
/// <param name="fs">filesystem</param>
/// <param name="path">path to read from</param>
/// <param name="len">length of data to read</param>
/// <returns>the bytes</returns>
/// <exception cref="System.IO.IOException">IO problems</exception>
public static byte[] ReadDataset(FileSystem fs, Path path, int len)
{
    FSDataInputStream @in = fs.Open(path);
    byte[] dest = new byte[len];
    int nread = 0;
    try
    {
        while (nread < len)
        {
            int nbytes = @in.Read(dest, nread, len - nread);
            if (nbytes < 0)
            {
                throw new EOFException("End of file reached before reading fully.");
            }
            nread += nbytes;
        }
    }
    finally
    {
        @in.Close();
    }
    return dest;
}
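// Hedged usage sketch: round-trip a generated dataset through the filesystem
// and read it back with ReadDataset. The path is an illustrative assumption;
// Dataset() and CreateFile() are the contract-test helpers shown in this file.
/// <exception cref="System.IO.IOException"/>
public static void ReadDatasetExample(FileSystem fs)
{
    byte[] written = ContractTestUtils.Dataset(8192, 0, 255); // predictable offset => value data
    Path p = new Path("/tmp/dataset.bin"); // hypothetical path
    ContractTestUtils.CreateFile(fs, p, true, written);
    byte[] read = ReadDataset(fs, p, written.Length);
    // At this point 'read' should match 'written' byte-for-byte.
}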
public virtual void TestShortCircuitTraceHooks()
{
    Assume.AssumeTrue(NativeCodeLoader.IsNativeCodeLoaded() && !Path.Windows);
    conf = new Configuration();
    conf.Set(DFSConfigKeys.DfsClientHtracePrefix + SpanReceiverHost.SpanReceiversConfSuffix,
        typeof(TestTracing.SetSpanReceiver).FullName);
    conf.SetLong("dfs.blocksize", 100 * 1024);
    conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitKey, true);
    conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitSkipChecksumKey, false);
    conf.Set(DFSConfigKeys.DfsDomainSocketPathKey, "testShortCircuitTraceHooks._PORT");
    conf.Set(DFSConfigKeys.DfsChecksumTypeKey, "CRC32C");
    cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
    dfs = cluster.GetFileSystem();
    try
    {
        DFSTestUtil.CreateFile(dfs, TestPath, TestLength, (short)1, 5678L);
        TraceScope ts = Trace.StartSpan("testShortCircuitTraceHooks", Sampler.Always);
        FSDataInputStream stream = dfs.Open(TestPath);
        byte[] buf = new byte[TestLength];
        IOUtils.ReadFully(stream, buf, 0, TestLength);
        stream.Close();
        ts.Close();
        string[] expectedSpanNames = new string[] {
            "OpRequestShortCircuitAccessProto",
            "ShortCircuitShmRequestProto" };
        TestTracing.AssertSpanNamesFound(expectedSpanNames);
    }
    finally
    {
        dfs.Close();
        cluster.Shutdown();
    }
}
/// <exception cref="System.IO.IOException"/>
public CombineFileLineRecordReader(CombineFileSplit split, TaskAttemptContext context, int index)
{
    this.path = split.GetPath(index);
    fs = this.path.GetFileSystem(context.GetConfiguration());
    this.startOffset = split.GetOffset(index);       // offset of the chunk
    this.end = startOffset + split.GetLength(index); // end of the chunk
    bool skipFirstLine = false;
    // Open the file.
    fileIn = fs.Open(path);
    if (startOffset != 0)
    {
        skipFirstLine = true;
        --startOffset;
        fileIn.Seek(startOffset);
    }
    reader = new LineReader(fileIn);
    if (skipFirstLine)
    {
        // Skip the first line and re-establish "startOffset".
        startOffset += reader.ReadLine(new Text(), 0,
            (int)Math.Min((long)int.MaxValue, end - startOffset));
    }
    this.pos = startOffset; // current pos
}
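// Hedged sketch of the boundary convention the constructor above relies on.
// A line that starts in split N-1 may cross into split N, so every reader
// except the one at offset 0 backs up one byte and discards everything up to
// and including the first newline; the previous reader consumes that line.
// The bytes and split position below are illustrative assumptions.
//
//   bytes:    ... 'a' 'b' '\n' 'c' 'd' ...
//   split 2 starts here --^
//
// The split-2 reader seeks to (start - 1), reads "b\n" as a throwaway line,
// and begins real parsing at 'c'. The "ab" line therefore belongs wholly to
// the split-1 reader, so no line is read twice and none is dropped.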
/// <summary>
/// Test for the case where the client begins to read a long block, but doesn't
/// read bytes off the stream quickly.
/// </summary>
/// <remarks>
/// Test for the case where the client begins to read a long block, but doesn't
/// read bytes off the stream quickly. The datanode should time out sending the
/// chunks and the transceiver should die, even if it has a long keepalive.
/// </remarks>
/// <exception cref="System.Exception"/>
public virtual void TestSlowReader()
{
    // Set a client socket cache expiry time much longer than
    // the datanode-side expiration time.
    long ClientExpiryMs = 600000L;
    Configuration clientConf = new Configuration(conf);
    clientConf.SetLong(DFSConfigKeys.DfsClientSocketCacheExpiryMsecKey, ClientExpiryMs);
    clientConf.Set(DFSConfigKeys.DfsClientContext, "testSlowReader");
    DistributedFileSystem fs = (DistributedFileSystem)FileSystem.Get(cluster.GetURI(), clientConf);
    // Restart the DN with a shorter write timeout.
    MiniDFSCluster.DataNodeProperties props = cluster.StopDataNode(0);
    props.conf.SetInt(DFSConfigKeys.DfsDatanodeSocketWriteTimeoutKey, WriteTimeout);
    props.conf.SetInt(DFSConfigKeys.DfsDatanodeSocketReuseKeepaliveKey, 120000);
    NUnit.Framework.Assert.IsTrue(cluster.RestartDataNode(props, true));
    dn = cluster.GetDataNodes()[0];
    // Wait for heartbeats to avoid a startup race where we
    // try to write the block while the DN is still starting.
    cluster.TriggerHeartbeats();
    DFSTestUtil.CreateFile(fs, TestFile, 1024 * 1024 * 8L, (short)1, 0L);
    FSDataInputStream stm = fs.Open(TestFile);
    stm.Read();
    AssertXceiverCount(1);
    // The DN should time out in sendChunks, and this should force
    // the xceiver to exit.
    GenericTestUtils.WaitFor(new _Supplier_193(this), 500, 50000);
    IOUtils.CloseStream(stm);
}
/// <exception cref="System.Exception"/>
private void ReadTestFile(string testFileName)
{
    Path filePath = new Path(testFileName);
    FSDataInputStream istream = dfs.Open(filePath, 10240);
    ByteBuffer buf = ByteBuffer.Allocate(10240);
    int count = 0;
    try
    {
        while (istream.Read(buf) > 0)
        {
            count += 1;
            buf.Clear();
            istream.Seek(istream.GetPos() + 5);
        }
    }
    catch (IOException)
    {
        // Ignore this; it's probably a seek past EOF.
    }
    finally
    {
        istream.Close();
    }
}
public void testMaxLengthToReader()
{
    Configuration conf = new Configuration();
    OrcProto.Type rowType = OrcProto.Type.CreateBuilder()
        .SetKind(OrcProto.Type.Types.Kind.STRUCT).Build();
    OrcProto.Footer footer = OrcProto.Footer.CreateBuilder()
        .SetHeaderLength(0).SetContentLength(0).SetNumberOfRows(0)
        .SetRowIndexStride(0).AddTypes(rowType).Build();
    OrcProto.PostScript ps = OrcProto.PostScript.CreateBuilder()
        .SetCompression(OrcProto.CompressionKind.NONE)
        .SetFooterLength((ulong)footer.SerializedSize)
        .SetMagic("ORC").AddVersion(0).AddVersion(11).Build();
    DataOutputBuffer buffer = new DataOutputBuffer();
    footer.WriteTo(buffer);
    ps.WriteTo(buffer);
    // The last byte of an ORC file holds the length of the postscript.
    buffer.write(ps.SerializedSize);
    FileSystem fs = Mockito.mock(typeof(FileSystem), settings);
    FSDataInputStream file = new FSDataInputStream(
        new BufferInStream(buffer.getData(), buffer.getLength()));
    string p = "/dir/file.orc";
    Mockito.when(fs.open(p)).thenReturn(file);
    OrcFile.ReaderOptions options = OrcFile.readerOptions(conf);
    options.filesystem(fs);
    options.maxLength(buffer.getLength());
    Mockito.when(fs.getFileStatus(p))
        .thenReturn(new FileStatus(10, false, 3, 3000, 0, p));
    Reader reader = OrcFile.createReader(p, options);
}