/// <exception cref="System.IO.IOException"/> public static void CreateSplitFiles(Path jobSubmitDir, Configuration conf, FileSystem fs, InputSplit[] splits) { FSDataOutputStream @out = CreateFile(fs, JobSubmissionFiles.GetJobSplitFile(jobSubmitDir ), conf); JobSplit.SplitMetaInfo[] info = WriteOldSplits(splits, @out, conf); @out.Close(); WriteJobSplitMetaInfo(fs, JobSubmissionFiles.GetJobSplitMetaFile(jobSubmitDir), new FsPermission(JobSubmissionFiles.JobFilePermission), splitVersion, info); }
/// <summary>Generates data that can be used for Job Control tests.</summary>
/// <param name="fs">FileSystem to create data in.</param>
/// <param name="dirPath">Path to create the data in.</param>
/// <exception cref="System.IO.IOException">If an error occurs creating the data.</exception>
internal static void GenerateData(FileSystem fs, Path dirPath)
{
    FSDataOutputStream @out = fs.Create(new Path(dirPath, "data.txt"));
    for (int i = 0; i < 10000; i++)
    {
        string line = GenerateRandomLine();
        @out.Write(Sharpen.Runtime.GetBytesForString(line, "UTF-8"));
    }
    @out.Close();
}
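// GenerateRandomLine() is referenced above but not shown in this section. A minimal
// sketch of such a helper (hypothetical; the real implementation may differ) could
// build lines of random lowercase words:
private static string GenerateRandomLine()
{
    // Hypothetical illustration only: 1..10 random words of 1..9 letters per line.
    Random rand = new Random();
    StringBuilder sb = new StringBuilder();
    int words = 1 + rand.Next(10);
    for (int i = 0; i < words; i++)
    {
        int len = 1 + rand.Next(9);
        for (int j = 0; j < len; j++)
        {
            sb.Append((char)('a' + rand.Next(26)));
        }
        sb.Append(' ');
    }
    sb.Append('\n');
    return sb.ToString();
}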
/// <exception cref="System.IO.IOException"/> private void CreateFile(FileSystem fileSys, Path name) { FSDataOutputStream stm = fileSys.Create(name, true, fileSys.GetConf().GetInt(CommonConfigurationKeys .IoFileBufferSizeKey, 4096), (short)2, blockSize); byte[] buffer = new byte[1024]; Random rand = new Random(seed); rand.NextBytes(buffer); stm.Write(buffer); stm.Close(); }
/// <exception cref="System.Exception"/> public virtual void TestBlockReaderLocalLegacyWithAppend() { short ReplFactor = 1; HdfsConfiguration conf = GetConfiguration(null); conf.SetBoolean(DFSConfigKeys.DfsClientUseLegacyBlockreaderlocal, true); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build(); cluster.WaitActive(); DistributedFileSystem dfs = cluster.GetFileSystem(); Path path = new Path("/testBlockReaderLocalLegacy"); DFSTestUtil.CreateFile(dfs, path, 10, ReplFactor, 0); DFSTestUtil.WaitReplication(dfs, path, ReplFactor); ClientDatanodeProtocol proxy; Org.Apache.Hadoop.Security.Token.Token <BlockTokenIdentifier> token; ExtendedBlock originalBlock; long originalGS; { LocatedBlock lb = cluster.GetNameNode().GetRpcServer().GetBlockLocations(path.ToString (), 0, 1).Get(0); proxy = DFSUtil.CreateClientDatanodeProtocolProxy(lb.GetLocations()[0], conf, 60000 , false); token = lb.GetBlockToken(); // get block and generation stamp ExtendedBlock blk = new ExtendedBlock(lb.GetBlock()); originalBlock = new ExtendedBlock(blk); originalGS = originalBlock.GetGenerationStamp(); // test getBlockLocalPathInfo BlockLocalPathInfo info = proxy.GetBlockLocalPathInfo(blk, token); NUnit.Framework.Assert.AreEqual(originalGS, info.GetBlock().GetGenerationStamp()); } { // append one byte FSDataOutputStream @out = dfs.Append(path); @out.Write(1); @out.Close(); } { // get new generation stamp LocatedBlock lb = cluster.GetNameNode().GetRpcServer().GetBlockLocations(path.ToString (), 0, 1).Get(0); long newGS = lb.GetBlock().GetGenerationStamp(); NUnit.Framework.Assert.IsTrue(newGS > originalGS); // getBlockLocalPathInfo using the original block. NUnit.Framework.Assert.AreEqual(originalGS, originalBlock.GetGenerationStamp()); BlockLocalPathInfo info = proxy.GetBlockLocalPathInfo(originalBlock, token); NUnit.Framework.Assert.AreEqual(newGS, info.GetBlock().GetGenerationStamp()); } cluster.Shutdown(); }
/// <summary>
/// Create a file with the name <code>file</code> and
/// a length of <code>fileSize</code>.
/// </summary>
/// <remarks>
/// Create a file with the name <code>file</code> and
/// a length of <code>fileSize</code>. The file is filled with character 'a'.
/// </remarks>
/// <exception cref="System.IO.IOException"/>
private void GenFile(Path file, long fileSize)
{
    FSDataOutputStream @out = fc.Create(file,
        EnumSet.Of(CreateFlag.Create, CreateFlag.Overwrite),
        Options.CreateOpts.CreateParent(),
        Options.CreateOpts.BufferSize(4096),
        Options.CreateOpts.RepFac((short)3));
    for (long i = 0; i < fileSize; i++)
    {
        @out.WriteByte('a');
    }
    @out.Close();
}
/// <summary>Write a file.</summary>
/// <remarks>
/// Write a file.
/// The <code>overwrite</code> flag controls whether the create operation
/// is allowed to overwrite an existing file.
/// </remarks>
/// <param name="fs">filesystem</param>
/// <param name="path">path to write to</param>
/// <param name="src">source buffer holding the data to write</param>
/// <param name="len">length of data</param>
/// <param name="buffersize">buffer size for the output stream</param>
/// <param name="overwrite">should the create option allow overwrites?</param>
/// <exception cref="System.IO.IOException">IO problems</exception>
public static void WriteDataset(FileSystem fs, Path path, byte[] src, int len, int buffersize, bool overwrite)
{
    Assert.True("Not enough data in source array to write " + len + " bytes",
        src.Length >= len);
    FSDataOutputStream @out = fs.Create(path, overwrite,
        fs.GetConf().GetInt(IoFileBufferSize, 4096), (short)1, buffersize);
    @out.Write(src, 0, len);
    @out.Close();
    AssertFileHasLength(fs, path, len);
}
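// Usage sketch (illustrative, not from the original source): writing a known dataset
// and reading it back for verification, assuming the ContractTestUtils helpers used
// elsewhere in this section (Dataset, ReadDataset, ValidateFileContent):
//
//   byte[] data = ContractTestUtils.Dataset(4096, 'a', 'z');
//   ContractTestUtils.WriteDataset(fs, path, data, data.Length, 4096, true);
//   byte[] readBack = ContractTestUtils.ReadDataset(fs, path, data.Length);
//   ContractTestUtils.ValidateFileContent(readBack, new byte[][] { data });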
public void Run()
{
    try
    {
        FSDataOutputStream outputStream = this._enclosing.fileSystem.Create(file);
        if (syncType == TestFileConcurrentReader.SyncType.Append)
        {
            outputStream.Close();
            outputStream = this._enclosing.fileSystem.Append(file);
        }
        try
        {
            for (int i = 0; !error.Get() && i < numWrites; i++)
            {
                byte[] writeBuf = DFSTestUtil.GenerateSequentialBytes(i * writeSize, writeSize);
                outputStream.Write(writeBuf);
                if (syncType == TestFileConcurrentReader.SyncType.Sync)
                {
                    outputStream.Hflush();
                }
                writerStarted.Set(true);
            }
        }
        catch (IOException e)
        {
            error.Set(true);
            TestFileConcurrentReader.Log.Error("error writing to file", e);
        }
        finally
        {
            outputStream.Close();
        }
        writerDone.Set(true);
    }
    catch (Exception e)
    {
        TestFileConcurrentReader.Log.Error("error in writer", e);
        throw new RuntimeException(e);
    }
}
public virtual void Pipeline_02_03()
{
    Configuration conf = new HdfsConfiguration();
    conf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
    // create cluster
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(4).Build();
    try
    {
        // change the lease limits.
        cluster.SetLeasePeriod(SoftLeaseLimit, HardLeaseLimit);
        // wait for the cluster
        cluster.WaitActive();
        FileSystem fs = cluster.GetFileSystem();
        Path p = new Path(Dir, "file1");
        int half = BlockSize / 2;
        {
            // a. On Machine M1, create file. Write half a block of data.
            //    Invoke DFSOutputStream.hflush() on the dfs file handle.
            //    Do not close the file yet.
            FSDataOutputStream @out = fs.Create(p, true,
                fs.GetConf().GetInt(CommonConfigurationKeys.IoFileBufferSizeKey, 4096),
                (short)3, BlockSize);
            Write(@out, 0, half);
            // hflush
            ((DFSOutputStream)@out.GetWrappedStream()).Hflush();
        }
        // b. On another machine M2, open the file and verify that the half-block
        //    of data can be read successfully.
        CheckFile(p, half, conf);
        AppendTestUtil.Log.Info("leasechecker.interruptAndJoin()");
        ((DistributedFileSystem)fs).dfs.GetLeaseRenewer().InterruptAndJoin();
        {
            // c. On M1, append another half block of data. Close the file on M1.
            // sleep to let the lease expire.
            Sharpen.Thread.Sleep(2 * SoftLeaseLimit);
            UserGroupInformation current = UserGroupInformation.GetCurrentUser();
            UserGroupInformation ugi = UserGroupInformation.CreateUserForTesting(
                current.GetShortUserName() + "x", new string[] { "supergroup" });
            DistributedFileSystem dfs = ugi.DoAs(new _PrivilegedExceptionAction_102(conf));
            FSDataOutputStream @out = Append(dfs, p);
            Write(@out, 0, half);
            @out.Close();
        }
        // d. On M2, open the file and read 1 block of data from it. Close the file.
        CheckFile(p, 2 * half, conf);
    }
    finally
    {
        cluster.Shutdown();
    }
}
/// <exception cref="System.Exception"/> public static void Launch() { JobConf conf = new JobConf(typeof(Org.Apache.Hadoop.Mapred.TestFieldSelection)); FileSystem fs = FileSystem.Get(conf); int numOfInputLines = 10; Path OutputDir = new Path("build/test/output_for_field_selection_test"); Path InputDir = new Path("build/test/input_for_field_selection_test"); string inputFile = "input.txt"; fs.Delete(InputDir, true); fs.Mkdirs(InputDir); fs.Delete(OutputDir, true); StringBuilder inputData = new StringBuilder(); StringBuilder expectedOutput = new StringBuilder(); TestMRFieldSelection.ConstructInputOutputData(inputData, expectedOutput, numOfInputLines ); FSDataOutputStream fileOut = fs.Create(new Path(InputDir, inputFile)); fileOut.Write(Sharpen.Runtime.GetBytesForString(inputData.ToString(), "utf-8")); fileOut.Close(); System.Console.Out.WriteLine("inputData:"); System.Console.Out.WriteLine(inputData.ToString()); JobConf job = new JobConf(conf, typeof(Org.Apache.Hadoop.Mapred.TestFieldSelection )); FileInputFormat.SetInputPaths(job, InputDir); job.SetInputFormat(typeof(TextInputFormat)); job.SetMapperClass(typeof(FieldSelectionMapReduce)); job.SetReducerClass(typeof(FieldSelectionMapReduce)); FileOutputFormat.SetOutputPath(job, OutputDir); job.SetOutputKeyClass(typeof(Org.Apache.Hadoop.IO.Text)); job.SetOutputValueClass(typeof(Org.Apache.Hadoop.IO.Text)); job.SetOutputFormat(typeof(TextOutputFormat)); job.SetNumReduceTasks(1); job.Set(FieldSelectionHelper.DataFieldSeperator, "-"); job.Set(FieldSelectionHelper.MapOutputKeyValueSpec, "6,5,1-3:0-"); job.Set(FieldSelectionHelper.ReduceOutputKeyValueSpec, ":4,3,2,1,0,0-"); JobClient.RunJob(job); // // Finally, we compare the reconstructed answer key with the // original one. Remember, we need to ignore zero-count items // in the original key. // bool success = true; Path outPath = new Path(OutputDir, "part-00000"); string outdata = MapReduceTestUtil.ReadOutput(outPath, job); NUnit.Framework.Assert.AreEqual(expectedOutput.ToString(), outdata); fs.Delete(OutputDir, true); fs.Delete(InputDir, true); }
public virtual void TestSeek()
{
    Path testFile = new Path("/testfile+1");
    FSDataOutputStream @out = hdfs.Create(testFile, true);
    @out.WriteBytes("0123456789");
    @out.Close();
    FSDataInputStream @in = hftpFs.Open(testFile);
    @in.Seek(7);
    NUnit.Framework.Assert.AreEqual('7', @in.Read());
    @in.Close();
}
/// <exception cref="System.IO.IOException"/> private static void WriteFile(FileContext fc, Path name, int fileSize) { // Create and write a file that contains three blocks of data FSDataOutputStream stm = fc.Create(name, EnumSet.Of(CreateFlag.Create), Options.CreateOpts .CreateParent()); byte[] buffer = new byte[fileSize]; Random rand = new Random(seed); rand.NextBytes(buffer); stm.Write(buffer); stm.Close(); }
/// <exception cref="System.IO.IOException"/> private void AppendWithTwoFs(Path p, FileSystem fs1, FileSystem fs2) { FSDataOutputStream stm = fs1.Create(p); try { AppendTestUtil.Write(stm, 0, SegmentLength); } finally { stm.Close(); } stm = fs2.Append(p); try { AppendTestUtil.Write(stm, SegmentLength, SegmentLength); } finally { stm.Close(); } }
public virtual void TestAppendToExistingFile()
{
    byte[] original = ContractTestUtils.Dataset(8192, 'A', 'Z');
    byte[] appended = ContractTestUtils.Dataset(8192, '0', '9');
    ContractTestUtils.CreateFile(GetFileSystem(), target, false, original);
    FSDataOutputStream outputStream = GetFileSystem().Append(target);
    outputStream.Write(appended);
    outputStream.Close();
    byte[] bytes = ContractTestUtils.ReadDataset(GetFileSystem(), target,
        original.Length + appended.Length);
    ContractTestUtils.ValidateFileContent(bytes, new byte[][] { original, appended });
}
/// <exception cref="System.IO.IOException"/> private void CloseOutput() { if (writer != null) { writer.Close(); writer = null; } if (@out != null) { @out.Close(); @out = null; } }
/// <exception cref="System.IO.IOException"/> private void WriteAndAppend(FileSystem fs, Path p, int lengthForCreate, int lengthForAppend ) { // Creating a file with 4096 blockSize to write multiple blocks FSDataOutputStream stream = fs.Create(p, true, BlockSize, (short)1, BlockSize); try { AppendTestUtil.Write(stream, 0, lengthForCreate); stream.Close(); stream = fs.Append(p); AppendTestUtil.Write(stream, lengthForCreate, lengthForAppend); stream.Close(); } finally { IOUtils.CloseStream(stream); } int totalLength = lengthForCreate + lengthForAppend; NUnit.Framework.Assert.AreEqual(totalLength, fs.GetFileStatus(p).GetLen()); }
/// <exception cref="System.IO.IOException"/> private static void CreateFile(Path inFile, Configuration conf) { FileSystem fs = inFile.GetFileSystem(conf); if (fs.Exists(inFile)) { return; } FSDataOutputStream @out = fs.Create(inFile); @out.WriteBytes("This is a test file"); @out.Close(); }
/// <exception cref="System.IO.IOException"/> public override void Close() { try { FlushBuffer(); sums.Close(); datas.Close(); } finally { isClosed = true; } }
/// <exception cref="System.IO.IOException"/> internal virtual void Close() { try { encoder.Flush(); @out.Close(); @out = null; } finally { IOUtils.Cleanup(Log, @out); } }
/// <summary>Test that writes to an incomplete block are available to a reader</summary>
/// <exception cref="System.IO.IOException"/>
public virtual void TestUnfinishedBlockRead()
{
    // create a new file in the root, write data, do not close
    Path file1 = new Path("/unfinished-block");
    FSDataOutputStream stm = TestFileCreation.CreateFile(fileSystem, file1, 1);
    // write partial block and sync
    int partialBlockSize = blockSize / 2;
    WriteFileAndSync(stm, partialBlockSize);
    // Make sure a client can read it before it is closed
    CheckCanRead(fileSystem, file1, partialBlockSize);
    stm.Close();
}
/// <exception cref="System.IO.IOException"/> private void WriteFile(FileSystem fileSys, Path name, int repl) { // create and write a file that contains three blocks of data FSDataOutputStream stm = fileSys.Create(name, true, fileSys.GetConf().GetInt(CommonConfigurationKeys .IoFileBufferSizeKey, 4096), (short)repl, blockSize); byte[] buffer = new byte[fileSize]; Random rand = new Random(seed); rand.NextBytes(buffer); stm.Write(buffer); stm.Close(); }
/// <summary>Test to verify the race between finalizeBlock and Lease recovery</summary>
/// <exception cref="System.Exception"/>
public virtual void TestRaceBetweenReplicaRecoveryAndFinalizeBlock()
{
    TearDown(); // Stop the Mocked DN started in startup()
    Configuration conf = new HdfsConfiguration();
    conf.Set(DFSConfigKeys.DfsDatanodeXceiverStopTimeoutMillisKey, "1000");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
    try
    {
        cluster.WaitClusterUp();
        DistributedFileSystem fs = cluster.GetFileSystem();
        Path path = new Path("/test");
        FSDataOutputStream @out = fs.Create(path);
        @out.WriteBytes("data");
        @out.Hsync();
        IList<LocatedBlock> blocks = DFSTestUtil.GetAllBlocks(fs.Open(path));
        LocatedBlock block = blocks[0];
        DataNode dataNode = cluster.GetDataNodes()[0];
        AtomicBoolean recoveryInitResult = new AtomicBoolean(true);
        Sharpen.Thread recoveryThread = new _Thread_612(block, dataNode, recoveryInitResult);
        recoveryThread.Start();
        try
        {
            @out.Close();
        }
        catch (IOException e)
        {
            NUnit.Framework.Assert.IsTrue("Writing should fail",
                e.Message.Contains("are bad. Aborting..."));
        }
        finally
        {
            recoveryThread.Join();
        }
        NUnit.Framework.Assert.IsTrue("Recovery should be initiated successfully",
            recoveryInitResult.Get());
        dataNode.UpdateReplicaUnderRecovery(block.GetBlock(),
            block.GetBlock().GetGenerationStamp() + 1,
            block.GetBlock().GetBlockId(), block.GetBlockSize());
    }
    finally
    {
        if (null != cluster)
        {
            cluster.Shutdown();
            cluster = null;
        }
    }
}
/// <exception cref="System.Exception"/> private void WriteTestFile(string testFileName) { Path filePath = new Path(testFileName); FSDataOutputStream stream = dfs.Create(filePath); for (int i = 0; i < 10; i++) { byte[] data = Sharpen.Runtime.GetBytesForString(RandomStringUtils.RandomAlphabetic (102400)); stream.Write(data); } stream.Hsync(); stream.Close(); }
// 200MB file length
/// <summary>Test read and write large files.</summary>
/// <exception cref="System.Exception"/>
internal static void LargeFileTest(long fileLength)
{
    Configuration conf = WebHdfsTestUtil.CreateConf();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
    try
    {
        cluster.WaitActive();
        FileSystem fs = WebHdfsTestUtil.GetWebHdfsFileSystem(conf, WebHdfsFileSystem.Scheme);
        Path dir = new Path("/test/largeFile");
        NUnit.Framework.Assert.IsTrue(fs.Mkdirs(dir));
        byte[] data = new byte[1 << 20];
        Random.NextBytes(data);
        byte[] expected = new byte[2 * data.Length];
        System.Array.Copy(data, 0, expected, 0, data.Length);
        System.Array.Copy(data, 0, expected, data.Length, data.Length);
        Path p = new Path(dir, "file");
        TestWebHDFS.Ticker t = new TestWebHDFS.Ticker("WRITE", "fileLength=" + fileLength);
        FSDataOutputStream @out = fs.Create(p);
        try
        {
            long remaining = fileLength;
            for (; remaining > 0;)
            {
                t.Tick(fileLength - remaining, "remaining=%d", remaining);
                int n = (int)Math.Min(remaining, data.Length);
                @out.Write(data, 0, n);
                remaining -= n;
            }
        }
        finally
        {
            @out.Close();
        }
        t.End(fileLength);
        NUnit.Framework.Assert.AreEqual(fileLength, fs.GetFileStatus(p).GetLen());
        long smallOffset = Random.Next(1 << 20) + (1 << 20);
        long largeOffset = fileLength - smallOffset;
        byte[] buf = new byte[data.Length];
        VerifySeek(fs, p, largeOffset, fileLength, buf, expected);
        VerifySeek(fs, p, smallOffset, fileLength, buf, expected);
        VerifyPread(fs, p, largeOffset, fileLength, buf, expected);
    }
    finally
    {
        cluster.Shutdown();
    }
}
public virtual void TestAppend()
{
    Configuration conf = new HdfsConfiguration();
    short Replication = (short)3;
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
    try
    {
        DistributedFileSystem fs = cluster.GetFileSystem();
        Path f = new Path(Dir, "testAppend");
        {
            Log.Info("create an empty file " + f);
            fs.Create(f, Replication).Close();
            FileStatus status = fs.GetFileStatus(f);
            NUnit.Framework.Assert.AreEqual(Replication, status.GetReplication());
            NUnit.Framework.Assert.AreEqual(0L, status.GetLen());
        }
        byte[] bytes = new byte[1000];
        {
            Log.Info("append " + bytes.Length + " bytes to " + f);
            FSDataOutputStream @out = fs.Append(f);
            @out.Write(bytes);
            @out.Close();
            FileStatus status = fs.GetFileStatus(f);
            NUnit.Framework.Assert.AreEqual(Replication, status.GetReplication());
            NUnit.Framework.Assert.AreEqual(bytes.Length, status.GetLen());
        }
        {
            Log.Info("append another " + bytes.Length + " bytes to " + f);
            try
            {
                FSDataOutputStream @out = fs.Append(f);
                @out.Write(bytes);
                @out.Close();
                NUnit.Framework.Assert.Fail();
            }
            catch (IOException ioe)
            {
                Log.Info("This exception is expected", ioe);
            }
        }
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/// <summary>
/// Test to run benchmarks between short circuit read vs regular read with
/// specified number of threads simultaneously reading.
/// </summary>
/// <remarks>
/// Test to run benchmarks between short circuit read vs regular read with
/// specified number of threads simultaneously reading.
/// <br />
/// Run this using the following command:
/// bin/hadoop --config confdir \
/// org.apache.hadoop.hdfs.TestShortCircuitLocalRead \
/// <shortcircuit on?> <checksum on?> <Number of threads>
/// </remarks>
/// <exception cref="System.Exception"/>
public static void Main(string[] args)
{
    if (args.Length != 3)
    {
        System.Console.Out.WriteLine("Usage: test shortcircuit checksum threadCount");
        System.Environment.Exit(1);
    }
    bool shortcircuit = Sharpen.Extensions.ValueOf(args[0]);
    bool checksum = Sharpen.Extensions.ValueOf(args[1]);
    int threadCount = System.Convert.ToInt32(args[2]);
    // Setup: create a file
    Configuration conf = new Configuration();
    conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitKey, shortcircuit);
    conf.Set(DFSConfigKeys.DfsDomainSocketPathKey, "/tmp/TestShortCircuitLocalRead._PORT");
    conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitSkipChecksumKey, checksum);
    // Override fileSize and DATA_TO_WRITE to much larger values for benchmark test
    int fileSize = 1000 * blockSize + 100; // File with 1000 blocks
    byte[] dataToWrite = AppendTestUtil.RandomBytes(seed, fileSize);
    // create a new file in the home directory and fill it with data
    Path file1 = new Path("filelocal.dat");
    FileSystem fs = FileSystem.Get(conf);
    FSDataOutputStream stm = CreateFile(fs, file1, 1);
    stm.Write(dataToWrite);
    stm.Close();
    long start = Time.Now();
    int iteration = 20;
    Sharpen.Thread[] threads = new Sharpen.Thread[threadCount];
    for (int i = 0; i < threadCount; i++)
    {
        threads[i] = new _Thread_554(iteration, fs, file1, dataToWrite, conf);
    }
    for (int i_1 = 0; i_1 < threadCount; i_1++)
    {
        threads[i_1].Start();
    }
    for (int i_2 = 0; i_2 < threadCount; i_2++)
    {
        threads[i_2].Join();
    }
    long end = Time.Now();
    System.Console.Out.WriteLine("Iteration " + iteration + " took " + (end - start));
    fs.Delete(file1, false);
}
/// <summary>Test basic read/write/seek operations with the given codec.</summary>
/// <exception cref="System.IO.IOException"/>
internal virtual void BasicWithSomeCodec(string codec)
{
    Path ncTFile = new Path(Root, "basic.tfile");
    FSDataOutputStream fout = CreateFSOutput(ncTFile);
    TFile.Writer writer = new TFile.Writer(fout, minBlockSize, codec, "memcmp", conf);
    WriteRecords(writer);
    fout.Close();
    FSDataInputStream fin = fs.Open(ncTFile);
    TFile.Reader reader = new TFile.Reader(fs.Open(ncTFile),
        fs.GetFileStatus(ncTFile).GetLen(), conf);
    TFile.Reader.Scanner scanner = reader.CreateScanner();
    ReadAllRecords(scanner);
    scanner.SeekTo(GetSomeKey(50));
    Assert.True("location lookup failed", scanner.SeekTo(GetSomeKey(50)));
    // read the key and see if it matches
    byte[] readKey = ReadKey(scanner);
    Assert.True("seeked key does not match", Arrays.Equals(GetSomeKey(50), readKey));
    scanner.SeekTo(new byte[0]);
    byte[] val1 = ReadValue(scanner);
    scanner.SeekTo(new byte[0]);
    byte[] val2 = ReadValue(scanner);
    Assert.True(Arrays.Equals(val1, val2));
    // check for lowerBound
    scanner.LowerBound(GetSomeKey(50));
    Assert.True("location lookup failed",
        scanner.currentLocation.CompareTo(reader.End()) < 0);
    readKey = ReadKey(scanner);
    Assert.True("seeked key does not match", Arrays.Equals(readKey, GetSomeKey(50)));
    // check for upper bound
    scanner.UpperBound(GetSomeKey(50));
    Assert.True("location lookup failed",
        scanner.currentLocation.CompareTo(reader.End()) < 0);
    readKey = ReadKey(scanner);
    Assert.True("seeked key does not match", Arrays.Equals(readKey, GetSomeKey(51)));
    scanner.Close();
    // test for a range of scanner
    scanner = reader.CreateScannerByKey(GetSomeKey(10), GetSomeKey(60));
    ReadAndCheckbytes(scanner, 10, 50);
    NUnit.Framework.Assert.IsFalse(scanner.Advance());
    scanner.Close();
    reader.Close();
    fin.Close();
    fs.Delete(ncTFile, true);
}
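// Minimal TFile round-trip sketch (added for clarity; the WriteRecords/ReadKey
// helpers above hide the core API). Illustrative outline only, not code from the
// original test; it assumes the Writer/Reader constructors used above:
//
//   FSDataOutputStream fout = fs.Create(path);
//   TFile.Writer w = new TFile.Writer(fout, minBlockSize, "none", "memcmp", conf);
//   w.Append(Sharpen.Runtime.GetBytesForString("key1", "UTF-8"),
//            Sharpen.Runtime.GetBytesForString("value1", "UTF-8"));
//   w.Close();
//   fout.Close();
//
//   TFile.Reader r = new TFile.Reader(fs.Open(path),
//       fs.GetFileStatus(path).GetLen(), conf);
//   TFile.Reader.Scanner s = r.CreateScanner();
//   s.SeekTo(Sharpen.Runtime.GetBytesForString("key1", "UTF-8"));
//   s.Close();
//   r.Close();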
/// <exception cref="System.IO.IOException"/> private static void WriteFile(FileSystem fileSys, Path name, int repl, int fileSize , int blockSize) { // Create and write a file that contains three blocks of data FSDataOutputStream stm = fileSys.Create(name, true, HdfsConstants.IoFileBufferSize , (short)repl, (long)blockSize); byte[] buffer = new byte[fileSize]; Random rand = new Random(seed); rand.NextBytes(buffer); stm.Write(buffer); stm.Close(); }
public virtual void TestSimpleFlush()
{
    Configuration conf = new HdfsConfiguration();
    fileContents = AppendTestUtil.InitBuffer(AppendTestUtil.FileSize);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
    FileSystem fs = cluster.GetFileSystem();
    try
    {
        // create a new file.
        Path file1 = new Path("/simpleFlush.dat");
        FSDataOutputStream stm = AppendTestUtil.CreateFile(fs, file1, 1);
        System.Console.Out.WriteLine("Created file simpleFlush.dat");
        // write to file
        int mid = AppendTestUtil.FileSize / 2;
        stm.Write(fileContents, 0, mid);
        stm.Hflush();
        System.Console.Out.WriteLine("Wrote and Flushed first part of file.");
        // write the remainder of the file
        stm.Write(fileContents, mid, AppendTestUtil.FileSize - mid);
        System.Console.Out.WriteLine("Written second part of file");
        stm.Hflush();
        stm.Hflush();
        System.Console.Out.WriteLine("Wrote and Flushed second part of file.");
        // verify that full blocks are sane
        CheckFile(fs, file1, 1);
        stm.Close();
        System.Console.Out.WriteLine("Closed file.");
        // verify that entire file is good
        AppendTestUtil.CheckFullFile(fs, file1, AppendTestUtil.FileSize, fileContents, "Read 2");
    }
    catch (IOException e)
    {
        System.Console.Out.WriteLine("Exception :" + e);
        throw;
    }
    catch (Exception e)
    {
        System.Console.Out.WriteLine("Throwable :" + e);
        Sharpen.Runtime.PrintStackTrace(e);
        throw new IOException("Throwable : " + e);
    }
    finally
    {
        fs.Close();
        cluster.Shutdown();
    }
}
/// <exception cref="System.IO.IOException"/> /// <exception cref="Sharpen.TimeoutException"/> /// <exception cref="System.Exception"/> internal static void WriteFile(Configuration conf, Path name, short replication, int numBlocks) { FileSystem fileSys = FileSystem.Get(conf); FSDataOutputStream stm = fileSys.Create(name, true, conf.GetInt("io.file.buffer.size" , 4096), replication, (long)Blocksize); for (int i = 0; i < numBlocks; i++) { stm.Write(databuf); } stm.Close(); DFSTestUtil.WaitReplication(fileSys, name, replication); }
/// <exception cref="System.IO.IOException"/> private void WriteFile(FileSystem fileSys, Path name, int repl) { // create and write a file that contains three blocks of data FSDataOutputStream stm = fileSys.Create(name, true, fileSys.GetConf().GetInt(CommonConfigurationKeys .IoFileBufferSizeKey, 4096), (short)repl, blockSize); byte[] buffer = new byte[filesize]; for (int i = 0; i < buffer.Length; i++) { buffer[i] = (byte)('1'); } stm.Write(buffer); stm.Close(); }