/// <summary>
/// Sweeps every combination of initial file length and two post-append write
/// sizes (each 0..BlockSize), hflushing between the appended writes, and
/// verifies the resulting file content byte-for-byte.
/// </summary>
public virtual void TestAppend()
{
    int maxOldFileLen = 2 * BlockSize + 1;
    int maxFlushedBytes = BlockSize;
    byte[] data = AppendTestUtil.InitBuffer(maxOldFileLen + 2 * maxFlushedBytes);
    for (int baseLen = 0; baseLen <= maxOldFileLen; baseLen++)
    {
        for (int flush1 = 0; flush1 <= maxFlushedBytes; flush1++)
        {
            for (int flush2 = 0; flush2 <= maxFlushedBytes; flush2++)
            {
                int totalLen = baseLen + flush1 + flush2;
                // Seed the file with baseLen bytes and close it.
                Path p = new Path("foo" + baseLen + "_" + flush1 + "_" + flush2);
                Log.Info("Creating file " + p);
                FSDataOutputStream @out = fs.Create(p, false,
                    conf.GetInt(CommonConfigurationKeys.IoFileBufferSizeKey, 4096),
                    Replication, BlockSize);
                @out.Write(data, 0, baseLen);
                @out.Close();
                // Reopen for append: write flush1 bytes, hflush, then flush2 more.
                @out = fs.Append(p);
                @out.Write(data, baseLen, flush1);
                @out.Hflush();
                @out.Write(data, baseLen + flush1, flush2);
                @out.Close();
                // The reassembled file must match the source buffer exactly.
                AppendTestUtil.CheckFullFile(fs, p, totalLen, data, p.ToString());
                fs.Delete(p, false);
            }
        }
    }
}
/// <summary>
/// Writes a file in two halves with hflush after each, verifies the full
/// blocks before close and the entire content after close.
/// </summary>
public virtual void TestSimpleFlush()
{
    Configuration conf = new HdfsConfiguration();
    fileContents = AppendTestUtil.InitBuffer(AppendTestUtil.FileSize);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
    FileSystem fs = cluster.GetFileSystem();
    try
    {
        // Create a fresh file to flush into.
        Path file1 = new Path("/simpleFlush.dat");
        FSDataOutputStream stm = AppendTestUtil.CreateFile(fs, file1, 1);
        System.Console.Out.WriteLine("Created file simpleFlush.dat");
        // First half of the buffer, then hflush.
        int half = AppendTestUtil.FileSize / 2;
        stm.Write(fileContents, 0, half);
        stm.Hflush();
        System.Console.Out.WriteLine("Wrote and Flushed first part of file.");
        // Remainder of the buffer; hflush twice in a row on purpose.
        stm.Write(fileContents, half, AppendTestUtil.FileSize - half);
        System.Console.Out.WriteLine("Written second part of file");
        stm.Hflush();
        stm.Hflush();
        System.Console.Out.WriteLine("Wrote and Flushed second part of file.");
        // Full blocks should already be consistent before close.
        CheckFile(fs, file1, 1);
        stm.Close();
        System.Console.Out.WriteLine("Closed file.");
        // After close the whole file must match the source buffer.
        AppendTestUtil.CheckFullFile(fs, file1, AppendTestUtil.FileSize, fileContents, "Read 2");
    }
    catch (IOException e)
    {
        System.Console.Out.WriteLine("Exception :" + e);
        throw;
    }
    catch (Exception e)
    {
        System.Console.Out.WriteLine("Throwable :" + e);
        Sharpen.Runtime.PrintStackTrace(e);
        throw new IOException("Throwable : " + e);
    }
    finally
    {
        fs.Close();
        cluster.Shutdown();
    }
}
/// <summary>
/// Writes a file in 29-byte slices with hflush after each slice, flushes the
/// tail, then verifies blocks before close and full content after close.
/// </summary>
public virtual void TestComplexFlush()
{
    Configuration conf = new HdfsConfiguration();
    fileContents = AppendTestUtil.InitBuffer(AppendTestUtil.FileSize);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
    FileSystem fs = cluster.GetFileSystem();
    try
    {
        Path file1 = new Path("/complexFlush.dat");
        FSDataOutputStream stm = AppendTestUtil.CreateFile(fs, file1, 1);
        System.Console.Out.WriteLine("Created file complexFlush.dat");
        // Dribble the buffer out in 29-byte slices, hflushing each one.
        int offset = 0;
        while (offset + 29 < AppendTestUtil.FileSize)
        {
            stm.Write(fileContents, offset, 29);
            stm.Hflush();
            offset += 29;
        }
        // Write whatever is left over.
        stm.Write(fileContents, offset, AppendTestUtil.FileSize - offset);
        // Make sure all complete blocks are written out before the
        // checkFile() call below (see FSOutputSummer#flush).
        stm.Flush();
        // Full blocks should be consistent before close.
        CheckFile(fs, file1, 1);
        stm.Close();
        // After close the whole file must match the source buffer.
        AppendTestUtil.CheckFullFile(fs, file1, AppendTestUtil.FileSize, fileContents, "Read 2");
    }
    catch (IOException e)
    {
        System.Console.Out.WriteLine("Exception :" + e);
        throw;
    }
    catch (Exception e)
    {
        System.Console.Out.WriteLine("Throwable :" + e);
        Sharpen.Runtime.PrintStackTrace(e);
        throw new IOException("Throwable : " + e);
    }
    finally
    {
        fs.Close();
        cluster.Shutdown();
    }
}
/// <summary>
/// A second client appends (with CreateFlag.NewBlock) after the first
/// writer's lease soft limit has expired; verifies the resulting length and
/// that the file ends up with a single block.
/// </summary>
public virtual void TestAppend2AfterSoftLimit()
{
    Configuration conf = new HdfsConfiguration();
    conf.SetInt(DFSConfigKeys.DfsReplicationKey, 1);
    // Shrink the lease soft limit so it expires almost immediately.
    long softLimit = 1L;
    long hardLimit = 9999999L;
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
    cluster.SetLeasePeriod(softLimit, hardLimit);
    cluster.WaitActive();
    DistributedFileSystem fs = cluster.GetFileSystem();
    DistributedFileSystem fs2 = new DistributedFileSystem();
    fs2.Initialize(fs.GetUri(), conf);
    Path testPath = new Path("/testAppendAfterSoftLimit");
    byte[] fileContents = AppendTestUtil.InitBuffer(32);
    // First writer: create and write, deliberately leaving the stream open
    // so its lease is still held.
    FSDataOutputStream @out = fs.Create(testPath);
    @out.Write(fileContents);
    // Sleep past the soft limit so the second client may reclaim the lease.
    Sharpen.Thread.Sleep(250);
    try
    {
        FSDataOutputStream appender = fs2.Append(testPath,
            EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock), 4096, null);
        appender.Write(fileContents);
        appender.Close();
        NUnit.Framework.Assert.AreEqual(fileContents.Length, fs.GetFileStatus(testPath).GetLen());
        // The first writer was revoked, so only one block should remain.
        LocatedBlocks blocks = fs.GetClient().GetLocatedBlocks(testPath.ToString(), 0L);
        NUnit.Framework.Assert.AreEqual(1, blocks.GetLocatedBlocks().Count);
        foreach (LocatedBlock block in blocks.GetLocatedBlocks())
        {
            NUnit.Framework.Assert.AreEqual(fileContents.Length, block.GetBlockSize());
        }
    }
    finally
    {
        fs.Close();
        fs2.Close();
        cluster.Shutdown();
    }
}
/// <summary>
/// Writes a 6-byte file one byte at a time, sleeping past the client socket
/// timeout between operations, so the write pipeline must stay alive via
/// heartbeats; then verifies the whole file.
/// </summary>
public virtual void TestPipelineHeartbeat()
{
    int dataNodes = 2;
    int length = 6;
    Configuration conf = new HdfsConfiguration();
    int timeout = 2000;
    conf.SetInt(DFSConfigKeys.DfsClientSocketTimeoutKey, timeout);
    Path p = new Path("/pipelineHeartbeat/foo");
    System.Console.Out.WriteLine("p=" + p);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(dataNodes).Build();
    try
    {
        DistributedFileSystem fs = cluster.GetFileSystem();
        byte[] fileContents = AppendTestUtil.InitBuffer(length);
        FSDataOutputStream stm = AppendTestUtil.CreateFile(fs, p, dataNodes);
        // Interleave 1-byte writes, hflushes, and sleeps longer than the
        // socket timeout.
        stm.Write(fileContents, 0, 1);
        Sharpen.Thread.Sleep(timeout);
        stm.Hflush();
        System.Console.Out.WriteLine("Wrote 1 byte and hflush " + p);
        Sharpen.Thread.Sleep(timeout);
        stm.Write(fileContents, 1, 1);
        stm.Hflush();
        stm.Write(fileContents, 2, 1);
        Sharpen.Thread.Sleep(timeout);
        stm.Hflush();
        stm.Write(fileContents, 3, 1);
        Sharpen.Thread.Sleep(timeout);
        stm.Write(fileContents, 4, 1);
        stm.Hflush();
        stm.Write(fileContents, 5, 1);
        Sharpen.Thread.Sleep(timeout);
        stm.Close();
        // Every byte must have survived the slow pipeline.
        AppendTestUtil.CheckFullFile(fs, p, length, fileContents, "Failed to slowly write to a file");
    }
    finally
    {
        cluster.Shutdown();
    }
}
/// <summary>
/// Appends to a partial CRC chunk where the first appended write does not
/// fill the chunk, then completes the file and verifies its contents.
/// </summary>
/// <param name="appendToNewBlock">when true, append with CreateFlag.NewBlock</param>
/// <exception cref="System.IO.IOException"/>
private void TestAppendToPartialChunk(bool appendToNewBlock)
{
    Path p = new Path("/partialChunk/foo" + (appendToNewBlock ? "0" : "1"));
    int fileLen = 513;
    System.Console.Out.WriteLine("p=" + p);
    byte[] fileContents = AppendTestUtil.InitBuffer(fileLen);
    // Start with a 1-byte file so the CRC chunk is only partially filled.
    FSDataOutputStream stm = AppendTestUtil.CreateFile(fs, p, 1);
    stm.Write(fileContents, 0, 1);
    stm.Close();
    System.Console.Out.WriteLine("Wrote 1 byte and closed the file " + p);
    // First append: one more byte into the partial chunk, hflush, then
    // close while the chunk is still not full.
    stm = appendToNewBlock
        ? fs.Append(p, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock), 4096, null)
        : fs.Append(p);
    stm.Write(fileContents, 1, 1);
    stm.Hflush();
    stm.Close();
    System.Console.Out.WriteLine("Append 1 byte and closed the file " + p);
    // Second append: the stream position must reflect the 2 bytes on disk.
    stm = appendToNewBlock
        ? fs.Append(p, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock), 4096, null)
        : fs.Append(p);
    NUnit.Framework.Assert.AreEqual(2, stm.GetPos());
    // Force a packet to the datanode while the chunk is still partial...
    stm.Write(fileContents, 2, 1);
    stm.Hflush();
    System.Console.Out.WriteLine("Append and flush 1 byte");
    // ...and then another one.
    stm.Write(fileContents, 3, 2);
    stm.Hflush();
    System.Console.Out.WriteLine("Append and flush 2 byte");
    // Fill up the partial chunk with the remainder of the file and close.
    stm.Write(fileContents, 5, fileLen - 5);
    stm.Close();
    System.Console.Out.WriteLine("Flush 508 byte and closed the file " + p);
    // The complete file must match the source buffer.
    AppendTestUtil.CheckFullFile(fs, p, fileLen, fileContents, "Failed to append to a partial chunk");
}
/// <summary>
/// A second client appends (plain append) after the first writer's lease
/// soft limit has expired; verifies the resulting file length.
/// </summary>
public virtual void TestAppendAfterSoftLimit()
{
    Configuration conf = new HdfsConfiguration();
    conf.SetInt(DFSConfigKeys.DfsReplicationKey, 1);
    conf.SetBoolean(DFSConfigKeys.DfsSupportAppendKey, true);
    // Shrink the lease soft limit so it expires almost immediately.
    long softLimit = 1L;
    long hardLimit = 9999999L;
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
    cluster.SetLeasePeriod(softLimit, hardLimit);
    cluster.WaitActive();
    FileSystem fs = cluster.GetFileSystem();
    FileSystem fs2 = new DistributedFileSystem();
    fs2.Initialize(fs.GetUri(), conf);
    Path testPath = new Path("/testAppendAfterSoftLimit");
    byte[] fileContents = AppendTestUtil.InitBuffer(32);
    // First writer: create and write, deliberately leaving the stream open
    // so its lease is still held.
    FSDataOutputStream @out = fs.Create(testPath);
    @out.Write(fileContents);
    // Sleep past the soft limit so the second client may reclaim the lease.
    Sharpen.Thread.Sleep(250);
    try
    {
        FSDataOutputStream appender = fs2.Append(testPath);
        appender.Write(fileContents);
        appender.Close();
        // After lease recovery plus the append, the visible length equals a
        // single buffer's worth of data.
        NUnit.Framework.Assert.AreEqual(fileContents.Length, fs.GetFileStatus(testPath).GetLen());
    }
    finally
    {
        fs.Close();
        fs2.Close();
        cluster.Shutdown();
    }
}
/// <summary>
/// Opens one append on a full-block file and then expects a second client's
/// append to fail with AlreadyBeingCreatedException.
/// </summary>
public virtual void TestAppend2Twice()
{
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
    DistributedFileSystem fs1 = cluster.GetFileSystem();
    FileSystem fs2 = AppendTestUtil.CreateHdfsWithDifferentUsername(conf);
    try
    {
        Path p = new Path("/testAppendTwice/foo");
        int len = 1 << 16;
        byte[] data = AppendTestUtil.InitBuffer(len);
        {
            // Fill exactly one block so the last block is complete.
            FSDataOutputStream @out = fs2.Create(p, true, 4096, (short)1, len);
            @out.Write(data, 0, len);
            @out.Close();
        }
        // First append writes nothing, so the last block stays full and the
        // INodeFileUnderConstruction keeps a plain BlockInfo rather than a
        // BlockInfoUnderConstruction.
        ((DistributedFileSystem)fs2).Append(p,
            EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock), 4096, null);
        // Second append must be rejected: the lease is already taken.
        fs1.Append(p);
        NUnit.Framework.Assert.Fail();
    }
    catch (RemoteException remoteEx)
    {
        AppendTestUtil.Log.Info("Got an exception:", remoteEx);
        NUnit.Framework.Assert.AreEqual(typeof(AlreadyBeingCreatedException).FullName,
            remoteEx.GetClassName());
    }
    finally
    {
        fs2.Close();
        fs1.Close();
        cluster.Shutdown();
    }
}
// Exercises hflush() and close() when the calling thread carries interrupted
// status: either the call throws ThreadInterruptedException, or it completes
// because the pipeline ack arrived first — in which case the interrupt flag
// must still be set.
public virtual void TestHFlushInterrupted()
{
    int DatanodeNum = 2;
    int fileLen = 6;
    byte[] fileContents = AppendTestUtil.InitBuffer(fileLen);
    Configuration conf = new HdfsConfiguration();
    Path p = new Path("/hflush-interrupted");
    System.Console.Out.WriteLine("p=" + p);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(DatanodeNum
        ).Build();
    try
    {
        DistributedFileSystem fs = cluster.GetFileSystem();
        // create a new file.
        FSDataOutputStream stm = AppendTestUtil.CreateFile(fs, p, DatanodeNum);
        stm.Write(fileContents, 0, 2);
        // Set interrupted status on this thread, then flush.
        Sharpen.Thread.CurrentThread().Interrupt();
        try
        {
            stm.Hflush();
            // If we made it past the hflush(), then that means that the ack made it back
            // from the pipeline before we got to the wait() call. In that case we should
            // still have interrupted status.
            NUnit.Framework.Assert.IsTrue(Sharpen.Thread.Interrupted());
        }
        catch (ThreadInterruptedException)
        {
            System.Console.Out.WriteLine("Got expected exception during flush");
        }
        // On either path the interrupt status must have been consumed by now.
        NUnit.Framework.Assert.IsFalse(Sharpen.Thread.Interrupted());
        // Try again to flush should succeed since we no longer have interrupt status
        stm.Hflush();
        // Write some more data and flush
        stm.Write(fileContents, 2, 2);
        stm.Hflush();
        // Write some data and close while interrupted
        stm.Write(fileContents, 4, 2);
        Sharpen.Thread.CurrentThread().Interrupt();
        try
        {
            stm.Close();
            // If we made it past the close(), then that means that the ack made it back
            // from the pipeline before we got to the wait() call. In that case we should
            // still have interrupted status.
            NUnit.Framework.Assert.IsTrue(Sharpen.Thread.Interrupted());
        }
        catch (ThreadInterruptedException)
        {
            System.Console.Out.WriteLine("Got expected exception during close");
            // If we got the exception, we shouldn't have interrupted status anymore.
            NUnit.Framework.Assert.IsFalse(Sharpen.Thread.Interrupted());
            // Now do a successful close.
            stm.Close();
        }
        // verify that entire file is good
        // NOTE(review): only the first 4 bytes are verified, although 6 were
        // written — presumably the last 2-byte write may be lost when close()
        // is interrupted; confirm this is intentional.
        AppendTestUtil.CheckFullFile(fs, p, 4, fileContents, "Failed to deal with thread interruptions"
            , false);
    }
    finally
    {
        cluster.Shutdown();
    }
}
/// <summary>
/// The method starts a new cluster with the given Configuration; creates a
/// file with the specified block_size and writes 10 equal sections into it,
/// calling hflush/hsync after each write. Each section is re-read through a
/// fresh reader and verified, and the final file is checked in full. Throws
/// an IOException in case of an error.
/// </summary>
/// <param name="conf">cluster configuration</param>
/// <param name="fileName">of the file to be created and processed as required</param>
/// <param name="block_size">value to be used for the file's creation</param>
/// <param name="replicas">is the number of replicas</param>
/// <param name="isSync">hsync or hflush</param>
/// <param name="syncFlags">specify the semantic of the sync/flush</param>
/// <exception cref="System.IO.IOException">in case of any errors</exception>
public static void DoTheJob(Configuration conf, string fileName, long block_size,
    short replicas, bool isSync, EnumSet<HdfsDataOutputStream.SyncFlag> syncFlags)
{
    byte[] fileContent;
    int Sections = 10;
    fileContent = AppendTestUtil.InitBuffer(AppendTestUtil.FileSize);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(replicas).
        Build();
    // Make sure we work with DFS in order to utilize all its functionality
    DistributedFileSystem fileSystem = cluster.GetFileSystem();
    FSDataInputStream @is;
    try
    {
        Path path = new Path(fileName);
        string pathName = new Path(fileSystem.GetWorkingDirectory(), path).ToUri().GetPath
            ();
        FSDataOutputStream stm = fileSystem.Create(path, false, 4096, replicas, block_size
            );
        System.Console.Out.WriteLine("Created file " + fileName);
        // tenth = bytes per section; rounding = leftover written at the end.
        int tenth = AppendTestUtil.FileSize / Sections;
        int rounding = AppendTestUtil.FileSize - tenth * Sections;
        for (int i = 0; i < Sections; i++)
        {
            System.Console.Out.WriteLine("Writing " + (tenth * i) + " to " + (tenth * (i + 1)
                ) + " section to file " + fileName);
            // write to the file
            stm.Write(fileContent, tenth * i, tenth);
            // Wait while hflush/hsync pushes all packets through built pipeline
            if (isSync)
            {
                ((DFSOutputStream)stm.GetWrappedStream()).Hsync(syncFlags);
            }
            else
            {
                ((DFSOutputStream)stm.GetWrappedStream()).Hflush();
            }
            // Check file length if updatelength is required
            if (isSync && syncFlags.Contains(HdfsDataOutputStream.SyncFlag.UpdateLength))
            {
                long currentFileLength = fileSystem.GetFileStatus(path).GetLen();
                NUnit.Framework.Assert.AreEqual("File size doesn't match for hsync/hflush with updating the length"
                    , tenth * (i + 1), currentFileLength);
            }
            else
            {
                // With EndBlock, each synced section should have closed a block,
                // so the block count tracks the section count.
                if (isSync && syncFlags.Contains(HdfsDataOutputStream.SyncFlag.EndBlock))
                {
                    LocatedBlocks blocks = fileSystem.dfs.GetLocatedBlocks(pathName, 0);
                    NUnit.Framework.Assert.AreEqual(i + 1, blocks.GetLocatedBlocks().Count);
                }
            }
            byte[] toRead = new byte[tenth];
            byte[] expected = new byte[tenth];
            System.Array.Copy(fileContent, tenth * i, expected, 0, tenth);
            // Open the same file for read. Need to create new reader after every write operation(!)
            @is = fileSystem.Open(path);
            @is.Seek(tenth * i);
            int readBytes = @is.Read(toRead, 0, tenth);
            System.Console.Out.WriteLine("Has read " + readBytes);
            // A single Read() may legitimately return fewer than `tenth` bytes,
            // but must return at least one.
            NUnit.Framework.Assert.IsTrue("Should've get more bytes", (readBytes > 0) && (readBytes
                <= tenth));
            @is.Close();
            CheckData(toRead, 0, readBytes, expected, "Partial verification");
        }
        // Write the rounding remainder and close the file.
        System.Console.Out.WriteLine("Writing " + (tenth * Sections) + " to " + (tenth *
            Sections + rounding) + " section to file " + fileName);
        stm.Write(fileContent, tenth * Sections, rounding);
        stm.Close();
        NUnit.Framework.Assert.AreEqual("File size doesn't match ", AppendTestUtil.FileSize
            , fileSystem.GetFileStatus(path).GetLen());
        AppendTestUtil.CheckFullFile(fileSystem, path, fileContent.Length, fileContent, "hflush()"
            );
    }
    finally
    {
        fileSystem.Close();
        cluster.Shutdown();
    }
}
// Exercises three append scenarios in sequence:
//  1. append to an existing file in two steps and verify the full content;
//  2. append to a non-existing file (expects FileNotFoundException);
//  3. append as a non-superuser under varying permissions (expects success
//     with write permission and AccessControlException without it).
public virtual void TestSimpleAppend()
{
    Configuration conf = new HdfsConfiguration();
    conf.SetInt(DFSConfigKeys.DfsDatanodeHandlerCountKey, 50);
    fileContents = AppendTestUtil.InitBuffer(AppendTestUtil.FileSize);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
    FileSystem fs = cluster.GetFileSystem();
    try
    {
        {
            // test appending to a file.
            // create a new file.
            Path file1 = new Path("/simpleAppend.dat");
            FSDataOutputStream stm = AppendTestUtil.CreateFile(fs, file1, 1);
            System.Console.Out.WriteLine("Created file simpleAppend.dat");
            // write to file
            int mid = 186;
            // io.bytes.per.checksum bytes
            System.Console.Out.WriteLine("Writing " + mid + " bytes to file " + file1);
            stm.Write(fileContents, 0, mid);
            stm.Close();
            System.Console.Out.WriteLine("Wrote and Closed first part of file.");
            // write to file
            int mid2 = 607;
            // io.bytes.per.checksum bytes
            // NOTE(review): this log prints `mid` though `mid2 - mid` bytes are
            // written below — likely a copy/paste slip in the message.
            System.Console.Out.WriteLine("Writing " + mid + " bytes to file " + file1);
            stm = fs.Append(file1);
            stm.Write(fileContents, mid, mid2 - mid);
            stm.Close();
            System.Console.Out.WriteLine("Wrote and Closed second part of file.");
            // write the remainder of the file
            stm = fs.Append(file1);
            // ensure getPos is set to reflect existing size of the file
            NUnit.Framework.Assert.IsTrue(stm.GetPos() > 0);
            System.Console.Out.WriteLine("Writing " + (AppendTestUtil.FileSize - mid2) + " bytes to file "
                + file1);
            stm.Write(fileContents, mid2, AppendTestUtil.FileSize - mid2);
            System.Console.Out.WriteLine("Written second part of file");
            stm.Close();
            System.Console.Out.WriteLine("Wrote and Closed second part of file.");
            // verify that entire file is good
            AppendTestUtil.CheckFullFile(fs, file1, AppendTestUtil.FileSize, fileContents, "Read 2"
                );
        }
        {
            // test appending to an non-existing file.
            FSDataOutputStream @out = null;
            try
            {
                @out = fs.Append(new Path("/non-existing.dat"));
                NUnit.Framework.Assert.Fail("Expected to have FileNotFoundException");
            }
            catch (FileNotFoundException fnfe)
            {
                System.Console.Out.WriteLine("Good: got " + fnfe);
                Sharpen.Runtime.PrintStackTrace(fnfe, System.Console.Out);
            }
            finally
            {
                IOUtils.CloseStream(@out);
            }
        }
        {
            // test append permission.
            //set root to all writable
            Path root = new Path("/");
            fs.SetPermission(root, new FsPermission((short)0x1ff));
            fs.Close();
            // login as a different user
            UserGroupInformation superuser = UserGroupInformation.GetCurrentUser();
            // NOTE(review): placeholder-looking username — verify this is the
            // intended test value.
            string username = "******";
            string group = "testappendgroup";
            NUnit.Framework.Assert.IsFalse(superuser.GetShortUserName().Equals(username));
            NUnit.Framework.Assert.IsFalse(Arrays.AsList(superuser.GetGroupNames()).Contains(
                group));
            UserGroupInformation appenduser = UserGroupInformation.CreateUserForTesting(username
                , new string[] { group });
            fs = DFSTestUtil.GetFileSystemAs(appenduser, conf);
            // create a file
            Path dir = new Path(root, GetType().Name);
            Path foo = new Path(dir, "foo.dat");
            FSDataOutputStream @out = null;
            int offset = 0;
            try
            {
                @out = fs.Create(foo);
                int len = 10 + AppendTestUtil.NextInt(100);
                @out.Write(fileContents, offset, len);
                offset += len;
            }
            finally
            {
                IOUtils.CloseStream(@out);
            }
            // change dir and foo to minimal permissions.
            fs.SetPermission(dir, new FsPermission((short)0x40));
            fs.SetPermission(foo, new FsPermission((short)0x80));
            // try append, should success
            @out = null;
            try
            {
                @out = fs.Append(foo);
                int len = 10 + AppendTestUtil.NextInt(100);
                @out.Write(fileContents, offset, len);
                offset += len;
            }
            finally
            {
                IOUtils.CloseStream(@out);
            }
            // change dir and foo to all but no write on foo.
            fs.SetPermission(foo, new FsPermission((short)0x17f));
            fs.SetPermission(dir, new FsPermission((short)0x1ff));
            // try append, should fail
            @out = null;
            try
            {
                @out = fs.Append(foo);
                NUnit.Framework.Assert.Fail("Expected to have AccessControlException");
            }
            catch (AccessControlException ace)
            {
                System.Console.Out.WriteLine("Good: got " + ace);
                Sharpen.Runtime.PrintStackTrace(ace, System.Console.Out);
            }
            finally
            {
                IOUtils.CloseStream(@out);
            }
        }
    }
    catch (IOException e)
    {
        System.Console.Out.WriteLine("Exception :" + e);
        throw;
    }
    catch (Exception e)
    {
        System.Console.Out.WriteLine("Throwable :" + e);
        Sharpen.Runtime.PrintStackTrace(e);
        throw new IOException("Throwable : " + e);
    }
    finally
    {
        fs.Close();
        cluster.Shutdown();
    }
}
/// <summary>Test that appends to files at random offsets.</summary>
/// <remarks>
/// Creates numberOfFiles files with random replication factors, then runs
/// numThreads concurrent Workload threads that append to them, and waits for
/// every thread to finish.
/// </remarks>
/// <param name="appendToNewBlock">passed through to each Workload thread</param>
/// <exception cref="System.IO.IOException"/>
private void TestComplexAppend(bool appendToNewBlock)
{
    fileContents = AppendTestUtil.InitBuffer(AppendTestUtil.FileSize);
    Configuration conf = new HdfsConfiguration();
    conf.SetInt(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 2000);
    conf.SetInt(DFSConfigKeys.DfsHeartbeatIntervalKey, 2);
    conf.SetInt(DFSConfigKeys.DfsNamenodeReplicationPendingTimeoutSecKey, 2);
    conf.SetInt(DFSConfigKeys.DfsClientSocketTimeoutKey, 30000);
    conf.SetInt(DFSConfigKeys.DfsDatanodeSocketWriteTimeoutKey, 30000);
    conf.SetInt(DFSConfigKeys.DfsDatanodeHandlerCountKey, 50);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDatanodes
        ).Build();
    cluster.WaitActive();
    FileSystem fs = cluster.GetFileSystem();
    try
    {
        // create a bunch of test files with random replication factors.
        // Insert them into a linked list.
        //
        for (int i = 0; i < numberOfFiles; i++)
        {
            int replication = AppendTestUtil.NextInt(numDatanodes - 2) + 1;
            Path testFile = new Path("/" + i + ".dat");
            FSDataOutputStream stm = AppendTestUtil.CreateFile(fs, testFile, replication);
            stm.Close();
            testFiles.AddItem(testFile);
        }
        // Create threads and make them run workload concurrently.
        workload = new TestFileAppend2.Workload[numThreads];
        for (int i_1 = 0; i_1 < numThreads; i_1++)
        {
            workload[i_1] = new TestFileAppend2.Workload(this, cluster, i_1, appendToNewBlock
                );
            workload[i_1].Start();
        }
        // wait for all transactions to get over
        for (int i_2 = 0; i_2 < numThreads; i_2++)
        {
            try
            {
                System.Console.Out.WriteLine("Waiting for thread " + i_2 + " to complete...");
                workload[i_2].Join();
                System.Console.Out.WriteLine("Waiting for thread " + i_2 + " complete.");
            }
            catch (Exception)
            {
                // retry: an interrupted Join() re-runs the same index.
                i_2--;
            }
        }
    }
    finally
    {
        fs.Close();
        cluster.Shutdown();
    }
    // If any of the worker thread failed in their job, indicate that
    // this test failed.
    //
    // NOTE(review): the final assertion on globalStatus is commented out, so
    // worker failures never fail this test — confirm whether it should be
    // re-enabled.
    // NUnit.Framework.Assert.IsTrue("testComplexAppend Worker encountered exceptions.", globalStatus);
}
// Writes one full test buffer to the stream; deliberately leaves the stream
// open — the caller is responsible for flushing/closing it.
/// <exception cref="System.IO.IOException"/>
private void WriteFile(FSDataOutputStream stm)
{
    stm.Write(AppendTestUtil.InitBuffer(AppendTestUtil.FileSize));
}