/// <summary>
/// Writes a file in two halves, hflush()-ing after each half (including a
/// repeated hflush to confirm it is idempotent), verifies the flushed blocks
/// mid-stream, then closes and verifies the whole file.
/// </summary>
public virtual void TestSimpleFlush()
{
    Configuration conf = new HdfsConfiguration();
    fileContents = AppendTestUtil.InitBuffer(AppendTestUtil.FileSize);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
    FileSystem fs = cluster.GetFileSystem();
    try
    {
        // create a new file.
        Path file1 = new Path("/simpleFlush.dat");
        FSDataOutputStream stm = AppendTestUtil.CreateFile(fs, file1, 1);
        System.Console.Out.WriteLine("Created file simpleFlush.dat");
        // write the first half and flush it to the datanodes
        int mid = AppendTestUtil.FileSize / 2;
        stm.Write(fileContents, 0, mid);
        stm.Hflush();
        System.Console.Out.WriteLine("Wrote and Flushed first part of file.");
        // write the remainder of the file
        stm.Write(fileContents, mid, AppendTestUtil.FileSize - mid);
        System.Console.Out.WriteLine("Written second part of file");
        // hflush twice on purpose: the second call must be a harmless no-op
        stm.Hflush();
        stm.Hflush();
        System.Console.Out.WriteLine("Wrote and Flushed second part of file.");
        // verify that full blocks are sane while the file is still open
        CheckFile(fs, file1, 1);
        stm.Close();
        System.Console.Out.WriteLine("Closed file.");
        // verify that entire file is good
        AppendTestUtil.CheckFullFile(fs, file1, AppendTestUtil.FileSize, fileContents, "Read 2"
            );
    }
    catch (IOException e)
    {
        System.Console.Out.WriteLine("Exception :" + e);
        throw;
    }
    catch (Exception e)
    {
        System.Console.Out.WriteLine("Throwable :" + e);
        Sharpen.Runtime.PrintStackTrace(e);
        // FIX: pass the original exception as the inner exception so the
        // root-cause stack trace is preserved instead of being discarded.
        throw new IOException("Throwable : " + e, e);
    }
    finally
    {
        fs.Close();
        cluster.Shutdown();
    }
}
/// <summary>
/// Writes a file in many small (29-byte) increments, hflush()-ing after each,
/// then flushes the tail, verifies the full blocks mid-stream, closes, and
/// verifies the whole file.
/// </summary>
public virtual void TestComplexFlush()
{
    Configuration conf = new HdfsConfiguration();
    fileContents = AppendTestUtil.InitBuffer(AppendTestUtil.FileSize);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
    FileSystem fs = cluster.GetFileSystem();
    try
    {
        // create a new file.
        Path file1 = new Path("/complexFlush.dat");
        FSDataOutputStream stm = AppendTestUtil.CreateFile(fs, file1, 1);
        System.Console.Out.WriteLine("Created file complexFlush.dat");
        // write the file 29 bytes at a time, hflush()-ing after every chunk
        int start = 0;
        for (start = 0; (start + 29) < AppendTestUtil.FileSize;)
        {
            stm.Write(fileContents, start, 29);
            stm.Hflush();
            start += 29;
        }
        stm.Write(fileContents, start, AppendTestUtil.FileSize - start);
        // need to make sure we completely write out all full blocks before
        // the checkFile() call (see FSOutputSummer#flush)
        stm.Flush();
        // verify that full blocks are sane
        CheckFile(fs, file1, 1);
        stm.Close();
        // verify that entire file is good
        AppendTestUtil.CheckFullFile(fs, file1, AppendTestUtil.FileSize, fileContents, "Read 2"
            );
    }
    catch (IOException e)
    {
        System.Console.Out.WriteLine("Exception :" + e);
        throw;
    }
    catch (Exception e)
    {
        System.Console.Out.WriteLine("Throwable :" + e);
        Sharpen.Runtime.PrintStackTrace(e);
        // FIX: pass the original exception as the inner exception so the
        // root-cause stack trace is preserved instead of being discarded.
        throw new IOException("Throwable : " + e, e);
    }
    finally
    {
        fs.Close();
        cluster.Shutdown();
    }
}
/// <summary>
/// Writes a 6-byte file one byte at a time, sleeping past the client socket
/// timeout between writes/hflushes, to exercise pipeline heartbeats keeping a
/// slow writer's pipeline alive. Verifies the full file at the end.
/// </summary>
public virtual void TestPipelineHeartbeat()
{
    int datanodeCount = 2;
    int length = 6;
    Configuration config = new HdfsConfiguration();
    int socketTimeout = 2000;
    config.SetInt(DFSConfigKeys.DfsClientSocketTimeoutKey, socketTimeout);
    Path target = new Path("/pipelineHeartbeat/foo");
    System.Console.Out.WriteLine("p=" + target);
    MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(config).NumDataNodes(datanodeCount
        ).Build();
    try
    {
        DistributedFileSystem dfs = miniCluster.GetFileSystem();
        byte[] data = AppendTestUtil.InitBuffer(length);
        // create a new file and trickle bytes into it slower than the timeout
        FSDataOutputStream output = AppendTestUtil.CreateFile(dfs, target, datanodeCount);
        output.Write(data, 0, 1);
        Sharpen.Thread.Sleep(socketTimeout);
        output.Hflush();
        System.Console.Out.WriteLine("Wrote 1 byte and hflush " + target);
        // alternate the position of the sleep relative to write/hflush so the
        // pipeline is idle at different points in the protocol
        Sharpen.Thread.Sleep(socketTimeout);
        output.Write(data, 1, 1);
        output.Hflush();
        output.Write(data, 2, 1);
        Sharpen.Thread.Sleep(socketTimeout);
        output.Hflush();
        output.Write(data, 3, 1);
        Sharpen.Thread.Sleep(socketTimeout);
        output.Write(data, 4, 1);
        output.Hflush();
        output.Write(data, 5, 1);
        Sharpen.Thread.Sleep(socketTimeout);
        output.Close();
        // verify that entire file is good
        AppendTestUtil.CheckFullFile(dfs, target, length, data, "Failed to slowly write to a file"
            );
    }
    finally
    {
        miniCluster.Shutdown();
    }
}
/// <summary>
/// Append to a partial CRC chunk where the first appended write does not fill
/// up the partial CRC chunk. Reopens the file for append twice, hflush()-ing
/// partial-chunk packets to the datanode, then fills the chunk and verifies
/// the complete file.
/// </summary>
/// <param name="appendToNewBlock">
/// when true, append with CreateFlag.NewBlock so appended data starts a new
/// block instead of extending the last partial one
/// </param>
/// <exception cref="System.IO.IOException"/>
private void TestAppendToPartialChunk(bool appendToNewBlock)
{
    Path target = new Path("/partialChunk/foo" + (appendToNewBlock ? "0" : "1"));
    int length = 513;
    System.Console.Out.WriteLine("p=" + target);
    byte[] data = AppendTestUtil.InitBuffer(length);
    // create a 1 byte file
    FSDataOutputStream stream = AppendTestUtil.CreateFile(fs, target, 1);
    stream.Write(data, 0, 1);
    stream.Close();
    System.Console.Out.WriteLine("Wrote 1 byte and closed the file " + target);
    // reopen for append and add one byte to the partial CRC chunk
    stream = appendToNewBlock
        ? fs.Append(target, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock), 4096, null)
        : fs.Append(target);
    stream.Write(data, 1, 1);
    stream.Hflush();
    // the partial CRC chunk is still not full when the file is closed
    stream.Close();
    System.Console.Out.WriteLine("Append 1 byte and closed the file " + target);
    // reopen once more to write the remainder of the file
    stream = appendToNewBlock
        ? fs.Append(target, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock), 4096, null)
        : fs.Append(target);
    // ensure getPos is set to reflect existing size of the file
    NUnit.Framework.Assert.AreEqual(2, stream.GetPos());
    // append to a partial CRC chunk; it is not full yet, so hflush forces a
    // packet to the datanode anyway
    stream.Write(data, 2, 1);
    stream.Hflush();
    System.Console.Out.WriteLine("Append and flush 1 byte");
    // still not full: force yet another partial-chunk packet to the datanode
    stream.Write(data, 3, 2);
    stream.Hflush();
    System.Console.Out.WriteLine("Append and flush 2 byte");
    // fill up the partial chunk and close the file
    stream.Write(data, 5, length - 5);
    stream.Close();
    System.Console.Out.WriteLine("Flush 508 byte and closed the file " + target);
    // verify that entire file is good
    AppendTestUtil.CheckFullFile(fs, target, length, data, "Failed to append to a partial chunk"
        );
}
/// <summary>
/// Verifies hflush() and close() behavior when the calling thread is
/// interrupted: either the call observes the interrupt (throwing
/// ThreadInterruptedException) or it completes with interrupt status still
/// set. Either way the interrupt status must be consumed exactly once, and the
/// bytes hflushed before the interrupted close must survive.
/// </summary>
public virtual void TestHFlushInterrupted()
{
    int datanodeCount = 2;
    int length = 6;
    byte[] data = AppendTestUtil.InitBuffer(length);
    Configuration config = new HdfsConfiguration();
    Path target = new Path("/hflush-interrupted");
    System.Console.Out.WriteLine("p=" + target);
    MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(config).NumDataNodes(datanodeCount
        ).Build();
    try
    {
        DistributedFileSystem dfs = miniCluster.GetFileSystem();
        // create a new file and write the first two bytes
        FSDataOutputStream output = AppendTestUtil.CreateFile(dfs, target, datanodeCount);
        output.Write(data, 0, 2);
        // interrupt ourselves, then hflush while interrupted
        Sharpen.Thread.CurrentThread().Interrupt();
        try
        {
            output.Hflush();
            // If we made it past the hflush(), then that means that the ack made it back
            // from the pipeline before we got to the wait() call. In that case we should
            // still have interrupted status.
            NUnit.Framework.Assert.IsTrue(Sharpen.Thread.Interrupted());
        }
        catch (ThreadInterruptedException)
        {
            System.Console.Out.WriteLine("Got expected exception during flush");
        }
        // whichever path was taken, the interrupt status must now be clear
        NUnit.Framework.Assert.IsFalse(Sharpen.Thread.Interrupted());
        // try again to flush; should succeed since we no longer have interrupt status
        output.Hflush();
        // write some more data and flush
        output.Write(data, 2, 2);
        output.Hflush();
        // write some data and close while interrupted
        output.Write(data, 4, 2);
        Sharpen.Thread.CurrentThread().Interrupt();
        try
        {
            output.Close();
            // If we made it past the close(), then that means that the ack made it back
            // from the pipeline before we got to the wait() call. In that case we should
            // still have interrupted status.
            NUnit.Framework.Assert.IsTrue(Sharpen.Thread.Interrupted());
        }
        catch (ThreadInterruptedException)
        {
            System.Console.Out.WriteLine("Got expected exception during close");
            // If we got the exception, we shouldn't have interrupted status anymore.
            NUnit.Framework.Assert.IsFalse(Sharpen.Thread.Interrupted());
            // Now do a successful close.
            output.Close();
        }
        // verify the file: only the 4 bytes hflushed before the interrupted
        // close are guaranteed to be durable
        AppendTestUtil.CheckFullFile(dfs, target, 4, data, "Failed to deal with thread interruptions"
            , false);
    }
    finally
    {
        miniCluster.Shutdown();
    }
}
/// <summary>
/// Exercises fs.Append() in three scenarios: (1) appending twice to an
/// existing file and verifying the final contents, (2) appending to a
/// non-existing file (must throw FileNotFoundException), and (3) append
/// permission checks as a non-superuser (append allowed with write permission,
/// denied without).
/// </summary>
public virtual void TestSimpleAppend()
{
    Configuration conf = new HdfsConfiguration();
    conf.SetInt(DFSConfigKeys.DfsDatanodeHandlerCountKey, 50);
    fileContents = AppendTestUtil.InitBuffer(AppendTestUtil.FileSize);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
    FileSystem fs = cluster.GetFileSystem();
    try
    {
        {
            // test appending to a file.
            // create a new file.
            Path file1 = new Path("/simpleAppend.dat");
            FSDataOutputStream stm = AppendTestUtil.CreateFile(fs, file1, 1);
            System.Console.Out.WriteLine("Created file simpleAppend.dat");
            // write to file
            int mid = 186;
            // io.bytes.per.checksum bytes
            System.Console.Out.WriteLine("Writing " + mid + " bytes to file " + file1);
            stm.Write(fileContents, 0, mid);
            stm.Close();
            System.Console.Out.WriteLine("Wrote and Closed first part of file.");
            // append to the file up to offset mid2
            int mid2 = 607;
            // io.bytes.per.checksum bytes
            // FIX: the original message logged "mid" bytes but this write is
            // (mid2 - mid) bytes; log the byte count actually written.
            System.Console.Out.WriteLine("Writing " + (mid2 - mid) + " bytes to file " + file1);
            stm = fs.Append(file1);
            stm.Write(fileContents, mid, mid2 - mid);
            stm.Close();
            System.Console.Out.WriteLine("Wrote and Closed second part of file.");
            // write the remainder of the file
            stm = fs.Append(file1);
            // ensure getPos is set to reflect existing size of the file
            NUnit.Framework.Assert.IsTrue(stm.GetPos() > 0);
            System.Console.Out.WriteLine("Writing " + (AppendTestUtil.FileSize - mid2) + " bytes to file "
                 + file1);
            stm.Write(fileContents, mid2, AppendTestUtil.FileSize - mid2);
            System.Console.Out.WriteLine("Written second part of file");
            stm.Close();
            System.Console.Out.WriteLine("Wrote and Closed second part of file.");
            // verify that entire file is good
            AppendTestUtil.CheckFullFile(fs, file1, AppendTestUtil.FileSize, fileContents, "Read 2"
                );
        }
        {
            // test appending to an non-existing file.
            FSDataOutputStream @out = null;
            try
            {
                @out = fs.Append(new Path("/non-existing.dat"));
                NUnit.Framework.Assert.Fail("Expected to have FileNotFoundException");
            }
            catch (FileNotFoundException fnfe)
            {
                System.Console.Out.WriteLine("Good: got " + fnfe);
                Sharpen.Runtime.PrintStackTrace(fnfe, System.Console.Out);
            }
            finally
            {
                IOUtils.CloseStream(@out);
            }
        }
        {
            // test append permission.
            // set root to all writable (0777)
            Path root = new Path("/");
            fs.SetPermission(root, new FsPermission((short)0x1ff));
            fs.Close();
            // login as a different user
            UserGroupInformation superuser = UserGroupInformation.GetCurrentUser();
            string username = "******";
            string group = "testappendgroup";
            NUnit.Framework.Assert.IsFalse(superuser.GetShortUserName().Equals(username));
            NUnit.Framework.Assert.IsFalse(Arrays.AsList(superuser.GetGroupNames()).Contains(
                group));
            UserGroupInformation appenduser = UserGroupInformation.CreateUserForTesting(username
                , new string[] { group });
            fs = DFSTestUtil.GetFileSystemAs(appenduser, conf);
            // create a file
            Path dir = new Path(root, GetType().Name);
            Path foo = new Path(dir, "foo.dat");
            FSDataOutputStream @out = null;
            int offset = 0;
            try
            {
                @out = fs.Create(foo);
                int len = 10 + AppendTestUtil.NextInt(100);
                @out.Write(fileContents, offset, len);
                offset += len;
            }
            finally
            {
                IOUtils.CloseStream(@out);
            }
            // change dir and foo to minimal permissions (dir: --x, foo: -w-)
            fs.SetPermission(dir, new FsPermission((short)0x40));
            fs.SetPermission(foo, new FsPermission((short)0x80));
            // try append with write permission, should succeed
            @out = null;
            try
            {
                @out = fs.Append(foo);
                int len = 10 + AppendTestUtil.NextInt(100);
                @out.Write(fileContents, offset, len);
                offset += len;
            }
            finally
            {
                IOUtils.CloseStream(@out);
            }
            // change dir to all permissions but remove write on foo
            fs.SetPermission(foo, new FsPermission((short)0x17f));
            fs.SetPermission(dir, new FsPermission((short)0x1ff));
            // try append without write permission, should fail
            @out = null;
            try
            {
                @out = fs.Append(foo);
                NUnit.Framework.Assert.Fail("Expected to have AccessControlException");
            }
            catch (AccessControlException ace)
            {
                System.Console.Out.WriteLine("Good: got " + ace);
                Sharpen.Runtime.PrintStackTrace(ace, System.Console.Out);
            }
            finally
            {
                IOUtils.CloseStream(@out);
            }
        }
    }
    catch (IOException e)
    {
        System.Console.Out.WriteLine("Exception :" + e);
        throw;
    }
    catch (Exception e)
    {
        System.Console.Out.WriteLine("Throwable :" + e);
        Sharpen.Runtime.PrintStackTrace(e);
        // FIX: pass the original exception as the inner exception so the
        // root-cause stack trace is preserved instead of being discarded.
        throw new IOException("Throwable : " + e, e);
    }
    finally
    {
        fs.Close();
        cluster.Shutdown();
    }
}
/// <summary>Test that appends to files at random offsets.</summary>
/// <remarks>
/// Creates numberOfFiles empty files with random replication factors, then
/// runs numThreads Workload threads that append to them concurrently, and
/// waits for every worker to finish (retrying joins that are interrupted).
/// </remarks>
/// <param name="appendToNewBlock">
/// passed through to each Workload so appends start a new block when true
/// </param>
/// <exception cref="System.IO.IOException"/>
private void TestComplexAppend(bool appendToNewBlock)
{
    fileContents = AppendTestUtil.InitBuffer(AppendTestUtil.FileSize);
    Configuration config = new HdfsConfiguration();
    // aggressive heartbeat/replication settings so failures surface quickly
    config.SetInt(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 2000);
    config.SetInt(DFSConfigKeys.DfsHeartbeatIntervalKey, 2);
    config.SetInt(DFSConfigKeys.DfsNamenodeReplicationPendingTimeoutSecKey, 2);
    config.SetInt(DFSConfigKeys.DfsClientSocketTimeoutKey, 30000);
    config.SetInt(DFSConfigKeys.DfsDatanodeSocketWriteTimeoutKey, 30000);
    config.SetInt(DFSConfigKeys.DfsDatanodeHandlerCountKey, 50);
    MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(config).NumDataNodes(numDatanodes
        ).Build();
    miniCluster.WaitActive();
    FileSystem dfs = miniCluster.GetFileSystem();
    try
    {
        // create a bunch of test files with random replication factors and
        // record them for the workers
        for (int fileIdx = 0; fileIdx < numberOfFiles; fileIdx++)
        {
            int replication = AppendTestUtil.NextInt(numDatanodes - 2) + 1;
            Path testFile = new Path("/" + fileIdx + ".dat");
            FSDataOutputStream stream = AppendTestUtil.CreateFile(dfs, testFile, replication);
            stream.Close();
            testFiles.AddItem(testFile);
        }
        // create threads and make them run workload concurrently
        workload = new TestFileAppend2.Workload[numThreads];
        for (int t = 0; t < numThreads; t++)
        {
            workload[t] = new TestFileAppend2.Workload(this, miniCluster, t, appendToNewBlock
                );
            workload[t].Start();
        }
        // wait for all transactions to get over; if a join is interrupted,
        // back up the index and retry it
        for (int t = 0; t < numThreads; t++)
        {
            try
            {
                System.Console.Out.WriteLine("Waiting for thread " + t + " to complete...");
                workload[t].Join();
                System.Console.Out.WriteLine("Waiting for thread " + t + " complete.");
            }
            catch (Exception)
            {
                // retry
                t--;
            }
        }
    }
    finally
    {
        dfs.Close();
        miniCluster.Shutdown();
    }
    // If any of the worker thread failed in their job, indicate that
    // this test failed.
    //
    // NUnit.Framework.Assert.IsTrue("testComplexAppend Worker encountered exceptions.", globalStatus);
}
/// <summary>
/// Verifies datanode copy-on-write block handling: hardlinks are created for
/// every other block of a file, then UnlinkBlock must report true (a detach
/// happened) on the first pass over all blocks and false on the second pass
/// (already detached).
/// </summary>
public virtual void TestCopyOnWrite()
{
    Configuration config = new HdfsConfiguration();
    MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(config).Build();
    FileSystem dfs = miniCluster.GetFileSystem();
    IPEndPoint nnAddr = new IPEndPoint("localhost", miniCluster.GetNameNodePort());
    DFSClient dfsClient = new DFSClient(nnAddr, config);
    try
    {
        // create a new file, write to it and close it.
        Path target = new Path("/filestatus.dat");
        FSDataOutputStream output = AppendTestUtil.CreateFile(dfs, target, 1);
        WriteFile(output);
        output.Close();
        // get a handle to the (single) datanode
        DataNode[] dataNodes = miniCluster.ListDataNodes();
        NUnit.Framework.Assert.IsTrue("There should be only one datanode but found " + dataNodes
            .Length, dataNodes.Length == 1);
        LocatedBlocks locations = dfsClient.GetNamenode().GetBlockLocations(target.ToString()
            , 0, long.MaxValue);
        IList<LocatedBlock> blocks = locations.GetLocatedBlocks();
        // create hardlinks for every other block
        for (int i = 0; i < blocks.Count; i = i + 2)
        {
            ExtendedBlock block = blocks[i].GetBlock();
            FilePath blockFile = DataNodeTestUtils.GetFile(dataNodes[0], block.GetBlockPoolId
                (), block.GetLocalBlock().GetBlockId());
            FilePath hardLink = new FilePath(blockFile.ToString() + ".link");
            System.Console.Out.WriteLine("Creating hardlink for File " + blockFile + " to " +
                hardLink);
            HardLink.CreateHardLink(blockFile, hardLink);
        }
        // detach all blocks; this should remove hardlinks (if any)
        for (int j = 0; j < blocks.Count; j++)
        {
            ExtendedBlock block = blocks[j].GetBlock();
            System.Console.Out.WriteLine("testCopyOnWrite detaching block " + block);
            NUnit.Framework.Assert.IsTrue("Detaching block " + block + " should have returned true"
                , DataNodeTestUtils.UnlinkBlock(dataNodes[0], block, 1));
        }
        // since the blocks were already detached earlier, these calls should
        // return false
        for (int k = 0; k < blocks.Count; k++)
        {
            ExtendedBlock block = blocks[k].GetBlock();
            System.Console.Out.WriteLine("testCopyOnWrite detaching block " + block);
            NUnit.Framework.Assert.IsTrue("Detaching block " + block + " should have returned false"
                , !DataNodeTestUtils.UnlinkBlock(dataNodes[0], block, 1));
        }
    }
    finally
    {
        dfsClient.Close();
        dfs.Close();
        miniCluster.Shutdown();
    }
}