public virtual void TestSmallAppendRace()
{
    Path file = new Path("/testSmallAppendRace");
    string fName = file.ToUri().GetPath();
    // Create the file and write a small amount of data.
    FSDataOutputStream stm = fs.Create(file);
    AppendTestUtil.Write(stm, 0, 123);
    stm.Close();
    // Introduce a delay between getFileInfo and calling append() against NN.
    DFSClient client = DFSClientAdapter.GetDFSClient(fs);
    DFSClient spyClient = Org.Mockito.Mockito.Spy(client);
    Org.Mockito.Mockito.When(spyClient.GetFileInfo(fName)).ThenAnswer(new _Answer_548(client, fName));
    DFSClientAdapter.SetDFSClient(fs, spyClient);
    // Create two threads for doing appends to the same file.
    Sharpen.Thread worker1 = new _Thread_564(this, file);
    Sharpen.Thread worker2 = new _Thread_574(this, file);
    worker1.Start();
    worker2.Start();
    // append will fail when the file size crosses the checksum chunk boundary,
    // if append was called with a stale file stat.
    DoSmallAppends(file, fs, 20);
}
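// _Answer_548, _Thread_564 and _Thread_574 are the Sharpen-generated classes for the
// anonymous Answer and worker threads this test relies on; they are not shown above.
// A minimal sketch of the Answer, assuming the Sharpen Mockito port exposes
// Org.Mockito.Stubbing.Answer with an object-returning Answer(InvocationOnMock) method:
// it delegates to the real client and then sleeps so the competing appender can slip in
// between getFileInfo() and append(). The two thread classes are assumed to simply call
// DoSmallAppends(file, fs, 20) from Run() and swallow any IOException.
private sealed class _Answer_548 : Org.Mockito.Stubbing.Answer
{
    private readonly DFSClient client;
    private readonly string fName;

    public _Answer_548(DFSClient client, string fName)
    {
        this.client = client;
        this.fName = fName;
    }

    public object Answer(Org.Mockito.Invocation.InvocationOnMock invocation)
    {
        try
        {
            // Fetch the real file stat, then pause to widen the race window.
            HdfsFileStatus stat = client.GetFileInfo(fName);
            Sharpen.Thread.Sleep(100);
            return stat;
        }
        catch (Exception)
        {
            return null;
        }
    }
}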
public virtual void TestAbandonBlock()
{
    string src = FileNamePrefix + "foo";
    // Start writing a file but do not close it
    FSDataOutputStream fout = fs.Create(new Path(src), true, 4096, (short)1, 512L);
    for (int i = 0; i < 1024; i++)
    {
        fout.Write(123);
    }
    fout.Hflush();
    long fileId = ((DFSOutputStream)fout.GetWrappedStream()).GetFileId();
    // Now abandon the last block
    DFSClient dfsclient = DFSClientAdapter.GetDFSClient(fs);
    LocatedBlocks blocks = dfsclient.GetNamenode().GetBlockLocations(src, 0, int.MaxValue);
    int originalNumBlocks = blocks.LocatedBlockCount();
    LocatedBlock b = blocks.GetLastLocatedBlock();
    dfsclient.GetNamenode().AbandonBlock(b.GetBlock(), fileId, src, dfsclient.clientName);
    // Call abandonBlock again to make sure the operation is idempotent
    dfsclient.GetNamenode().AbandonBlock(b.GetBlock(), fileId, src, dfsclient.clientName);
    // And close the file
    fout.Close();
    // Restart the NameNode and check that the block has been abandoned
    cluster.RestartNameNode();
    blocks = dfsclient.GetNamenode().GetBlockLocations(src, 0, int.MaxValue);
    NUnit.Framework.Assert.AreEqual("Block " + b + " has not been abandoned.", originalNumBlocks, blocks.LocatedBlockCount() + 1);
}
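// TestAbandonBlock assumes a running MiniDFSCluster plus the cluster, fs and
// FileNamePrefix members of the test class, which are not shown above. A minimal
// sketch of that fixture, assuming a small multi-datanode cluster and NUnit
// SetUp/TearDown hooks; the prefix value and datanode count are assumptions:
private static readonly Configuration Conf = new HdfsConfiguration();
internal const string FileNamePrefix = "/TestAbandonBlock_";
private MiniDFSCluster cluster;
private FileSystem fs;

[NUnit.Framework.SetUp]
public virtual void SetUp()
{
    // Bring up a fresh mini cluster and wait until it is serving requests.
    cluster = new MiniDFSCluster.Builder(Conf).NumDataNodes(2).Build();
    fs = cluster.GetFileSystem();
    cluster.WaitActive();
}

[NUnit.Framework.TearDown]
public virtual void TearDown()
{
    if (fs != null)
    {
        fs.Close();
    }
    if (cluster != null)
    {
        cluster.Shutdown();
    }
}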
public virtual void TestClientThatDoesNotSupportEncryption()
{
    MiniDFSCluster cluster = null;
    try
    {
        Configuration conf = new Configuration();
        // Set short retry timeouts so this test runs faster
        conf.SetInt(DFSConfigKeys.DfsClientRetryWindowBase, 10);
        cluster = new MiniDFSCluster.Builder(conf).Build();
        FileSystem fs = GetFileSystem(conf);
        WriteTestDataToFile(fs);
        NUnit.Framework.Assert.AreEqual(PlainText, DFSTestUtil.ReadFile(fs, TestPath));
        fs.Close();
        cluster.Shutdown();
        SetEncryptionConfigKeys(conf);
        cluster = new MiniDFSCluster.Builder(conf).ManageDataDfsDirs(false).ManageNameDfsDirs(false).Format(false).StartupOption(HdfsServerConstants.StartupOption.Regular).Build();
        fs = GetFileSystem(conf);
        DFSClient client = DFSClientAdapter.GetDFSClient((DistributedFileSystem)fs);
        DFSClient spyClient = Org.Mockito.Mockito.Spy(client);
        Org.Mockito.Mockito.DoReturn(false).When(spyClient).ShouldEncryptData();
        DFSClientAdapter.SetDFSClient((DistributedFileSystem)fs, spyClient);
        GenericTestUtils.LogCapturer logs = GenericTestUtils.LogCapturer.CaptureLogs(LogFactory.GetLog(typeof(DataNode)));
        try
        {
            NUnit.Framework.Assert.AreEqual(PlainText, DFSTestUtil.ReadFile(fs, TestPath));
            if (resolverClazz != null && !resolverClazz.EndsWith("TestTrustedChannelResolver"))
            {
                NUnit.Framework.Assert.Fail("Should not have been able to read without encryption enabled.");
            }
        }
        catch (IOException ioe)
        {
            GenericTestUtils.AssertExceptionContains("Could not obtain block:", ioe);
        }
        finally
        {
            logs.StopCapturing();
        }
        fs.Close();
        if (resolverClazz == null)
        {
            GenericTestUtils.AssertMatches(logs.GetOutput(), "Failed to read expected encryption handshake from client at");
        }
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
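// WriteTestDataToFile and SetEncryptionConfigKeys are helpers of the surrounding
// test class that are not shown above. A minimal sketch of what they plausibly do,
// based on how this test uses them; the exact DFSConfigKeys constants are assumptions,
// and the trusted-channel resolver class key (set when resolverClazz is non-null)
// is omitted rather than guessed:
private void SetEncryptionConfigKeys(Configuration conf)
{
    // Enable data transfer encryption and block access tokens for the restarted cluster.
    conf.SetBoolean(DFSConfigKeys.DfsEncryptDataTransferKey, true);
    conf.SetBoolean(DFSConfigKeys.DfsBlockAccessTokenEnableKey, true);
}

private void WriteTestDataToFile(FileSystem fs)
{
    // Create the file on first use, append on later calls, and write the plain-text payload.
    FSDataOutputStream @out = fs.Exists(TestPath) ? fs.Append(TestPath) : fs.Create(TestPath);
    @out.Write(Sharpen.Runtime.GetBytesForString(PlainText));
    @out.Close();
}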
public virtual void TestRestartDfsWithAbandonedBlock()
{
    Configuration conf = new HdfsConfiguration();
    // Turn off persistent IPC, so that the DFSClient can survive NN restart
    conf.SetInt(CommonConfigurationKeysPublic.IpcClientConnectionMaxidletimeKey, 0);
    MiniDFSCluster cluster = null;
    long len = 0;
    FSDataOutputStream stream;
    try
    {
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
        FileSystem fs = cluster.GetFileSystem();
        // Creating a file with 4096 blockSize to write multiple blocks
        stream = fs.Create(FilePath, true, BlockSize, (short)1, BlockSize);
        stream.Write(DataBeforeRestart);
        stream.Hflush();
        // Wait for all of the blocks to get through
        while (len < BlockSize * (NumBlocks - 1))
        {
            FileStatus status = fs.GetFileStatus(FilePath);
            len = status.GetLen();
            Sharpen.Thread.Sleep(100);
        }
        // Abandon the last block
        DFSClient dfsclient = DFSClientAdapter.GetDFSClient((DistributedFileSystem)fs);
        HdfsFileStatus fileStatus = dfsclient.GetNamenode().GetFileInfo(FileName);
        LocatedBlocks blocks = dfsclient.GetNamenode().GetBlockLocations(FileName, 0, BlockSize * NumBlocks);
        NUnit.Framework.Assert.AreEqual(NumBlocks, blocks.GetLocatedBlocks().Count);
        LocatedBlock b = blocks.GetLastLocatedBlock();
        dfsclient.GetNamenode().AbandonBlock(b.GetBlock(), fileStatus.GetFileId(), FileName, dfsclient.clientName);
        // Explicitly do NOT close the file.
        cluster.RestartNameNode();
        // Check that the file has no fewer bytes than before the restart;
        // this would mean that blocks were successfully persisted to the log
        FileStatus status_1 = fs.GetFileStatus(FilePath);
        NUnit.Framework.Assert.IsTrue("Length incorrect: " + status_1.GetLen(), status_1.GetLen() == len - BlockSize);
        // Verify the data showed up from before restart, sans abandoned block.
        FSDataInputStream readStream = fs.Open(FilePath);
        try
        {
            byte[] verifyBuf = new byte[DataBeforeRestart.Length - BlockSize];
            IOUtils.ReadFully(readStream, verifyBuf, 0, verifyBuf.Length);
            byte[] expectedBuf = new byte[DataBeforeRestart.Length - BlockSize];
            System.Array.Copy(DataBeforeRestart, 0, expectedBuf, 0, expectedBuf.Length);
            Assert.AssertArrayEquals(expectedBuf, verifyBuf);
        }
        finally
        {
            IOUtils.CloseStream(readStream);
        }
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
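// FilePath, FileName, BlockSize, NumBlocks and DataBeforeRestart are fields of the
// surrounding test class that are not shown above. A minimal sketch of how they are
// plausibly defined, assuming a 4096-byte block size and a five-block random payload
// consistent with the "4096 blockSize" comment in the test; the concrete values and
// the enclosing class name used in the static constructor are assumptions:
private const int BlockSize = 4096;
private const int NumBlocks = 5;
private const string FileName = "/data";
private static readonly Path FilePath = new Path(FileName);
internal static readonly byte[] DataBeforeRestart = new byte[BlockSize * NumBlocks];

static TestPersistBlocks()
{
    // Fill the payload with random bytes so missing or corrupted blocks are detectable.
    new Random().NextBytes(DataBeforeRestart);
}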