/// <summary>
/// Truncates a two-block file down to exactly one block via FileContext and
/// verifies the reported file length, the remaining file contents, and the
/// disk-space accounting, then deletes the test directory.
/// </summary>
public virtual void TestTruncate()
{
    short repl = 3;
    int blockSize = 1024;
    int numOfBlocks = 2;
    DistributedFileSystem fs = cluster.GetFileSystem();
    Path dir = GetTestRootPath(fc, "test/hadoop");
    Path file = GetTestRootPath(fc, "test/hadoop/file");
    byte[] data = FileSystemTestHelper.GetFileData(numOfBlocks, blockSize);
    FileSystemTestHelper.CreateFile(fs, file, data, blockSize, repl);
    int newLength = blockSize;
    // Truncating to an exact block boundary should complete immediately,
    // with no block recovery needed.
    bool isReady = fc.Truncate(file, newLength);
    NUnit.Framework.Assert.IsTrue("Recovery is not expected.", isReady);
    FileStatus fileStatus = fc.GetFileStatus(file);
    // Fixed: this file's AreEqual convention is expected-first; the original
    // call had (actual, expected) swapped, yielding misleading failure text.
    NUnit.Framework.Assert.AreEqual(newLength, fileStatus.GetLen());
    AppendTestUtil.CheckFullFile(fs, file, newLength, data, file.ToString());
    ContentSummary cs = fs.GetContentSummary(dir);
    // Space consumed = remaining length times replication factor.
    // (expected/actual order fixed here as well)
    NUnit.Framework.Assert.AreEqual("Bad disk space usage", newLength * repl, cs.GetSpaceConsumed());
    NUnit.Framework.Assert.IsTrue(fs.Delete(dir, true));
}
/// <summary>
/// Writes to a file without closing it and triggers block reports (which
/// will contain an RBW replica and be queued on the standby), then fails
/// over and verifies neither namenode marked any replica corrupt and the
/// file is readable from the new active.
/// </summary>
public virtual void TestBlockReportsWhileFileBeingWritten()
{
    FSDataOutputStream @out = fs.Create(TestFilePath);
    try
    {
        AppendTestUtil.Write(@out, 0, 10);
        // Hflush so the datanodes actually hold an in-progress (RBW) replica.
        @out.Hflush();
        // Block report will include the RBW replica, but will be
        // queued on the StandbyNode.
        cluster.TriggerBlockReports();
    }
    finally
    {
        IOUtils.CloseStream(@out);
    }
    // Fail over: NN1 becomes active and must process the queued RBW report.
    cluster.TransitionToStandby(0);
    cluster.TransitionToActive(1);
    // Verify that no replicas are marked corrupt, and that the
    // file is readable from the failed-over standby.
    BlockManagerTestUtil.UpdateState(nn1.GetNamesystem().GetBlockManager());
    BlockManagerTestUtil.UpdateState(nn2.GetNamesystem().GetBlockManager());
    NUnit.Framework.Assert.AreEqual(0, nn1.GetNamesystem().GetCorruptReplicaBlocks());
    NUnit.Framework.Assert.AreEqual(0, nn2.GetNamesystem().GetCorruptReplicaBlocks());
    DFSTestUtil.ReadFile(fs, TestFilePath);
}
/// <summary>
/// Verifies hsync behavior at an exact block boundary for a file created
/// with CreateFlag.SyncBlock: filling the block triggers exactly one sync,
/// an immediate hsync on the already-synced boundary is a no-op, and syncs
/// resume once more data is written.
/// </summary>
public virtual void TestHSyncBlockBoundary()
{
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
    // Fixed: shut the mini-cluster down in a finally block so a failing
    // assertion does not leak the cluster (and its daemons/ports).
    try
    {
        FileSystem fs = cluster.GetFileSystem();
        Path p = new Path("/testHSyncBlockBoundary/foo");
        int len = 1 << 16;
        byte[] fileContents = AppendTestUtil.InitBuffer(len);
        // Block size == len, so one full write lands exactly on the boundary.
        FSDataOutputStream @out = fs.Create(p, FsPermission.GetDefault(), EnumSet.Of(CreateFlag
            .Create, CreateFlag.Overwrite, CreateFlag.SyncBlock), 4096, (short)1, len, null);
        // fill exactly one block (tests the SYNC_BLOCK case) and flush
        @out.Write(fileContents, 0, len);
        @out.Hflush();
        // the full block should have caused a sync
        CheckSyncMetric(cluster, 1);
        @out.Hsync();
        // hsync on the already-synced block boundary should not sync again
        CheckSyncMetric(cluster, 1);
        // write one more byte and sync again
        @out.Write(1);
        @out.Hsync();
        CheckSyncMetric(cluster, 2);
        @out.Close();
        // close performs a final sync
        CheckSyncMetric(cluster, 3);
    }
    finally
    {
        cluster.Shutdown();
    }
}
/// <summary>
/// Builds a fake packet from generated data and checksum bytes, feeds it to
/// the given PacketReceiver, and verifies the parsed data/checksum slices
/// and header fields; then mirrors the packet to a spied output stream and
/// checks it is re-emitted byte-for-byte in a single write call.
/// </summary>
/// <exception cref="System.IO.IOException"/>
private void DoTestReceiveAndMirror(PacketReceiver pr, int dataLen, int checksumsLen)
{
    byte[] Data = AppendTestUtil.InitBuffer(dataLen);
    byte[] Checksums = AppendTestUtil.InitBuffer(checksumsLen);
    byte[] packet = PrepareFakePacket(Data, Checksums);
    ByteArrayInputStream @in = new ByteArrayInputStream(packet);
    pr.ReceiveNextPacket(@in);
    // The receiver must expose the data and checksum regions exactly.
    ByteBuffer parsedData = pr.GetDataSlice();
    Assert.AssertArrayEquals(Data, RemainingAsArray(parsedData));
    ByteBuffer parsedChecksums = pr.GetChecksumSlice();
    Assert.AssertArrayEquals(Checksums, RemainingAsArray(parsedChecksums));
    PacketHeader header = pr.GetHeader();
    NUnit.Framework.Assert.AreEqual(Seqno, header.GetSeqno());
    NUnit.Framework.Assert.AreEqual(OffsetInBlock, header.GetOffsetInBlock());
    // Packet length covers data + checksums + Ints.Bytes (presumably the
    // 4-byte length field itself — confirm against PacketHeader).
    NUnit.Framework.Assert.AreEqual(dataLen + checksumsLen + Ints.Bytes, header.GetPacketLen());
    // Mirror the packet to an output stream and make sure it matches
    // the packet we sent.
    ByteArrayOutputStream mirrored = new ByteArrayOutputStream();
    mirrored = Org.Mockito.Mockito.Spy(mirrored);
    pr.MirrorPacketTo(new DataOutputStream(mirrored));
    // The write should be done in a single call. Otherwise we may hit
    // nasty interactions with nagling (eg HDFS-4049).
    Org.Mockito.Mockito.Verify(mirrored, Org.Mockito.Mockito.Times(1)).Write(
        Org.Mockito.Mockito.Any<byte[]>(),
        Org.Mockito.Mockito.AnyInt(),
        Org.Mockito.Mockito.Eq(packet.Length));
    Org.Mockito.Mockito.VerifyNoMoreInteractions(mirrored);
    Assert.AssertArrayEquals(packet, mirrored.ToByteArray());
}
/// <summary>
/// Writes a block and a half, runs the given failover scenario, then either
/// writes another block and a half (so the first post-failover RPC is
/// allocateBlock) or closes immediately (so it is completeFile), testing
/// idempotence of that first RPC on the new active namenode (HDFS-3031).
/// </summary>
/// <exception cref="System.Exception"/>
private void DoWriteOverFailoverTest(TestPipelinesFailover.TestScenario scenario, TestPipelinesFailover.MethodToTestIdempotence methodToTest)
{
    Configuration conf = new Configuration();
    conf.SetInt(DFSConfigKeys.DfsBlockSizeKey, BlockSize);
    // Don't check replication periodically.
    conf.SetInt(DFSConfigKeys.DfsNamenodeReplicationIntervalKey, 1000);
    FSDataOutputStream stm = null;
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NnTopology(MiniDFSNNTopology
        .SimpleHATopology()).NumDataNodes(3).Build();
    try
    {
        int sizeWritten = 0;
        cluster.WaitActive();
        cluster.TransitionToActive(0);
        // Give the cluster a moment to settle after the transition.
        Sharpen.Thread.Sleep(500);
        Log.Info("Starting with NN 0 active");
        FileSystem fs = HATestUtil.ConfigureFailoverFs(cluster, conf);
        stm = fs.Create(TestPath);
        // write a block and a half
        AppendTestUtil.Write(stm, 0, BlockAndAHalf);
        sizeWritten += BlockAndAHalf;
        // Make sure all of the blocks are written out before failover.
        stm.Hflush();
        Log.Info("Failing over to NN 1");
        scenario.Run(cluster);
        // NOTE: explicitly do *not* make any further metadata calls
        // to the NN here. The next IPC call should be to allocate the next
        // block. Any other call would notice the failover and not test
        // idempotence of the operation (HDFS-3031)
        FSNamesystem ns1 = cluster.GetNameNode(1).GetNamesystem();
        BlockManagerTestUtil.UpdateState(ns1.GetBlockManager());
        // The new active should see a clean replica state.
        NUnit.Framework.Assert.AreEqual(0, ns1.GetPendingReplicationBlocks());
        NUnit.Framework.Assert.AreEqual(0, ns1.GetCorruptReplicaBlocks());
        NUnit.Framework.Assert.AreEqual(0, ns1.GetMissingBlocksCount());
        // If we're testing allocateBlock()'s idempotence, write another
        // block and a half, so we have to allocate a new block.
        // Otherwise, don't write anything, so our next RPC will be
        // completeFile() if we're testing idempotence of that operation.
        if (methodToTest == TestPipelinesFailover.MethodToTestIdempotence.AllocateBlock)
        {
            // write another block and a half
            AppendTestUtil.Write(stm, sizeWritten, BlockAndAHalf);
            sizeWritten += BlockAndAHalf;
        }
        stm.Close();
        // Null out so the finally-block CloseStream is a no-op on success.
        stm = null;
        AppendTestUtil.Check(fs, TestPath, sizeWritten);
    }
    finally
    {
        IOUtils.CloseStream(stm);
        cluster.Shutdown();
    }
}
// return the initial state of the configuration
/// <summary>
/// Test for the case where one of the DNs in the pipeline is in the
/// process of doing a block report exactly when the block is closed.
/// </summary>
/// <remarks>
/// Test for the case where one of the DNs in the pipeline is in the
/// process of doing a block report exactly when the block is closed.
/// In this case, the block report becomes delayed until after the
/// block is marked completed on the NN, and hence it reports an RBW
/// replica for a COMPLETE block. Such a report should not be marked
/// corrupt.
/// This is a regression test for HDFS-2791.
/// </remarks>
/// <exception cref="System.Exception"/>
public virtual void TestOneReplicaRbwReportArrivesAfterBlockCompleted()
{
    CountDownLatch brFinished = new CountDownLatch(1);
    // Anonymous-class answer (see _DelayAnswer_579) that counts down the
    // latch to inform the test that our block report went through.
    GenericTestUtils.DelayAnswer delayer = new _DelayAnswer_579(brFinished, Log);
    // inform the test that our block report went through.
    string MethodName = GenericTestUtils.GetMethodName();
    Path filePath = new Path("/" + MethodName + ".dat");
    // Start a second DN for this test -- we're checking
    // what happens when one of the DNs is slowed for some reason.
    ReplFactor = 2;
    StartDNandWait(null, false);
    NameNode nn = cluster.GetNameNode();
    FSDataOutputStream @out = fs.Create(filePath, ReplFactor);
    try
    {
        AppendTestUtil.Write(@out, 0, 10);
        @out.Hflush();
        // Set up a spy so that we can delay the block report coming
        // from this node.
        DataNode dn = cluster.GetDataNodes()[0];
        DatanodeProtocolClientSideTranslatorPB spy = DataNodeTestUtils.SpyOnBposToNN(dn, nn);
        Org.Mockito.Mockito.DoAnswer(delayer).When(spy).BlockReport(
            Org.Mockito.Mockito.AnyObject<DatanodeRegistration>(),
            Org.Mockito.Mockito.AnyString(),
            Org.Mockito.Mockito.AnyObject<StorageBlockReport[]>(),
            Org.Mockito.Mockito.AnyObject<BlockReportContext>());
        // Force a block report to be generated. The block report will have
        // an RBW replica in it. Wait for the RPC to be sent, but block
        // it before it gets to the NN.
        dn.ScheduleAllBlockReport(0);
        delayer.WaitForCall();
    }
    finally
    {
        IOUtils.CloseStream(@out);
    }
    // Now that the stream is closed, the NN will have the block in COMPLETE
    // state.
    delayer.Proceed();
    brFinished.Await();
    // Verify that no replicas are marked corrupt, and that the
    // file is still readable.
    BlockManagerTestUtil.UpdateState(nn.GetNamesystem().GetBlockManager());
    NUnit.Framework.Assert.AreEqual(0, nn.GetNamesystem().GetCorruptReplicaBlocks());
    DFSTestUtil.ReadFile(fs, filePath);
    // Ensure that the file is readable even from the DN that we futzed with.
    cluster.StopDataNode(1);
    DFSTestUtil.ReadFile(fs, filePath);
}
/// <summary>
/// Test to run benchmarks between short circuit read vs regular read with
/// specified number of threads simultaneously reading.
/// </summary>
/// <remarks>
/// Test to run benchmarks between short circuit read vs regular read with
/// specified number of threads simultaneously reading.
/// <br />
/// Run this using the following command:
/// bin/hadoop --config confdir \
/// org.apache.hadoop.hdfs.TestShortCircuitLocalRead \
/// <shortcircuit on?> <checsum on?> <Number of threads>
/// </remarks>
/// <exception cref="System.Exception"/>
public static void Main(string[] args)
{
    if (args.Length != 3)
    {
        System.Console.Out.WriteLine("Usage: test shortcircuit checksum threadCount");
        System.Environment.Exit(1);
    }
    // args: [0] short-circuit on/off, [1] checksum on/off, [2] thread count
    bool shortcircuit = Sharpen.Extensions.ValueOf(args[0]);
    bool checksum = Sharpen.Extensions.ValueOf(args[1]);
    int threadCount = System.Convert.ToInt32(args[2]);
    // Setup create a file
    Configuration conf = new Configuration();
    conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitKey, shortcircuit);
    conf.Set(DFSConfigKeys.DfsDomainSocketPathKey, "/tmp/TestShortCircuitLocalRead._PORT");
    conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitSkipChecksumKey, checksum);
    //Override fileSize and DATA_TO_WRITE to much larger values for benchmark test
    int fileSize = 1000 * blockSize + 100;
    // File with 1000 blocks
    byte[] dataToWrite = AppendTestUtil.RandomBytes(seed, fileSize);
    // create a new file in home directory. Do not close it.
    Path file1 = new Path("filelocal.dat");
    FileSystem fs = FileSystem.Get(conf);
    FSDataOutputStream stm = CreateFile(fs, file1, 1);
    stm.Write(dataToWrite);
    stm.Close();
    long start = Time.Now();
    int iteration = 20;
    // Each worker presumably re-reads the file 'iteration' times; see the
    // anonymous thread class _Thread_554 for the actual body.
    Sharpen.Thread[] threads = new Sharpen.Thread[threadCount];
    for (int i = 0; i < threadCount; i++)
    {
        threads[i] = new _Thread_554(iteration, fs, file1, dataToWrite, conf);
    }
    // Start all workers, then wait for all of them to finish before timing.
    for (int i_1 = 0; i_1 < threadCount; i_1++)
    {
        threads[i_1].Start();
    }
    for (int i_2 = 0; i_2 < threadCount; i_2++)
    {
        threads[i_2].Join();
    }
    long end = Time.Now();
    System.Console.Out.WriteLine("Iteration " + iteration + " took " + (end - start));
    fs.Delete(file1, false);
}
/// <summary>
/// Test that file data can be read by reading the block file
/// directly from the local store.
/// </summary>
/// <exception cref="System.IO.IOException"/>
/// <exception cref="System.Exception"/>
public virtual void DoTestShortCircuitReadImpl(bool ignoreChecksum, int size, int readOffset, string shortCircuitUser, string readingUser, bool legacyShortCircuitFails)
{
    Configuration conf = new Configuration();
    conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitKey, true);
    conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitSkipChecksumKey, ignoreChecksum);
    // Set a random client context name so that we don't share a cache with
    // other invocations of this function.
    conf.Set(DFSConfigKeys.DfsClientContext, UUID.RandomUUID().ToString());
    conf.Set(DFSConfigKeys.DfsDomainSocketPathKey, new FilePath(sockDir.GetDir(),
        "TestShortCircuitLocalRead._PORT.sock").GetAbsolutePath());
    if (shortCircuitUser != null)
    {
        // Restrict legacy local-path access to the given user and switch on
        // the legacy local block reader.
        conf.Set(DFSConfigKeys.DfsBlockLocalPathAccessUserKey, shortCircuitUser);
        conf.SetBoolean(DFSConfigKeys.DfsClientUseLegacyBlockreaderlocal, true);
    }
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Format(true).Build();
    FileSystem fs = cluster.GetFileSystem();
    try
    {
        // check that / exists
        Path path = new Path("/");
        NUnit.Framework.Assert.IsTrue("/ should be a directory", fs.GetFileStatus(path).IsDirectory() == true);
        byte[] fileData = AppendTestUtil.RandomBytes(seed, size);
        Path file1 = fs.MakeQualified(new Path("filelocal.dat"));
        FSDataOutputStream stm = CreateFile(fs, file1, 1);
        stm.Write(fileData);
        stm.Close();
        URI uri = cluster.GetURI();
        // Verify contents via both the regular stream read path and the
        // direct (ByteBuffer) read path, as the given reading user.
        CheckFileContent(uri, file1, fileData, readOffset, readingUser, conf, legacyShortCircuitFails);
        CheckFileContentDirect(uri, file1, fileData, readOffset, readingUser, conf, legacyShortCircuitFails);
    }
    finally
    {
        fs.Close();
        cluster.Shutdown();
    }
}
/// <summary>
/// Writes three "block and a half" chunks to a file, performing a namenode
/// failover (and failback) and stopping a datanode before each subsequent
/// write, to verify that the write pipeline survives both NN failover and
/// DN failure.
/// </summary>
/// <exception cref="System.Exception"/>
private void DoTestWriteOverFailoverWithDnFail(TestPipelinesFailover.TestScenario scenario)
{
    Configuration conf = new Configuration();
    conf.SetInt(DFSConfigKeys.DfsBlockSizeKey, BlockSize);
    FSDataOutputStream stm = null;
    // 5 DNs so the pipeline can still be built after two DNs are stopped.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NnTopology(MiniDFSNNTopology
        .SimpleHATopology()).NumDataNodes(5).Build();
    try
    {
        cluster.WaitActive();
        cluster.TransitionToActive(0);
        // Give the cluster a moment to settle after the transition.
        Sharpen.Thread.Sleep(500);
        Log.Info("Starting with NN 0 active");
        FileSystem fs = HATestUtil.ConfigureFailoverFs(cluster, conf);
        stm = fs.Create(TestPath);
        // write a block and a half
        AppendTestUtil.Write(stm, 0, BlockAndAHalf);
        // Make sure all the blocks are written before failover
        stm.Hflush();
        Log.Info("Failing over to NN 1");
        scenario.Run(cluster);
        NUnit.Framework.Assert.IsTrue(fs.Exists(TestPath));
        cluster.StopDataNode(0);
        // write another block and a half
        AppendTestUtil.Write(stm, BlockAndAHalf, BlockAndAHalf);
        stm.Hflush();
        Log.Info("Failing back to NN 0");
        cluster.TransitionToStandby(1);
        cluster.TransitionToActive(0);
        cluster.StopDataNode(1);
        // Third chunk: written after failback with a second DN stopped.
        AppendTestUtil.Write(stm, BlockAndAHalf * 2, BlockAndAHalf);
        stm.Hflush();
        stm.Close();
        // Null out so the finally-block CloseStream is a no-op on success.
        stm = null;
        AppendTestUtil.Check(fs, TestPath, BlockAndAHalf * 3);
    }
    finally
    {
        IOUtils.CloseStream(stm);
        cluster.Shutdown();
    }
}
/// <summary>
/// Test that file data can be read by reading the block
/// through RemoteBlockReader
/// </summary>
/// <exception cref="System.IO.IOException"/>
/// <exception cref="System.Exception"/>
public virtual void DoTestShortCircuitReadWithRemoteBlockReader(bool ignoreChecksum, int size, string shortCircuitUser, int readOffset, bool shortCircuitFails)
{
    Configuration conf = new Configuration();
    // Force the legacy RemoteBlockReader path alongside short-circuit reads.
    conf.SetBoolean(DFSConfigKeys.DfsClientUseLegacyBlockreader, true);
    conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitKey, true);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Format(true).Build();
    FileSystem fs = cluster.GetFileSystem();
    // check that / exists
    Path path = new Path("/");
    URI uri = cluster.GetURI();
    NUnit.Framework.Assert.IsTrue("/ should be a directory", fs.GetFileStatus(path).IsDirectory() == true);
    byte[] fileData = AppendTestUtil.RandomBytes(seed, size);
    Path file1 = new Path("filelocal.dat");
    FSDataOutputStream stm = CreateFile(fs, file1, 1);
    stm.Write(fileData);
    stm.Close();
    try
    {
        CheckFileContent(uri, file1, fileData, readOffset, shortCircuitUser, conf, shortCircuitFails);
        //RemoteBlockReader have unsupported method read(ByteBuffer bf)
        NUnit.Framework.Assert.IsTrue("RemoteBlockReader unsupported method read(ByteBuffer bf) error",
            CheckUnsupportedMethod(fs, file1, fileData, readOffset));
    }
    catch (IOException e)
    {
        // Wrap with context so failures identify this test path.
        throw new IOException("doTestShortCircuitReadWithRemoteBlockReader ex error ", e);
    }
    // Fixed: removed the redundant `catch (Exception inEx) { throw; }` —
    // a bare rethrow of all exceptions is behaviorally identical to not
    // catching, and the unused variable triggered a compiler warning.
    finally
    {
        fs.Close();
        cluster.Shutdown();
    }
}
/// <summary>
/// Exercises the skip path of the short-circuit local reader with checksum
/// verification enabled: reads a few bytes first (so the local reader is
/// active), then seeks past two blocks and reads again.
/// </summary>
/// <exception cref="System.IO.IOException"/>
public virtual void TestSkipWithVerifyChecksum()
{
    int size = blockSize;
    Configuration conf = new Configuration();
    conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitKey, true);
    // Checksum verification deliberately left ON for this test.
    conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitSkipChecksumKey, false);
    conf.Set(DFSConfigKeys.DfsDomainSocketPathKey, "/tmp/testSkipWithVerifyChecksum._PORT");
    DomainSocket.DisableBindPathValidation();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Format(true).Build();
    FileSystem fs = cluster.GetFileSystem();
    try
    {
        // check that / exists
        Path path = new Path("/");
        NUnit.Framework.Assert.IsTrue("/ should be a directory", fs.GetFileStatus(path).IsDirectory() == true);
        // Three blocks' worth of data so there is room to skip two blocks.
        byte[] fileData = AppendTestUtil.RandomBytes(seed, size * 3);
        // create a new file in home directory. Do not close it.
        Path file1 = new Path("filelocal.dat");
        FSDataOutputStream stm = CreateFile(fs, file1, 1);
        // write to file
        stm.Write(fileData);
        stm.Close();
        // now test the skip function
        FSDataInputStream instm = fs.Open(file1);
        byte[] actual = new byte[fileData.Length];
        // read something from the block first, otherwise BlockReaderLocal.skip()
        // will not be invoked
        int nread = instm.Read(actual, 0, 3);
        long skipped = 2 * size + 3;
        instm.Seek(skipped);
        nread = instm.Read(actual, (int)(skipped + nread), 3);
        instm.Close();
    }
    finally
    {
        fs.Close();
        cluster.Shutdown();
    }
}
/// <summary>Tests lease recovery if a client crashes.</summary>
/// <remarks>
/// Tests lease recovery if a client crashes. This approximates the
/// use case of HBase WALs being recovered after a NN failover.
/// </remarks>
/// <exception cref="System.Exception"/>
public virtual void TestLeaseRecoveryAfterFailover()
{
    Configuration conf = new Configuration();
    // Disable permissions so that another user can recover the lease.
    conf.SetBoolean(DFSConfigKeys.DfsPermissionsEnabledKey, false);
    conf.SetInt(DFSConfigKeys.DfsBlockSizeKey, BlockSize);
    FSDataOutputStream stm = null;
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NnTopology(MiniDFSNNTopology
        .SimpleHATopology()).NumDataNodes(3).Build();
    try
    {
        cluster.WaitActive();
        cluster.TransitionToActive(0);
        // Give the cluster a moment to settle after the transition.
        Sharpen.Thread.Sleep(500);
        Log.Info("Starting with NN 0 active");
        FileSystem fs = HATestUtil.ConfigureFailoverFs(cluster, conf);
        stm = fs.Create(TestPath);
        // write a block and a half
        AppendTestUtil.Write(stm, 0, BlockAndAHalf);
        stm.Hflush();
        Log.Info("Failing over to NN 1");
        cluster.TransitionToStandby(0);
        cluster.TransitionToActive(1);
        NUnit.Framework.Assert.IsTrue(fs.Exists(TestPath));
        // The stream is deliberately left open to simulate a crashed client;
        // a different user recovers the lease on the new active.
        FileSystem fsOtherUser = CreateFsAsOtherUser(cluster, conf);
        LoopRecoverLease(fsOtherUser, TestPath);
        AppendTestUtil.Check(fs, TestPath, BlockAndAHalf);
        // Fail back to ensure that the block locations weren't lost on the
        // original node.
        cluster.TransitionToStandby(1);
        cluster.TransitionToActive(0);
        AppendTestUtil.Check(fs, TestPath, BlockAndAHalf);
    }
    finally
    {
        IOUtils.CloseStream(stm);
        cluster.Shutdown();
    }
}
/// <summary>
/// Truncates a two-block file down to one block through the proxied
/// FileSystem and verifies the reported length and remaining contents.
/// Skipped when running against a local filesystem.
/// </summary>
/// <exception cref="System.Exception"/>
private void TestTruncate()
{
    if (!IsLocalFS())
    {
        short repl = 3;
        int blockSize = 1024;
        int numOfBlocks = 2;
        FileSystem fs = FileSystem.Get(GetProxiedFSConf());
        // Fixed: close the filesystem in a finally block so a failing
        // assertion does not leak the connection.
        try
        {
            fs.Mkdirs(GetProxiedFSTestDir());
            Path file = new Path(GetProxiedFSTestDir(), "foo.txt");
            byte[] data = FileSystemTestHelper.GetFileData(numOfBlocks, blockSize);
            FileSystemTestHelper.CreateFile(fs, file, data, blockSize, repl);
            int newLength = blockSize;
            // Truncating to an exact block boundary completes without recovery.
            bool isReady = fs.Truncate(file, newLength);
            NUnit.Framework.Assert.IsTrue("Recovery is not expected.", isReady);
            FileStatus fileStatus = fs.GetFileStatus(file);
            // Fixed: expected-first argument order (was swapped).
            NUnit.Framework.Assert.AreEqual(newLength, fileStatus.GetLen());
            AppendTestUtil.CheckFullFile(fs, file, newLength, data, file.ToString());
        }
        finally
        {
            fs.Close();
        }
    }
}
/// <summary>
/// One iteration of the stress action: overwrite the test file with 100
/// bytes, flush it, have the other user force lease recovery, then verify
/// the recovered contents.
/// </summary>
/// <exception cref="System.Exception"/>
public override void DoAnAction()
{
    FSDataOutputStream stream = fs.Create(path, true);
    try
    {
        // Write and flush some data so the file has an in-progress block.
        AppendTestUtil.Write(stream, 0, 100);
        stream.Hflush();
        // Recover the lease as another user, then confirm the data survived.
        LoopRecoverLease(fsOtherUser, path);
        AppendTestUtil.Check(fs, path, 100);
    }
    finally
    {
        try
        {
            stream.Close();
        }
        catch (IOException)
        {
            // Best-effort close: lease recovery may already have closed the
            // file out from under us, so an IOException here is expected.
        }
    }
}
/// <summary>
/// Tests that an RBW block report which is delayed until after a namenode
/// failover does not cause the new active to mark replicas corrupt, and
/// that the file remains readable afterwards.
/// </summary>
public virtual void TestRBWReportArrivesAfterEdits()
{
    CountDownLatch brFinished = new CountDownLatch(1);
    // Anonymous-class answer (see _DelayAnswer_521) that counts down the
    // latch to inform the test that our block report went through.
    GenericTestUtils.DelayAnswer delayer = new _DelayAnswer_521(brFinished, Log);
    // inform the test that our block report went through.
    FSDataOutputStream @out = fs.Create(TestFilePath);
    try
    {
        AppendTestUtil.Write(@out, 0, 10);
        @out.Hflush();
        // Spy on the DN's connection to the standby (nn2) so we can hold
        // its block report.
        DataNode dn = cluster.GetDataNodes()[0];
        DatanodeProtocolClientSideTranslatorPB spy = DataNodeTestUtils.SpyOnBposToNN(dn, nn2);
        Org.Mockito.Mockito.DoAnswer(delayer).When(spy).BlockReport(
            Org.Mockito.Mockito.AnyObject<DatanodeRegistration>(),
            Org.Mockito.Mockito.AnyString(),
            Org.Mockito.Mockito.AnyObject<StorageBlockReport[]>(),
            Org.Mockito.Mockito.AnyObject<BlockReportContext>());
        // Trigger a block report and wait until the RPC is in flight but
        // blocked before reaching the NN.
        dn.ScheduleAllBlockReport(0);
        delayer.WaitForCall();
    }
    finally
    {
        IOUtils.CloseStream(@out);
    }
    // Fail over while the block report is still held back.
    cluster.TransitionToStandby(0);
    cluster.TransitionToActive(1);
    // Release the delayed report and wait for it to be processed.
    delayer.Proceed();
    brFinished.Await();
    // Verify that no replicas are marked corrupt, and that the
    // file is readable from the failed-over standby.
    BlockManagerTestUtil.UpdateState(nn1.GetNamesystem().GetBlockManager());
    BlockManagerTestUtil.UpdateState(nn2.GetNamesystem().GetBlockManager());
    NUnit.Framework.Assert.AreEqual(0, nn1.GetNamesystem().GetCorruptReplicaBlocks());
    NUnit.Framework.Assert.AreEqual(0, nn2.GetNamesystem().GetCorruptReplicaBlocks());
    DFSTestUtil.ReadFile(fs, TestFilePath);
}
/// <summary>Test race between delete operation and commitBlockSynchronization method.
/// </summary>
/// <remarks>
/// Test race between delete operation and commitBlockSynchronization method.
/// See HDFS-6825.
/// </remarks>
/// <param name="hasSnapshot"/>
/// <exception cref="System.Exception"/>
private void TestDeleteAndCommitBlockSynchronizationRace(bool hasSnapshot)
{
    Log.Info("Start testing, hasSnapshot: " + hasSnapshot);
    // Each entry is (path to test, whether to recreate the deleted parent
    // directory while commitBlockSynchronization is held back).
    AList<AbstractMap.SimpleImmutableEntry<string, bool>> testList =
        new AList<AbstractMap.SimpleImmutableEntry<string, bool>>();
    testList.AddItem(new AbstractMap.SimpleImmutableEntry<string, bool>("/test-file", false));
    testList.AddItem(new AbstractMap.SimpleImmutableEntry<string, bool>("/test-file1", true));
    testList.AddItem(new AbstractMap.SimpleImmutableEntry<string, bool>("/testdir/testdir1/test-file", false));
    testList.AddItem(new AbstractMap.SimpleImmutableEntry<string, bool>("/testdir/testdir1/test-file1", true));
    Path rootPath = new Path("/");
    Configuration conf = new Configuration();
    // Disable permissions so that another user can recover the lease.
    conf.SetBoolean(DFSConfigKeys.DfsPermissionsEnabledKey, false);
    conf.SetInt(DFSConfigKeys.DfsBlockSizeKey, BlockSize);
    FSDataOutputStream stm = null;
    // Cache of NN spies per datanode so each DN is only spied on once.
    IDictionary<DataNode, DatanodeProtocolClientSideTranslatorPB> dnMap =
        new Dictionary<DataNode, DatanodeProtocolClientSideTranslatorPB>();
    try
    {
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
        cluster.WaitActive();
        DistributedFileSystem fs = cluster.GetFileSystem();
        int stId = 0;
        foreach (AbstractMap.SimpleImmutableEntry<string, bool> stest in testList)
        {
            string testPath = stest.Key;
            bool mkSameDir = stest.Value;
            Log.Info("test on " + testPath + " mkSameDir: " + mkSameDir + " snapshot: " + hasSnapshot);
            Path fPath = new Path(testPath);
            //find grandest non-root parent
            Path grandestNonRootParent = fPath;
            while (!grandestNonRootParent.GetParent().Equals(rootPath))
            {
                grandestNonRootParent = grandestNonRootParent.GetParent();
            }
            stm = fs.Create(fPath);
            Log.Info("test on " + testPath + " created " + fPath);
            // write a half block
            AppendTestUtil.Write(stm, 0, BlockSize / 2);
            stm.Hflush();
            if (hasSnapshot)
            {
                // Snapshot the root so the deleted file stays referenced.
                SnapshotTestHelper.CreateSnapshot(fs, rootPath, "st" + stId.ToString());
                ++stId;
            }
            // Look into the block manager on the active node for the block
            // under construction.
            NameNode nn = cluster.GetNameNode();
            ExtendedBlock blk = DFSTestUtil.GetFirstBlock(fs, fPath);
            DatanodeDescriptor expectedPrimary = DFSTestUtil.GetExpectedPrimaryNode(nn, blk);
            Log.Info("Expecting block recovery to be triggered on DN " + expectedPrimary);
            // Find the corresponding DN daemon, and spy on its connection to the
            // active.
            DataNode primaryDN = cluster.GetDataNode(expectedPrimary.GetIpcPort());
            DatanodeProtocolClientSideTranslatorPB nnSpy = dnMap[primaryDN];
            if (nnSpy == null)
            {
                nnSpy = DataNodeTestUtils.SpyOnBposToNN(primaryDN, nn);
                dnMap[primaryDN] = nnSpy;
            }
            // Delay the commitBlockSynchronization call
            GenericTestUtils.DelayAnswer delayer = new GenericTestUtils.DelayAnswer(Log);
            Org.Mockito.Mockito.DoAnswer(delayer).When(nnSpy).CommitBlockSynchronization(
                Org.Mockito.Mockito.Eq(blk),
                Org.Mockito.Mockito.AnyInt(),   // new genstamp
                Org.Mockito.Mockito.AnyLong(),  // new length
                Org.Mockito.Mockito.Eq(true),   // close file
                Org.Mockito.Mockito.Eq(false),  // delete block
                (DatanodeID[])Org.Mockito.Mockito.AnyObject(),  // new targets
                (string[])Org.Mockito.Mockito.AnyObject());     // new target storages
            fs.RecoverLease(fPath);
            Log.Info("Waiting for commitBlockSynchronization call from primary");
            delayer.WaitForCall();
            // Race: delete the ancestor directory while the commit is held.
            Log.Info("Deleting recursively " + grandestNonRootParent);
            fs.Delete(grandestNonRootParent, true);
            if (mkSameDir && !grandestNonRootParent.ToString().Equals(testPath))
            {
                Log.Info("Recreate dir " + grandestNonRootParent + " testpath: " + testPath);
                fs.Mkdirs(grandestNonRootParent);
            }
            delayer.Proceed();
            Log.Info("Now wait for result");
            delayer.WaitForResult();
            Exception t = delayer.GetThrown();
            if (t != null)
            {
                Log.Info("Result exception (snapshot: " + hasSnapshot + "): " + t);
            }
        }
        // end of loop each fPath
        // The real check: the NN must be able to restart (replay its edit
        // log) after all of the above races.
        Log.Info("Now check we can restart");
        cluster.RestartNameNodes();
        Log.Info("Restart finished");
    }
    finally
    {
        if (stm != null)
        {
            IOUtils.CloseStream(stm);
        }
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/// <summary>
/// Exercises a series of raw WebHDFS HTTP requests and verifies the
/// response codes and payloads: GETHOMEDIRECTORY (authorized and
/// unauthorized doAs), malformed SETOWNER/SETPERMISSION/SETREPLICATION
/// requests, GETFILESTATUS on a missing file, append, CREATE with the
/// namenode-address parameter stripped, JSON parsing of a non-JSON
/// response, and CREATE on a path containing spaces.
/// </summary>
/// <exception cref="System.IO.IOException"/>
public virtual void TestResponseCode()
{
    WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fs;
    Path root = new Path("/");
    Path dir = new Path("/test/testUrl");
    NUnit.Framework.Assert.IsTrue(webhdfs.Mkdirs(dir));
    Path file = new Path("/test/file");
    FSDataOutputStream @out = webhdfs.Create(file);
    @out.Write(1);
    @out.Close();
    {
        //test GETHOMEDIRECTORY
        Uri url = webhdfs.ToUrl(GetOpParam.OP.Gethomedirectory, root);
        HttpURLConnection conn = (HttpURLConnection)url.OpenConnection();
        IDictionary<object, object> m = WebHdfsTestUtil.ConnectAndGetJson(conn, HttpServletResponse.ScOk);
        NUnit.Framework.Assert.AreEqual(WebHdfsFileSystem.GetHomeDirectoryString(ugi), m[typeof(Path).Name]);
        conn.Disconnect();
    }
    {
        //test GETHOMEDIRECTORY with unauthorized doAs
        Uri url = webhdfs.ToUrl(GetOpParam.OP.Gethomedirectory, root, new DoAsParam(ugi.GetShortUserName() + "proxy"));
        HttpURLConnection conn = (HttpURLConnection)url.OpenConnection();
        conn.Connect();
        NUnit.Framework.Assert.AreEqual(HttpServletResponse.ScForbidden, conn.GetResponseCode());
        conn.Disconnect();
    }
    {
        //test set owner with empty parameters
        Uri url = webhdfs.ToUrl(PutOpParam.OP.Setowner, dir);
        HttpURLConnection conn = (HttpURLConnection)url.OpenConnection();
        conn.Connect();
        NUnit.Framework.Assert.AreEqual(HttpServletResponse.ScBadRequest, conn.GetResponseCode());
        conn.Disconnect();
    }
    {
        //test set replication on a directory
        HttpOpParam.OP op = PutOpParam.OP.Setreplication;
        Uri url = webhdfs.ToUrl(op, dir);
        HttpURLConnection conn = (HttpURLConnection)url.OpenConnection();
        conn.SetRequestMethod(op.GetType().ToString());
        conn.Connect();
        // Request succeeds but setting replication on a directory is a no-op,
        // hence the false return from SetReplication below.
        NUnit.Framework.Assert.AreEqual(HttpServletResponse.ScOk, conn.GetResponseCode());
        NUnit.Framework.Assert.IsFalse(webhdfs.SetReplication(dir, (short)1));
        conn.Disconnect();
    }
    {
        //test get file status for a non-exist file.
        Path p = new Path(dir, "non-exist");
        Uri url = webhdfs.ToUrl(GetOpParam.OP.Getfilestatus, p);
        HttpURLConnection conn = (HttpURLConnection)url.OpenConnection();
        conn.Connect();
        NUnit.Framework.Assert.AreEqual(HttpServletResponse.ScNotFound, conn.GetResponseCode());
        conn.Disconnect();
    }
    {
        //test set permission with empty parameters
        HttpOpParam.OP op = PutOpParam.OP.Setpermission;
        Uri url = webhdfs.ToUrl(op, dir);
        HttpURLConnection conn = (HttpURLConnection)url.OpenConnection();
        conn.SetRequestMethod(op.GetType().ToString());
        conn.Connect();
        NUnit.Framework.Assert.AreEqual(HttpServletResponse.ScOk, conn.GetResponseCode());
        NUnit.Framework.Assert.AreEqual(0, conn.GetContentLength());
        NUnit.Framework.Assert.AreEqual(MediaType.ApplicationOctetStream, conn.GetContentType());
        // With no parameters the permission must be unchanged (0755 = 0x1ed).
        NUnit.Framework.Assert.AreEqual((short)0x1ed, webhdfs.GetFileStatus(dir).GetPermission().ToShort());
        conn.Disconnect();
    }
    {
        //test append.
        AppendTestUtil.TestAppend(fs, new Path(dir, "append"));
    }
    {
        //test NamenodeAddressParam not set.
        HttpOpParam.OP op = PutOpParam.OP.Create;
        Uri url = webhdfs.ToUrl(op, dir);
        HttpURLConnection conn = (HttpURLConnection)url.OpenConnection();
        conn.SetRequestMethod(op.GetType().ToString());
        conn.SetDoOutput(false);
        conn.SetInstanceFollowRedirects(false);
        conn.Connect();
        string redirect = conn.GetHeaderField("Location");
        conn.Disconnect();
        //remove NamenodeAddressParam
        WebHdfsFileSystem.Log.Info("redirect = " + redirect);
        int i = redirect.IndexOf(NamenodeAddressParam.Name);
        int j = redirect.IndexOf("&", i);
        // Splice the redirect URL around the namenode-address query parameter
        // (also dropping the preceding separator character).
        string modified = Sharpen.Runtime.Substring(redirect, 0, i - 1) + Sharpen.Runtime.Substring(redirect, j);
        WebHdfsFileSystem.Log.Info("modified = " + modified);
        //connect to datanode
        conn = (HttpURLConnection)new Uri(modified).OpenConnection();
        conn.SetRequestMethod(op.GetType().ToString());
        conn.SetDoOutput(op.GetDoOutput());
        conn.Connect();
        NUnit.Framework.Assert.AreEqual(HttpServletResponse.ScBadRequest, conn.GetResponseCode());
    }
    {
        //test jsonParse with non-json type.
        HttpOpParam.OP op = GetOpParam.OP.Open;
        Uri url = webhdfs.ToUrl(op, file);
        HttpURLConnection conn = (HttpURLConnection)url.OpenConnection();
        conn.SetRequestMethod(op.GetType().ToString());
        conn.Connect();
        try
        {
            // OPEN returns file bytes, not JSON, so parsing must fail.
            WebHdfsFileSystem.JsonParse(conn, false);
            Fail();
        }
        catch (IOException ioe)
        {
            WebHdfsFileSystem.Log.Info("GOOD", ioe);
        }
        conn.Disconnect();
    }
    {
        //test create with path containing spaces
        HttpOpParam.OP op = PutOpParam.OP.Create;
        Path path = new Path("/test/path with spaces");
        Uri url = webhdfs.ToUrl(op, path);
        HttpURLConnection conn = (HttpURLConnection)url.OpenConnection();
        conn.SetRequestMethod(op.GetType().ToString());
        conn.SetDoOutput(false);
        conn.SetInstanceFollowRedirects(false);
        string redirect;
        try
        {
            // First hop: NN redirects the create to a datanode.
            conn.Connect();
            NUnit.Framework.Assert.AreEqual(HttpServletResponse.ScTemporaryRedirect, conn.GetResponseCode());
            redirect = conn.GetHeaderField("Location");
        }
        finally
        {
            conn.Disconnect();
        }
        // Second hop: follow the redirect to the DN; create must succeed.
        conn = (HttpURLConnection)new Uri(redirect).OpenConnection();
        conn.SetRequestMethod(op.GetType().ToString());
        conn.SetDoOutput(op.GetDoOutput());
        try
        {
            conn.Connect();
            NUnit.Framework.Assert.AreEqual(HttpServletResponse.ScCreated, conn.GetResponseCode());
        }
        finally
        {
            conn.Disconnect();
        }
    }
}
/// <summary>
/// Test that we cannot read a file beyond its snapshot length
/// when accessing it via a snapshot path.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestSnapshotfileLength()
{
    hdfs.Mkdirs(sub);
    int bytesRead;
    byte[] buffer = new byte[Blocksize * 8];
    int origLen = Blocksize + 1;
    int toAppend = Blocksize;
    FSDataInputStream fis = null;
    FileStatus fileStatus = null;
    // Create and write a file.
    Path file1 = new Path(sub, file1Name);
    DFSTestUtil.CreateFile(hdfs, file1, Blocksize, 0, Blocksize, Replication, Seed);
    DFSTestUtil.AppendFile(hdfs, file1, origLen);
    // Create a snapshot on the parent directory.
    hdfs.AllowSnapshot(sub);
    hdfs.CreateSnapshot(sub, snapshot1);
    Path file1snap1 = SnapshotTestHelper.GetSnapshotPath(sub, snapshot1, file1Name);
    FileChecksum snapChksum1 = hdfs.GetFileChecksum(file1snap1);
    Assert.AssertThat("file and snapshot file checksums are not equal",
        hdfs.GetFileChecksum(file1), CoreMatchers.Is(snapChksum1));
    // Append to the file.
    FSDataOutputStream @out = hdfs.Append(file1);
    // Nothing has been appended yet. All checksums should still be equal.
    Assert.AssertThat("file and snapshot checksums (open for append) are not equal",
        hdfs.GetFileChecksum(file1), CoreMatchers.Is(snapChksum1));
    Assert.AssertThat("snapshot checksum (post-open for append) has changed",
        hdfs.GetFileChecksum(file1snap1), CoreMatchers.Is(snapChksum1));
    try
    {
        AppendTestUtil.Write(@out, 0, toAppend);
        // Test reading from snapshot of file that is open for append
        byte[] dataFromSnapshot = DFSTestUtil.ReadFileBuffer(hdfs, file1snap1);
        Assert.AssertThat("Wrong data size in snapshot.", dataFromSnapshot.Length, CoreMatchers.Is(origLen));
        // Verify that checksum didn't change
        Assert.AssertThat("snapshot file checksum (pre-close) has changed",
            hdfs.GetFileChecksum(file1), CoreMatchers.Is(snapChksum1));
        Assert.AssertThat("snapshot checksum (post-append) has changed",
            hdfs.GetFileChecksum(file1snap1), CoreMatchers.Is(snapChksum1));
    }
    finally
    {
        @out.Close();
    }
    // After close, the live file's checksum must differ from the snapshot's,
    // while the snapshot's checksum stays frozen.
    Assert.AssertThat("file and snapshot file checksums (post-close) are equal",
        hdfs.GetFileChecksum(file1), CoreMatchers.Not(snapChksum1));
    Assert.AssertThat("snapshot file checksum (post-close) has changed",
        hdfs.GetFileChecksum(file1snap1), CoreMatchers.Is(snapChksum1));
    // Make sure we can read the entire file via its non-snapshot path.
    fileStatus = hdfs.GetFileStatus(file1);
    Assert.AssertThat(fileStatus.GetLen(), CoreMatchers.Is((long)origLen + toAppend));
    fis = hdfs.Open(file1);
    bytesRead = fis.Read(0, buffer, 0, buffer.Length);
    Assert.AssertThat(bytesRead, CoreMatchers.Is(origLen + toAppend));
    fis.Close();
    // Try to open the file via its snapshot path.
    fis = hdfs.Open(file1snap1);
    fileStatus = hdfs.GetFileStatus(file1snap1);
    Assert.AssertThat(fileStatus.GetLen(), CoreMatchers.Is((long)origLen));
    // Make sure we can only read up to the snapshot length.
    bytesRead = fis.Read(0, buffer, 0, buffer.Length);
    Assert.AssertThat(bytesRead, CoreMatchers.Is(origLen));
    fis.Close();
    byte[] dataFromSnapshot_1 = DFSTestUtil.ReadFileBuffer(hdfs, file1snap1);
    Assert.AssertThat("Wrong data size in snapshot.", dataFromSnapshot_1.Length, CoreMatchers.Is(origLen));
}
/// <summary>
/// Test the scenario where the NN fails over after issuing a block
/// synchronization request, but before it is committed.
/// </summary>
/// <remarks>
/// Test the scenario where the NN fails over after issuing a block
/// synchronization request, but before it is committed. The
/// DN running the recovery should then fail to commit the synchronization
/// and a later retry will succeed.
/// </remarks>
/// <exception cref="System.Exception"/>
public virtual void TestFailoverRightBeforeCommitSynchronization()
{
    Configuration conf = new Configuration();
    // Disable permissions so that another user can recover the lease.
    conf.SetBoolean(DFSConfigKeys.DfsPermissionsEnabledKey, false);
    conf.SetInt(DFSConfigKeys.DfsBlockSizeKey, BlockSize);
    FSDataOutputStream stm = null;
    // 2-NN HA topology with 3 DNs so the half-written block has a pipeline
    // and a primary DN can be elected for recovery.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NnTopology(MiniDFSNNTopology
        .SimpleHATopology()).NumDataNodes(3).Build();
    try
    {
        cluster.WaitActive();
        cluster.TransitionToActive(0);
        Sharpen.Thread.Sleep(500);
        Log.Info("Starting with NN 0 active");
        FileSystem fs = HATestUtil.ConfigureFailoverFs(cluster, conf);
        stm = fs.Create(TestPath);
        // write a half block and hflush so it is visible on the DNs but the
        // file stays open (under construction) for lease recovery to target.
        AppendTestUtil.Write(stm, 0, BlockSize / 2);
        stm.Hflush();
        // Look into the block manager on the active node for the block
        // under construction.
        NameNode nn0 = cluster.GetNameNode(0);
        ExtendedBlock blk = DFSTestUtil.GetFirstBlock(fs, TestPath);
        DatanodeDescriptor expectedPrimary = DFSTestUtil.GetExpectedPrimaryNode(nn0, blk);
        Log.Info("Expecting block recovery to be triggered on DN " + expectedPrimary);
        // Find the corresponding DN daemon, and spy on its connection to the
        // active.
        DataNode primaryDN = cluster.GetDataNode(expectedPrimary.GetIpcPort());
        DatanodeProtocolClientSideTranslatorPB nnSpy = DataNodeTestUtils.SpyOnBposToNN(primaryDN
            , nn0);
        // Delay the commitBlockSynchronization call so we can fail over while
        // the RPC is in flight. The matcher arguments are, in order:
        // new genstamp, new length, close file, delete block,
        // new targets, new target storages.
        GenericTestUtils.DelayAnswer delayer = new GenericTestUtils.DelayAnswer(Log);
        Org.Mockito.Mockito.DoAnswer(delayer).When(nnSpy).CommitBlockSynchronization(Org.Mockito.Mockito
            .Eq(blk), Org.Mockito.Mockito.AnyInt(), Org.Mockito.Mockito.AnyLong(), Org.Mockito.Mockito
            .Eq(true), Org.Mockito.Mockito.Eq(false), (DatanodeID[])Org.Mockito.Mockito.AnyObject
            (), (string[])Org.Mockito.Mockito.AnyObject());
        // Trigger lease recovery as a different user (permissions are disabled
        // above to allow this). RecoverLease returns false: recovery started
        // but is not yet complete.
        DistributedFileSystem fsOtherUser = CreateFsAsOtherUser(cluster, conf);
        NUnit.Framework.Assert.IsFalse(fsOtherUser.RecoverLease(TestPath));
        Log.Info("Waiting for commitBlockSynchronization call from primary");
        delayer.WaitForCall();
        // Fail over while commitBlockSynchronization is held up by the delayer.
        Log.Info("Failing over to NN 1");
        cluster.TransitionToStandby(0);
        cluster.TransitionToActive(1);
        // Let the commitBlockSynchronization call go through, and check that
        // it failed with the correct exception.
        delayer.Proceed();
        delayer.WaitForResult();
        Exception t = delayer.GetThrown();
        if (t == null)
        {
            NUnit.Framework.Assert.Fail("commitBlockSynchronization call did not fail on standby"
                );
        }
        // The now-standby NN 0 must reject the write-category operation.
        GenericTestUtils.AssertExceptionContains("Operation category WRITE is not supported"
            , t);
        // Now, if we try again to recover the block, it should succeed on the new
        // active.
        LoopRecoverLease(fsOtherUser, TestPath);
        // The recovered file retains the half block that was hflushed.
        AppendTestUtil.Check(fs, TestPath, BlockSize / 2);
    }
    finally
    {
        IOUtils.CloseStream(stm);
        cluster.Shutdown();
    }
}
/// <summary>
/// Tests that block messages (RBW and blockReceived) generated by create,
/// hflush, append, and close are queued on the standby NameNode, and that
/// after failover no replicas are marked corrupt and the file is readable.
/// </summary>
public virtual void TestQueueingWithAppend()
{
    // Running total of data-node messages we expect the standby (NN 1) to
    // have queued; compared against GetPendingDataNodeMessageCount() below.
    int numQueued = 0;
    int numDN = cluster.GetDataNodes().Count;
    // case 1: create file and call hflush after write
    FSDataOutputStream @out = fs.Create(TestFilePath);
    try
    {
        AppendTestUtil.Write(@out, 0, 10);
        @out.Hflush();
        // Opening the file will report RBW replicas, but will be
        // queued on the StandbyNode.
        // However, the delivery of RBW messages is delayed by HDFS-7217 fix.
        // Apply cluster.triggerBlockReports() to trigger the reporting sooner.
        // cluster.TriggerBlockReports();
        numQueued += numDN;    // RBW messages
        // The cluster.triggerBlockReports() call above does a full
        // block report that incurs 3 extra RBW messages
        numQueued += numDN;    // RBW messages
    }
    finally
    {
        IOUtils.CloseStream(@out);
        numQueued += numDN;    // blockReceived messages
    }
    cluster.TriggerBlockReports();
    numQueued += numDN;
    NUnit.Framework.Assert.AreEqual(numQueued, cluster.GetNameNode(1).GetNamesystem()
        .GetPendingDataNodeMessageCount());
    // case 2: append to file and call hflush after write
    try
    {
        @out = fs.Append(TestFilePath);
        AppendTestUtil.Write(@out, 10, 10);
        @out.Hflush();
        cluster.TriggerBlockReports();
        numQueued += numDN * 2;    // RBW messages, see comments in case 1
    }
    finally
    {
        IOUtils.CloseStream(@out);
        numQueued += numDN;    // blockReceived
    }
    NUnit.Framework.Assert.AreEqual(numQueued, cluster.GetNameNode(1).GetNamesystem()
        .GetPendingDataNodeMessageCount());
    // case 3: similar to case 2, except no hflush is called.
    try
    {
        @out = fs.Append(TestFilePath);
        AppendTestUtil.Write(@out, 20, 10);
    }
    finally
    {
        // The write operation in the try block is buffered, thus no RBW message
        // is reported yet until the closeStream call here. When closeStream is
        // called, before HDFS-7217 fix, there would be three RBW messages
        // (blockReceiving), plus three FINALIZED messages (blockReceived)
        // delivered to NN. However, because of HDFS-7217 fix, the reporting of
        // RBW messages is postponed. In this case, they are even overwritten
        // by the blockReceived messages of the same block when they are waiting
        // to be delivered. All this happens within the closeStream() call.
        // What's delivered to NN is the three blockReceived messages. See
        // BPServiceActor#addPendingReplicationBlockInfo
        //
        IOUtils.CloseStream(@out);
        numQueued += numDN;    // blockReceived
    }
    cluster.TriggerBlockReports();
    numQueued += numDN;
    Log.Info("Expect " + numQueued + " and got: " + cluster.GetNameNode(1).GetNamesystem
        ().GetPendingDataNodeMessageCount());
    NUnit.Framework.Assert.AreEqual(numQueued, cluster.GetNameNode(1).GetNamesystem()
        .GetPendingDataNodeMessageCount());
    // Fail over so the queued messages on NN 1 are processed.
    cluster.TransitionToStandby(0);
    cluster.TransitionToActive(1);
    // Verify that no replicas are marked corrupt, and that the
    // file is readable from the failed-over standby.
    BlockManagerTestUtil.UpdateState(nn1.GetNamesystem().GetBlockManager());
    BlockManagerTestUtil.UpdateState(nn2.GetNamesystem().GetBlockManager());
    NUnit.Framework.Assert.AreEqual(0, nn1.GetNamesystem().GetCorruptReplicaBlocks());
    NUnit.Framework.Assert.AreEqual(0, nn2.GetNamesystem().GetCorruptReplicaBlocks());
    // 30 bytes total: 10 from create + 10 + 10 from the two appends.
    AppendTestUtil.Check(fs, TestFilePath, 30);
}
/// <summary>
/// Tests that a standby NameNode that is still catching up on edits can
/// correctly apply a sequence of appends and a truncate queued ahead of the
/// corresponding block reports, then serve the files after failover.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestMultipleAppendsDuringCatchupTailing()
{
    Configuration conf = new Configuration();
    // Set a lengthy edits-tailing period, and explicit rolling, so we can
    // control the ingest of edits by the standby for this test.
    conf.Set(DFSConfigKeys.DfsHaTaileditsPeriodKey, "5000");
    conf.SetInt(DFSConfigKeys.DfsHaLogrollPeriodKey, -1);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NnTopology(MiniDFSNNTopology
        .SimpleHATopology()).NumDataNodes(3).Build();
    FileSystem fs = null;
    try
    {
        cluster.TransitionToActive(0);
        fs = HATestUtil.ConfigureFailoverFs(cluster, conf);
        Path fileToAppend = new Path("/FileToAppend");
        Path fileToTruncate = new Path("/FileToTruncate");
        // Random data split into Count segments for the appends, and one
        // random cut point for the truncate.
        byte[] data = new byte[1 << 16];
        DFSUtil.GetRandom().NextBytes(data);
        int[] appendPos = AppendTestUtil.RandomFilePartition(data.Length, Count);
        int[] truncatePos = AppendTestUtil.RandomFilePartition(data.Length, 1);
        // Create file, write some data, and hflush so that the first
        // block is in the edit log prior to roll.
        FSDataOutputStream @out = CreateAndHflush(fs, fileToAppend, data, appendPos[0]);
        FSDataOutputStream out4Truncate = CreateAndHflush(fs, fileToTruncate, data, data.
            Length);
        // Let the StandbyNode catch the creation of the file.
        cluster.GetNameNode(0).GetRpcServer().RollEditLog();
        cluster.GetNameNode(1).GetNamesystem().GetEditLogTailer().DoTailEdits();
        @out.Close();
        out4Truncate.Close();
        // Append and re-close a few time, so that many block entries are queued.
        for (int i = 0; i < Count; i++)
        {
            // Each iteration appends segment [appendPos[i], end); the last
            // iteration runs to the end of the data buffer.
            int end = i < Count - 1 ? appendPos[i + 1] : data.Length;
            @out = fs.Append(fileToAppend);
            @out.Write(data, appendPos[i], end - appendPos[i]);
            @out.Close();
        }
        // Truncate may require block recovery, in which case it returns false.
        bool isTruncateReady = fs.Truncate(fileToTruncate, truncatePos[0]);
        // Ensure that blocks have been reported to the SBN ahead of the edits
        // arriving.
        cluster.TriggerBlockReports();
        // Failover the current standby to active.
        cluster.ShutdownNameNode(0);
        cluster.TransitionToActive(1);
        // Check the FSCK doesn't detect any bad blocks on the SBN.
        int rc = ToolRunner.Run(new DFSck(cluster.GetConfiguration(1)), new string[] { "/"
            , "-files", "-blocks" });
        NUnit.Framework.Assert.AreEqual(0, rc);
        NUnit.Framework.Assert.AreEqual("CorruptBlocks should be empty.", 0, cluster.GetNameNode
            (1).GetNamesystem().GetCorruptReplicaBlocks());
        // The appended file must contain the full data buffer.
        AppendTestUtil.CheckFullFile(fs, fileToAppend, data.Length, data, fileToAppend.ToString
            ());
        if (!isTruncateReady)
        {
            // Wait for the truncate's block recovery to finish on the new active.
            TestFileTruncate.CheckBlockRecovery(fileToTruncate, cluster.GetFileSystem(1));
        }
        // The truncated file must contain exactly the data up to the cut point.
        AppendTestUtil.CheckFullFile(fs, fileToTruncate, truncatePos[0], data, fileToTruncate
            .ToString());
    }
    finally
    {
        if (null != cluster)
        {
            cluster.Shutdown();
        }
        if (null != fs)
        {
            fs.Close();
        }
    }
}