/// <summary>Stress test to ensure we are not leaking reserved space.</summary>
/// <exception cref="System.IO.IOException"/>
/// <exception cref="System.Exception"/>
public virtual void StressTest()
{
    int numWriters = 5;
    StartCluster(SmallBlockSize, 1, SmallBlockSize * numWriters * 10);
    TestRbwSpaceReservation.Writer[] writers = new TestRbwSpaceReservation.Writer[numWriters];
    // Start a few writers and let them run for a while.
    for (int i = 0; i < numWriters; ++i)
    {
        writers[i] = new TestRbwSpaceReservation.Writer(client, SmallBlockSize);
        writers[i].Start();
    }
    Sharpen.Thread.Sleep(60000);
    // Stop the writers.
    foreach (TestRbwSpaceReservation.Writer w in writers)
    {
        w.StopWriter();
    }
    int filesCreated = 0;
    int numFailures = 0;
    foreach (TestRbwSpaceReservation.Writer w in writers)
    {
        w.Join();
        filesCreated += w.GetFilesCreated();
        numFailures += w.GetNumFailures();
    }
    Log.Info("Stress test created " + filesCreated + " files and hit " + numFailures + " failures");
    // Check no space was leaked.
    Assert.AssertThat(singletonVolume.GetReservedForRbw(), IS.Is(0L));
}
public virtual void TestAddVolumeFailures()
{
    StartDFSCluster(1, 1);
    string dataDir = cluster.GetDataDirectory();
    DataNode dn = cluster.GetDataNodes()[0];
    IList<string> newDirs = Lists.NewArrayList();
    int NumNewDirs = 4;
    for (int i = 0; i < NumNewDirs; i++)
    {
        FilePath newVolume = new FilePath(dataDir, "new_vol" + i);
        newDirs.AddItem(newVolume.ToString());
        if (i % 2 == 0)
        {
            // Make addVolume() fail.
            newVolume.CreateNewFile();
        }
    }
    string newValue = dn.GetConf().Get(DFSConfigKeys.DfsDatanodeDataDirKey) + "," +
        Joiner.On(",").Join(newDirs);
    try
    {
        dn.ReconfigurePropertyImpl(DFSConfigKeys.DfsDatanodeDataDirKey, newValue);
        NUnit.Framework.Assert.Fail("Expect to throw IOException.");
    }
    catch (ReconfigurationException e)
    {
        string errorMessage = e.InnerException.Message;
        string[] messages = errorMessage.Split("\\r?\\n");
        NUnit.Framework.Assert.AreEqual(2, messages.Length);
        Assert.AssertThat(messages[0], CoreMatchers.ContainsString("new_vol0"));
        Assert.AssertThat(messages[1], CoreMatchers.ContainsString("new_vol2"));
    }
    // Make sure that vol0 and vol2's metadata are not left in memory.
    FsDatasetSpi<object> dataset = dn.GetFSDataset();
    foreach (FsVolumeSpi volume in dataset.GetVolumes())
    {
        Assert.AssertThat(volume.GetBasePath(),
            IS.Is(CoreMatchers.Not(CoreMatchers.AnyOf(IS.Is(newDirs[0]), IS.Is(newDirs[2])))));
    }
    DataStorage storage = dn.GetStorage();
    for (int i = 0; i < storage.GetNumStorageDirs(); i++)
    {
        Storage.StorageDirectory sd = storage.GetStorageDir(i);
        Assert.AssertThat(sd.GetRoot().ToString(),
            IS.Is(CoreMatchers.Not(CoreMatchers.AnyOf(IS.Is(newDirs[0]), IS.Is(newDirs[2])))));
    }
    // The newly effective conf does not have vol0 and vol2.
    string[] effectiveVolumes = dn.GetConf().Get(DFSConfigKeys.DfsDatanodeDataDirKey).Split(",");
    NUnit.Framework.Assert.AreEqual(4, effectiveVolumes.Length);
    foreach (string ev in effectiveVolumes)
    {
        Assert.AssertThat(StorageLocation.Parse(ev).GetFile().GetCanonicalPath(),
            IS.Is(CoreMatchers.Not(CoreMatchers.AnyOf(IS.Is(newDirs[0]), IS.Is(newDirs[2])))));
    }
}
/// <param name="blockSize"/>
/// <param name="perVolumeCapacity">
/// limit the capacity of each volume to the given
/// value. If negative, then don't limit.
/// </param>
/// <exception cref="System.IO.IOException"/>
private void StartCluster(int blockSize, int numDatanodes, long perVolumeCapacity)
{
    InitConfig(blockSize);
    cluster = new MiniDFSCluster.Builder(conf)
        .StoragesPerDatanode(StoragesPerDatanode)
        .NumDataNodes(numDatanodes)
        .Build();
    fs = cluster.GetFileSystem();
    client = fs.GetClient();
    cluster.WaitActive();
    if (perVolumeCapacity >= 0)
    {
        foreach (DataNode dn in cluster.GetDataNodes())
        {
            foreach (FsVolumeSpi volume in dn.GetFSDataset().GetVolumes())
            {
                ((FsVolumeImpl)volume).SetCapacityForTesting(perVolumeCapacity);
            }
        }
    }
    if (numDatanodes == 1)
    {
        IList<FsVolumeSpi> volumes = cluster.GetDataNodes()[0].GetFSDataset().GetVolumes();
        Assert.AssertThat(volumes.Count, IS.Is(1));
        singletonVolume = (FsVolumeImpl)volumes[0];
    }
}
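// A minimal usage sketch (mirroring the call in StressTest above; the
// constants are the test class's own, not new API): start a single-DN
// cluster whose one volume holds at most ten small blocks, then rely on
// singletonVolume for per-volume assertions.
//
//   StartCluster(SmallBlockSize, 1, SmallBlockSize * 10);
//   Assert.AssertThat(singletonVolume.GetReservedForRbw(), IS.Is(0L));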
/// <summary>
/// Similar to BlockReport_03() but works with two DNs.
/// Test writes a file and closes it.
/// </summary>
/// <remarks>
/// Similar to BlockReport_03() but works with two DNs.
/// Test writes a file and closes it.
/// The second datanode is started in the cluster.
/// As soon as the replication process is completed, the test finds a block on
/// the second DN and sets its GS to be less than the original one.
/// This is markBlockAsCorrupt case 3, so we expect one pending deletion.
/// A block report is forced and the check for the number of corrupted blocks
/// is performed.
/// Another block is chosen and its length is set to less than the original.
/// A check for another corrupted block is performed after yet another
/// block report.
/// </remarks>
/// <exception cref="System.IO.IOException">in case of an error</exception>
/// <exception cref="System.Exception"/>
public virtual void BlockReport_07()
{
    string MethodName = GenericTestUtils.GetMethodName();
    Path filePath = new Path("/" + MethodName + ".dat");
    int DnN1 = DnN0 + 1;
    // Write the file and start the second node to be "older" than the original.
    WriteFile(MethodName, FileSize, filePath);
    StartDNandWait(filePath, true);
    // All blocks belong to the same file, hence the same BP.
    DataNode dn = cluster.GetDataNodes()[DnN1];
    string poolId = cluster.GetNamesystem().GetBlockPoolId();
    DatanodeRegistration dnR = dn.GetDNRegistrationForBP(poolId);
    StorageBlockReport[] reports = GetBlockReports(dn, poolId, true, false);
    SendBlockReports(dnR, poolId, reports);
    PrintStats();
    Assert.AssertThat("Wrong number of corrupt blocks",
        cluster.GetNamesystem().GetCorruptReplicaBlocks(), IS.Is(0L));
    Assert.AssertThat("Wrong number of PendingDeletion blocks",
        cluster.GetNamesystem().GetPendingDeletionBlocks(), IS.Is(1L));
    Assert.AssertThat("Wrong number of PendingReplication blocks",
        cluster.GetNamesystem().GetPendingReplicationBlocks(), IS.Is(0L));
    reports = GetBlockReports(dn, poolId, false, true);
    SendBlockReports(dnR, poolId, reports);
    PrintStats();
    Assert.AssertThat("Wrong number of corrupt blocks",
        cluster.GetNamesystem().GetCorruptReplicaBlocks(), IS.Is(1L));
    Assert.AssertThat("Wrong number of PendingDeletion blocks",
        cluster.GetNamesystem().GetPendingDeletionBlocks(), IS.Is(1L));
    Assert.AssertThat("Wrong number of PendingReplication blocks",
        cluster.GetNamesystem().GetPendingReplicationBlocks(), IS.Is(0L));
    PrintStats();
}
public virtual void TestStorageReportHasStorageTypeAndState()
{
    // Make sure we are not testing with the default type, that would not
    // be a very good test.
    NUnit.Framework.Assert.AreNotSame(storageType, StorageType.Default);
    NameNode nn = cluster.GetNameNode();
    DataNode dn = cluster.GetDataNodes()[0];
    // Insert a spy object for the NN RPC.
    DatanodeProtocolClientSideTranslatorPB nnSpy = DataNodeTestUtils.SpyOnBposToNN(dn, nn);
    // Trigger a heartbeat so there is an interaction with the spy object.
    DataNodeTestUtils.TriggerHeartbeat(dn);
    // Verify that the callback passed in the expected parameters.
    ArgumentCaptor<StorageReport[]> captor = ArgumentCaptor.ForClass<StorageReport[]>();
    Org.Mockito.Mockito.Verify(nnSpy).SendHeartbeat(
        Matchers.Any<DatanodeRegistration>(), captor.Capture(), Matchers.AnyLong(),
        Matchers.AnyLong(), Matchers.AnyInt(), Matchers.AnyInt(), Matchers.AnyInt(),
        Org.Mockito.Mockito.Any<VolumeFailureSummary>());
    StorageReport[] reports = captor.GetValue();
    foreach (StorageReport report in reports)
    {
        Assert.AssertThat(report.GetStorage().GetStorageType(), IS.Is(storageType));
        Assert.AssertThat(report.GetStorage().GetState(), IS.Is(DatanodeStorage.State.Normal));
    }
}
public virtual void TestConcurrentWrites()
{
    StartUpCluster(true, 9);
    string MethodName = GenericTestUtils.GetMethodName();
    int Seed = unchecked((int)(0xFADED));
    int NumWriters = 4;
    int NumWriterPaths = 5;
    Path[][] paths = new Path[NumWriters][];
    for (int i = 0; i < NumWriters; i++)
    {
        paths[i] = new Path[NumWriterPaths];
        for (int j = 0; j < NumWriterPaths; j++)
        {
            paths[i][j] = new Path("/" + MethodName + ".Writer" + i + ".File." + j + ".dat");
        }
    }
    CountDownLatch latch = new CountDownLatch(NumWriters);
    AtomicBoolean testFailed = new AtomicBoolean(false);
    ExecutorService executor = Executors.NewFixedThreadPool(ThreadpoolSize);
    for (int i = 0; i < NumWriters; i++)
    {
        Runnable writer = new TestLazyPersistFiles.WriterRunnable(
            this, i, paths[i], Seed, latch, testFailed);
        executor.Execute(writer);
    }
    Sharpen.Thread.Sleep(3 * LazyWriterIntervalSec * 1000);
    TriggerBlockReport();
    // Wait for all writers to finish before checking the result.
    latch.Await();
    Assert.AssertThat(testFailed.Get(), IS.Is(false));
}
/// <exception cref="System.Exception"/>
public virtual void TestDatanodeRollingUpgradeWithRollback()
{
    try
    {
        StartCluster();
        // Create a file in DFS.
        Path testFile1 = new Path("/" + GenericTestUtils.GetMethodName() + ".01.dat");
        DFSTestUtil.CreateFile(fs, testFile1, FileSize, ReplFactor, Seed);
        string fileContents1 = DFSTestUtil.ReadFile(fs, testFile1);
        StartRollingUpgrade();
        FilePath blockFile = GetBlockForFile(testFile1, true);
        FilePath trashFile = GetTrashFileForBlock(blockFile, false);
        DeleteAndEnsureInTrash(testFile1, blockFile, trashFile);
        // Now perform a rollback to restore DFS to the pre-upgrade state.
        RollbackRollingUpgrade();
        // Ensure that the block was restored from the trash.
        EnsureTrashRestored(blockFile, trashFile);
        // Ensure that the file exists and the restored file contents are the same.
        System.Diagnostics.Debug.Assert(fs.Exists(testFile1));
        string fileContents2 = DFSTestUtil.ReadFile(fs, testFile1);
        Assert.AssertThat(fileContents1, IS.Is(fileContents2));
    }
    finally
    {
        ShutdownCluster();
    }
}
internal StubBlockPoolSliceStorage(int namespaceID, string bpID, long cTime, string clusterId)
    : base(namespaceID, bpID, cTime, clusterId)
{
    AddStorageDir(new Storage.StorageDirectory(new FilePath("/tmp/dontcare/" + bpID)));
    Assert.AssertThat(storageDirs.Count, IS.Is(1));
}
private void VerifyStorageType(long blockId, bool expectTransient)
{
    ReplicaInfo memBlock = FsDatasetTestUtil.FetchReplicaInfo(fds, bpid, blockId);
    NUnit.Framework.Assert.IsNotNull(memBlock);
    MatcherAssert.AssertThat(memBlock.GetVolume().IsTransientStorage(), IS.Is(expectTransient));
}
/// <summary>
/// Override createRbw to verify that the block length that is passed
/// is correct.
/// </summary>
/// <remarks>
/// Override createRbw to verify that the block length that is passed
/// is correct. This requires both DFSOutputStream and BlockReceiver to
/// correctly propagate the hint to FsDatasetSpi.
/// </remarks>
/// <exception cref="System.IO.IOException"/>
public override ReplicaHandler CreateRbw(StorageType storageType, ExtendedBlock b,
    bool allowLazyPersist)
{
    lock (this)
    {
        Assert.AssertThat(b.GetLocalBlock().GetNumBytes(), IS.Is(ExpectedBlockLength));
        return base.CreateRbw(storageType, b, allowLazyPersist);
    }
}
public virtual void TestScheduleBlockReportImmediate()
{
    foreach (long now in GetTimestamps())
    {
        BPServiceActor.Scheduler scheduler = MakeMockScheduler(now);
        scheduler.ScheduleBlockReport(0);
        NUnit.Framework.Assert.IsTrue(scheduler.resetBlockReportTime);
        Assert.AssertThat(scheduler.nextBlockReportTime, IS.Is(now));
    }
}
public virtual void TestScheduleNextBlockReport2()
{
    foreach (long now in GetTimestamps())
    {
        BPServiceActor.Scheduler scheduler = MakeMockScheduler(now);
        scheduler.resetBlockReportTime = false;
        scheduler.ScheduleNextBlockReport();
        Assert.AssertThat(scheduler.nextBlockReportTime, IS.Is(now + BlockReportIntervalMs));
    }
}
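// Taken together, the two scheduler tests above pin down the contract they
// exercise (a restatement of the assertions, not new behavior):
// ScheduleBlockReport(0) requests an immediate report, so
// nextBlockReportTime == now; a routine ScheduleNextBlockReport() with
// resetBlockReportTime == false advances the next report time by exactly
// one BlockReportIntervalMs.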
public virtual void TestWithRetriableAndRetryDisabled()
{
    Configuration conf = new Configuration();
    RetryPolicy policy = RetryUtils.GetDefaultRetryPolicy(conf, "Test.No.Such.Key", false,
        "Test.No.Such.Key", "10000,6", null); // defaultRetryPolicyEnabled = false
    RetryPolicy.RetryAction action = policy.ShouldRetry(
        new RetriableException("Dummy exception"), 0, 0, true);
    Assert.AssertThat(action.action, IS.Is(RetryPolicy.RetryAction.RetryDecision.Fail));
}
public virtual void TestPolicyPropagation()
{
    StartUpCluster(false, -1);
    string MethodName = GenericTestUtils.GetMethodName();
    Path path = new Path("/" + MethodName + ".dat");
    MakeTestFile(path, 0, true);
    // Stat the file and check that the lazyPersist flag is returned back.
    HdfsFileStatus status = client.GetFileInfo(path.ToString());
    Assert.AssertThat(status.GetStoragePolicy(), IS.Is(LazyPersistPolicyId));
}
public virtual void TestWithWrappedRetriable()
{
    Configuration conf = new Configuration();
    RetryPolicy policy = RetryUtils.GetDefaultRetryPolicy(conf, "Test.No.Such.Key", true,
        "Test.No.Such.Key", "10000,6", null); // defaultRetryPolicyEnabled = true
    RetryPolicy.RetryAction action = policy.ShouldRetry(
        new RemoteException(typeof(RetriableException).FullName, "Dummy exception"), 0, 0, true);
    Assert.AssertThat(action.action, IS.Is(RetryPolicy.RetryAction.RetryDecision.Retry));
}
/// <exception cref="System.IO.IOException"/>
/// <exception cref="System.Exception"/>
private void CreateFileAndTestSpaceReservation(string fileNamePrefix, int fileBlockSize)
{
    // Enough for 1 block + meta files + some delta.
    long configuredCapacity = fileBlockSize * 2 - 1;
    StartCluster(BlockSize, 1, configuredCapacity);
    FSDataOutputStream @out = null;
    Path path = new Path("/" + fileNamePrefix + ".dat");
    try
    {
        @out = fs.Create(path, false, 4096, (short)1, fileBlockSize);
        byte[] buffer = new byte[rand.Next(fileBlockSize / 4)];
        @out.Write(buffer);
        @out.Hsync();
        int bytesWritten = buffer.Length;
        // Check that space was reserved for a full block minus the bytesWritten.
        Assert.AssertThat(singletonVolume.GetReservedForRbw(),
            IS.Is((long)fileBlockSize - bytesWritten));
        @out.Close();
        @out = null;
        // Check that the reserved space has been released since we closed the file.
        Assert.AssertThat(singletonVolume.GetReservedForRbw(), IS.Is(0L));
        // Reopen the file for append and write the buffer once more.
        @out = fs.Append(path);
        @out.Write(buffer);
        @out.Hsync();
        bytesWritten += buffer.Length;
        // Check that space was again reserved for a full block minus the
        // bytesWritten so far.
        Assert.AssertThat(singletonVolume.GetReservedForRbw(),
            IS.Is((long)fileBlockSize - bytesWritten));
        // Write once again and again verify the available space. This ensures
        // that the reserved space is progressively adjusted to account for bytes
        // written to disk.
        @out.Write(buffer);
        @out.Hsync();
        bytesWritten += buffer.Length;
        Assert.AssertThat(singletonVolume.GetReservedForRbw(),
            IS.Is((long)fileBlockSize - bytesWritten));
    }
    finally
    {
        if (@out != null)
        {
            @out.Close();
        }
    }
}
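// A worked example of the reservation invariant checked above (the numbers
// are hypothetical, chosen only for illustration): with fileBlockSize = 4096
// and bytesWritten = 1000 hsync'ed bytes, GetReservedForRbw() should return
// 4096 - 1000 = 3096 while the stream is open; closing the stream finalizes
// the replica-being-written and the reservation drops back to 0.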
/// <exception cref="System.IO.IOException"/>
private LocatedBlocks CreateFileGetBlocks(string filenamePrefix)
{
    Path filePath = new Path("/" + filenamePrefix + ".dat");
    // Write out a file with a few blocks, get block locations.
    DFSTestUtil.CreateFile(fs, filePath, BlockSize, BlockSize * NumBlocks, BlockSize,
        NumDatanodes, seed);
    // Get the block list for the file with the block locations.
    LocatedBlocks blocks = client.GetLocatedBlocks(filePath.ToString(), 0, BlockSize * NumBlocks);
    Assert.AssertThat(cluster.GetNamesystem().GetUnderReplicatedBlocks(), IS.Is(0L));
    return blocks;
}
/// <exception cref="System.IO.IOException"/>
private static void RunTest(string testCaseName, bool createFiles, int numInitialStorages,
    int expectedStoragesAfterTest)
{
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    try
    {
        cluster = new MiniDFSCluster.Builder(conf)
            .NumDataNodes(1)
            .StoragesPerDatanode(numInitialStorages)
            .Build();
        cluster.WaitActive();
        DataNode dn0 = cluster.GetDataNodes()[0];
        // Ensure NN knows about the storage.
        DatanodeID dnId = dn0.GetDatanodeId();
        DatanodeDescriptor dnDescriptor =
            cluster.GetNamesystem().GetBlockManager().GetDatanodeManager().GetDatanode(dnId);
        Assert.AssertThat(dnDescriptor.GetStorageInfos().Length, IS.Is(numInitialStorages));
        string bpid = cluster.GetNamesystem().GetBlockPoolId();
        DatanodeRegistration dnReg = dn0.GetDNRegistrationForBP(bpid);
        DataNodeTestUtils.TriggerBlockReport(dn0);
        if (createFiles)
        {
            Path path = new Path("/", testCaseName);
            DFSTestUtil.CreateFile(cluster.GetFileSystem(), path, 1024, (short)1,
                unchecked((int)(0x1BAD5EED)));
            DataNodeTestUtils.TriggerBlockReport(dn0);
        }
        // Generate a fake StorageReport that is missing one storage.
        StorageReport[] reports = dn0.GetFSDataset().GetStorageReports(bpid);
        StorageReport[] prunedReports = new StorageReport[numInitialStorages - 1];
        System.Array.Copy(reports, 0, prunedReports, 0, prunedReports.Length);
        // Stop the DataNode and send the fake heartbeat with the missing storage.
        cluster.StopDataNode(0);
        cluster.GetNameNodeRpc().SendHeartbeat(dnReg, prunedReports, 0L, 0L, 0, 0, 0, null);
        // Check that the missing storage was pruned.
        Assert.AssertThat(dnDescriptor.GetStorageInfos().Length, IS.Is(expectedStoragesAfterTest));
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/// <exception cref="System.IO.IOException"/>
public void VerifyClusterPostUpgrade(MiniDFSCluster cluster)
{
    // Verify that a GUID-based storage ID was generated.
    string bpid = cluster.GetNamesystem().GetBlockPoolId();
    StorageReport[] reports = cluster.GetDataNodes()[0].GetFSDataset().GetStorageReports(bpid);
    Assert.AssertThat(reports.Length, IS.Is(1));
    string storageID = reports[0].GetStorage().GetStorageID();
    NUnit.Framework.Assert.IsTrue(DatanodeStorage.IsValidStorageId(storageID));
    if (expectedStorageId != null)
    {
        Assert.AssertThat(storageID, IS.Is(expectedStorageId));
    }
}
/// <exception cref="System.IO.IOException"/>
protected internal LocatedBlocks EnsureFileReplicasOnStorageType(Path path, StorageType storageType)
{
    // Ensure that the returned block locations are correct!
    Log.Info("Ensure path: " + path + " is on StorageType: " + storageType);
    Assert.AssertThat(fs.Exists(path), IS.Is(true));
    long fileLength = client.GetFileInfo(path.ToString()).GetLen();
    LocatedBlocks locatedBlocks = client.GetLocatedBlocks(path.ToString(), 0, fileLength);
    foreach (LocatedBlock locatedBlock in locatedBlocks.GetLocatedBlocks())
    {
        Assert.AssertThat(locatedBlock.GetStorageTypes()[0], IS.Is(storageType));
    }
    return locatedBlocks;
}
/// <summary>
/// Test conversion from a block file path to its target trash
/// directory.
/// </summary>
public virtual void GetTrashDirectoryForBlockFile(string fileName, int nestingLevel)
{
    string blockFileSubdir = MakeRandomBlockFileSubdir(nestingLevel);
    string blockFileName = fileName;
    string testFilePath =
        storage.GetSingularStorageDir().GetRoot() + FilePath.separator +
        Storage.StorageDirCurrent + blockFileSubdir + blockFileName;
    string expectedTrashPath =
        storage.GetSingularStorageDir().GetRoot() + FilePath.separator +
        BlockPoolSliceStorage.TrashRootDir +
        Sharpen.Runtime.Substring(blockFileSubdir, 0, blockFileSubdir.Length - 1);
    Log.Info("Got subdir " + blockFileSubdir);
    Log.Info("Generated file path " + testFilePath);
    Assert.AssertThat(storage.GetTrashDirectory(new FilePath(testFilePath)),
        IS.Is(expectedTrashPath));
}
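// Illustrative only (a sketch with hypothetical paths, not taken from the
// test): for nestingLevel = 2, the conversion verified above maps a block
// file such as
//   <root>/current/subdir0/subdir1/blk_1234
// to the trash directory
//   <root>/trash/subdir0/subdir1
// i.e. the "current" component is replaced by the trash root and the block
// file name itself is dropped.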
/// <exception cref="System.IO.IOException"/>
public virtual void VerifyIncrementalBlockReports(bool splitReports)
{
    // Get the block list for the file with the block locations.
    LocatedBlocks blocks = CreateFileGetBlocks(GenericTestUtils.GetMethodName());
    // We will send 'fake' incremental block reports to the NN that look
    // like they originated from DN 0.
    StorageReceivedDeletedBlocks[] reports =
        new StorageReceivedDeletedBlocks[dn0.GetFSDataset().GetVolumes().Count];
    // Lie to the NN that one block on each storage has been deleted.
    for (int i = 0; i < reports.Length; ++i)
    {
        FsVolumeSpi volume = dn0.GetFSDataset().GetVolumes()[i];
        bool foundBlockOnStorage = false;
        ReceivedDeletedBlockInfo[] rdbi = new ReceivedDeletedBlockInfo[1];
        // Find the first block on this storage and mark it as deleted for the report.
        foreach (LocatedBlock block in blocks.GetLocatedBlocks())
        {
            if (block.GetStorageIDs()[0].Equals(volume.GetStorageID()))
            {
                rdbi[0] = new ReceivedDeletedBlockInfo(block.GetBlock().GetLocalBlock(),
                    ReceivedDeletedBlockInfo.BlockStatus.DeletedBlock, null);
                foundBlockOnStorage = true;
                break;
            }
        }
        NUnit.Framework.Assert.IsTrue(foundBlockOnStorage);
        reports[i] = new StorageReceivedDeletedBlocks(volume.GetStorageID(), rdbi);
        if (splitReports)
        {
            // If we are splitting reports then send the report for this storage now.
            StorageReceivedDeletedBlocks[] singletonReport =
                new StorageReceivedDeletedBlocks[] { reports[i] };
            cluster.GetNameNodeRpc().BlockReceivedAndDeleted(dn0Reg, poolId, singletonReport);
        }
    }
    if (!splitReports)
    {
        // Send a combined report.
        cluster.GetNameNodeRpc().BlockReceivedAndDeleted(dn0Reg, poolId, reports);
    }
    // Make sure that the deleted block from each storage was picked up by the NameNode.
    Assert.AssertThat(cluster.GetNamesystem().GetMissingBlocksCount(),
        IS.Is((long)reports.Length));
}
public virtual void TestBlockHasMultipleReplicasOnSameDN()
{
    string filename = MakeFileName(GenericTestUtils.GetMethodName());
    Path filePath = new Path(filename);
    // Write out a file with a few blocks.
    DFSTestUtil.CreateFile(fs, filePath, BlockSize, BlockSize * NumBlocks, BlockSize,
        NumDatanodes, seed);
    // Get the block list for the file with the block locations.
    LocatedBlocks locatedBlocks = client.GetLocatedBlocks(filePath.ToString(), 0,
        BlockSize * NumBlocks);
    // Generate a fake block report from one of the DataNodes, such
    // that it reports one copy of each block on either storage.
    DataNode dn = cluster.GetDataNodes()[0];
    DatanodeRegistration dnReg = dn.GetDNRegistrationForBP(bpid);
    StorageBlockReport[] reports = new StorageBlockReport[cluster.GetStoragesPerDatanode()];
    AList<Replica> blocks = new AList<Replica>();
    foreach (LocatedBlock locatedBlock in locatedBlocks.GetLocatedBlocks())
    {
        Block localBlock = locatedBlock.GetBlock().GetLocalBlock();
        blocks.AddItem(new FinalizedReplica(localBlock, null, null));
    }
    BlockListAsLongs bll = BlockListAsLongs.Encode(blocks);
    for (int i = 0; i < cluster.GetStoragesPerDatanode(); ++i)
    {
        FsVolumeSpi v = dn.GetFSDataset().GetVolumes()[i];
        DatanodeStorage dns = new DatanodeStorage(v.GetStorageID());
        reports[i] = new StorageBlockReport(dns, bll);
    }
    // Should not assert!
    cluster.GetNameNodeRpc().BlockReport(dnReg, bpid, reports,
        new BlockReportContext(1, 0, Runtime.NanoTime()));
    // Get the block locations once again.
    locatedBlocks = client.GetLocatedBlocks(filename, 0, BlockSize * NumBlocks);
    // Make sure that each block has two replicas, one on each DataNode.
    foreach (LocatedBlock locatedBlock in locatedBlocks.GetLocatedBlocks())
    {
        DatanodeInfo[] locations = locatedBlock.GetLocations();
        Assert.AssertThat(locations.Length, IS.Is((int)NumDatanodes));
        Assert.AssertThat(locations[0].GetDatanodeUuid(),
            CoreMatchers.Not(locations[1].GetDatanodeUuid()));
    }
}
private void VerifyCapturedArguments(ArgumentCaptor<StorageBlockReport[]> captor,
    int expectedReportsPerCall, int expectedTotalBlockCount)
{
    IList<StorageBlockReport[]> listOfReports = captor.GetAllValues();
    int numBlocksReported = 0;
    foreach (StorageBlockReport[] reports in listOfReports)
    {
        Assert.AssertThat(reports.Length, IS.Is(expectedReportsPerCall));
        foreach (StorageBlockReport report in reports)
        {
            BlockListAsLongs blockList = report.GetBlocks();
            numBlocksReported += blockList.GetNumberOfBlocks();
        }
    }
    System.Diagnostics.Debug.Assert(numBlocksReported >= expectedTotalBlockCount);
}
public virtual void TestPolicyPersistenceInFsImage()
{
    StartUpCluster(false, -1);
    string MethodName = GenericTestUtils.GetMethodName();
    Path path = new Path("/" + MethodName + ".dat");
    MakeTestFile(path, 0, true);
    // Checkpoint.
    fs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
    fs.SaveNamespace();
    fs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
    cluster.RestartNameNode(true);
    // Stat the file and check that the lazyPersist flag is returned back.
    HdfsFileStatus status = client.GetFileInfo(path.ToString());
    Assert.AssertThat(status.GetStoragePolicy(), IS.Is(LazyPersistPolicyId));
}
public virtual void TestDeleteBeforePersist()
{
    StartUpCluster(true, -1);
    string MethodName = GenericTestUtils.GetMethodName();
    FsDatasetTestUtil.StopLazyWriter(cluster.GetDataNodes()[0]);
    Path path = new Path("/" + MethodName + ".dat");
    MakeTestFile(path, BlockSize, true);
    LocatedBlocks locatedBlocks = EnsureFileReplicasOnStorageType(path, StorageType.RamDisk);
    // Delete before persist.
    client.Delete(path.ToString(), false);
    NUnit.Framework.Assert.IsFalse(fs.Exists(path));
    Assert.AssertThat(VerifyDeletedBlocks(locatedBlocks), IS.Is(true));
    VerifyRamDiskJMXMetric("RamDiskBlocksDeletedBeforeLazyPersisted", 1);
}
public virtual void TestGetUserGroupInformation()
{
    string userName = "******";
    string currentUser = "******";
    UserGroupInformation currentUserUgi =
        UserGroupInformation.CreateUserForTesting(currentUser, new string[0]);
    NfsConfiguration conf = new NfsConfiguration();
    conf.Set(FileSystem.FsDefaultNameKey, "hdfs://localhost");
    DFSClientCache cache = new DFSClientCache(conf);
    UserGroupInformation ugiResult = cache.GetUserGroupInformation(userName, currentUserUgi);
    Assert.AssertThat(ugiResult.GetUserName(), IS.Is(userName));
    Assert.AssertThat(ugiResult.GetRealUser(), IS.Is(currentUserUgi));
    Assert.AssertThat(ugiResult.GetAuthenticationMethod(),
        IS.Is(UserGroupInformation.AuthenticationMethod.Proxy));
}
public virtual void TestDeleteAfterPersist()
{
    StartUpCluster(true, -1);
    string MethodName = GenericTestUtils.GetMethodName();
    Path path = new Path("/" + MethodName + ".dat");
    MakeTestFile(path, BlockSize, true);
    LocatedBlocks locatedBlocks = EnsureFileReplicasOnStorageType(path, StorageType.RamDisk);
    // Sleep for a short time to allow the lazy writer thread to do its job.
    Sharpen.Thread.Sleep(6 * LazyWriterIntervalSec * 1000);
    // Delete after persist.
    client.Delete(path.ToString(), false);
    NUnit.Framework.Assert.IsFalse(fs.Exists(path));
    Assert.AssertThat(VerifyDeletedBlocks(locatedBlocks), IS.Is(true));
    VerifyRamDiskJMXMetric("RamDiskBlocksLazyPersisted", 1);
    VerifyRamDiskJMXMetric("RamDiskBytesLazyPersisted", BlockSize);
}
/// <summary>
/// Process the given arg list as command line arguments to the DataNode
/// to make sure we get the expected result.
/// </summary>
/// <remarks>
/// Process the given arg list as command line arguments to the DataNode
/// to make sure we get the expected result. If the expected result is
/// success then further validate that the parsed startup option is the
/// same as what was expected.
/// </remarks>
/// <param name="expectSuccess"/>
/// <param name="expectedOption"/>
/// <param name="conf"/>
/// <param name="arg"/>
private static void CheckExpected(bool expectSuccess,
    HdfsServerConstants.StartupOption expectedOption, Configuration conf, params string[] arg)
{
    string[] args = new string[arg.Length];
    int i = 0;
    foreach (string currentArg in arg)
    {
        args[i++] = currentArg;
    }
    bool returnValue = DataNode.ParseArguments(args, conf);
    HdfsServerConstants.StartupOption option = DataNode.GetStartupOption(conf);
    Assert.AssertThat(returnValue, IS.Is(expectSuccess));
    if (expectSuccess)
    {
        Assert.AssertThat(option, IS.Is(expectedOption));
    }
}
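// Hypothetical usage (the flag strings and StartupOption members below are
// assumptions about DataNode's argument parser, not taken from this file):
//
//   CheckExpected(true, HdfsServerConstants.StartupOption.Regular, conf, "-regular");
//   CheckExpected(true, HdfsServerConstants.StartupOption.Rollback, conf, "-rollback");
//   CheckExpected(false, null, conf, "-noSuchOption");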
public virtual void TestGetUserGroupInformationSecure()
{
    string userName = "******";
    string currentUser = "******";
    NfsConfiguration conf = new NfsConfiguration();
    UserGroupInformation currentUserUgi = UserGroupInformation.CreateRemoteUser(currentUser);
    currentUserUgi.SetAuthenticationMethod(UserGroupInformation.AuthenticationMethod.Kerberos);
    UserGroupInformation.SetLoginUser(currentUserUgi);
    DFSClientCache cache = new DFSClientCache(conf);
    UserGroupInformation ugiResult = cache.GetUserGroupInformation(userName, currentUserUgi);
    Assert.AssertThat(ugiResult.GetUserName(), IS.Is(userName));
    Assert.AssertThat(ugiResult.GetRealUser(), IS.Is(currentUserUgi));
    Assert.AssertThat(ugiResult.GetAuthenticationMethod(),
        IS.Is(UserGroupInformation.AuthenticationMethod.Proxy));
}