/// <summary>
/// When adding a volume fails partway through (getVolumeMap throws), the
/// in-use file lock taken on the new directory must be released again.
/// </summary>
public virtual void TestAddVolumeFailureReleasesInUseLock()
{
    // Spy the dataset so the volume it creates can be replaced by a mock.
    FsDatasetImpl datasetSpy = Org.Mockito.Mockito.Spy(dataset);
    FsVolumeImpl volumeMock = Org.Mockito.Mockito.Mock<FsVolumeImpl>();
    FilePath failedDir = new FilePath(BaseDir, "bad");
    failedDir.Mkdirs();
    Org.Mockito.Mockito.DoReturn(volumeMock).When(datasetSpy).CreateFsVolume(
        Matchers.AnyString(), Matchers.Any<FilePath>(), Matchers.Any<StorageType>());
    // Make the mocked volume fail while building its replica map.
    Org.Mockito.Mockito.DoThrow(new IOException("Failed to getVolumeMap()")).When(volumeMock)
        .GetVolumeMap(Matchers.AnyString(), Matchers.Any<ReplicaMap>(),
        Matchers.Any<RamDiskReplicaLruTracker>());
    // Take the in-use lock on the bad directory up front, as addVolume would.
    Storage.StorageDirectory storageDir = CreateStorageDirectory(failedDir);
    storageDir.Lock();
    DataStorage.VolumeBuilder volumeBuilder = new DataStorage.VolumeBuilder(storage, storageDir);
    Org.Mockito.Mockito.When(storage.PrepareVolume(Matchers.Eq(datanode),
        Matchers.Eq(failedDir.GetAbsoluteFile()),
        Matchers.Any<IList<NamespaceInfo>>())).ThenReturn(volumeBuilder);
    StorageLocation storageLocation = StorageLocation.Parse(failedDir.ToString());
    IList<NamespaceInfo> namespaceInfos = Lists.NewArrayList();
    foreach (string blockPoolId in BlockPoolIds)
    {
        namespaceInfos.AddItem(new NamespaceInfo(0, ClusterId, blockPoolId, 1));
    }
    try
    {
        datasetSpy.AddVolume(storageLocation, namespaceInfos);
        NUnit.Framework.Assert.Fail("Expect to throw MultipleIOException");
    }
    catch (MultipleIOException)
    {
        // Expected: the mocked GetVolumeMap failure surfaces here.
    }
    // The lock on the bad directory must have been released on failure.
    FsDatasetTestUtil.AssertFileLockReleased(failedDir.ToString());
}
/// <summary>
/// A RAM_DISK replica that was never lazily persisted must survive a
/// DataNode restart and still be reported on transient storage.
/// </summary>
public virtual void TestDnRestartWithUnsavedReplicas()
{
    StartUpCluster(true, 1);
    // Stop the lazy writer so the replica is never persisted to disk.
    FsDatasetTestUtil.StopLazyWriter(cluster.GetDataNodes()[0]);
    string methodName = GenericTestUtils.GetMethodName();
    Path testFile = new Path("/" + methodName + ".01.dat");
    MakeTestFile(testFile, BlockSize, true);
    EnsureFileReplicasOnStorageType(testFile, StorageType.RamDisk);
    Log.Info("Restarting the DataNode");
    cluster.RestartDataNode(0, true);
    cluster.WaitActive();
    // Ensure that the replica is still on transient storage.
    EnsureFileReplicasOnStorageType(testFile, StorageType.RamDisk);
}
/// <summary>
/// Deleting a file before its RAM_DISK replica is lazily persisted must
/// remove the blocks and bump the "deleted before lazy persist" metric.
/// </summary>
public virtual void TestDeleteBeforePersist()
{
    StartUpCluster(true, -1);
    string methodName = GenericTestUtils.GetMethodName();
    // Stop the lazy writer so the block cannot be persisted first.
    FsDatasetTestUtil.StopLazyWriter(cluster.GetDataNodes()[0]);
    Path testPath = new Path("/" + methodName + ".dat");
    MakeTestFile(testPath, BlockSize, true);
    LocatedBlocks blocks = EnsureFileReplicasOnStorageType(testPath, StorageType.RamDisk);
    // Delete before persist.
    client.Delete(testPath.ToString(), false);
    NUnit.Framework.Assert.IsFalse(fs.Exists(testPath));
    Assert.AssertThat(VerifyDeletedBlocks(blocks), IS.Is(true));
    // The early deletion must show up in the JMX counter.
    VerifyRamDiskJMXMetric("RamDiskBlocksDeletedBeforeLazyPersisted", 1);
}
/// <summary>
/// A RAM_DISK replica must not be evicted while it has not yet been lazily
/// persisted, even when a second file needs RAM_DISK space and falls back
/// to DEFAULT storage instead.
/// </summary>
public virtual void TestRamDiskEvictionBeforePersist()
{
    StartUpCluster(true, 1);
    string MethodName = GenericTestUtils.GetMethodName();
    Path path1 = new Path("/" + MethodName + ".01.dat");
    Path path2 = new Path("/" + MethodName + ".02.dat");
    int Seed = 0XFADED;
    // Stop lazy writer to ensure block for path1 is not persisted to disk.
    FsDatasetTestUtil.StopLazyWriter(cluster.GetDataNodes()[0]);
    MakeRandomTestFile(path1, BlockSize, true, Seed);
    EnsureFileReplicasOnStorageType(path1, StorageType.RamDisk);
    // Create second file with a replica on RAM_DISK.
    MakeTestFile(path2, BlockSize, true);
    // Eviction should not happen for block of the first file that is not
    // persisted yet.
    EnsureFileReplicasOnStorageType(path1, StorageType.RamDisk);
    EnsureFileReplicasOnStorageType(path2, StorageType.Default);
    // Use NUnit assertions, not Debug.Assert: Debug.Assert is compiled out in
    // Release builds and would silently skip these existence checks, and it
    // is inconsistent with the rest of this file's assertions.
    NUnit.Framework.Assert.IsTrue(fs.Exists(path1));
    NUnit.Framework.Assert.IsTrue(fs.Exists(path2));
    // Content of the unpersisted file must still be readable and intact.
    NUnit.Framework.Assert.IsTrue(VerifyReadRandomFile(path1, BlockSize, Seed));
}
/// <summary>
/// Exercises updateReplicaUnderRecovery: initializes replica recovery with a
/// bumped generation stamp and truncated length, verifies the replica enters
/// RUR state, verifies an update with a mismatched block length fails, then
/// verifies a well-formed update succeeds and returns a storage ID.
/// </summary>
public virtual void TestUpdateReplicaUnderRecovery()
{
    MiniDFSCluster cluster = null;
    try
    {
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
        cluster.WaitActive();
        string bpid = cluster.GetNamesystem().GetBlockPoolId();
        // Create a file replicated to all 3 datanodes.
        DistributedFileSystem dfs = cluster.GetFileSystem();
        string filestr = "/foo";
        Path filepath = new Path(filestr);
        DFSTestUtil.CreateFile(dfs, filepath, 1024L, (short)3, 0L);
        // Locate the file's last block and its replica locations.
        LocatedBlock locatedblock = GetLastLocatedBlock(DFSClientAdapter.GetDFSClient(dfs
            ).GetNamenode(), filestr);
        DatanodeInfo[] datanodeinfo = locatedblock.GetLocations();
        NUnit.Framework.Assert.IsTrue(datanodeinfo.Length > 0);
        // Get the DataNode and FSDataset objects hosting the first replica.
        DataNode datanode = cluster.GetDataNode(datanodeinfo[0].GetIpcPort());
        NUnit.Framework.Assert.IsTrue(datanode != null);
        // Initialize replica recovery with a higher generation stamp and a
        // length shortened by one byte.
        ExtendedBlock b = locatedblock.GetBlock();
        long recoveryid = b.GetGenerationStamp() + 1;
        long newlength = b.GetNumBytes() - 1;
        FsDatasetSpi<object> fsdataset = DataNodeTestUtils.GetFSDataset(datanode);
        ReplicaRecoveryInfo rri = fsdataset.InitReplicaRecovery(new BlockRecoveryCommand.RecoveringBlock
            (b, null, recoveryid));
        // The replica must now be in "replica under recovery" (RUR) state.
        ReplicaInfo replica = FsDatasetTestUtil.FetchReplicaInfo(fsdataset, bpid, b.GetBlockId
            ());
        NUnit.Framework.Assert.AreEqual(HdfsServerConstants.ReplicaState.Rur, replica.GetState
            ());
        // Check meta data before the update.
        FsDatasetImpl.CheckReplicaFiles(replica);
        {
            // Case "THIS IS NOT SUPPOSED TO HAPPEN": attempt an update where
            // (block length) != (stored replica's on-disk length) by creating
            // a block with the same id and gen stamp but a different length.
            ExtendedBlock tmp = new ExtendedBlock(b.GetBlockPoolId(), rri.GetBlockId(), rri.GetNumBytes
                () - 1, rri.GetGenerationStamp());
            try
            {
                // The update should fail on the length mismatch.
                fsdataset.UpdateReplicaUnderRecovery(tmp, recoveryid, tmp.GetBlockId(), newlength
                    );
                NUnit.Framework.Assert.Fail();
            }
            catch (IOException ioe)
            {
                // Expected failure path for the mismatched length.
                System.Console.Out.WriteLine("GOOD: getting " + ioe);
            }
        }
        // A well-formed update must succeed and return a non-null storage ID.
        string storageID = fsdataset.UpdateReplicaUnderRecovery(new ExtendedBlock(b.GetBlockPoolId
            (), rri), recoveryid, rri.GetBlockId(), newlength);
        NUnit.Framework.Assert.IsTrue(storageID != null);
    }
    finally
    {
        // Always tear the mini cluster down, even if an assertion failed.
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}