public virtual void TestBlockIdGeneration()
{
    Configuration conf = new HdfsConfiguration();
    conf.SetInt(DFSConfigKeys.DfsReplicationKey, 1);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
    try
    {
        cluster.WaitActive();
        FileSystem fs = cluster.GetFileSystem();
        // Create a file that is 10 blocks long.
        Path path = new Path("testBlockIdGeneration.dat");
        DFSTestUtil.CreateFile(fs, path, IoSize, BlockSize * 10, BlockSize, Replication, Seed);
        IList<LocatedBlock> blocks = DFSTestUtil.GetAllBlocks(fs, path);
        Log.Info("Block0 id is " + blocks[0].GetBlock().GetBlockId());
        long nextBlockExpectedId = blocks[0].GetBlock().GetBlockId() + 1;
        // Ensure that the block IDs are sequentially increasing.
        for (int i = 1; i < blocks.Count; ++i)
        {
            long nextBlockId = blocks[i].GetBlock().GetBlockId();
            Log.Info("Block" + i + " id is " + nextBlockId);
            Assert.AssertThat(nextBlockId, CoreMatchers.Is(nextBlockExpectedId));
            ++nextBlockExpectedId;
        }
    }
    finally
    {
        cluster.Shutdown();
    }
}
public virtual void TestReadURL()
{
    HttpURLConnection conn = Org.Mockito.Mockito.Mock<HttpURLConnection>();
    Org.Mockito.Mockito.DoReturn(new ByteArrayInputStream(FakeLogData)).When(conn).GetInputStream();
    Org.Mockito.Mockito.DoReturn(HttpURLConnection.HttpOk).When(conn).GetResponseCode();
    Org.Mockito.Mockito.DoReturn(Sharpen.Extensions.ToString(FakeLogData.Length)).When(conn).GetHeaderField("Content-Length");
    URLConnectionFactory factory = Org.Mockito.Mockito.Mock<URLConnectionFactory>();
    Org.Mockito.Mockito.DoReturn(conn).When(factory).OpenConnection(Org.Mockito.Mockito.Any<Uri>(), Matchers.AnyBoolean());
    Uri url = new Uri("http://localhost/fakeLog");
    EditLogInputStream elis = EditLogFileInputStream.FromUrl(factory, url, HdfsConstants.InvalidTxid, HdfsConstants.InvalidTxid, false);
    // Read the edit log and verify that we got all of the data.
    EnumMap<FSEditLogOpCodes, Holder<int>> counts = FSImageTestUtil.CountEditLogOpTypes(elis);
    Assert.AssertThat(counts[FSEditLogOpCodes.OpAdd].held, CoreMatchers.Is(1));
    Assert.AssertThat(counts[FSEditLogOpCodes.OpSetGenstampV1].held, CoreMatchers.Is(1));
    Assert.AssertThat(counts[FSEditLogOpCodes.OpClose].held, CoreMatchers.Is(1));
    // Check that the length header was picked up.
    NUnit.Framework.Assert.AreEqual(FakeLogData.Length, elis.Length());
    elis.Close();
}
/// <exception cref="System.IO.IOException"/>
private LocatedBlock GetLocatedBlock()
{
    LocatedBlocks locatedBlocks = client.GetLocatedBlocks(Path.ToString(), 0, BlockSize);
    Assert.AssertThat(locatedBlocks.GetLocatedBlocks().Count, CoreMatchers.Is(1));
    return Iterables.GetOnlyElement(locatedBlocks.GetLocatedBlocks());
}
/// <exception cref="System.Exception"/>
public virtual void TestDataDirParsing()
{
    Configuration conf = new Configuration();
    IList<StorageLocation> locations;
    FilePath dir0 = new FilePath("/dir0");
    FilePath dir1 = new FilePath("/dir1");
    FilePath dir2 = new FilePath("/dir2");
    FilePath dir3 = new FilePath("/dir3");
    FilePath dir4 = new FilePath("/dir4");
    // Verify that a valid string is correctly parsed, and that the storage
    // type is not case-sensitive.
    string locations1 = "[disk]/dir0,[DISK]/dir1,[sSd]/dir2,[disK]/dir3,[ram_disk]/dir4";
    conf.Set(DFSConfigKeys.DfsDatanodeDataDirKey, locations1);
    locations = DataNode.GetStorageLocations(conf);
    Assert.AssertThat(locations.Count, CoreMatchers.Is(5));
    Assert.AssertThat(locations[0].GetStorageType(), CoreMatchers.Is(StorageType.Disk));
    Assert.AssertThat(locations[0].GetUri(), CoreMatchers.Is(dir0.ToURI()));
    Assert.AssertThat(locations[1].GetStorageType(), CoreMatchers.Is(StorageType.Disk));
    Assert.AssertThat(locations[1].GetUri(), CoreMatchers.Is(dir1.ToURI()));
    Assert.AssertThat(locations[2].GetStorageType(), CoreMatchers.Is(StorageType.Ssd));
    Assert.AssertThat(locations[2].GetUri(), CoreMatchers.Is(dir2.ToURI()));
    Assert.AssertThat(locations[3].GetStorageType(), CoreMatchers.Is(StorageType.Disk));
    Assert.AssertThat(locations[3].GetUri(), CoreMatchers.Is(dir3.ToURI()));
    Assert.AssertThat(locations[4].GetStorageType(), CoreMatchers.Is(StorageType.RamDisk));
    Assert.AssertThat(locations[4].GetUri(), CoreMatchers.Is(dir4.ToURI()));
    // Verify that an unrecognized storage type results in an exception.
    string locations2 = "[BadMediaType]/dir0,[ssd]/dir1,[disk]/dir2";
    conf.Set(DFSConfigKeys.DfsDatanodeDataDirKey, locations2);
    try
    {
        locations = DataNode.GetStorageLocations(conf);
        NUnit.Framework.Assert.Fail();
    }
    catch (ArgumentException iae)
    {
        DataNode.Log.Info("The exception is expected.", iae);
    }
    // Assert that a string with no storage type specified is
    // correctly parsed and the default storage type is picked up.
    string locations3 = "/dir0,/dir1";
    conf.Set(DFSConfigKeys.DfsDatanodeDataDirKey, locations3);
    locations = DataNode.GetStorageLocations(conf);
    Assert.AssertThat(locations.Count, CoreMatchers.Is(2));
    Assert.AssertThat(locations[0].GetStorageType(), CoreMatchers.Is(StorageType.Disk));
    Assert.AssertThat(locations[0].GetUri(), CoreMatchers.Is(dir0.ToURI()));
    Assert.AssertThat(locations[1].GetStorageType(), CoreMatchers.Is(StorageType.Disk));
    Assert.AssertThat(locations[1].GetUri(), CoreMatchers.Is(dir1.ToURI()));
}
private void ValidateStorageState(StorageReport[] storageReports, DatanodeStorage.State state)
{
    foreach (StorageReport storageReport in storageReports)
    {
        DatanodeStorage storage = storageReport.GetStorage();
        Assert.AssertThat(storage.GetState(), CoreMatchers.Is(state));
    }
}
public virtual void TestReadOnlyReplicaCorrupt()
{
    // "Corrupt" a READ_ONLY_SHARED replica by reporting it as a bad replica.
    client.ReportBadBlocks(new LocatedBlock[] { new LocatedBlock(extendedBlock, new DatanodeInfo[] { readOnlyDataNode }) });
    // There should now be only 1 *location* for the block, as the READ_ONLY_SHARED replica is corrupt.
    WaitForLocations(1);
    // However, the corrupt READ_ONLY_SHARED replica should *not* affect the overall corrupt replicas count.
    NumberReplicas numberReplicas = blockManager.CountNodes(block);
    Assert.AssertThat(numberReplicas.CorruptReplicas(), CoreMatchers.Is(0));
}
/// <exception cref="System.IO.IOException"/>
private void ValidateNumberReplicas(int expectedReplicas)
{
    NumberReplicas numberReplicas = blockManager.CountNodes(block);
    Assert.AssertThat(numberReplicas.LiveReplicas(), CoreMatchers.Is(expectedReplicas));
    Assert.AssertThat(numberReplicas.ExcessReplicas(), CoreMatchers.Is(0));
    Assert.AssertThat(numberReplicas.CorruptReplicas(), CoreMatchers.Is(0));
    Assert.AssertThat(numberReplicas.DecommissionedReplicas(), CoreMatchers.Is(0));
    Assert.AssertThat(numberReplicas.ReplicasOnStaleNodes(), CoreMatchers.Is(0));
    BlockManagerTestUtil.UpdateState(blockManager);
    Assert.AssertThat(blockManager.GetUnderReplicatedBlocksCount(), CoreMatchers.Is(0L));
    Assert.AssertThat(blockManager.GetExcessBlocksCount(), CoreMatchers.Is(0L));
}
public virtual void Setup()
{
    conf = new HdfsConfiguration();
    SimulatedFSDataset.SetFactory(conf);
    Configuration[] overlays = new Configuration[NumDatanodes];
    for (int i = 0; i < overlays.Length; i++)
    {
        overlays[i] = new Configuration();
        if (i == RoNodeIndex)
        {
            overlays[i].SetEnum(SimulatedFSDataset.ConfigPropertyState, i == RoNodeIndex ? DatanodeStorage.State.ReadOnlyShared : DatanodeStorage.State.Normal);
        }
    }
    cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDatanodes).DataNodeConfOverlays(overlays).Build();
    fs = cluster.GetFileSystem();
    blockManager = cluster.GetNameNode().GetNamesystem().GetBlockManager();
    datanodeManager = blockManager.GetDatanodeManager();
    client = new DFSClient(new IPEndPoint("localhost", cluster.GetNameNodePort()), cluster.GetConfiguration(0));
    for (int i = 0; i < NumDatanodes; i++)
    {
        DataNode dataNode = cluster.GetDataNodes()[i];
        ValidateStorageState(BlockManagerTestUtil.GetStorageReportsForDatanode(datanodeManager.GetDatanode(dataNode.GetDatanodeId())), i == RoNodeIndex ? DatanodeStorage.State.ReadOnlyShared : DatanodeStorage.State.Normal);
    }
    // Create a single-block file.
    DFSTestUtil.CreateFile(fs, Path, BlockSize, BlockSize, BlockSize, (short)1, seed);
    LocatedBlock locatedBlock = GetLocatedBlock();
    extendedBlock = locatedBlock.GetBlock();
    block = extendedBlock.GetLocalBlock();
    Assert.AssertThat(locatedBlock.GetLocations().Length, CoreMatchers.Is(1));
    normalDataNode = locatedBlock.GetLocations()[0];
    readOnlyDataNode = datanodeManager.GetDatanode(cluster.GetDataNodes()[RoNodeIndex].GetDatanodeId());
    Assert.AssertThat(normalDataNode, CoreMatchers.Is(CoreMatchers.Not(readOnlyDataNode)));
    ValidateNumberReplicas(1);
    // Inject the block into the datanode with READ_ONLY_SHARED storage.
    cluster.InjectBlocks(0, RoNodeIndex, Collections.Singleton(block));
    // There should now be 2 *locations* for the block. We must wait until
    // the NameNode has processed the block report for the injected blocks.
    WaitForLocations(2);
}
public virtual void TestGenerationStampUpdate()
{
    // Set up a mock object and stub out a few routines that
    // retrieve the generation stamp counters.
    BlockIdManager bid = Org.Mockito.Mockito.Mock<BlockIdManager>();
    long nextGenerationStampV1 = 5000;
    long nextGenerationStampV2 = 20000;
    Org.Mockito.Mockito.When(bid.GetNextGenerationStampV1()).ThenReturn(nextGenerationStampV1);
    Org.Mockito.Mockito.When(bid.GetNextGenerationStampV2()).ThenReturn(nextGenerationStampV2);
    // Make sure that the generation stamp is set correctly for both
    // kinds of blocks.
    Org.Mockito.Mockito.When(bid.NextGenerationStamp(AnyBoolean())).ThenCallRealMethod();
    Assert.AssertThat(bid.NextGenerationStamp(true), CoreMatchers.Is(nextGenerationStampV1));
    Assert.AssertThat(bid.NextGenerationStamp(false), CoreMatchers.Is(nextGenerationStampV2));
}
/// <exception cref="System.Exception"/>
public virtual void TestDataDirValidation()
{
    // The stubbed disk checker rejects the first two directories and accepts the third.
    DataNode.DataNodeDiskChecker diskChecker = Org.Mockito.Mockito.Mock<DataNode.DataNodeDiskChecker>();
    Org.Mockito.Mockito.DoThrow(new IOException()).DoThrow(new IOException()).DoNothing().When(diskChecker).CheckDir(Any<LocalFileSystem>(), Any<Path>());
    LocalFileSystem fs = Org.Mockito.Mockito.Mock<LocalFileSystem>();
    AbstractList<StorageLocation> locations = new AList<StorageLocation>();
    locations.AddItem(StorageLocation.Parse("file:/p1/"));
    locations.AddItem(StorageLocation.Parse("file:/p2/"));
    locations.AddItem(StorageLocation.Parse("file:/p3/"));
    IList<StorageLocation> checkedLocations = DataNode.CheckStorageLocations(locations, fs, diskChecker);
    NUnit.Framework.Assert.AreEqual("number of valid data dirs", 1, checkedLocations.Count);
    string validDir = checkedLocations.GetEnumerator().Next().GetFile().GetPath();
    Assert.AssertThat("p3 should be valid", new FilePath("/p3/").GetPath(), CoreMatchers.Is(validDir));
}
public virtual void TestTriggerBlockIdCollision()
{
    Configuration conf = new HdfsConfiguration();
    conf.SetInt(DFSConfigKeys.DfsReplicationKey, 1);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
    try
    {
        cluster.WaitActive();
        FileSystem fs = cluster.GetFileSystem();
        FSNamesystem fsn = cluster.GetNamesystem();
        int blockCount = 10;
        // Create a file with a few blocks to rev up the global block ID counter.
        Path path1 = new Path("testBlockIdCollisionDetection_file1.dat");
        DFSTestUtil.CreateFile(fs, path1, IoSize, BlockSize * blockCount, BlockSize, Replication, Seed);
        IList<LocatedBlock> blocks1 = DFSTestUtil.GetAllBlocks(fs, path1);
        // Rewind the block ID counter in the name system object. This will result
        // in block ID collisions when we try to allocate new blocks.
        SequentialBlockIdGenerator blockIdGenerator = fsn.GetBlockIdManager().GetBlockIdGenerator();
        blockIdGenerator.SetCurrentValue(blockIdGenerator.GetCurrentValue() - 5);
        // Trigger collisions by creating a new file.
        Path path2 = new Path("testBlockIdCollisionDetection_file2.dat");
        DFSTestUtil.CreateFile(fs, path2, IoSize, BlockSize * blockCount, BlockSize, Replication, Seed);
        IList<LocatedBlock> blocks2 = DFSTestUtil.GetAllBlocks(fs, path2);
        Assert.AssertThat(blocks2.Count, CoreMatchers.Is(blockCount));
        // Make sure that the file2 block IDs start immediately after those of file1.
        Assert.AssertThat(blocks2[0].GetBlock().GetBlockId(), CoreMatchers.Is(blocks1[9].GetBlock().GetBlockId() + 1));
    }
    finally
    {
        cluster.Shutdown();
    }
}
public virtual void TestNormalReplicaOffline()
{
    // Stop the datanode hosting the NORMAL replica
    cluster.StopDataNode(normalDataNode.GetXferAddr());
    // Force NameNode to detect that the datanode is down
    BlockManagerTestUtil.NoticeDeadDatanode(cluster.GetNameNode(), normalDataNode.GetXferAddr());
    // The live replica count should now be zero (since the NORMAL replica is offline)
    NumberReplicas numberReplicas = blockManager.CountNodes(block);
    Assert.AssertThat(numberReplicas.LiveReplicas(), CoreMatchers.Is(0));
    // The block should be reported as under-replicated
    BlockManagerTestUtil.UpdateState(blockManager);
    Assert.AssertThat(blockManager.GetUnderReplicatedBlocksCount(), CoreMatchers.Is(1L));
    // The BlockManager should be able to heal the replication count back to 1
    // by triggering an inter-datanode replication from one of the READ_ONLY_SHARED replicas
    BlockManagerTestUtil.ComputeAllPendingWork(blockManager);
    DFSTestUtil.WaitForReplication(cluster, extendedBlock, 1, 1, 0);
    // There should now be 2 *locations* for the block, and 1 *replica*
    Assert.AssertThat(GetLocatedBlock().GetLocations().Length, CoreMatchers.Is(2));
    ValidateNumberReplicas(1);
}
public virtual void TestBlockTypeDetection()
{
    // Set up a mock object and stub out the legacy generation stamp limit.
    BlockIdManager bid = Org.Mockito.Mockito.Mock<BlockIdManager>();
    long maxGenStampForLegacyBlocks = 10000;
    Org.Mockito.Mockito.When(bid.GetGenerationStampV1Limit()).ThenReturn(maxGenStampForLegacyBlocks);
    Block legacyBlock = Org.Mockito.Mockito.Spy(new Block());
    Org.Mockito.Mockito.When(legacyBlock.GetGenerationStamp()).ThenReturn(maxGenStampForLegacyBlocks / 2);
    Block newBlock = Org.Mockito.Mockito.Spy(new Block());
    Org.Mockito.Mockito.When(newBlock.GetGenerationStamp()).ThenReturn(maxGenStampForLegacyBlocks + 1);
    // Make sure that IsLegacyBlock() can correctly detect
    // legacy and new blocks.
    Org.Mockito.Mockito.When(bid.IsLegacyBlock(Any<Block>())).ThenCallRealMethod();
    Assert.AssertThat(bid.IsLegacyBlock(legacyBlock), CoreMatchers.Is(true));
    Assert.AssertThat(bid.IsLegacyBlock(newBlock), CoreMatchers.Is(false));
}
/// <exception cref="System.IO.IOException"/>
/// <exception cref="System.Exception"/>
private void WaitForLocations(int locations)
{
    for (int tries = 0; tries < Retries;)
    {
        try
        {
            LocatedBlock locatedBlock = GetLocatedBlock();
            Assert.AssertThat(locatedBlock.GetLocations().Length, CoreMatchers.Is(locations));
            break;
        }
        catch (Exception)
        {
            // Retry with a short delay until the expected location count
            // appears or we run out of attempts.
            if (++tries < Retries)
            {
                Sharpen.Thread.Sleep(1000);
            }
            else
            {
                throw;
            }
        }
    }
}
internal virtual void Compare(DatanodeStorage dns1, DatanodeStorage dns2)
{
    Assert.AssertThat(dns2.GetStorageID(), CoreMatchers.Is(dns1.GetStorageID()));
    Assert.AssertThat(dns2.GetState(), CoreMatchers.Is(dns1.GetState()));
    Assert.AssertThat(dns2.GetStorageType(), CoreMatchers.Is(dns1.GetStorageType()));
}
/// <exception cref="System.IO.IOException"/>
public virtual void TestTaskCreateWithLimits()
{
    // Generate a unique job id. (Java's "%f" specifier is inert in .NET
    // composite formatting, so the equivalent "{0:F6}" is used instead.)
    string jobId = string.Format("{0:F6}", Math.Random());
    // Run a task without any options.
    string @out = Shell.ExecCommand(Shell.Winutils, "task", "create", "job" + jobId, "cmd /c echo job" + jobId);
    Assert.True(@out.Trim().Equals("job" + jobId));
    // Run a task without any limits.
    jobId = string.Format("{0:F6}", Math.Random());
    @out = Shell.ExecCommand(Shell.Winutils, "task", "create", "-c", "-1", "-m", "-1", "job" + jobId, "cmd /c echo job" + jobId);
    Assert.True(@out.Trim().Equals("job" + jobId));
    // Run a task with limits (128MB should be enough for a cmd).
    jobId = string.Format("{0:F6}", Math.Random());
    @out = Shell.ExecCommand(Shell.Winutils, "task", "create", "-c", "10000", "-m", "128", "job" + jobId, "cmd /c echo job" + jobId);
    Assert.True(@out.Trim().Equals("job" + jobId));
    // Run a task without enough memory.
    try
    {
        jobId = string.Format("{0:F6}", Math.Random());
        @out = Shell.ExecCommand(Shell.Winutils, "task", "create", "-m", "128", "job" + jobId, "java -Xmx256m -version");
        NUnit.Framework.Assert.Fail("Failed to get Shell.ExitCodeException with insufficient memory");
    }
    catch (Shell.ExitCodeException ece)
    {
        Assert.AssertThat(ece.GetExitCode(), CoreMatchers.Is(1));
    }
    // Run tasks with wrong parameters.
    try
    {
        jobId = string.Format("{0:F6}", Math.Random());
        Shell.ExecCommand(Shell.Winutils, "task", "create", "-c", "-1", "-m", "-1", "foo", "job" + jobId, "cmd /c echo job" + jobId);
        NUnit.Framework.Assert.Fail("Failed to get Shell.ExitCodeException with bad parameters");
    }
    catch (Shell.ExitCodeException ece)
    {
        Assert.AssertThat(ece.GetExitCode(), CoreMatchers.Is(1639));
    }
    try
    {
        jobId = string.Format("{0:F6}", Math.Random());
        Shell.ExecCommand(Shell.Winutils, "task", "create", "-c", "-m", "-1", "job" + jobId, "cmd /c echo job" + jobId);
        NUnit.Framework.Assert.Fail("Failed to get Shell.ExitCodeException with bad parameters");
    }
    catch (Shell.ExitCodeException ece)
    {
        Assert.AssertThat(ece.GetExitCode(), CoreMatchers.Is(1639));
    }
    try
    {
        jobId = string.Format("{0:F6}", Math.Random());
        Shell.ExecCommand(Shell.Winutils, "task", "create", "-c", "foo", "job" + jobId, "cmd /c echo job" + jobId);
        NUnit.Framework.Assert.Fail("Failed to get Shell.ExitCodeException with bad parameters");
    }
    catch (Shell.ExitCodeException ece)
    {
        Assert.AssertThat(ece.GetExitCode(), CoreMatchers.Is(1639));
    }
}
/// <exception cref="System.IO.IOException"/>
public virtual void TestReadLink()
{
    // Create TEST_DIR\dir1\file1.txt
    FilePath dir1 = new FilePath(TestDir, "dir1");
    Assert.True(dir1.Mkdirs());
    FilePath file1 = new FilePath(dir1, "file1.txt");
    Assert.True(file1.CreateNewFile());
    FilePath dirLink = new FilePath(TestDir, "dlink");
    FilePath fileLink = new FilePath(TestDir, "flink");
    // Next create a directory symlink to dir1 and a file
    // symlink to file1.txt.
    Shell.ExecCommand(Shell.Winutils, "symlink", dirLink.ToString(), dir1.ToString());
    Shell.ExecCommand(Shell.Winutils, "symlink", fileLink.ToString(), file1.ToString());
    // Read back the two links and ensure we get what we expected.
    string readLinkOutput = Shell.ExecCommand(Shell.Winutils, "readlink", dirLink.ToString());
    Assert.AssertThat(readLinkOutput, CoreMatchers.EqualTo(dir1.ToString()));
    readLinkOutput = Shell.ExecCommand(Shell.Winutils, "readlink", fileLink.ToString());
    Assert.AssertThat(readLinkOutput, CoreMatchers.EqualTo(file1.ToString()));
    // Try a few invalid inputs and verify we get an ExitCodeException for each.
    try
    {
        // No link name specified.
        Shell.ExecCommand(Shell.Winutils, "readlink", string.Empty);
        NUnit.Framework.Assert.Fail("Failed to get Shell.ExitCodeException when reading bad symlink");
    }
    catch (Shell.ExitCodeException ece)
    {
        Assert.AssertThat(ece.GetExitCode(), CoreMatchers.Is(1));
    }
    try
    {
        // Bad link name.
        Shell.ExecCommand(Shell.Winutils, "readlink", "ThereIsNoSuchLink");
        NUnit.Framework.Assert.Fail("Failed to get Shell.ExitCodeException when reading bad symlink");
    }
    catch (Shell.ExitCodeException ece)
    {
        Assert.AssertThat(ece.GetExitCode(), CoreMatchers.Is(1));
    }
    try
    {
        // Non-symlink directory target.
        Shell.ExecCommand(Shell.Winutils, "readlink", dir1.ToString());
        NUnit.Framework.Assert.Fail("Failed to get Shell.ExitCodeException when reading bad symlink");
    }
    catch (Shell.ExitCodeException ece)
    {
        Assert.AssertThat(ece.GetExitCode(), CoreMatchers.Is(1));
    }
    try
    {
        // Non-symlink file target.
        Shell.ExecCommand(Shell.Winutils, "readlink", file1.ToString());
        NUnit.Framework.Assert.Fail("Failed to get Shell.ExitCodeException when reading bad symlink");
    }
    catch (Shell.ExitCodeException ece)
    {
        Assert.AssertThat(ece.GetExitCode(), CoreMatchers.Is(1));
    }
    try
    {
        // Too many parameters.
        Shell.ExecCommand(Shell.Winutils, "readlink", "a", "b");
        NUnit.Framework.Assert.Fail("Failed to get Shell.ExitCodeException with bad parameters");
    }
    catch (Shell.ExitCodeException ece)
    {
        Assert.AssertThat(ece.GetExitCode(), CoreMatchers.Is(1));
    }
}
/// <exception cref="System.IO.IOException"/>
/// <exception cref="System.Exception"/>
public virtual void TestGetReconfigureStatus()
{
    ReconfigurationUtil ru = Org.Mockito.Mockito.Mock<ReconfigurationUtil>();
    datanode.SetReconfigurationUtil(ru);
    IList<ReconfigurationUtil.PropertyChange> changes = new AList<ReconfigurationUtil.PropertyChange>();
    FilePath newDir = new FilePath(cluster.GetDataDirectory(), "data_new");
    newDir.Mkdirs();
    changes.AddItem(new ReconfigurationUtil.PropertyChange(DFSConfigKeys.DfsDatanodeDataDirKey, newDir.ToString(), datanode.GetConf().Get(DFSConfigKeys.DfsDatanodeDataDirKey)));
    changes.AddItem(new ReconfigurationUtil.PropertyChange("randomKey", "new123", "old456"));
    Org.Mockito.Mockito.When(ru.ParseChangedProperties(Matchers.Any<Configuration>(), Matchers.Any<Configuration>())).ThenReturn(changes);
    int port = datanode.GetIpcPort();
    string address = "localhost:" + port;
    Assert.AssertThat(admin.StartReconfiguration("datanode", address), CoreMatchers.Is(0));
    // Wait up to 10 seconds (100 attempts x 100 ms) for the reconfiguration task to finish.
    IList<string> outputs = null;
    int count = 100;
    while (count > 0)
    {
        outputs = GetReconfigureStatus("datanode", address);
        if (!outputs.IsEmpty() && outputs[0].Contains("finished"))
        {
            break;
        }
        count--;
        Sharpen.Thread.Sleep(100);
    }
    NUnit.Framework.Assert.IsTrue(count > 0);
    Assert.AssertThat(outputs.Count, CoreMatchers.Is(8));
    // 3 (SUCCESS) + 4 (FAILED)
    IList<StorageLocation> locations = DataNode.GetStorageLocations(datanode.GetConf());
    Assert.AssertThat(locations.Count, CoreMatchers.Is(1));
    Assert.AssertThat(locations[0].GetFile(), CoreMatchers.Is(newDir));
    // Verify the directory is appropriately formatted.
    NUnit.Framework.Assert.IsTrue(new FilePath(newDir, Storage.StorageDirCurrent).IsDirectory());
    int successOffset = outputs[1].StartsWith("SUCCESS:") ? 1 : 5;
    int failedOffset = outputs[1].StartsWith("FAILED:") ? 1 : 4;
    Assert.AssertThat(outputs[successOffset], CoreMatchers.ContainsString("Change property " + DFSConfigKeys.DfsDatanodeDataDirKey));
    Assert.AssertThat(outputs[successOffset + 1], CoreMatchers.Is(CoreMatchers.AllOf(CoreMatchers.ContainsString("From:"), CoreMatchers.ContainsString("data1"), CoreMatchers.ContainsString("data2"))));
    Assert.AssertThat(outputs[successOffset + 2], CoreMatchers.Is(CoreMatchers.Not(CoreMatchers.AnyOf(CoreMatchers.ContainsString("data1"), CoreMatchers.ContainsString("data2")))));
    Assert.AssertThat(outputs[successOffset + 2], CoreMatchers.Is(CoreMatchers.AllOf(CoreMatchers.ContainsString("To"), CoreMatchers.ContainsString("data_new"))));
    Assert.AssertThat(outputs[failedOffset], CoreMatchers.ContainsString("Change property randomKey"));
    Assert.AssertThat(outputs[failedOffset + 1], CoreMatchers.ContainsString("From: \"old456\""));
    Assert.AssertThat(outputs[failedOffset + 2], CoreMatchers.ContainsString("To: \"new123\""));
}
/// <summary>
/// Test that we cannot read a file beyond its snapshot length
/// when accessing it via a snapshot path.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestSnapshotfileLength()
{
    hdfs.Mkdirs(sub);
    int bytesRead;
    byte[] buffer = new byte[Blocksize * 8];
    int origLen = Blocksize + 1;
    int toAppend = Blocksize;
    FSDataInputStream fis = null;
    FileStatus fileStatus = null;
    // Create and write a file.
    Path file1 = new Path(sub, file1Name);
    DFSTestUtil.CreateFile(hdfs, file1, Blocksize, 0, Blocksize, Replication, Seed);
    DFSTestUtil.AppendFile(hdfs, file1, origLen);
    // Create a snapshot on the parent directory.
    hdfs.AllowSnapshot(sub);
    hdfs.CreateSnapshot(sub, snapshot1);
    Path file1snap1 = SnapshotTestHelper.GetSnapshotPath(sub, snapshot1, file1Name);
    FileChecksum snapChksum1 = hdfs.GetFileChecksum(file1snap1);
    Assert.AssertThat("file and snapshot file checksums are not equal", hdfs.GetFileChecksum(file1), CoreMatchers.Is(snapChksum1));
    // Open the file for append.
    FSDataOutputStream @out = hdfs.Append(file1);
    // Nothing has been appended yet. All checksums should still be equal.
    Assert.AssertThat("file and snapshot checksums (open for append) are not equal", hdfs.GetFileChecksum(file1), CoreMatchers.Is(snapChksum1));
    Assert.AssertThat("snapshot checksum (post-open for append) has changed", hdfs.GetFileChecksum(file1snap1), CoreMatchers.Is(snapChksum1));
    try
    {
        AppendTestUtil.Write(@out, 0, toAppend);
        // Test reading from the snapshot of a file that is open for append.
        byte[] dataFromSnapshot = DFSTestUtil.ReadFileBuffer(hdfs, file1snap1);
        Assert.AssertThat("Wrong data size in snapshot.", dataFromSnapshot.Length, CoreMatchers.Is(origLen));
        // Verify that the checksums didn't change.
        Assert.AssertThat("snapshot file checksum (pre-close) has changed", hdfs.GetFileChecksum(file1), CoreMatchers.Is(snapChksum1));
        Assert.AssertThat("snapshot checksum (post-append) has changed", hdfs.GetFileChecksum(file1snap1), CoreMatchers.Is(snapChksum1));
    }
    finally
    {
        @out.Close();
    }
    Assert.AssertThat("file and snapshot file checksums (post-close) should not be equal", hdfs.GetFileChecksum(file1), CoreMatchers.Not(snapChksum1));
    Assert.AssertThat("snapshot file checksum (post-close) has changed", hdfs.GetFileChecksum(file1snap1), CoreMatchers.Is(snapChksum1));
    // Make sure we can read the entire file via its non-snapshot path.
    fileStatus = hdfs.GetFileStatus(file1);
    Assert.AssertThat(fileStatus.GetLen(), CoreMatchers.Is((long)origLen + toAppend));
    fis = hdfs.Open(file1);
    bytesRead = fis.Read(0, buffer, 0, buffer.Length);
    Assert.AssertThat(bytesRead, CoreMatchers.Is(origLen + toAppend));
    fis.Close();
    // Try to open the file via its snapshot path.
    fis = hdfs.Open(file1snap1);
    fileStatus = hdfs.GetFileStatus(file1snap1);
    Assert.AssertThat(fileStatus.GetLen(), CoreMatchers.Is((long)origLen));
    // Make sure we can only read up to the snapshot length.
    bytesRead = fis.Read(0, buffer, 0, buffer.Length);
    Assert.AssertThat(bytesRead, CoreMatchers.Is(origLen));
    fis.Close();
    byte[] dataFromSnapshotPostClose = DFSTestUtil.ReadFileBuffer(hdfs, file1snap1);
    Assert.AssertThat("Wrong data size in snapshot.", dataFromSnapshotPostClose.Length, CoreMatchers.Is(origLen));
}