public static void SetupCluster() {
  if (DomainSocket.GetLoadingFailureReason() != null) {
    return;
  }
  sockDir = new TemporarySocketDirectory();
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.Set(DFSConfigKeys.DfsDomainSocketPathKey, new FilePath(sockDir.GetDir(),
    "TestParallelShortCircuitReadUnCached._PORT.sock").GetAbsolutePath());
  conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitKey, true);
  // Enabling data transfer encryption should have no effect when using
  // short-circuit local reads. This is a regression test for HDFS-5353.
  conf.SetBoolean(DFSConfigKeys.DfsEncryptDataTransferKey, true);
  conf.SetBoolean(DFSConfigKeys.DfsBlockAccessTokenEnableKey, true);
  conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitSkipChecksumKey, false);
  conf.SetBoolean(DFSConfigKeys.DfsClientDomainSocketDataTraffic, true);
  // We want to test reading from stale sockets.
  conf.SetInt(DFSConfigKeys.DfsDatanodeSocketReuseKeepaliveKey, 1);
  conf.SetLong(DFSConfigKeys.DfsClientSocketCacheExpiryMsecKey, 5 * 60 * 1000);
  conf.SetInt(DFSConfigKeys.DfsClientSocketCacheCapacityKey, 32);
  // Avoid using the FileInputStreamCache.
  conf.SetInt(DFSConfigKeys.DfsClientReadShortcircuitStreamsCacheSizeKey, 0);
  DomainSocket.DisableBindPathValidation();
  DFSInputStream.tcpReadsDisabledForTesting = true;
  SetupCluster(1, conf);
}
public virtual void TestCrcCorruption() {
  //
  // default parameters
  //
  System.Console.Out.WriteLine("TestCrcCorruption with default parameters");
  Configuration conf1 = new HdfsConfiguration();
  conf1.SetInt(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 3 * 1000);
  DFSTestUtil util1 = new DFSTestUtil.Builder().SetName("TestCrcCorruption").SetNumFiles(40).Build();
  Thistest(conf1, util1);
  //
  // specific parameters
  //
  System.Console.Out.WriteLine("TestCrcCorruption with specific parameters");
  Configuration conf2 = new HdfsConfiguration();
  conf2.SetInt(DFSConfigKeys.DfsBytesPerChecksumKey, 17);
  conf2.SetInt(DFSConfigKeys.DfsBlockSizeKey, 34);
  DFSTestUtil util2 = new DFSTestUtil.Builder().SetName("TestCrcCorruption").SetNumFiles(40).SetMaxSize(400).Build();
  Thistest(conf2, util2);
}
/// <summary>
/// Test that when access time updates are not needed, the FSNamesystem
/// write lock is not taken by getBlockLocations.
/// </summary>
/// <remarks>
/// Test that when access time updates are not needed, the FSNamesystem
/// write lock is not taken by getBlockLocations.
/// Regression test for HDFS-3981.
/// </remarks>
/// <exception cref="System.IO.IOException"/>
public virtual void TestGetBlockLocationsOnlyUsesReadLock() {
  Configuration conf = new HdfsConfiguration();
  conf.SetInt(DFSConfigKeys.DfsNamenodeAccesstimePrecisionKey, 100 * 1000);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
  ReentrantReadWriteLock spyLock = NameNodeAdapter.SpyOnFsLock(cluster.GetNamesystem());
  try {
    // Create empty file in the FSN.
    Path p = new Path("/empty-file");
    DFSTestUtil.CreateFile(cluster.GetFileSystem(), p, 0, (short)1, 0L);
    // getBlockLocations() should not need the write lock, since we just created
    // the file (and thus its access time is already within the 100-second
    // accesstime precision configured above).
    MockitoUtil.DoThrowWhenCallStackMatches(new Exception("Should not need write lock"),
      ".*getBlockLocations.*").When(spyLock).WriteLock();
    cluster.GetFileSystem().GetFileBlockLocations(p, 0, 100);
  } finally {
    cluster.Shutdown();
  }
}
/// <exception cref="System.Exception"/>
public BlockReaderTestUtil(int replicationFactor, HdfsConfiguration config) {
  this.conf = config;
  conf.SetInt(DFSConfigKeys.DfsReplicationKey, replicationFactor);
  cluster = new MiniDFSCluster.Builder(conf).Format(true).Build();
  cluster.WaitActive();
}
public virtual void BlockLengthHintIsPropagated() {
  string MethodName = GenericTestUtils.GetMethodName();
  Path path = new Path("/" + MethodName + ".dat");
  Configuration conf = new HdfsConfiguration();
  TestWriteBlockGetsBlockLengthHint.FsDatasetChecker.SetFactory(conf);
  conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, DefaultBlockLength);
  conf.SetInt(DFSConfigKeys.DfsDatanodeScanPeriodHoursKey, -1);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
  try {
    cluster.WaitActive();
    // FsDatasetChecker#createRbw asserts during block creation if the test fails.
    DFSTestUtil.CreateFile(cluster.GetFileSystem(), path,
      4096,  // Buffer size.
      ExpectedBlockLength, ExpectedBlockLength, (short)1,
      unchecked((int)(0x1BAD5EED)));
  } finally {
    cluster.Shutdown();
  }
}
public virtual void TestDisableCache() {
  HdfsConfiguration confWithoutCache = new HdfsConfiguration();
  // Configure a new instance with no peer caching, ensure that it doesn't cache anything
  confWithoutCache.SetInt(DFSConfigKeys.DfsClientSocketCacheCapacityKey, 0);
  BlockReaderTestUtil util = new BlockReaderTestUtil(1, confWithoutCache);
  Path testFile = new Path("/testConnCache.dat");
  util.WriteFile(testFile, FileSize / 1024);
  FileSystem fsWithoutCache = FileSystem.NewInstance(util.GetConf());
  try {
    DFSTestUtil.ReadFile(fsWithoutCache, testFile);
    NUnit.Framework.Assert.AreEqual(0,
      ((DistributedFileSystem)fsWithoutCache).dfs.GetClientContext().GetPeerCache().Size());
  } finally {
    fsWithoutCache.Close();
    util.Shutdown();
  }
}
/// <exception cref="System.Exception"/>
public virtual void TestWriteConf() {
  Configuration conf = new HdfsConfiguration();
  conf.SetInt(DFSConfigKeys.DfsBlockSizeKey, 4096);
  System.Console.Out.WriteLine("Setting conf in: " + Runtime.IdentityHashCode(conf));
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
  FileSystem fs = null;
  OutputStream os = null;
  try {
    fs = cluster.GetFileSystem();
    Path filePath = new Path("/testWriteConf.xml");
    os = fs.Create(filePath);
    StringBuilder longString = new StringBuilder();
    for (int i = 0; i < 100000; i++) {
      longString.Append("hello");
    }
    // 500KB
    conf.Set("foobar", longString.ToString());
    conf.WriteXml(os);
    os.Close();
    os = null;
    fs.Close();
    fs = null;
  } finally {
    IOUtils.Cleanup(null, os, fs);
    cluster.Shutdown();
  }
}
/// <exception cref="System.Exception"/>
private void DoTestFSOutputSummer(string checksumType) {
  Configuration conf = new HdfsConfiguration();
  conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, BlockSize);
  conf.SetInt(DFSConfigKeys.DfsBytesPerChecksumKey, BytesPerChecksum);
  conf.Set(DFSConfigKeys.DfsChecksumTypeKey, checksumType);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumOfDatanodes).Build();
  fileSys = cluster.GetFileSystem();
  try {
    Path file = new Path("try.dat");
    Random rand = new Random(seed);
    rand.NextBytes(expected);
    WriteFile1(file);
    WriteFile2(file);
    WriteFile3(file);
  } finally {
    fileSys.Close();
    cluster.Shutdown();
  }
}
public virtual void TestRestartDfs() {
  Configuration conf = new HdfsConfiguration();
  // Turn off persistent IPC, so that the DFSClient can survive NN restart
  conf.SetInt(CommonConfigurationKeysPublic.IpcClientConnectionMaxidletimeKey, 0);
  MiniDFSCluster cluster = null;
  long len = 0;
  FSDataOutputStream stream;
  try {
    cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
    FileSystem fs = cluster.GetFileSystem();
    // Creating a file with 4096 blockSize to write multiple blocks
    stream = fs.Create(FilePath, true, BlockSize, (short)1, BlockSize);
    stream.Write(DataBeforeRestart);
    stream.Hflush();
    // Wait for at least a few blocks to get through
    while (len <= BlockSize) {
      FileStatus status = fs.GetFileStatus(FilePath);
      len = status.GetLen();
      Sharpen.Thread.Sleep(100);
    }
    // explicitly do NOT close the file.
    cluster.RestartNameNode();
    // Check that the file has no less bytes than before the restart.
    // This would mean that blocks were successfully persisted to the log.
    FileStatus status_1 = fs.GetFileStatus(FilePath);
    NUnit.Framework.Assert.IsTrue("Length too short: " + status_1.GetLen(), status_1.GetLen() >= len);
    // And keep writing (ensures that leases are also persisted correctly)
    stream.Write(DataAfterRestart);
    stream.Close();
    // Verify that the data showed up, both from before and after the restart.
    FSDataInputStream readStream = fs.Open(FilePath);
    try {
      byte[] verifyBuf = new byte[DataBeforeRestart.Length];
      IOUtils.ReadFully(readStream, verifyBuf, 0, verifyBuf.Length);
      Assert.AssertArrayEquals(DataBeforeRestart, verifyBuf);
      IOUtils.ReadFully(readStream, verifyBuf, 0, verifyBuf.Length);
      Assert.AssertArrayEquals(DataAfterRestart, verifyBuf);
    } finally {
      IOUtils.CloseStream(readStream);
    }
  } finally {
    if (cluster != null) {
      cluster.Shutdown();
    }
  }
}
public static void SetupCluster() {
  Configuration conf = new HdfsConfiguration();
  conf.SetInt(DFSConfigKeys.DfsBlockSizeKey, 4096);
  conf.Set("fs.hdfs.impl.disable.cache", "true");
  cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
  fs = cluster.GetFileSystem();
}
public virtual void TestLeaseExpireEmptyFiles() {
  Sharpen.Thread.UncaughtExceptionHandler oldUEH = Sharpen.Thread.GetDefaultUncaughtExceptionHandler();
  Sharpen.Thread.SetDefaultUncaughtExceptionHandler(new _UncaughtExceptionHandler_43(this));
  System.Console.Out.WriteLine("testLeaseExpireEmptyFiles start");
  long leasePeriod = 1000;
  int DatanodeNum = 3;
  Configuration conf = new HdfsConfiguration();
  conf.SetInt(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 1000);
  conf.SetInt(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
  // create cluster
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(DatanodeNum).Build();
  try {
    cluster.WaitActive();
    DistributedFileSystem dfs = cluster.GetFileSystem();
    // create a new file.
    TestFileCreation.CreateFile(dfs, new Path("/foo"), DatanodeNum);
    TestFileCreation.CreateFile(dfs, new Path("/foo2"), DatanodeNum);
    TestFileCreation.CreateFile(dfs, new Path("/foo3"), DatanodeNum);
    // set the soft and hard limit to be 1 second so that the
    // namenode triggers lease recovery
    cluster.SetLeasePeriod(leasePeriod, leasePeriod);
    // wait for the lease to expire
    try {
      Sharpen.Thread.Sleep(5 * leasePeriod);
    } catch (Exception) {
    }
    NUnit.Framework.Assert.IsFalse(isConcurrentModificationException);
  } finally {
    Sharpen.Thread.SetDefaultUncaughtExceptionHandler(oldUEH);
    cluster.Shutdown();
  }
}
public virtual void HSyncEndBlock_00() {
  int preferredBlockSize = 1024;
  Configuration conf = new HdfsConfiguration();
  conf.SetInt(DFSConfigKeys.DfsBlockSizeKey, preferredBlockSize);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();
  DistributedFileSystem fileSystem = cluster.GetFileSystem();
  FSDataOutputStream stm = null;
  try {
    Path path = new Path("/" + fName);
    stm = fileSystem.Create(path, true, 4096, (short)2, AppendTestUtil.BlockSize);
    System.Console.Out.WriteLine("Created file " + path.ToString());
    ((DFSOutputStream)stm.GetWrappedStream()).Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag.EndBlock));
    long currentFileLength = fileSystem.GetFileStatus(path).GetLen();
    NUnit.Framework.Assert.AreEqual(0L, currentFileLength);
    LocatedBlocks blocks = fileSystem.dfs.GetLocatedBlocks(path.ToString(), 0);
    NUnit.Framework.Assert.AreEqual(0, blocks.GetLocatedBlocks().Count);
    // write a block and call hsync(end_block) at the block boundary
    stm.Write(new byte[preferredBlockSize]);
    ((DFSOutputStream)stm.GetWrappedStream()).Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag.EndBlock));
    currentFileLength = fileSystem.GetFileStatus(path).GetLen();
    NUnit.Framework.Assert.AreEqual(preferredBlockSize, currentFileLength);
    blocks = fileSystem.dfs.GetLocatedBlocks(path.ToString(), 0);
    NUnit.Framework.Assert.AreEqual(1, blocks.GetLocatedBlocks().Count);
    // call hsync then call hsync(end_block) immediately
    stm.Write(new byte[preferredBlockSize / 2]);
    stm.Hsync();
    ((DFSOutputStream)stm.GetWrappedStream()).Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag.EndBlock));
    currentFileLength = fileSystem.GetFileStatus(path).GetLen();
    NUnit.Framework.Assert.AreEqual(preferredBlockSize + preferredBlockSize / 2, currentFileLength);
    blocks = fileSystem.dfs.GetLocatedBlocks(path.ToString(), 0);
    NUnit.Framework.Assert.AreEqual(2, blocks.GetLocatedBlocks().Count);
    stm.Write(new byte[preferredBlockSize / 4]);
    stm.Hsync();
    currentFileLength = fileSystem.GetFileStatus(path).GetLen();
    NUnit.Framework.Assert.AreEqual(preferredBlockSize + preferredBlockSize / 2 + preferredBlockSize / 4,
      currentFileLength);
    blocks = fileSystem.dfs.GetLocatedBlocks(path.ToString(), 0);
    NUnit.Framework.Assert.AreEqual(3, blocks.GetLocatedBlocks().Count);
  } finally {
    IOUtils.Cleanup(null, stm, fileSystem);
    if (cluster != null) {
      cluster.Shutdown();
    }
  }
}
public virtual void TestDeprecatedKeys() {
  Configuration conf = new HdfsConfiguration();
  conf.Set("topology.script.file.name", "xyz");
  string scriptFile = conf.Get(DFSConfigKeys.NetTopologyScriptFileNameKey);
  NUnit.Framework.Assert.IsTrue(scriptFile.Equals("xyz"));
  conf.SetInt("dfs.replication.interval", 1);
  string alpha = DFSConfigKeys.DfsNamenodeReplicationIntervalKey;
  int repInterval = conf.GetInt(DFSConfigKeys.DfsNamenodeReplicationIntervalKey, 3);
  NUnit.Framework.Assert.IsTrue(repInterval == 1);
}
public static void SetUp() {
  ClearBaseDir();
  Configuration conf = new HdfsConfiguration();
  conf.SetInt(DFSConfigKeys.DfsDatanodeHttpsPortKey, 0);
  conf.Set(DFSConfigKeys.DfsDatanodeAddressKey, "localhost:0");
  conf.Set(DFSConfigKeys.DfsDatanodeIpcAddressKey, "localhost:0");
  conf.Set(DFSConfigKeys.DfsDatanodeHttpAddressKey, "localhost:0");
  cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
  cluster.WaitActive();
}
public virtual void HSyncEndBlock_02() {
  Configuration conf = new HdfsConfiguration();
  int customPerChecksumSize = 512;
  int customBlockSize = customPerChecksumSize * 3;
  // Modify default filesystem settings
  conf.SetInt(DFSConfigKeys.DfsBytesPerChecksumKey, customPerChecksumSize);
  conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, customBlockSize);
  DoTheJob(conf, fName, customBlockSize, (short)2, true,
    EnumSet.Of(HdfsDataOutputStream.SyncFlag.EndBlock));
}
public virtual void HFlush_03() {
  Configuration conf = new HdfsConfiguration();
  int customPerChecksumSize = 400;
  int customBlockSize = customPerChecksumSize * 3;
  // Modify default filesystem settings
  conf.SetInt(DFSConfigKeys.DfsBytesPerChecksumKey, customPerChecksumSize);
  conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, customBlockSize);
  DoTheJob(conf, fName, customBlockSize, (short)2, false,
    EnumSet.NoneOf<HdfsDataOutputStream.SyncFlag>());
}
public virtual void TestRestartWithPartialBlockHflushed() {
  Configuration conf = new HdfsConfiguration();
  // Turn off persistent IPC, so that the DFSClient can survive NN restart
  conf.SetInt(CommonConfigurationKeysPublic.IpcClientConnectionMaxidletimeKey, 0);
  MiniDFSCluster cluster = null;
  FSDataOutputStream stream;
  try {
    cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
    FileSystem fs = cluster.GetFileSystem();
    // NameNode address lookup retained from the original test; the value is not used.
    int nnPort = NameNode.GetAddress(conf).Port;
    // Creating a file with 4096 blockSize to write multiple blocks
    stream = fs.Create(FilePath, true, BlockSize, (short)1, BlockSize);
    stream.Write(DataBeforeRestart);
    stream.Write(unchecked((byte)1));
    stream.Hflush();
    // explicitly do NOT close the file before restarting the NN.
    cluster.RestartNameNode();
    // this will fail if the final block of the file is prematurely COMPLETEd
    stream.Write(unchecked((byte)2));
    stream.Hflush();
    stream.Close();
    NUnit.Framework.Assert.AreEqual(DataBeforeRestart.Length + 2, fs.GetFileStatus(FilePath).GetLen());
    FSDataInputStream readStream = fs.Open(FilePath);
    try {
      byte[] verifyBuf = new byte[DataBeforeRestart.Length + 2];
      IOUtils.ReadFully(readStream, verifyBuf, 0, verifyBuf.Length);
      byte[] expectedBuf = new byte[DataBeforeRestart.Length + 2];
      System.Array.Copy(DataBeforeRestart, 0, expectedBuf, 0, DataBeforeRestart.Length);
      System.Array.Copy(new byte[] { 1, 2 }, 0, expectedBuf, DataBeforeRestart.Length, 2);
      Assert.AssertArrayEquals(expectedBuf, verifyBuf);
    } finally {
      IOUtils.CloseStream(readStream);
    }
  } finally {
    if (cluster != null) {
      cluster.Shutdown();
    }
  }
}
public virtual void TestAppend2AfterSoftLimit() {
  Configuration conf = new HdfsConfiguration();
  conf.SetInt(DFSConfigKeys.DfsReplicationKey, 1);
  // Set small soft-limit for lease
  long softLimit = 1L;
  long hardLimit = 9999999L;
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
  cluster.SetLeasePeriod(softLimit, hardLimit);
  cluster.WaitActive();
  DistributedFileSystem fs = cluster.GetFileSystem();
  DistributedFileSystem fs2 = new DistributedFileSystem();
  fs2.Initialize(fs.GetUri(), conf);
  Path testPath = new Path("/testAppendAfterSoftLimit");
  byte[] fileContents = AppendTestUtil.InitBuffer(32);
  // create a new file without closing
  FSDataOutputStream @out = fs.Create(testPath);
  @out.Write(fileContents);
  // Wait for > soft-limit
  Sharpen.Thread.Sleep(250);
  try {
    FSDataOutputStream appendStream2 = fs2.Append(testPath,
      EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock), 4096, null);
    appendStream2.Write(fileContents);
    appendStream2.Close();
    NUnit.Framework.Assert.AreEqual(fileContents.Length, fs.GetFileStatus(testPath).GetLen());
    // make sure we now have 1 block since the first writer was revoked
    LocatedBlocks blks = fs.GetClient().GetLocatedBlocks(testPath.ToString(), 0L);
    NUnit.Framework.Assert.AreEqual(1, blks.GetLocatedBlocks().Count);
    foreach (LocatedBlock blk in blks.GetLocatedBlocks()) {
      NUnit.Framework.Assert.AreEqual(fileContents.Length, blk.GetBlockSize());
    }
  } finally {
    fs.Close();
    fs2.Close();
    cluster.Shutdown();
  }
}
/// <summary>
/// Tests the file length when we sync the file and restart the cluster while
/// the DataNodes have not yet reported to the NameNode.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestFileLengthWithHSyncAndClusterRestartWithOutDNsRegister() {
  Configuration conf = new HdfsConfiguration();
  // create cluster
  conf.SetInt(DFSConfigKeys.DfsBlockSizeKey, 512);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();
  HdfsDataInputStream @in = null;
  try {
    Path path = new Path("/tmp/TestFileLengthOnClusterRestart", "test");
    DistributedFileSystem dfs = cluster.GetFileSystem();
    FSDataOutputStream @out = dfs.Create(path);
    int fileLength = 1030;
    @out.Write(new byte[fileLength]);
    @out.Hsync();
    cluster.RestartNameNode();
    cluster.WaitActive();
    @in = (HdfsDataInputStream)dfs.Open(path, 1024);
    // Verify the length when we just restart NN. DNs will register immediately.
    NUnit.Framework.Assert.AreEqual(fileLength, @in.GetVisibleLength());
    cluster.ShutdownDataNodes();
    cluster.RestartNameNode(false);
    // This is just for ensuring the NN started.
    VerifyNNIsInSafeMode(dfs);
    try {
      @in = (HdfsDataInputStream)dfs.Open(path);
      NUnit.Framework.Assert.Fail("Expected IOException");
    } catch (IOException e) {
      NUnit.Framework.Assert.IsTrue(e.GetLocalizedMessage().IndexOf("Name node is in safe mode") >= 0);
    }
  } finally {
    if (null != @in) {
      @in.Close();
    }
    cluster.Shutdown();
  }
}
public virtual void TestPipelineHeartbeat() {
  int DatanodeNum = 2;
  int fileLen = 6;
  Configuration conf = new HdfsConfiguration();
  int timeout = 2000;
  conf.SetInt(DFSConfigKeys.DfsClientSocketTimeoutKey, timeout);
  Path p = new Path("/pipelineHeartbeat/foo");
  System.Console.Out.WriteLine("p=" + p);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(DatanodeNum).Build();
  try {
    DistributedFileSystem fs = cluster.GetFileSystem();
    byte[] fileContents = AppendTestUtil.InitBuffer(fileLen);
    // create a new file.
    FSDataOutputStream stm = AppendTestUtil.CreateFile(fs, p, DatanodeNum);
    stm.Write(fileContents, 0, 1);
    Sharpen.Thread.Sleep(timeout);
    stm.Hflush();
    System.Console.Out.WriteLine("Wrote 1 byte and hflush " + p);
    // write another byte
    Sharpen.Thread.Sleep(timeout);
    stm.Write(fileContents, 1, 1);
    stm.Hflush();
    stm.Write(fileContents, 2, 1);
    Sharpen.Thread.Sleep(timeout);
    stm.Hflush();
    stm.Write(fileContents, 3, 1);
    Sharpen.Thread.Sleep(timeout);
    stm.Write(fileContents, 4, 1);
    stm.Hflush();
    stm.Write(fileContents, 5, 1);
    Sharpen.Thread.Sleep(timeout);
    stm.Close();
    // verify that entire file is good
    AppendTestUtil.CheckFullFile(fs, p, fileLen, fileContents, "Failed to slowly write to a file");
  } finally {
    cluster.Shutdown();
  }
}
public virtual void TestRestartWithAppend() {
  Configuration conf = new HdfsConfiguration();
  // Turn off persistent IPC, so that the DFSClient can survive NN restart
  conf.SetInt(CommonConfigurationKeysPublic.IpcClientConnectionMaxidletimeKey, 0);
  MiniDFSCluster cluster = null;
  FSDataOutputStream stream;
  try {
    cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
    FileSystem fs = cluster.GetFileSystem();
    // NameNode address lookup retained from the original test; the value is not used.
    int nnPort = NameNode.GetAddress(conf).Port;
    // Creating a file with 4096 blockSize to write multiple blocks
    stream = fs.Create(FilePath, true, BlockSize, (short)1, BlockSize);
    stream.Write(DataBeforeRestart, 0, DataBeforeRestart.Length / 2);
    stream.Close();
    stream = fs.Append(FilePath, BlockSize);
    stream.Write(DataBeforeRestart, DataBeforeRestart.Length / 2, DataBeforeRestart.Length / 2);
    stream.Close();
    NUnit.Framework.Assert.AreEqual(DataBeforeRestart.Length, fs.GetFileStatus(FilePath).GetLen());
    cluster.RestartNameNode();
    NUnit.Framework.Assert.AreEqual(DataBeforeRestart.Length, fs.GetFileStatus(FilePath).GetLen());
    FSDataInputStream readStream = fs.Open(FilePath);
    try {
      byte[] verifyBuf = new byte[DataBeforeRestart.Length];
      IOUtils.ReadFully(readStream, verifyBuf, 0, verifyBuf.Length);
      Assert.AssertArrayEquals(DataBeforeRestart, verifyBuf);
    } finally {
      IOUtils.CloseStream(readStream);
    }
  } finally {
    if (cluster != null) {
      cluster.Shutdown();
    }
  }
}
public virtual void TestPipelineRecoveryForLastBlock() {
  DFSClientFaultInjector faultInjector = Org.Mockito.Mockito.Mock<DFSClientFaultInjector>();
  DFSClientFaultInjector oldInjector = DFSClientFaultInjector.instance;
  DFSClientFaultInjector.instance = faultInjector;
  Configuration conf = new HdfsConfiguration();
  conf.SetInt(DFSConfigKeys.DfsClientBlockWriteLocatefollowingblockRetriesKey, 3);
  MiniDFSCluster cluster = null;
  try {
    int numDataNodes = 3;
    cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDataNodes).Build();
    cluster.WaitActive();
    FileSystem fileSys = cluster.GetFileSystem();
    Path file = new Path("dataprotocol1.dat");
    Org.Mockito.Mockito.When(faultInjector.FailPacket()).ThenReturn(true);
    DFSTestUtil.CreateFile(fileSys, file, 68000000L, (short)numDataNodes, 0L);
    // At this point, NN should have accepted only valid replicas.
    // Read should succeed.
    FSDataInputStream @in = fileSys.Open(file);
    try {
      int c = @in.Read();
    } catch (BlockMissingException) {
      // Test will fail with BlockMissingException if NN does not update the
      // replica state based on the latest report.
      NUnit.Framework.Assert.Fail("Block is missing because the file was closed with corrupt replicas.");
    }
  } finally {
    DFSClientFaultInjector.instance = oldInjector;
    if (cluster != null) {
      cluster.Shutdown();
    }
  }
}
public virtual void TestAppendAfterSoftLimit() {
  Configuration conf = new HdfsConfiguration();
  conf.SetInt(DFSConfigKeys.DfsReplicationKey, 1);
  conf.SetBoolean(DFSConfigKeys.DfsSupportAppendKey, true);
  // Set small soft-limit for lease
  long softLimit = 1L;
  long hardLimit = 9999999L;
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
  cluster.SetLeasePeriod(softLimit, hardLimit);
  cluster.WaitActive();
  FileSystem fs = cluster.GetFileSystem();
  FileSystem fs2 = new DistributedFileSystem();
  fs2.Initialize(fs.GetUri(), conf);
  Path testPath = new Path("/testAppendAfterSoftLimit");
  byte[] fileContents = AppendTestUtil.InitBuffer(32);
  // create a new file without closing
  FSDataOutputStream @out = fs.Create(testPath);
  @out.Write(fileContents);
  // Wait for > soft-limit
  Sharpen.Thread.Sleep(250);
  try {
    FSDataOutputStream appendStream2 = fs2.Append(testPath);
    appendStream2.Write(fileContents);
    appendStream2.Close();
    NUnit.Framework.Assert.AreEqual(fileContents.Length, fs.GetFileStatus(testPath).GetLen());
  } finally {
    fs.Close();
    fs2.Close();
    cluster.Shutdown();
  }
}
public virtual void TestFSInputChecker() {
  Configuration conf = new HdfsConfiguration();
  conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, BlockSize);
  conf.SetInt(DFSConfigKeys.DfsBytesPerChecksumKey, BytesPerSum);
  rand.NextBytes(expected);
  // test DFS
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
  FileSystem fileSys = cluster.GetFileSystem();
  try {
    TestChecker(fileSys, true);
    TestChecker(fileSys, false);
    TestSeekAndRead(fileSys);
  } finally {
    fileSys.Close();
    cluster.Shutdown();
  }
  // test Local FS
  fileSys = FileSystem.GetLocal(conf);
  try {
    TestChecker(fileSys, true);
    TestChecker(fileSys, false);
    TestFileCorruption((LocalFileSystem)fileSys);
    TestSeekAndRead(fileSys);
  } finally {
    fileSys.Close();
  }
}
/// <exception cref="System.IO.IOException"/>
private static HdfsConfiguration GetConfiguration(TemporarySocketDirectory socketDir) {
  HdfsConfiguration conf = new HdfsConfiguration();
  if (socketDir == null) {
    conf.Set(DFSConfigKeys.DfsDomainSocketPathKey, string.Empty);
  } else {
    conf.Set(DFSConfigKeys.DfsDomainSocketPathKey, new FilePath(socketDir.GetDir(),
      "TestBlockReaderLocalLegacy.%d.sock").GetAbsolutePath());
  }
  conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitKey, true);
  conf.SetBoolean(DFSConfigKeys.DfsClientUseLegacyBlockreaderlocal, true);
  conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitSkipChecksumKey, false);
  conf.Set(DFSConfigKeys.DfsBlockLocalPathAccessUserKey,
    UserGroupInformation.GetCurrentUser().GetShortUserName());
  conf.SetBoolean(DFSConfigKeys.DfsClientDomainSocketDataTraffic, false);
  // Set short retry timeouts so this test runs faster
  conf.SetInt(DFSConfigKeys.DfsClientRetryWindowBase, 10);
  return conf;
}
public virtual void TestInjection() {
  MiniDFSCluster cluster = null;
  string testFile = "/replication-test-file";
  Path testPath = new Path(testFile);
  byte[] buffer = new byte[1024];
  for (int i = 0; i < buffer.Length; i++) {
    buffer[i] = (byte)('1');
  }
  try {
    Configuration conf = new HdfsConfiguration();
    conf.Set(DFSConfigKeys.DfsReplicationKey, Sharpen.Extensions.ToString(numDataNodes));
    conf.SetInt(DFSConfigKeys.DfsBytesPerChecksumKey, checksumSize);
    SimulatedFSDataset.SetFactory(conf);
    // first time format
    cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDataNodes).Build();
    cluster.WaitActive();
    string bpid = cluster.GetNamesystem().GetBlockPoolId();
    DFSClient dfsClient = new DFSClient(new IPEndPoint("localhost", cluster.GetNameNodePort()), conf);
    WriteFile(cluster.GetFileSystem(), testPath, numDataNodes);
    WaitForBlockReplication(testFile, dfsClient.GetNamenode(), numDataNodes, 20);
    IList<IDictionary<DatanodeStorage, BlockListAsLongs>> blocksList = cluster.GetAllBlockReports(bpid);
    cluster.Shutdown();
    cluster = null;
    /* Start the MiniDFSCluster with more datanodes since once a writeBlock
     * to a datanode node fails, same block can not be written to it
     * immediately. In our case some replication attempts will fail.
     */
    Log.Info("Restarting minicluster");
    conf = new HdfsConfiguration();
    SimulatedFSDataset.SetFactory(conf);
    conf.Set(DFSConfigKeys.DfsNamenodeSafemodeThresholdPctKey, "0.0f");
    cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDataNodes * 2).Format(false).Build();
    cluster.WaitActive();
    ICollection<Block> uniqueBlocks = new HashSet<Block>();
    foreach (IDictionary<DatanodeStorage, BlockListAsLongs> map in blocksList) {
      foreach (BlockListAsLongs blockList in map.Values) {
        foreach (Block b in blockList) {
          uniqueBlocks.AddItem(new Block(b));
        }
      }
    }
    // Insert all the blocks in the first data node
    Log.Info("Inserting " + uniqueBlocks.Count + " blocks");
    cluster.InjectBlocks(0, uniqueBlocks, null);
    dfsClient = new DFSClient(new IPEndPoint("localhost", cluster.GetNameNodePort()), conf);
    WaitForBlockReplication(testFile, dfsClient.GetNamenode(), numDataNodes, -1);
  } finally {
    if (cluster != null) {
      cluster.Shutdown();
    }
  }
}
public virtual void TestSimpleAppend() {
  Configuration conf = new HdfsConfiguration();
  conf.SetInt(DFSConfigKeys.DfsDatanodeHandlerCountKey, 50);
  fileContents = AppendTestUtil.InitBuffer(AppendTestUtil.FileSize);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
  FileSystem fs = cluster.GetFileSystem();
  try {
    {
      // test appending to a file.
      // create a new file.
      Path file1 = new Path("/simpleAppend.dat");
      FSDataOutputStream stm = AppendTestUtil.CreateFile(fs, file1, 1);
      System.Console.Out.WriteLine("Created file simpleAppend.dat");
      // write to file
      int mid = 186;  // io.bytes.per.checksum bytes
      System.Console.Out.WriteLine("Writing " + mid + " bytes to file " + file1);
      stm.Write(fileContents, 0, mid);
      stm.Close();
      System.Console.Out.WriteLine("Wrote and Closed first part of file.");
      // write to file
      int mid2 = 607;  // io.bytes.per.checksum bytes
      System.Console.Out.WriteLine("Writing " + mid + " bytes to file " + file1);
      stm = fs.Append(file1);
      stm.Write(fileContents, mid, mid2 - mid);
      stm.Close();
      System.Console.Out.WriteLine("Wrote and Closed second part of file.");
      // write the remainder of the file
      stm = fs.Append(file1);
      // ensure getPos is set to reflect existing size of the file
      NUnit.Framework.Assert.IsTrue(stm.GetPos() > 0);
      System.Console.Out.WriteLine("Writing " + (AppendTestUtil.FileSize - mid2) + " bytes to file " + file1);
      stm.Write(fileContents, mid2, AppendTestUtil.FileSize - mid2);
      System.Console.Out.WriteLine("Written second part of file");
      stm.Close();
      System.Console.Out.WriteLine("Wrote and Closed second part of file.");
      // verify that entire file is good
      AppendTestUtil.CheckFullFile(fs, file1, AppendTestUtil.FileSize, fileContents, "Read 2");
    }
    {
      // test appending to a non-existing file.
      FSDataOutputStream @out = null;
      try {
        @out = fs.Append(new Path("/non-existing.dat"));
        NUnit.Framework.Assert.Fail("Expected to have FileNotFoundException");
      } catch (FileNotFoundException fnfe) {
        System.Console.Out.WriteLine("Good: got " + fnfe);
        Sharpen.Runtime.PrintStackTrace(fnfe, System.Console.Out);
      } finally {
        IOUtils.CloseStream(@out);
      }
    }
    {
      // test append permission.
      // set root to all writable
      Path root = new Path("/");
      fs.SetPermission(root, new FsPermission((short)0x1ff));
      fs.Close();
      // login as a different user
      UserGroupInformation superuser = UserGroupInformation.GetCurrentUser();
      string username = "******";
      string group = "testappendgroup";
      NUnit.Framework.Assert.IsFalse(superuser.GetShortUserName().Equals(username));
      NUnit.Framework.Assert.IsFalse(Arrays.AsList(superuser.GetGroupNames()).Contains(group));
      UserGroupInformation appenduser = UserGroupInformation.CreateUserForTesting(username, new string[] { group });
      fs = DFSTestUtil.GetFileSystemAs(appenduser, conf);
      // create a file
      Path dir = new Path(root, GetType().Name);
      Path foo = new Path(dir, "foo.dat");
      FSDataOutputStream @out = null;
      int offset = 0;
      try {
        @out = fs.Create(foo);
        int len = 10 + AppendTestUtil.NextInt(100);
        @out.Write(fileContents, offset, len);
        offset += len;
      } finally {
        IOUtils.CloseStream(@out);
      }
      // change dir and foo to minimal permissions.
      fs.SetPermission(dir, new FsPermission((short)0x40));
      fs.SetPermission(foo, new FsPermission((short)0x80));
      // try append, should succeed
      @out = null;
      try {
        @out = fs.Append(foo);
        int len = 10 + AppendTestUtil.NextInt(100);
        @out.Write(fileContents, offset, len);
        offset += len;
      } finally {
        IOUtils.CloseStream(@out);
      }
      // change dir and foo to all but no write on foo.
      fs.SetPermission(foo, new FsPermission((short)0x17f));
      fs.SetPermission(dir, new FsPermission((short)0x1ff));
      // try append, should fail
      @out = null;
      try {
        @out = fs.Append(foo);
        NUnit.Framework.Assert.Fail("Expected to have AccessControlException");
      } catch (AccessControlException ace) {
        System.Console.Out.WriteLine("Good: got " + ace);
        Sharpen.Runtime.PrintStackTrace(ace, System.Console.Out);
      } finally {
        IOUtils.CloseStream(@out);
      }
    }
  } catch (IOException e) {
    System.Console.Out.WriteLine("Exception :" + e);
    throw;
  } catch (Exception e) {
    System.Console.Out.WriteLine("Throwable :" + e);
    Sharpen.Runtime.PrintStackTrace(e);
    throw new IOException("Throwable : " + e);
  } finally {
    fs.Close();
    cluster.Shutdown();
  }
}
/// <summary>
/// Test case for data corruption during data transmission for
/// create/write.
/// </summary>
/// <remarks>
/// Test case for data corruption during data transmission for
/// create/write. To recover from corruption while writing, at
/// least two replicas are needed.
/// </remarks>
/// <exception cref="System.Exception"/>
public virtual void TestCorruptionDuringWrt() {
  Configuration conf = new HdfsConfiguration();
  // Set short retry timeouts so this test runs faster
  conf.SetInt(DFSConfigKeys.DfsClientRetryWindowBase, 10);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(10).Build();
    cluster.WaitActive();
    FileSystem fs = cluster.GetFileSystem();
    Path file = new Path("/test_corruption_file");
    FSDataOutputStream @out = fs.Create(file, true, 8192, (short)3, (long)(128 * 1024 * 1024));
    byte[] data = new byte[65536];
    for (int i = 0; i < 65536; i++) {
      data[i] = unchecked((byte)(i % 256));
    }
    for (int i_1 = 0; i_1 < 5; i_1++) {
      @out.Write(data, 0, 65535);
    }
    @out.Hflush();
    // corrupt the packet once
    Org.Mockito.Mockito.When(faultInjector.CorruptPacket()).ThenReturn(true, false);
    Org.Mockito.Mockito.When(faultInjector.UncorruptPacket()).ThenReturn(true, false);
    for (int i_2 = 0; i_2 < 5; i_2++) {
      @out.Write(data, 0, 65535);
    }
    @out.Close();
    // read should succeed
    FSDataInputStream @in = fs.Open(file);
    for (int c; (c = @in.Read()) != -1; ) {
    }
    @in.Close();
    // test the retry limit
    @out = fs.Create(file, true, 8192, (short)3, (long)(128 * 1024 * 1024));
    // corrupt the packet once and never fix it.
    Org.Mockito.Mockito.When(faultInjector.CorruptPacket()).ThenReturn(true, false);
    Org.Mockito.Mockito.When(faultInjector.UncorruptPacket()).ThenReturn(false);
    // the client should give up pipeline reconstruction after retries.
    try {
      for (int i_3 = 0; i_3 < 5; i_3++) {
        @out.Write(data, 0, 65535);
      }
      @out.Close();
      NUnit.Framework.Assert.Fail("Write did not fail");
    } catch (IOException ioe) {
      // we should get an ioe
      DFSClient.Log.Info("Got expected exception", ioe);
    }
  } finally {
    if (cluster != null) {
      cluster.Shutdown();
    }
    Org.Mockito.Mockito.When(faultInjector.CorruptPacket()).ThenReturn(false);
    Org.Mockito.Mockito.When(faultInjector.UncorruptPacket()).ThenReturn(false);
  }
}
public virtual void TestAppendRestart() {
  Configuration conf = new HdfsConfiguration();
  // Turn off persistent IPC, so that the DFSClient can survive NN restart
  conf.SetInt(CommonConfigurationKeysPublic.IpcClientConnectionMaxidletimeKey, 0);
  MiniDFSCluster cluster = null;
  FSDataOutputStream stream = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
    FileSystem fs = cluster.GetFileSystem();
    FilePath editLog = new FilePath(FSImageTestUtil.GetNameNodeCurrentDirs(cluster, 0)[0],
      NNStorage.GetInProgressEditsFileName(1));
    EnumMap<FSEditLogOpCodes, Holder<int>> counts;
    Path p1 = new Path("/block-boundaries");
    WriteAndAppend(fs, p1, BlockSize, BlockSize);
    counts = FSImageTestUtil.CountEditLogOpTypes(editLog);
    // OP_ADD to create file
    // OP_ADD_BLOCK for first block
    // OP_CLOSE to close file
    // OP_APPEND to reopen file
    // OP_ADD_BLOCK for second block
    // OP_CLOSE to close file
    NUnit.Framework.Assert.AreEqual(1, (int)counts[FSEditLogOpCodes.OpAdd].held);
    NUnit.Framework.Assert.AreEqual(1, (int)counts[FSEditLogOpCodes.OpAppend].held);
    NUnit.Framework.Assert.AreEqual(2, (int)counts[FSEditLogOpCodes.OpAddBlock].held);
    NUnit.Framework.Assert.AreEqual(2, (int)counts[FSEditLogOpCodes.OpClose].held);
    Path p2 = new Path("/not-block-boundaries");
    WriteAndAppend(fs, p2, BlockSize / 2, BlockSize);
    counts = FSImageTestUtil.CountEditLogOpTypes(editLog);
    // OP_ADD to create file
    // OP_ADD_BLOCK for first block
    // OP_CLOSE to close file
    // OP_APPEND to re-establish the lease
    // OP_UPDATE_BLOCKS from the updatePipeline call (increments genstamp of last block)
    // OP_ADD_BLOCK at the start of the second block
    // OP_CLOSE to close file
    // Total: 2 OP_ADDs, 1 OP_UPDATE_BLOCKS, 2 OP_ADD_BLOCKs, and 2 OP_CLOSEs
    // in addition to the ones above
    NUnit.Framework.Assert.AreEqual(2, (int)counts[FSEditLogOpCodes.OpAdd].held);
    NUnit.Framework.Assert.AreEqual(2, (int)counts[FSEditLogOpCodes.OpAppend].held);
    NUnit.Framework.Assert.AreEqual(1, (int)counts[FSEditLogOpCodes.OpUpdateBlocks].held);
    NUnit.Framework.Assert.AreEqual(2 + 2, (int)counts[FSEditLogOpCodes.OpAddBlock].held);
    NUnit.Framework.Assert.AreEqual(2 + 2, (int)counts[FSEditLogOpCodes.OpClose].held);
    cluster.RestartNameNode();
    AppendTestUtil.Check(fs, p1, 2 * BlockSize);
    AppendTestUtil.Check(fs, p2, 3 * BlockSize / 2);
  } finally {
    IOUtils.CloseStream(stream);
    if (cluster != null) {
      cluster.Shutdown();
    }
  }
}
public virtual void TestDataTransferProtocol() {
  Random random = new Random();
  int oneMil = 1024 * 1024;
  Path file = new Path("dataprotocol.dat");
  int numDataNodes = 1;
  Configuration conf = new HdfsConfiguration();
  conf.SetInt(DFSConfigKeys.DfsReplicationKey, numDataNodes);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDataNodes).Build();
  try {
    cluster.WaitActive();
    datanode = cluster.GetFileSystem().GetDataNodeStats(HdfsConstants.DatanodeReportType.Live)[0];
    dnAddr = NetUtils.CreateSocketAddr(datanode.GetXferAddr());
    FileSystem fileSys = cluster.GetFileSystem();
    int fileLen = Math.Min(conf.GetInt(DFSConfigKeys.DfsBlockSizeKey, 4096), 4096);
    CreateFile(fileSys, file, fileLen);
    // get the first blockid for the file
    ExtendedBlock firstBlock = DFSTestUtil.GetFirstBlock(fileSys, file);
    string poolId = firstBlock.GetBlockPoolId();
    long newBlockId = firstBlock.GetBlockId() + 1;
    recvBuf.Reset();
    sendBuf.Reset();
    // bad version
    recvOut.WriteShort((short)(DataTransferProtocol.DataTransferVersion - 1));
    sendOut.WriteShort((short)(DataTransferProtocol.DataTransferVersion - 1));
    SendRecvData("Wrong Version", true);
    // bad ops
    sendBuf.Reset();
    sendOut.WriteShort((short)DataTransferProtocol.DataTransferVersion);
    sendOut.WriteByte(OP.WriteBlock.code - 1);
    SendRecvData("Wrong Op Code", true);
    /* Test OP_WRITE_BLOCK */
    sendBuf.Reset();
    DataChecksum badChecksum = Org.Mockito.Mockito.Spy(DefaultChecksum);
    Org.Mockito.Mockito.DoReturn(-1).When(badChecksum).GetBytesPerChecksum();
    WriteBlock(poolId, newBlockId, badChecksum);
    recvBuf.Reset();
    SendResponse(DataTransferProtos.Status.Error, null, null, recvOut);
    SendRecvData("wrong bytesPerChecksum while writing", true);
    sendBuf.Reset();
    recvBuf.Reset();
    WriteBlock(poolId, ++newBlockId, DefaultChecksum);
    PacketHeader hdr = new PacketHeader(
      4,                         // size of packet
      0,                         // offset in block
      100,                       // seqno
      false,                     // last packet
      -1 - random.Next(oneMil),  // bad datalen
      false);
    hdr.Write(sendOut);
    SendResponse(DataTransferProtos.Status.Success, string.Empty, null, recvOut);
    new PipelineAck(100, new int[] { PipelineAck.CombineHeader(PipelineAck.ECN.Disabled,
      DataTransferProtos.Status.Error) }).Write(recvOut);
    SendRecvData("negative DATA_CHUNK len while writing block " + newBlockId, true);
    // test for writing a valid zero size block
    sendBuf.Reset();
    recvBuf.Reset();
    WriteBlock(poolId, ++newBlockId, DefaultChecksum);
    hdr = new PacketHeader(
      8,     // size of packet
      0,     // OffsetInBlock
      100,   // sequencenumber
      true,  // lastPacketInBlock
      0,     // chunk length
      false);
    hdr.Write(sendOut);
    sendOut.WriteInt(0);  // zero checksum
    sendOut.Flush();
    // ok finally write a block with 0 len
    SendResponse(DataTransferProtos.Status.Success, string.Empty, null, recvOut);
    new PipelineAck(100, new int[] { PipelineAck.CombineHeader(PipelineAck.ECN.Disabled,
      DataTransferProtos.Status.Success) }).Write(recvOut);
    SendRecvData("Writing a zero len block blockid " + newBlockId, false);
    /* Test OP_READ_BLOCK */
    string bpid = cluster.GetNamesystem().GetBlockPoolId();
    ExtendedBlock blk = new ExtendedBlock(bpid, firstBlock.GetLocalBlock());
    long blkid = blk.GetBlockId();
    // bad block id
    sendBuf.Reset();
    recvBuf.Reset();
    blk.SetBlockId(blkid - 1);
    sender.ReadBlock(blk, BlockTokenSecretManager.DummyToken, "cl", 0L, fileLen, true,
      CachingStrategy.NewDefaultStrategy());
    SendRecvData("Wrong block ID " + newBlockId + " for read", false);
    // negative block start offset -1L
    sendBuf.Reset();
    blk.SetBlockId(blkid);
    sender.ReadBlock(blk, BlockTokenSecretManager.DummyToken, "cl", -1L, fileLen, true,
      CachingStrategy.NewDefaultStrategy());
    SendRecvData("Negative start-offset for read for block " + firstBlock.GetBlockId(), false);
    // bad block start offset
    sendBuf.Reset();
    sender.ReadBlock(blk, BlockTokenSecretManager.DummyToken, "cl", fileLen, fileLen, true,
      CachingStrategy.NewDefaultStrategy());
    SendRecvData("Wrong start-offset for reading block " + firstBlock.GetBlockId(), false);
    // negative length is ok. Datanode assumes we want to read the whole block.
    recvBuf.Reset();
    ((DataTransferProtos.BlockOpResponseProto)DataTransferProtos.BlockOpResponseProto.NewBuilder()
      .SetStatus(DataTransferProtos.Status.Success)
      .SetReadOpChecksumInfo(DataTransferProtos.ReadOpChecksumInfoProto.NewBuilder()
        .SetChecksum(DataTransferProtoUtil.ToProto(DefaultChecksum))
        .SetChunkOffset(0L))
      .Build()).WriteDelimitedTo(recvOut);
    sendBuf.Reset();
    sender.ReadBlock(blk, BlockTokenSecretManager.DummyToken, "cl", 0L, -1L - random.Next(oneMil), true,
      CachingStrategy.NewDefaultStrategy());
    SendRecvData("Negative length for reading block " + firstBlock.GetBlockId(), false);
    // length is more than size of block.
    recvBuf.Reset();
    SendResponse(DataTransferProtos.Status.Error, null,
      "opReadBlock " + firstBlock + " received exception java.io.IOException: " +
      "Offset 0 and length 4097 don't match block " + firstBlock + " ( blockLen 4096 )", recvOut);
    sendBuf.Reset();
    sender.ReadBlock(blk, BlockTokenSecretManager.DummyToken, "cl", 0L, fileLen + 1, true,
      CachingStrategy.NewDefaultStrategy());
    SendRecvData("Wrong length for reading block " + firstBlock.GetBlockId(), false);
    // At the end of all this, read the file to make sure that succeeds finally.
    sendBuf.Reset();
    sender.ReadBlock(blk, BlockTokenSecretManager.DummyToken, "cl", 0L, fileLen, true,
      CachingStrategy.NewDefaultStrategy());
    ReadFile(fileSys, file, fileLen);
  } finally {
    cluster.Shutdown();
  }
}