/// <summary>
/// Periodically restarts random datanodes while the worker threads run.
/// Waits until every worker has finished at least one file since the last
/// stamp, so that at least one valid replica of each file is guaranteed.
/// </summary>
public override void Run()
{
    while (this.running)
    {
        try
        {
            Sharpen.Thread.Sleep(1000);
        }
        catch (Exception)
        {
            continue;
        }
        // Check if all threads have a new stamp. If so, then all workers
        // have finished at least one file since the last stamp.
        bool loop = false;
        for (int i = 0; i < this._enclosing.numThreads; i++)
        {
            if (this._enclosing.workload[i].GetStamp() == 0)
            {
                loop = true;
                break;
            }
        }
        if (loop)
        {
            continue;
        }
        // Now it is guaranteed that there will be at least one valid
        // replica of a file.
        for (int i_1 = 0; i_1 < TestDatanodeDeath.replication - 1; i_1++)
        {
            // pick a random datanode to restart
            int victim = AppendTestUtil.NextInt(TestDatanodeDeath.numDatanodes);
            try
            {
                System.Console.Out.WriteLine("Stopping datanode " + victim);
                this.cluster.RestartDataNode(victim);
            }
            catch (IOException e)
            {
                // cluster.startDataNodes(conf, 1, true, null, null);
                System.Console.Out.WriteLine("TestDatanodeDeath Modify exception " + e);
                NUnit.Framework.Assert.IsTrue("TestDatanodeDeath Modify exception " + e, false);
                this.running = false;
            }
        }
        // set a new stamp for all workers
        for (int i_2 = 0; i_2 < this._enclosing.numThreads; i_2++)
        {
            this._enclosing.workload[i_2].ResetStamp();
        }
    }
}
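/// <summary>
/// Test the hard lease limit: after the lease renewal thread is killed and
/// the hard limit is shortened, the namenode should recover the lease, and
/// further writes by the original client should fail.
/// </summary>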
public virtual void TestHardLeaseRecovery()
{
    // create a file
    string filestr = "/hardLeaseRecovery";
    AppendTestUtil.Log.Info("filestr=" + filestr);
    Path filepath = new Path(filestr);
    FSDataOutputStream stm = dfs.Create(filepath, true, BufSize, ReplicationNum, BlockSize);
    NUnit.Framework.Assert.IsTrue(dfs.dfs.Exists(filestr));
    // write bytes into the file.
    int size = AppendTestUtil.NextInt(FileSize);
    AppendTestUtil.Log.Info("size=" + size);
    stm.Write(buffer, 0, size);
    // hflush file
    AppendTestUtil.Log.Info("hflush");
    stm.Hflush();
    // kill the lease renewal thread
    AppendTestUtil.Log.Info("leasechecker.interruptAndJoin()");
    dfs.dfs.GetLeaseRenewer().InterruptAndJoin();
    // set the hard limit to be 1 second
    cluster.SetLeasePeriod(LongLeasePeriod, ShortLeasePeriod);
    // wait for lease recovery to complete
    LocatedBlocks locatedBlocks;
    do
    {
        Sharpen.Thread.Sleep(ShortLeasePeriod);
        locatedBlocks = dfs.dfs.GetLocatedBlocks(filestr, 0L, size);
    }
    while (locatedBlocks.IsUnderConstruction());
    NUnit.Framework.Assert.AreEqual(size, locatedBlocks.GetFileLength());
    // make sure that the writer thread gets killed
    try
    {
        stm.Write('b');
        stm.Close();
        NUnit.Framework.Assert.Fail("Writer thread should have been killed");
    }
    catch (IOException e)
    {
        Sharpen.Runtime.PrintStackTrace(e);
    }
    // verify data
    AppendTestUtil.Log.Info("File size is good. Now validating sizes from datanodes...");
    AppendTestUtil.CheckFullFile(dfs, filepath, size, buffer, filestr);
}
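/// <summary>
/// Trigger lease recovery on <paramref name="filepath"/> by appending to it
/// with a file system instance belonging to another user.
/// </summary>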
/// <exception cref="System.Exception"/>
private void RecoverLeaseUsingCreate2(Path filepath)
{
    FileSystem dfs2 = GetFSAsAnotherUser(conf);
    int size = AppendTestUtil.NextInt(FileSize);
    DistributedFileSystem dfsx = (DistributedFileSystem)dfs2;
    // create file using dfsx
    Path filepath2 = new Path("/immediateRecoverLease-x2");
    FSDataOutputStream stm = dfsx.Create(filepath2, true, BufSize, ReplicationNum, BlockSize);
    NUnit.Framework.Assert.IsTrue(dfsx.dfs.Exists("/immediateRecoverLease-x2"));
    try
    {
        Sharpen.Thread.Sleep(10000);
    }
    catch (Exception)
    {
    }
    dfsx.Append(filepath);
}
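/// <summary>
/// Kill one of three datanodes before writing; closing the stream should
/// still succeed because the failed node is excluded from the pipeline.
/// </summary>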
/// <exception cref="System.IO.IOException"/>
public virtual void TestExcludedNodes()
{
    cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
    FileSystem fs = cluster.GetFileSystem();
    Path filePath = new Path("/testExcludedNodes");
    // kill a datanode
    cluster.StopDataNode(AppendTestUtil.NextInt(3));
    OutputStream @out = fs.Create(filePath, true, 4096, (short)3, fs.GetDefaultBlockSize(filePath));
    @out.Write(20);
    try
    {
        @out.Close();
    }
    catch (Exception e)
    {
        NUnit.Framework.Assert.Fail("Single DN failure should not result in a block abort: \n" + e.Message);
    }
}
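/// <summary>
/// Test immediate lease recovery: once via a short soft limit and a
/// competing create, then via recoverLease from a different client and
/// from the same client.
/// </summary>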
public virtual void TestImmediateRecoveryOfLease()
{
    // create a file and write bytes into it.
    byte[] actual = new byte[FileSize];
    int size = AppendTestUtil.NextInt(FileSize);
    Path filepath = CreateFile("/immediateRecoverLease-shortlease", size, true);
    // set the soft limit to be 1 second so that the
    // namenode triggers lease recovery on next attempt to write-for-open.
    cluster.SetLeasePeriod(ShortLeasePeriod, LongLeasePeriod);
    RecoverLeaseUsingCreate(filepath);
    VerifyFile(dfs, filepath, actual, size);
    // test recoverLease
    // set the soft limit to be 1 hour but recoverLease should
    // close the file immediately
    cluster.SetLeasePeriod(LongLeasePeriod, LongLeasePeriod);
    size = AppendTestUtil.NextInt(FileSize);
    filepath = CreateFile("/immediateRecoverLease-longlease", size, false);
    // test recoverLease from a different client
    RecoverLease(filepath, null);
    VerifyFile(dfs, filepath, actual, size);
    // test recoverLease from the same client
    size = AppendTestUtil.NextInt(FileSize);
    filepath = CreateFile("/immediateRecoverLease-sameclient", size, false);
    // create another file using the same client
    Path filepath1 = new Path(filepath.ToString() + AppendTestUtil.NextInt());
    FSDataOutputStream stm = dfs.Create(filepath1, true, BufSize, ReplicationNum, BlockSize);
    // recover the first file
    RecoverLease(filepath, dfs);
    VerifyFile(dfs, filepath, actual, size);
    // continue to write to the second file
    stm.Write(buffer, 0, size);
    stm.Close();
    VerifyFile(dfs, filepath1, actual, size);
}
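/// <summary>
/// Test a simple flow of appends: append to a file in two steps and verify
/// the full contents, append to a non-existing file, and check append
/// permission enforcement for a non-superuser.
/// </summary>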
public virtual void TestSimpleAppend()
{
    Configuration conf = new HdfsConfiguration();
    conf.SetInt(DFSConfigKeys.DfsDatanodeHandlerCountKey, 50);
    fileContents = AppendTestUtil.InitBuffer(AppendTestUtil.FileSize);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
    FileSystem fs = cluster.GetFileSystem();
    try
    {
        {
            // test appending to a file.
            // create a new file.
            Path file1 = new Path("/simpleAppend.dat");
            FSDataOutputStream stm = AppendTestUtil.CreateFile(fs, file1, 1);
            System.Console.Out.WriteLine("Created file simpleAppend.dat");
            // write to file
            int mid = 186; // io.bytes.per.checksum bytes
            System.Console.Out.WriteLine("Writing " + mid + " bytes to file " + file1);
            stm.Write(fileContents, 0, mid);
            stm.Close();
            System.Console.Out.WriteLine("Wrote and Closed first part of file.");
            // append to file
            int mid2 = 607; // io.bytes.per.checksum bytes
            System.Console.Out.WriteLine("Writing " + (mid2 - mid) + " bytes to file " + file1);
            stm = fs.Append(file1);
            stm.Write(fileContents, mid, mid2 - mid);
            stm.Close();
            System.Console.Out.WriteLine("Wrote and Closed second part of file.");
            // write the remainder of the file
            stm = fs.Append(file1);
            // ensure getPos is set to reflect existing size of the file
            NUnit.Framework.Assert.IsTrue(stm.GetPos() > 0);
            System.Console.Out.WriteLine("Writing " + (AppendTestUtil.FileSize - mid2) + " bytes to file " + file1);
            stm.Write(fileContents, mid2, AppendTestUtil.FileSize - mid2);
            System.Console.Out.WriteLine("Written remainder of file");
            stm.Close();
            System.Console.Out.WriteLine("Wrote and Closed remainder of file.");
            // verify that entire file is good
            AppendTestUtil.CheckFullFile(fs, file1, AppendTestUtil.FileSize, fileContents, "Read 2");
        }
        {
            // test appending to a non-existing file.
            FSDataOutputStream @out = null;
            try
            {
                @out = fs.Append(new Path("/non-existing.dat"));
                NUnit.Framework.Assert.Fail("Expected to have FileNotFoundException");
            }
            catch (FileNotFoundException fnfe)
            {
                System.Console.Out.WriteLine("Good: got " + fnfe);
                Sharpen.Runtime.PrintStackTrace(fnfe, System.Console.Out);
            }
            finally
            {
                IOUtils.CloseStream(@out);
            }
        }
        {
            // test append permission.
            // set root to all writable
            Path root = new Path("/");
            fs.SetPermission(root, new FsPermission((short)0x1ff));
            fs.Close();
            // login as a different user
            UserGroupInformation superuser = UserGroupInformation.GetCurrentUser();
            string username = "******";
            string group = "testappendgroup";
            NUnit.Framework.Assert.IsFalse(superuser.GetShortUserName().Equals(username));
            NUnit.Framework.Assert.IsFalse(Arrays.AsList(superuser.GetGroupNames()).Contains(group));
            UserGroupInformation appenduser = UserGroupInformation.CreateUserForTesting(username, new string[] { group });
            fs = DFSTestUtil.GetFileSystemAs(appenduser, conf);
            // create a file
            Path dir = new Path(root, GetType().Name);
            Path foo = new Path(dir, "foo.dat");
            FSDataOutputStream @out = null;
            int offset = 0;
            try
            {
                @out = fs.Create(foo);
                int len = 10 + AppendTestUtil.NextInt(100);
                @out.Write(fileContents, offset, len);
                offset += len;
            }
            finally
            {
                IOUtils.CloseStream(@out);
            }
            // change dir and foo to minimal permissions.
            fs.SetPermission(dir, new FsPermission((short)0x40));
            fs.SetPermission(foo, new FsPermission((short)0x80));
            // try append, should succeed
            @out = null;
            try
            {
                @out = fs.Append(foo);
                int len = 10 + AppendTestUtil.NextInt(100);
                @out.Write(fileContents, offset, len);
                offset += len;
            }
            finally
            {
                IOUtils.CloseStream(@out);
            }
            // change dir and foo to all but no write on foo.
            fs.SetPermission(foo, new FsPermission((short)0x17f));
            fs.SetPermission(dir, new FsPermission((short)0x1ff));
            // try append, should fail
            @out = null;
            try
            {
                @out = fs.Append(foo);
                NUnit.Framework.Assert.Fail("Expected to have AccessControlException");
            }
            catch (AccessControlException ace)
            {
                System.Console.Out.WriteLine("Good: got " + ace);
                Sharpen.Runtime.PrintStackTrace(ace, System.Console.Out);
            }
            finally
            {
                IOUtils.CloseStream(@out);
            }
        }
    }
    catch (IOException e)
    {
        System.Console.Out.WriteLine("Exception :" + e);
        throw;
    }
    catch (Exception e)
    {
        System.Console.Out.WriteLine("Throwable :" + e);
        Sharpen.Runtime.PrintStackTrace(e);
        throw new IOException("Throwable : " + e);
    }
    finally
    {
        fs.Close();
        cluster.Shutdown();
    }
}
/// <summary>Test appends to files at random offsets.</summary>
/// <exception cref="System.IO.IOException"/>
private void TestComplexAppend(bool appendToNewBlock)
{
    fileContents = AppendTestUtil.InitBuffer(AppendTestUtil.FileSize);
    Configuration conf = new HdfsConfiguration();
    conf.SetInt(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 2000);
    conf.SetInt(DFSConfigKeys.DfsHeartbeatIntervalKey, 2);
    conf.SetInt(DFSConfigKeys.DfsNamenodeReplicationPendingTimeoutSecKey, 2);
    conf.SetInt(DFSConfigKeys.DfsClientSocketTimeoutKey, 30000);
    conf.SetInt(DFSConfigKeys.DfsDatanodeSocketWriteTimeoutKey, 30000);
    conf.SetInt(DFSConfigKeys.DfsDatanodeHandlerCountKey, 50);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDatanodes).Build();
    cluster.WaitActive();
    FileSystem fs = cluster.GetFileSystem();
    try
    {
        // create a bunch of test files with random replication factors.
        // Insert them into a linked list.
        for (int i = 0; i < numberOfFiles; i++)
        {
            int replication = AppendTestUtil.NextInt(numDatanodes - 2) + 1;
            Path testFile = new Path("/" + i + ".dat");
            FSDataOutputStream stm = AppendTestUtil.CreateFile(fs, testFile, replication);
            stm.Close();
            testFiles.AddItem(testFile);
        }
        // Create threads and make them run workload concurrently.
        workload = new TestFileAppend2.Workload[numThreads];
        for (int i_1 = 0; i_1 < numThreads; i_1++)
        {
            workload[i_1] = new TestFileAppend2.Workload(this, cluster, i_1, appendToNewBlock);
            workload[i_1].Start();
        }
        // wait for all transactions to get over
        for (int i_2 = 0; i_2 < numThreads; i_2++)
        {
            try
            {
                System.Console.Out.WriteLine("Waiting for thread " + i_2 + " to complete...");
                workload[i_2].Join();
                System.Console.Out.WriteLine("Thread " + i_2 + " completed.");
            }
            catch (Exception)
            {
                // interrupted; retry the join
                i_2--;
            }
        }
    }
    finally
    {
        fs.Close();
        cluster.Shutdown();
    }
    // If any of the worker threads failed in their job, indicate that
    // this test failed.
    NUnit.Framework.Assert.IsTrue("testComplexAppend Worker encountered exceptions.", globalStatus);
}
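/// <summary>
/// Worker thread body: repeatedly picks a random file from the shared pool,
/// appends a random number of bytes to it, waits for the namenode metadata
/// to reflect the new length, verifies the contents, and returns the file
/// to the pool.
/// </summary>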
// create a bunch of files. Write to them and then verify.
public override void Run()
{
    System.Console.Out.WriteLine("Workload " + this.id + " starting... ");
    for (int i = 0; i < this._enclosing.numAppendsPerThread; i++)
    {
        // pick a file at random and remove it from pool
        Path testfile;
        lock (this._enclosing.testFiles)
        {
            if (this._enclosing.testFiles.Count == 0)
            {
                System.Console.Out.WriteLine("Completed write to almost all files.");
                return;
            }
            int index = AppendTestUtil.NextInt(this._enclosing.testFiles.Count);
            testfile = this._enclosing.testFiles.Remove(index);
        }
        long len = 0;
        int sizeToAppend = 0;
        try
        {
            DistributedFileSystem fs = this.cluster.GetFileSystem();
            // add a random number of bytes to file
            len = fs.GetFileStatus(testfile).GetLen();
            // if file is already full, then pick another file
            if (len >= AppendTestUtil.FileSize)
            {
                System.Console.Out.WriteLine("File " + testfile + " is full.");
                continue;
            }
            // do small size appends so that we can trigger multiple
            // appends to the same file.
            int left = (int)(AppendTestUtil.FileSize - len) / 3;
            if (left <= 0)
            {
                left = 1;
            }
            sizeToAppend = AppendTestUtil.NextInt(left);
            System.Console.Out.WriteLine("Workload thread " + this.id + " appending " + sizeToAppend + " bytes to file " + testfile + " of size " + len);
            FSDataOutputStream stm = this.appendToNewBlock
                ? fs.Append(testfile, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock), 4096, null)
                : fs.Append(testfile);
            stm.Write(this._enclosing.fileContents, (int)len, sizeToAppend);
            stm.Close();
            // wait for the file size to be reflected in the namenode metadata
            while (fs.GetFileStatus(testfile).GetLen() != (len + sizeToAppend))
            {
                try
                {
                    System.Console.Out.WriteLine("Workload thread " + this.id + " file " + testfile + " size " + fs.GetFileStatus(testfile).GetLen() + " expected size " + (len + sizeToAppend) + " waiting for namenode metadata update.");
                    Sharpen.Thread.Sleep(5000);
                }
                catch (Exception)
                {
                }
            }
            NUnit.Framework.Assert.IsTrue("File " + testfile + " size is " + fs.GetFileStatus(testfile).GetLen() + " but expected " + (len + sizeToAppend), fs.GetFileStatus(testfile).GetLen() == (len + sizeToAppend));
            AppendTestUtil.CheckFullFile(fs, testfile, (int)(len + sizeToAppend), this._enclosing.fileContents, "Read 2");
        }
        catch (Exception e)
        {
            TestFileAppend2.globalStatus = false;
            if (e.ToString() != null)
            {
                System.Console.Out.WriteLine("Workload exception " + this.id + " testfile " + testfile + " " + e);
                Sharpen.Runtime.PrintStackTrace(e);
            }
            NUnit.Framework.Assert.IsTrue("Workload exception " + this.id + " testfile " + testfile + " expected size " + (len + sizeToAppend), false);
        }
        // Add testfile back to the pool of files.
        lock (this._enclosing.testFiles)
        {
            this._enclosing.testFiles.AddItem(testfile);
        }
    }
}
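/// <summary>
/// Start several slow writers, stop one datanode while they are writing so
/// that the clients trigger lease recovery, then stop the writers and
/// verify that each file contains the expected sequence of bytes.
/// </summary>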
public virtual void TestClientTriggeredLeaseRecovery()
{
    int Replication = 3;
    Configuration conf = new HdfsConfiguration();
    conf.SetInt(DFSConfigKeys.DfsDatanodeHandlerCountKey, 1);
    conf.SetInt(DFSConfigKeys.DfsReplicationKey, Replication);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(Replication).Build();
    try
    {
        FileSystem fs = cluster.GetFileSystem();
        Path dir = new Path("/wrwelkj");
        TestFileCreationClient.SlowWriter[] slowwriters = new TestFileCreationClient.SlowWriter[10];
        for (int i = 0; i < slowwriters.Length; i++)
        {
            slowwriters[i] = new TestFileCreationClient.SlowWriter(fs, new Path(dir, "file" + i));
        }
        try
        {
            for (int i_1 = 0; i_1 < slowwriters.Length; i_1++)
            {
                slowwriters[i_1].Start();
            }
            // let writers get started
            Sharpen.Thread.Sleep(1000);
            // stop a datanode so that lease recovery is triggered
            cluster.StopDataNode(AppendTestUtil.NextInt(Replication));
            // let the slow writers write for a few more seconds
            System.Console.Out.WriteLine("Wait a few seconds");
            Sharpen.Thread.Sleep(5000);
        }
        finally
        {
            for (int i_1 = 0; i_1 < slowwriters.Length; i_1++)
            {
                if (slowwriters[i_1] != null)
                {
                    slowwriters[i_1].running = false;
                    slowwriters[i_1].Interrupt();
                }
            }
            for (int i_2 = 0; i_2 < slowwriters.Length; i_2++)
            {
                if (slowwriters[i_2] != null)
                {
                    slowwriters[i_2].Join();
                }
            }
        }
        // Verify the files
        System.Console.Out.WriteLine("Verify the file");
        for (int i_3 = 0; i_3 < slowwriters.Length; i_3++)
        {
            System.Console.Out.WriteLine(slowwriters[i_3].filepath + ": length=" + fs.GetFileStatus(slowwriters[i_3].filepath).GetLen());
            FSDataInputStream @in = null;
            try
            {
                @in = fs.Open(slowwriters[i_3].filepath);
                for (int j = 0, x; (x = @in.Read()) != -1; j++)
                {
                    NUnit.Framework.Assert.AreEqual(j, x);
                }
            }
            finally
            {
                IOUtils.CloseStream(@in);
            }
        }
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/// <summary>
/// Write to one file, then kill one datanode in the pipeline and then
/// close the file.
/// </summary>
/// <exception cref="System.IO.IOException"/>
private void SimpleTest(int datanodeToKill)
{
    Configuration conf = new HdfsConfiguration();
    conf.SetInt(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 2000);
    conf.SetInt(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
    conf.SetInt(DFSConfigKeys.DfsNamenodeReplicationPendingTimeoutSecKey, 2);
    conf.SetInt(DFSConfigKeys.DfsClientSocketTimeoutKey, 5000);
    int myMaxNodes = 5;
    System.Console.Out.WriteLine("SimpleTest starting with DataNode to Kill " + datanodeToKill);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(myMaxNodes).Build();
    cluster.WaitActive();
    FileSystem fs = cluster.GetFileSystem();
    short repl = 3;
    Path filename = new Path("simpletest.dat");
    try
    {
        // create a file and write one block of data
        System.Console.Out.WriteLine("SimpleTest creating file " + filename);
        FSDataOutputStream stm = CreateFile(fs, filename, repl);
        DFSOutputStream dfstream = (DFSOutputStream)(stm.GetWrappedStream());
        // these are test settings
        dfstream.SetChunksPerPacket(5);
        dfstream.SetArtificialSlowdown(3000);
        long myseed = AppendTestUtil.NextLong();
        byte[] buffer = AppendTestUtil.RandomBytes(myseed, fileSize);
        int mid = fileSize / 4;
        stm.Write(buffer, 0, mid);
        DatanodeInfo[] targets = dfstream.GetPipeline();
        int count = 5;
        while (count-- > 0 && targets == null)
        {
            try
            {
                System.Console.Out.WriteLine("SimpleTest: Waiting for pipeline to be created.");
                Sharpen.Thread.Sleep(1000);
            }
            catch (Exception)
            {
            }
            targets = dfstream.GetPipeline();
        }
        if (targets == null)
        {
            int victim = AppendTestUtil.NextInt(myMaxNodes);
            System.Console.Out.WriteLine("SimpleTest stopping datanode random " + victim);
            cluster.StopDataNode(victim);
        }
        else
        {
            int victim = datanodeToKill;
            System.Console.Out.WriteLine("SimpleTest stopping datanode " + targets[victim]);
            cluster.StopDataNode(targets[victim].GetXferAddr());
        }
        System.Console.Out.WriteLine("SimpleTest stopping datanode complete");
        // write some more data to file, close and verify
        stm.Write(buffer, mid, fileSize - mid);
        stm.Close();
        CheckFile(fs, filename, repl, numBlocks, fileSize, myseed);
    }
    catch (Exception e)
    {
        System.Console.Out.WriteLine("Simple Workload exception " + e);
        Sharpen.Runtime.PrintStackTrace(e);
        NUnit.Framework.Assert.IsTrue(e.ToString(), false);
    }
    finally
    {
        fs.Close();
        cluster.Shutdown();
    }
}
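/// <summary>
/// Test the Always policy of ReplaceDatanodeOnFailure: start slow writers,
/// add new datanodes on a second rack, stop one of the original datanodes,
/// and verify that every writer maintains replication and writes a correct
/// file.
/// </summary>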
public virtual void TestReplaceDatanodeOnFailure()
{
    Configuration conf = new HdfsConfiguration();
    // always replace a datanode
    ReplaceDatanodeOnFailure.Write(ReplaceDatanodeOnFailure.Policy.Always, true, conf);
    string[] racks = new string[Replication];
    Arrays.Fill(racks, Rack0);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Racks(racks).NumDataNodes(Replication).Build();
    try
    {
        DistributedFileSystem fs = cluster.GetFileSystem();
        Path dir = new Path(Dir);
        TestReplaceDatanodeOnFailure.SlowWriter[] slowwriters = new TestReplaceDatanodeOnFailure.SlowWriter[10];
        for (int i = 1; i <= slowwriters.Length; i++)
        {
            // create slow writers with different speeds
            slowwriters[i - 1] = new TestReplaceDatanodeOnFailure.SlowWriter(fs, new Path(dir, "file" + i), i * 200L);
        }
        foreach (TestReplaceDatanodeOnFailure.SlowWriter s in slowwriters)
        {
            s.Start();
        }
        // Let slow writers write something.
        // Some of them are so slow that they have not yet started.
        SleepSeconds(1);
        // start new datanodes
        cluster.StartDataNodes(conf, 2, true, null, new string[] { Rack1, Rack1 });
        // stop an old datanode
        cluster.StopDataNode(AppendTestUtil.NextInt(Replication));
        // Let the slow writers write for a few more seconds.
        // Everyone should have written something.
        SleepSeconds(5);
        // check replication and interrupt.
        foreach (TestReplaceDatanodeOnFailure.SlowWriter s_1 in slowwriters)
        {
            s_1.CheckReplication();
            s_1.InterruptRunning();
        }
        // close files
        foreach (TestReplaceDatanodeOnFailure.SlowWriter s_2 in slowwriters)
        {
            s_2.JoinAndClose();
        }
        // Verify the files
        Log.Info("Verify the file");
        for (int i_1 = 0; i_1 < slowwriters.Length; i_1++)
        {
            Log.Info(slowwriters[i_1].filepath + ": length=" + fs.GetFileStatus(slowwriters[i_1].filepath).GetLen());
            FSDataInputStream @in = null;
            try
            {
                @in = fs.Open(slowwriters[i_1].filepath);
                for (int j = 0, x; (x = @in.Read()) != -1; j++)
                {
                    NUnit.Framework.Assert.AreEqual(j, x);
                }
            }
            finally
            {
                IOUtils.CloseStream(@in);
            }
        }
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
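/// <summary>
/// Helper for hard lease recovery across a namenode restart: writes and
/// hflushes a file, optionally renames it, kills the lease renewal thread,
/// shortens the hard limit, restarts the namenode mid-recovery, and checks
/// that the lease is recovered and the original client can no longer write.
/// </summary>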
/// <exception cref="System.Exception"/>
public virtual void HardLeaseRecoveryRestartHelper(bool doRename, int size)
{
    if (size < 0)
    {
        size = AppendTestUtil.NextInt(FileSize + 1);
    }
    // create a file
    string fileStr = "/hardLeaseRecovery";
    AppendTestUtil.Log.Info("filestr=" + fileStr);
    Path filePath = new Path(fileStr);
    FSDataOutputStream stm = dfs.Create(filePath, true, BufSize, ReplicationNum, BlockSize);
    NUnit.Framework.Assert.IsTrue(dfs.dfs.Exists(fileStr));
    // write bytes into the file.
    AppendTestUtil.Log.Info("size=" + size);
    stm.Write(buffer, 0, size);
    string originalLeaseHolder = NameNodeAdapter.GetLeaseHolderForPath(cluster.GetNameNode(), fileStr);
    NUnit.Framework.Assert.IsFalse("original lease holder should not be the NN", originalLeaseHolder.Equals(HdfsServerConstants.NamenodeLeaseHolder));
    // hflush file
    AppendTestUtil.Log.Info("hflush");
    stm.Hflush();
    // check visible length
    HdfsDataInputStream @in = (HdfsDataInputStream)dfs.Open(filePath);
    NUnit.Framework.Assert.AreEqual(size, @in.GetVisibleLength());
    @in.Close();
    if (doRename)
    {
        fileStr += ".renamed";
        Path renamedPath = new Path(fileStr);
        NUnit.Framework.Assert.IsTrue(dfs.Rename(filePath, renamedPath));
        filePath = renamedPath;
    }
    // kill the lease renewal thread
    AppendTestUtil.Log.Info("leasechecker.interruptAndJoin()");
    dfs.dfs.GetLeaseRenewer().InterruptAndJoin();
    // Make sure the DNs don't send a heartbeat for a while, so the blocks
    // won't actually get completed during lease recovery.
    foreach (DataNode dn in cluster.GetDataNodes())
    {
        DataNodeTestUtils.SetHeartbeatsDisabledForTests(dn, true);
    }
    // set the hard limit to be 1 second
    cluster.SetLeasePeriod(LongLeasePeriod, ShortLeasePeriod);
    // Make sure lease recovery begins.
    Sharpen.Thread.Sleep(HdfsServerConstants.NamenodeLeaseRecheckInterval * 2);
    CheckLease(fileStr, size);
    cluster.RestartNameNode(false);
    CheckLease(fileStr, size);
    // Let the DNs send heartbeats again.
    foreach (DataNode dn_1 in cluster.GetDataNodes())
    {
        DataNodeTestUtils.SetHeartbeatsDisabledForTests(dn_1, false);
    }
    cluster.WaitActive();
    // set the hard limit to be 1 second, to initiate lease recovery.
    cluster.SetLeasePeriod(LongLeasePeriod, ShortLeasePeriod);
    // wait for lease recovery to complete
    LocatedBlocks locatedBlocks;
    do
    {
        Sharpen.Thread.Sleep(ShortLeasePeriod);
        locatedBlocks = dfs.dfs.GetLocatedBlocks(fileStr, 0L, size);
    }
    while (locatedBlocks.IsUnderConstruction());
    NUnit.Framework.Assert.AreEqual(size, locatedBlocks.GetFileLength());
    // make sure that the client can't write data anymore.
    try
    {
        stm.Write('b');
        stm.Hflush();
        NUnit.Framework.Assert.Fail("Should not be able to flush after we've lost the lease");
    }
    catch (IOException e)
    {
        Log.Info("Expected exception on write/hflush", e);
    }
    try
    {
        stm.Close();
        NUnit.Framework.Assert.Fail("Should not be able to close after we've lost the lease");
    }
    catch (IOException e)
    {
        Log.Info("Expected exception on close", e);
    }
    // verify data
    AppendTestUtil.Log.Info("File size is good. Now validating sizes from datanodes...");
    AppendTestUtil.CheckFullFile(dfs, filePath, size, buffer, fileStr);
}
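/// <summary>
/// Test the soft lease limit: after the soft limit expires, a create
/// attempt by another user fails but triggers lease recovery, after which
/// the file length and contents are verified.
/// </summary>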
public virtual void TestSoftLeaseRecovery()
{
    IDictionary<string, string[]> u2g_map = new Dictionary<string, string[]>(1);
    u2g_map[fakeUsername] = new string[] { fakeGroup };
    DFSTestUtil.UpdateConfWithFakeGroupMapping(conf, u2g_map);
    // Reset default lease periods
    cluster.SetLeasePeriod(HdfsConstants.LeaseSoftlimitPeriod, HdfsConstants.LeaseHardlimitPeriod);
    // create a file with a random name
    string filestr = "/foo" + AppendTestUtil.NextInt();
    AppendTestUtil.Log.Info("filestr=" + filestr);
    Path filepath = new Path(filestr);
    FSDataOutputStream stm = dfs.Create(filepath, true, BufSize, ReplicationNum, BlockSize);
    NUnit.Framework.Assert.IsTrue(dfs.dfs.Exists(filestr));
    // write a random number of bytes into it.
    int size = AppendTestUtil.NextInt(FileSize);
    AppendTestUtil.Log.Info("size=" + size);
    stm.Write(buffer, 0, size);
    // hflush file
    AppendTestUtil.Log.Info("hflush");
    stm.Hflush();
    AppendTestUtil.Log.Info("leasechecker.interruptAndJoin()");
    dfs.dfs.GetLeaseRenewer().InterruptAndJoin();
    // set the soft limit to be 1 second so that the
    // namenode triggers lease recovery on next attempt to write-for-open.
    cluster.SetLeasePeriod(ShortLeasePeriod, LongLeasePeriod);
    {
        // try to re-open the file before closing the previous handle. This
        // should fail but will trigger lease recovery.
        UserGroupInformation ugi = UserGroupInformation.CreateUserForTesting(fakeUsername, new string[] { fakeGroup });
        FileSystem dfs2 = DFSTestUtil.GetFileSystemAs(ugi, conf);
        bool done = false;
        for (int i = 0; i < 10 && !done; i++)
        {
            AppendTestUtil.Log.Info("i=" + i);
            try
            {
                dfs2.Create(filepath, false, BufSize, ReplicationNum, BlockSize);
                NUnit.Framework.Assert.Fail("Creation of an existing file should never succeed.");
            }
            catch (FileAlreadyExistsException)
            {
                done = true;
            }
            catch (AlreadyBeingCreatedException ex)
            {
                AppendTestUtil.Log.Info("GOOD! got " + ex.Message);
            }
            catch (IOException ioe)
            {
                AppendTestUtil.Log.Warn("UNEXPECTED IOException", ioe);
            }
            if (!done)
            {
                AppendTestUtil.Log.Info("sleep " + 5000 + "ms");
                try
                {
                    Sharpen.Thread.Sleep(5000);
                }
                catch (Exception)
                {
                }
            }
        }
        NUnit.Framework.Assert.IsTrue(done);
    }
    AppendTestUtil.Log.Info("Lease for file " + filepath + " is recovered. " + "Validating its contents now...");
    // verify that file-size matches
    long fileSize = dfs.GetFileStatus(filepath).GetLen();
    NUnit.Framework.Assert.IsTrue("File should be " + size + " bytes, but is actually found to be " + fileSize + " bytes", fileSize == size);
    // verify data
    AppendTestUtil.Log.Info("File size is good. " + "Now validating data and sizes from datanodes...");
    AppendTestUtil.CheckFullFile(dfs, filepath, size, buffer, filestr);
}