/// <summary>
/// Exhaustively exercises create/append/hflush combinations: for every initial
/// file length up to two blocks plus one byte, appends two segments of every
/// length up to one block (hflushing after the first), then validates the
/// resulting file contents and deletes the file.
/// </summary>
public virtual void TestAppend()
{
    int initialLenLimit = 2 * BlockSize + 1;
    int flushLimit = BlockSize;
    byte[] data = AppendTestUtil.InitBuffer(initialLenLimit + 2 * flushLimit);
    for (int initialLen = 0; initialLen <= initialLenLimit; initialLen++)
    {
        for (int firstFlush = 0; firstFlush <= flushLimit; firstFlush++)
        {
            for (int secondFlush = 0; secondFlush <= flushLimit; secondFlush++)
            {
                int expectedLen = initialLen + firstFlush + secondFlush;
                // create the initial file of initialLen bytes
                Path p = new Path("foo" + initialLen + "_" + firstFlush + "_" + secondFlush);
                Log.Info("Creating file " + p);
                int bufferSize = conf.GetInt(CommonConfigurationKeys.IoFileBufferSizeKey, 4096);
                FSDataOutputStream stream = fs.Create(p, false, bufferSize, Replication, BlockSize);
                stream.Write(data, 0, initialLen);
                stream.Close();
                // append the first segment and hflush it out to the datanodes
                stream = fs.Append(p);
                stream.Write(data, initialLen, firstFlush);
                stream.Hflush();
                // append the second segment; it is only pushed out by Close()
                stream.Write(data, initialLen + firstFlush, secondFlush);
                stream.Close();
                // validate the file content, then clean up for the next round
                AppendTestUtil.CheckFullFile(fs, p, expectedLen, data, p.ToString());
                fs.Delete(p, false);
            }
        }
    }
}
/// <summary>
/// Rolls back a rolling upgrade: stops a datanode, restarts the namenode with
/// "-rollingUpgrade rollback", restarts the stopped datanode, and verifies
/// that the pre-upgrade state is restored (foo exists, bar does not, and the
/// file's original contents are intact).
/// </summary>
/// <exception cref="System.IO.IOException"/>
private static void RollbackRollingUpgrade(Path foo, Path bar, Path file, byte[] data, MiniDFSCluster cluster)
{
    MiniDFSCluster.DataNodeProperties stoppedDataNode = cluster.StopDataNode(0);
    cluster.RestartNameNode("-rollingUpgrade", "rollback");
    cluster.RestartDataNode(stoppedDataNode, true);
    DistributedFileSystem fileSystem = cluster.GetFileSystem();
    NUnit.Framework.Assert.IsTrue(fileSystem.Exists(foo));
    NUnit.Framework.Assert.IsFalse(fileSystem.Exists(bar));
    AppendTestUtil.CheckFullFile(fileSystem, file, data.Length, data);
}
/// <summary>
/// Writes a file in two halves, calling Hflush() after each half (including a
/// repeated Hflush, which must be a harmless no-op), verifies full-block
/// sanity while the stream is still open, then closes and validates the
/// entire file.
/// </summary>
public virtual void TestSimpleFlush()
{
    Configuration conf = new HdfsConfiguration();
    fileContents = AppendTestUtil.InitBuffer(AppendTestUtil.FileSize);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
    FileSystem fs = cluster.GetFileSystem();
    try
    {
        // create a new file.
        Path file1 = new Path("/simpleFlush.dat");
        FSDataOutputStream stm = AppendTestUtil.CreateFile(fs, file1, 1);
        System.Console.Out.WriteLine("Created file simpleFlush.dat");
        // write the first half of the file and flush it to the datanodes
        int mid = AppendTestUtil.FileSize / 2;
        stm.Write(fileContents, 0, mid);
        stm.Hflush();
        System.Console.Out.WriteLine("Wrote and Flushed first part of file.");
        // write the remainder of the file
        stm.Write(fileContents, mid, AppendTestUtil.FileSize - mid);
        System.Console.Out.WriteLine("Written second part of file");
        // double Hflush: the second call must not fail or change state
        stm.Hflush();
        stm.Hflush();
        System.Console.Out.WriteLine("Wrote and Flushed second part of file.");
        // verify that full blocks are sane while the file is still open
        CheckFile(fs, file1, 1);
        stm.Close();
        System.Console.Out.WriteLine("Closed file.");
        // verify that entire file is good
        AppendTestUtil.CheckFullFile(fs, file1, AppendTestUtil.FileSize, fileContents, "Read 2"
            );
    }
    catch (IOException e)
    {
        System.Console.Out.WriteLine("Exception :" + e);
        throw;
    }
    catch (Exception e)
    {
        // wrap unexpected failures in an IOException for the test harness
        System.Console.Out.WriteLine("Throwable :" + e);
        Sharpen.Runtime.PrintStackTrace(e);
        throw new IOException("Throwable : " + e);
    }
    finally
    {
        fs.Close();
        cluster.Shutdown();
    }
}
/// <summary>
/// Writes a file in many 29-byte chunks, calling Hflush() after every chunk,
/// verifies block sanity mid-write, then closes and validates the whole file.
/// </summary>
public virtual void TestComplexFlush()
{
    Configuration conf = new HdfsConfiguration();
    fileContents = AppendTestUtil.InitBuffer(AppendTestUtil.FileSize);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
    FileSystem fs = cluster.GetFileSystem();
    try
    {
        // create a new file.
        Path file1 = new Path("/complexFlush.dat");
        FSDataOutputStream stm = AppendTestUtil.CreateFile(fs, file1, 1);
        System.Console.Out.WriteLine("Created file complexFlush.dat");
        int start = 0;
        // write the file in 29-byte chunks, hflushing after each one
        for (start = 0; (start + 29) < AppendTestUtil.FileSize; )
        {
            stm.Write(fileContents, start, 29);
            stm.Hflush();
            start += 29;
        }
        // write whatever remains (fewer than 29 bytes)
        stm.Write(fileContents, start, AppendTestUtil.FileSize - start);
        // need to make sure we completely write out all full blocks before
        // the checkFile() call (see FSOutputSummer#flush)
        stm.Flush();
        // verify that full blocks are sane
        CheckFile(fs, file1, 1);
        stm.Close();
        // verify that entire file is good
        AppendTestUtil.CheckFullFile(fs, file1, AppendTestUtil.FileSize, fileContents, "Read 2"
            );
    }
    catch (IOException e)
    {
        System.Console.Out.WriteLine("Exception :" + e);
        throw;
    }
    catch (Exception e)
    {
        // wrap unexpected failures in an IOException for the test harness
        System.Console.Out.WriteLine("Throwable :" + e);
        Sharpen.Runtime.PrintStackTrace(e);
        throw new IOException("Throwable : " + e);
    }
    finally
    {
        fs.Close();
        cluster.Shutdown();
    }
}
/// <summary>
/// Verifies hard-limit lease recovery: after the writer's lease renewer is
/// killed and the hard limit shortened, the namenode must recover the lease;
/// the stale writer's subsequent write/close must fail, and the hflushed data
/// must remain readable.
/// </summary>
public virtual void TestHardLeaseRecovery()
{
    //create a file
    string filestr = "/hardLeaseRecovery";
    AppendTestUtil.Log.Info("filestr=" + filestr);
    Path filepath = new Path(filestr);
    FSDataOutputStream stm = dfs.Create(filepath, true, BufSize, ReplicationNum, BlockSize
        );
    NUnit.Framework.Assert.IsTrue(dfs.dfs.Exists(filestr));
    // write bytes into the file.
    int size = AppendTestUtil.NextInt(FileSize);
    AppendTestUtil.Log.Info("size=" + size);
    stm.Write(buffer, 0, size);
    // hflush file so the data is durable on the datanodes
    AppendTestUtil.Log.Info("hflush");
    stm.Hflush();
    // kill the lease renewal thread so the lease can expire
    AppendTestUtil.Log.Info("leasechecker.interruptAndJoin()");
    dfs.dfs.GetLeaseRenewer().InterruptAndJoin();
    // set the hard limit to be 1 second
    cluster.SetLeasePeriod(LongLeasePeriod, ShortLeasePeriod);
    // wait for lease recovery to complete (file leaves under-construction state)
    LocatedBlocks locatedBlocks;
    do
    {
        Sharpen.Thread.Sleep(ShortLeasePeriod);
        locatedBlocks = dfs.dfs.GetLocatedBlocks(filestr, 0L, size);
    }
    while (locatedBlocks.IsUnderConstruction());
    NUnit.Framework.Assert.AreEqual(size, locatedBlocks.GetFileLength());
    // make sure that the writer thread gets killed
    try
    {
        stm.Write('b');
        stm.Close();
        NUnit.Framework.Assert.Fail("Writer thread should have been killed");
    }
    catch (IOException e)
    {
        // expected: the lease was recovered, so the stale writer must fail
        Sharpen.Runtime.PrintStackTrace(e);
    }
    // verify data
    AppendTestUtil.Log.Info("File size is good. Now validating sizes from datanodes..."
        );
    AppendTestUtil.CheckFullFile(dfs, filepath, size, buffer, filestr);
}
/// <summary>
/// Writes a 6-byte file one byte at a time, sleeping for the full client
/// socket timeout between writes, to exercise pipeline heartbeats keeping the
/// write pipeline alive during idle periods.
/// </summary>
public virtual void TestPipelineHeartbeat()
{
    int DatanodeNum = 2;
    int fileLen = 6;
    Configuration conf = new HdfsConfiguration();
    // make the client socket timeout short so each Sleep(timeout) below
    // would kill the pipeline if heartbeats were not sent
    int timeout = 2000;
    conf.SetInt(DFSConfigKeys.DfsClientSocketTimeoutKey, timeout);
    Path p = new Path("/pipelineHeartbeat/foo");
    System.Console.Out.WriteLine("p=" + p);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(DatanodeNum
        ).Build();
    try
    {
        DistributedFileSystem fs = cluster.GetFileSystem();
        byte[] fileContents = AppendTestUtil.InitBuffer(fileLen);
        // create a new file.
        FSDataOutputStream stm = AppendTestUtil.CreateFile(fs, p, DatanodeNum);
        stm.Write(fileContents, 0, 1);
        Sharpen.Thread.Sleep(timeout);
        stm.Hflush();
        System.Console.Out.WriteLine("Wrote 1 byte and hflush " + p);
        // write another byte
        Sharpen.Thread.Sleep(timeout);
        stm.Write(fileContents, 1, 1);
        stm.Hflush();
        stm.Write(fileContents, 2, 1);
        Sharpen.Thread.Sleep(timeout);
        stm.Hflush();
        stm.Write(fileContents, 3, 1);
        Sharpen.Thread.Sleep(timeout);
        stm.Write(fileContents, 4, 1);
        stm.Hflush();
        stm.Write(fileContents, 5, 1);
        Sharpen.Thread.Sleep(timeout);
        stm.Close();
        // verify that entire file is good
        AppendTestUtil.CheckFullFile(fs, p, fileLen, fileContents, "Failed to slowly write to a file"
            );
    }
    finally
    {
        cluster.Shutdown();
    }
}
/// <summary>
/// Append to a partial CRC chunk and the first write does not fill up the
/// partial CRC trunk
/// </summary>
/// <param name="appendToNewBlock">
/// if true, appends pass CreateFlag.NewBlock so appended data starts a new
/// block rather than extending the block with the partial chunk
/// </param>
/// <exception cref="System.IO.IOException"/>
private void TestAppendToPartialChunk(bool appendToNewBlock)
{
    Path p = new Path("/partialChunk/foo" + (appendToNewBlock ? "0" : "1"));
    int fileLen = 513;
    System.Console.Out.WriteLine("p=" + p);
    byte[] fileContents = AppendTestUtil.InitBuffer(fileLen);
    // create a new file.
    FSDataOutputStream stm = AppendTestUtil.CreateFile(fs, p, 1);
    // create 1 byte file
    stm.Write(fileContents, 0, 1);
    stm.Close();
    System.Console.Out.WriteLine("Wrote 1 byte and closed the file " + p);
    // append to file
    stm = appendToNewBlock ? fs.Append(p, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock
        ), 4096, null) : fs.Append(p);
    // Append to a partial CRC trunk
    stm.Write(fileContents, 1, 1);
    stm.Hflush();
    // The partial CRC trunk is not full yet and close the file
    stm.Close();
    System.Console.Out.WriteLine("Append 1 byte and closed the file " + p);
    // write the remainder of the file
    stm = appendToNewBlock ? fs.Append(p, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock
        ), 4096, null) : fs.Append(p);
    // ensure getPos is set to reflect existing size of the file
    NUnit.Framework.Assert.AreEqual(2, stm.GetPos());
    // append to a partial CRC trunk
    stm.Write(fileContents, 2, 1);
    // The partial chunk is not full yet, force to send a packet to DN
    stm.Hflush();
    System.Console.Out.WriteLine("Append and flush 1 byte");
    // The partial chunk is not full yet, force to send another packet to DN
    stm.Write(fileContents, 3, 2);
    stm.Hflush();
    System.Console.Out.WriteLine("Append and flush 2 byte");
    // fill up the partial chunk and close the file
    stm.Write(fileContents, 5, fileLen - 5);
    stm.Close();
    System.Console.Out.WriteLine("Flush 508 byte and closed the file " + p);
    // verify that entire file is good
    AppendTestUtil.CheckFullFile(fs, p, fileLen, fileContents, "Failed to append to a partial chunk"
        );
}
/// <summary>
/// Compares the HDFS file <paramref name="file"/> against the local copy
/// <paramref name="localFile"/> byte-for-byte (lengths are compared first via
/// CheckLength) and returns a description of the comparison performed.
/// </summary>
/// <exception cref="System.IO.IOException"/>
internal static string CheckFullFile(Path file, FilePath localFile)
{
    StringBuilder b = new StringBuilder("checkFullFile: ").Append(file.GetName()).Append
        (" vs ").Append(localFile);
    byte[] bytes = new byte[CheckLength(file, localFile)];
    b.Append(", length=").Append(bytes.Length);
    FileInputStream @in = new FileInputStream(localFile);
    try
    {
        for (int n = 0; n < bytes.Length; )
        {
            // Read may return fewer bytes than requested; guard against EOF
            // (a non-positive return) which would otherwise loop forever or
            // corrupt the offset.
            int read = @in.Read(bytes, n, bytes.Length - n);
            if (read <= 0)
            {
                throw new IOException("Unexpected EOF reading " + localFile + " at offset " + n);
            }
            n += read;
        }
    }
    finally
    {
        // always release the local file handle, even if the read fails
        @in.Close();
    }
    AppendTestUtil.CheckFullFile(dfs, file, bytes.Length, bytes, "File content mismatch: "
         + b, false);
    return (b.ToString());
}
/// <summary>
/// Starts a rolling upgrade (enter safe mode, prepare, leave safe mode),
/// creates the directory bar, then truncates the given file to a random
/// non-zero length and verifies block recovery and the truncated contents.
/// </summary>
/// <exception cref="System.IO.IOException"/>
private static void StartRollingUpgrade(Path foo, Path bar, Path file, byte[] data
    , MiniDFSCluster cluster)
{
    DistributedFileSystem fileSystem = cluster.GetFileSystem();
    // start rolling upgrade
    fileSystem.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
    fileSystem.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Prepare);
    fileSystem.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
    fileSystem.Mkdirs(bar);
    NUnit.Framework.Assert.IsTrue(fileSystem.Exists(foo));
    NUnit.Framework.Assert.IsTrue(fileSystem.Exists(bar));
    // truncate the file to a random length in [1, data.Length - 1]
    int truncatedLength = DFSUtil.GetRandom().Next(data.Length - 1) + 1;
    fileSystem.Truncate(file, truncatedLength);
    TestFileTruncate.CheckBlockRecovery(file, fileSystem);
    AppendTestUtil.CheckFullFile(fileSystem, file, truncatedLength, data);
}
// 
// verify that the data written to the full blocks are sane
// 
/// <summary>
/// Polls until every full block of <paramref name="name"/> reports at least
/// <paramref name="repl"/> replicas on the datanodes, then re-reads all full
/// blocks and compares them against the expected buffer. The file may still
/// be open for write, so file status is deliberately not checked.
/// </summary>
/// <exception cref="System.IO.IOException"/>
private void CheckFile(FileSystem fileSys, Path name, int repl)
{
    bool done = false;
    // wait till all full blocks are confirmed by the datanodes.
    while (!done)
    {
        try
        {
            Sharpen.Thread.Sleep(1000);
        }
        catch (Exception)
        {
            // best-effort pacing only; an interrupted sleep just retries sooner
        }
        done = true;
        BlockLocation[] locations = fileSys.GetFileBlockLocations(fileSys.GetFileStatus(name
            ), 0, AppendTestUtil.FileSize);
        if (locations.Length < AppendTestUtil.NumBlocks)
        {
            System.Console.Out.WriteLine("Number of blocks found " + locations.Length);
            done = false;
            continue;
        }
        for (int idx = 0; idx < AppendTestUtil.NumBlocks; idx++)
        {
            if (locations[idx].GetHosts().Length < repl)
            {
                // (fixed log-message typo: "replciated" -> "replicated")
                System.Console.Out.WriteLine("Block index " + idx + " not yet replicated.");
                done = false;
                break;
            }
        }
    }
    byte[] expected = new byte[AppendTestUtil.NumBlocks * AppendTestUtil.BlockSize];
    System.Array.Copy(fileContents, 0, expected, 0, expected.Length);
    // do a sanity check. Read the file
    // do not check file status since the file is not yet closed.
    AppendTestUtil.CheckFullFile(fileSys, name, AppendTestUtil.NumBlocks * AppendTestUtil
        .BlockSize, expected, "Read 1", false);
}
/// <summary>
/// The method starts new cluster with defined Configuration; creates a file
/// with specified block_size and writes 10 equal sections in it; it also calls
/// hflush/hsync after each write and throws an IOException in case of an error.
/// </summary>
/// <param name="conf">cluster configuration</param>
/// <param name="fileName">of the file to be created and processed as required</param>
/// <param name="block_size">value to be used for the file's creation</param>
/// <param name="replicas">is the number of replicas</param>
/// <param name="isSync">hsync or hflush</param>
/// <param name="syncFlags">specify the semantic of the sync/flush</param>
/// <exception cref="System.IO.IOException">in case of any errors</exception>
public static void DoTheJob(Configuration conf, string fileName, long block_size,
    short replicas, bool isSync, EnumSet<HdfsDataOutputStream.SyncFlag> syncFlags)
{
    byte[] fileContent;
    int Sections = 10;
    fileContent = AppendTestUtil.InitBuffer(AppendTestUtil.FileSize);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(replicas).
        Build();
    // Make sure we work with DFS in order to utilize all its functionality
    DistributedFileSystem fileSystem = cluster.GetFileSystem();
    FSDataInputStream @is;
    try
    {
        Path path = new Path(fileName);
        string pathName = new Path(fileSystem.GetWorkingDirectory(), path).ToUri().GetPath
            ();
        FSDataOutputStream stm = fileSystem.Create(path, false, 4096, replicas, block_size
            );
        System.Console.Out.WriteLine("Created file " + fileName);
        // file is written in Sections equal parts plus a remainder ("rounding")
        int tenth = AppendTestUtil.FileSize / Sections;
        int rounding = AppendTestUtil.FileSize - tenth * Sections;
        for (int i = 0; i < Sections; i++)
        {
            System.Console.Out.WriteLine("Writing " + (tenth * i) + " to " + (tenth * (i + 1)
                ) + " section to file " + fileName);
            // write to the file
            stm.Write(fileContent, tenth * i, tenth);
            // Wait while hflush/hsync pushes all packets through built pipeline
            if (isSync)
            {
                ((DFSOutputStream)stm.GetWrappedStream()).Hsync(syncFlags);
            }
            else
            {
                ((DFSOutputStream)stm.GetWrappedStream()).Hflush();
            }
            // Check file length if updatelength is required
            if (isSync && syncFlags.Contains(HdfsDataOutputStream.SyncFlag.UpdateLength))
            {
                long currentFileLength = fileSystem.GetFileStatus(path).GetLen();
                NUnit.Framework.Assert.AreEqual("File size doesn't match for hsync/hflush with updating the length"
                    , tenth * (i + 1), currentFileLength);
            }
            else
            {
                if (isSync && syncFlags.Contains(HdfsDataOutputStream.SyncFlag.EndBlock))
                {
                    // EndBlock: each synced section must have closed out its own block
                    LocatedBlocks blocks = fileSystem.dfs.GetLocatedBlocks(pathName, 0);
                    NUnit.Framework.Assert.AreEqual(i + 1, blocks.GetLocatedBlocks().Count);
                }
            }
            byte[] toRead = new byte[tenth];
            byte[] expected = new byte[tenth];
            System.Array.Copy(fileContent, tenth * i, expected, 0, tenth);
            // Open the same file for read. Need to create new reader after every write operation(!)
            @is = fileSystem.Open(path);
            @is.Seek(tenth * i);
            int readBytes = @is.Read(toRead, 0, tenth);
            System.Console.Out.WriteLine("Has read " + readBytes);
            NUnit.Framework.Assert.IsTrue("Should've get more bytes", (readBytes > 0) && (readBytes
                 <= tenth));
            // NOTE(review): @is is not closed in a finally block, so a failed
            // assertion above leaks this reader — acceptable for a test helper.
            @is.Close();
            CheckData(toRead, 0, readBytes, expected, "Partial verification");
        }
        System.Console.Out.WriteLine("Writing " + (tenth * Sections) + " to " + (tenth *
            Sections + rounding) + " section to file " + fileName);
        // write the remainder and close; Close() flushes everything
        stm.Write(fileContent, tenth * Sections, rounding);
        stm.Close();
        NUnit.Framework.Assert.AreEqual("File size doesn't match ", AppendTestUtil.FileSize
            , fileSystem.GetFileStatus(path).GetLen());
        AppendTestUtil.CheckFullFile(fileSystem, path, fileContent.Length, fileContent, "hflush()"
            );
    }
    finally
    {
        fileSystem.Close();
        cluster.Shutdown();
    }
}
/// <summary>
/// Exercises basic append scenarios in three parts: (1) append twice to an
/// existing file and verify the full contents; (2) append to a non-existing
/// file, expecting FileNotFoundException; (3) verify append permission checks
/// under a non-superuser account (append allowed with write permission on the
/// file, denied without it).
/// </summary>
public virtual void TestSimpleAppend()
{
    Configuration conf = new HdfsConfiguration();
    conf.SetInt(DFSConfigKeys.DfsDatanodeHandlerCountKey, 50);
    fileContents = AppendTestUtil.InitBuffer(AppendTestUtil.FileSize);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
    FileSystem fs = cluster.GetFileSystem();
    try
    {
        {
            // test appending to a file.
            // create a new file.
            Path file1 = new Path("/simpleAppend.dat");
            FSDataOutputStream stm = AppendTestUtil.CreateFile(fs, file1, 1);
            System.Console.Out.WriteLine("Created file simpleAppend.dat");
            // write to file
            int mid = 186;
            // io.bytes.per.checksum bytes
            System.Console.Out.WriteLine("Writing " + mid + " bytes to file " + file1);
            stm.Write(fileContents, 0, mid);
            stm.Close();
            System.Console.Out.WriteLine("Wrote and Closed first part of file.");
            // write to file
            int mid2 = 607;
            // io.bytes.per.checksum bytes
            // NOTE(review): this message reports 'mid' but (mid2 - mid) bytes are
            // actually written below — log output only, behavior is unaffected.
            System.Console.Out.WriteLine("Writing " + mid + " bytes to file " + file1);
            stm = fs.Append(file1);
            stm.Write(fileContents, mid, mid2 - mid);
            stm.Close();
            System.Console.Out.WriteLine("Wrote and Closed second part of file.");
            // write the remainder of the file
            stm = fs.Append(file1);
            // ensure getPos is set to reflect existing size of the file
            NUnit.Framework.Assert.IsTrue(stm.GetPos() > 0);
            System.Console.Out.WriteLine("Writing " + (AppendTestUtil.FileSize - mid2) + " bytes to file "
                 + file1);
            stm.Write(fileContents, mid2, AppendTestUtil.FileSize - mid2);
            System.Console.Out.WriteLine("Written second part of file");
            stm.Close();
            System.Console.Out.WriteLine("Wrote and Closed second part of file.");
            // verify that entire file is good
            AppendTestUtil.CheckFullFile(fs, file1, AppendTestUtil.FileSize, fileContents, "Read 2"
                );
        }
        {
            // test appending to an non-existing file.
            FSDataOutputStream @out = null;
            try
            {
                @out = fs.Append(new Path("/non-existing.dat"));
                NUnit.Framework.Assert.Fail("Expected to have FileNotFoundException");
            }
            catch (FileNotFoundException fnfe)
            {
                System.Console.Out.WriteLine("Good: got " + fnfe);
                Sharpen.Runtime.PrintStackTrace(fnfe, System.Console.Out);
            }
            finally
            {
                IOUtils.CloseStream(@out);
            }
        }
        {
            // test append permission.
            //set root to all writable
            Path root = new Path("/");
            fs.SetPermission(root, new FsPermission((short)0x1ff));
            fs.Close();
            // login as a different user
            UserGroupInformation superuser = UserGroupInformation.GetCurrentUser();
            string username = "******";
            string group = "testappendgroup";
            NUnit.Framework.Assert.IsFalse(superuser.GetShortUserName().Equals(username));
            NUnit.Framework.Assert.IsFalse(Arrays.AsList(superuser.GetGroupNames()).Contains(
                group));
            UserGroupInformation appenduser = UserGroupInformation.CreateUserForTesting(username
                , new string[] { group });
            fs = DFSTestUtil.GetFileSystemAs(appenduser, conf);
            // create a file as the test user
            Path dir = new Path(root, GetType().Name);
            Path foo = new Path(dir, "foo.dat");
            FSDataOutputStream @out = null;
            int offset = 0;
            try
            {
                @out = fs.Create(foo);
                int len = 10 + AppendTestUtil.NextInt(100);
                @out.Write(fileContents, offset, len);
                offset += len;
            }
            finally
            {
                IOUtils.CloseStream(@out);
            }
            // change dir and foo to minimal permissions.
            fs.SetPermission(dir, new FsPermission((short)0x40));
            fs.SetPermission(foo, new FsPermission((short)0x80));
            // try append, should success (owner still has write on foo)
            @out = null;
            try
            {
                @out = fs.Append(foo);
                int len = 10 + AppendTestUtil.NextInt(100);
                @out.Write(fileContents, offset, len);
                offset += len;
            }
            finally
            {
                IOUtils.CloseStream(@out);
            }
            // change dir and foo to all but no write on foo.
            fs.SetPermission(foo, new FsPermission((short)0x17f));
            fs.SetPermission(dir, new FsPermission((short)0x1ff));
            // try append, should fail
            @out = null;
            try
            {
                @out = fs.Append(foo);
                NUnit.Framework.Assert.Fail("Expected to have AccessControlException");
            }
            catch (AccessControlException ace)
            {
                System.Console.Out.WriteLine("Good: got " + ace);
                Sharpen.Runtime.PrintStackTrace(ace, System.Console.Out);
            }
            finally
            {
                IOUtils.CloseStream(@out);
            }
        }
    }
    catch (IOException e)
    {
        System.Console.Out.WriteLine("Exception :" + e);
        throw;
    }
    catch (Exception e)
    {
        // wrap unexpected failures in an IOException for the test harness
        System.Console.Out.WriteLine("Throwable :" + e);
        Sharpen.Runtime.PrintStackTrace(e);
        throw new IOException("Throwable : " + e);
    }
    finally
    {
        fs.Close();
        cluster.Shutdown();
    }
}
// create a bunch of files. Write to them and then verify.
/// <summary>
/// Workload thread body: repeatedly picks a random file from the shared pool,
/// appends a random number of bytes to it, waits until the namenode metadata
/// reflects the new length, validates the full contents, and returns the file
/// to the pool. Failures set TestFileAppend2.globalStatus for the main test.
/// </summary>
public override void Run()
{
    System.Console.Out.WriteLine("Workload " + this.id + " starting... ");
    for (int i = 0; i < this._enclosing.numAppendsPerThread; i++)
    {
        // pick a file at random and remove it from pool
        Path testfile;
        lock (this._enclosing.testFiles)
        {
            if (this._enclosing.testFiles.Count == 0)
            {
                System.Console.Out.WriteLine("Completed write to almost all files.");
                return;
            }
            int index = AppendTestUtil.NextInt(this._enclosing.testFiles.Count);
            testfile = this._enclosing.testFiles.Remove(index);
        }
        long len = 0;
        int sizeToAppend = 0;
        try
        {
            DistributedFileSystem fs = this.cluster.GetFileSystem();
            // add a random number of bytes to file
            len = fs.GetFileStatus(testfile).GetLen();
            // if file is already full, then pick another file
            if (len >= AppendTestUtil.FileSize)
            {
                System.Console.Out.WriteLine("File " + testfile + " is full.");
                continue;
            }
            // do small size appends so that we can trigger multiple
            // appends to the same file.
            //
            int left = (int)(AppendTestUtil.FileSize - len) / 3;
            if (left <= 0)
            {
                left = 1;
            }
            sizeToAppend = AppendTestUtil.NextInt(left);
            System.Console.Out.WriteLine("Workload thread " + this.id + " appending " + sizeToAppend
                 + " bytes " + " to file " + testfile + " of size " + len);
            FSDataOutputStream stm = this.appendToNewBlock ? fs.Append(testfile, EnumSet.Of(CreateFlag
                .Append, CreateFlag.NewBlock), 4096, null) : fs.Append(testfile);
            stm.Write(this._enclosing.fileContents, (int)len, sizeToAppend);
            stm.Close();
            // wait for the file size to be reflected in the namenode metadata
            while (fs.GetFileStatus(testfile).GetLen() != (len + sizeToAppend))
            {
                try
                {
                    System.Console.Out.WriteLine("Workload thread " + this.id + " file " + testfile +
                        " size " + fs.GetFileStatus(testfile).GetLen() + " expected size " + (len + sizeToAppend
                        ) + " waiting for namenode metadata update.");
                    Sharpen.Thread.Sleep(5000);
                }
                catch (Exception)
                {
                    // interrupted sleep: just re-check the file length
                }
            }
            NUnit.Framework.Assert.IsTrue("File " + testfile + " size is " + fs.GetFileStatus
                (testfile).GetLen() + " but expected " + (len + sizeToAppend), fs.GetFileStatus(
                testfile).GetLen() == (len + sizeToAppend));
            AppendTestUtil.CheckFullFile(fs, testfile, (int)(len + sizeToAppend), this._enclosing
                .fileContents, "Read 2");
        }
        catch (Exception e)
        {
            // record the failure globally so the main test thread can fail
            TestFileAppend2.globalStatus = false;
            if (e.ToString() != null)
            {
                System.Console.Out.WriteLine("Workload exception " + this.id + " testfile " + testfile
                     + " " + e);
                Sharpen.Runtime.PrintStackTrace(e);
            }
            NUnit.Framework.Assert.IsTrue("Workload exception " + this.id + " testfile " + testfile
                 + " expected size " + (len + sizeToAppend), false);
        }
        // Add testfile back to the pool of files.
        lock (this._enclosing.testFiles)
        {
            this._enclosing.testFiles.AddItem(testfile);
        }
    }
}
/// <summary>
/// Verifies soft-limit lease recovery: after the writer's lease renewer is
/// killed and the soft limit shortened, a second client's attempt to
/// re-create the file triggers lease recovery on the namenode; once recovery
/// completes the hflushed data must be fully readable.
/// </summary>
public virtual void TestSoftLeaseRecovery()
{
    IDictionary<string, string[]> u2g_map = new Dictionary<string, string[]>(1);
    u2g_map[fakeUsername] = new string[] { fakeGroup };
    DFSTestUtil.UpdateConfWithFakeGroupMapping(conf, u2g_map);
    // Reset default lease periods
    cluster.SetLeasePeriod(HdfsConstants.LeaseSoftlimitPeriod, HdfsConstants.LeaseHardlimitPeriod
        );
    //create a file
    // create a random file name
    string filestr = "/foo" + AppendTestUtil.NextInt();
    AppendTestUtil.Log.Info("filestr=" + filestr);
    Path filepath = new Path(filestr);
    FSDataOutputStream stm = dfs.Create(filepath, true, BufSize, ReplicationNum, BlockSize
        );
    NUnit.Framework.Assert.IsTrue(dfs.dfs.Exists(filestr));
    // write random number of bytes into it.
    int size = AppendTestUtil.NextInt(FileSize);
    AppendTestUtil.Log.Info("size=" + size);
    stm.Write(buffer, 0, size);
    // hflush file so the data is durable before the lease lapses
    AppendTestUtil.Log.Info("hflush");
    stm.Hflush();
    // kill the lease renewal thread so the soft limit can expire
    AppendTestUtil.Log.Info("leasechecker.interruptAndJoin()");
    dfs.dfs.GetLeaseRenewer().InterruptAndJoin();
    // set the soft limit to be 1 second so that the
    // namenode triggers lease recovery on next attempt to write-for-open.
    cluster.SetLeasePeriod(ShortLeasePeriod, LongLeasePeriod);
    {
        // try to re-open the file before closing the previous handle. This
        // should fail but will trigger lease recovery.
        UserGroupInformation ugi = UserGroupInformation.CreateUserForTesting(fakeUsername
            , new string[] { fakeGroup });
        FileSystem dfs2 = DFSTestUtil.GetFileSystemAs(ugi, conf);
        bool done = false;
        for (int i = 0; i < 10 && !done; i++)
        {
            AppendTestUtil.Log.Info("i=" + i);
            try
            {
                dfs2.Create(filepath, false, BufSize, ReplicationNum, BlockSize);
                NUnit.Framework.Assert.Fail("Creation of an existing file should never succeed.");
            }
            catch (FileAlreadyExistsException)
            {
                // lease recovery has completed: the file now exists as closed
                done = true;
            }
            catch (AlreadyBeingCreatedException ex)
            {
                // recovery not finished yet; retry after a pause below
                AppendTestUtil.Log.Info("GOOD! got " + ex.Message);
            }
            catch (IOException ioe)
            {
                AppendTestUtil.Log.Warn("UNEXPECTED IOException", ioe);
            }
            if (!done)
            {
                AppendTestUtil.Log.Info("sleep " + 5000 + "ms");
                try
                {
                    Sharpen.Thread.Sleep(5000);
                }
                catch (Exception)
                {
                    // interrupted sleep: just retry the create sooner
                }
            }
        }
        NUnit.Framework.Assert.IsTrue(done);
    }
    AppendTestUtil.Log.Info("Lease for file " + filepath + " is recovered. " + "Validating its contents now..."
        );
    // verify that file-size matches
    long fileSize = dfs.GetFileStatus(filepath).GetLen();
    NUnit.Framework.Assert.IsTrue("File should be " + size + " bytes, but is actually "
         + " found to be " + fileSize + " bytes", fileSize == size);
    // verify data
    AppendTestUtil.Log.Info("File size is good. " + "Now validating data and sizes from datanodes..."
        );
    AppendTestUtil.CheckFullFile(dfs, filepath, size, buffer, filestr);
}
/// <summary>
/// Drives hard-limit lease recovery across a namenode restart: writes and
/// hflushes data, optionally renames the file, kills the lease renewer,
/// disables datanode heartbeats so recovery cannot complete, checks the lease
/// survives a namenode restart, then re-enables heartbeats, lets recovery
/// finish, and verifies the stale writer fails and the data is intact.
/// </summary>
/// <param name="doRename">if true, rename the file after hflush so recovery runs on the new path</param>
/// <param name="size">number of bytes to write; a negative value picks a random size</param>
/// <exception cref="System.Exception"/>
public virtual void HardLeaseRecoveryRestartHelper(bool doRename, int size)
{
    if (size < 0)
    {
        size = AppendTestUtil.NextInt(FileSize + 1);
    }
    //create a file
    string fileStr = "/hardLeaseRecovery";
    AppendTestUtil.Log.Info("filestr=" + fileStr);
    Path filePath = new Path(fileStr);
    FSDataOutputStream stm = dfs.Create(filePath, true, BufSize, ReplicationNum, BlockSize
        );
    NUnit.Framework.Assert.IsTrue(dfs.dfs.Exists(fileStr));
    // write bytes into the file.
    AppendTestUtil.Log.Info("size=" + size);
    stm.Write(buffer, 0, size);
    string originalLeaseHolder = NameNodeAdapter.GetLeaseHolderForPath(cluster.GetNameNode
        (), fileStr);
    NUnit.Framework.Assert.IsFalse("original lease holder should not be the NN", originalLeaseHolder
        .Equals(HdfsServerConstants.NamenodeLeaseHolder));
    // hflush file
    AppendTestUtil.Log.Info("hflush");
    stm.Hflush();
    // check visible length
    HdfsDataInputStream @in = (HdfsDataInputStream)dfs.Open(filePath);
    NUnit.Framework.Assert.AreEqual(size, @in.GetVisibleLength());
    @in.Close();
    if (doRename)
    {
        // rename so that lease recovery must follow the file to its new path
        fileStr += ".renamed";
        Path renamedPath = new Path(fileStr);
        NUnit.Framework.Assert.IsTrue(dfs.Rename(filePath, renamedPath));
        filePath = renamedPath;
    }
    // kill the lease renewal thread
    AppendTestUtil.Log.Info("leasechecker.interruptAndJoin()");
    dfs.dfs.GetLeaseRenewer().InterruptAndJoin();
    // Make sure the DNs don't send a heartbeat for a while, so the blocks
    // won't actually get completed during lease recovery.
    foreach (DataNode dn in cluster.GetDataNodes())
    {
        DataNodeTestUtils.SetHeartbeatsDisabledForTests(dn, true);
    }
    // set the hard limit to be 1 second
    cluster.SetLeasePeriod(LongLeasePeriod, ShortLeasePeriod);
    // Make sure lease recovery begins.
    Sharpen.Thread.Sleep(HdfsServerConstants.NamenodeLeaseRecheckInterval * 2);
    CheckLease(fileStr, size);
    // the in-progress recovery state must survive a namenode restart
    cluster.RestartNameNode(false);
    CheckLease(fileStr, size);
    // Let the DNs send heartbeats again.
    foreach (DataNode dn_1 in cluster.GetDataNodes())
    {
        DataNodeTestUtils.SetHeartbeatsDisabledForTests(dn_1, false);
    }
    cluster.WaitActive();
    // set the hard limit to be 1 second, to initiate lease recovery.
    cluster.SetLeasePeriod(LongLeasePeriod, ShortLeasePeriod);
    // wait for lease recovery to complete
    LocatedBlocks locatedBlocks;
    do
    {
        Sharpen.Thread.Sleep(ShortLeasePeriod);
        locatedBlocks = dfs.dfs.GetLocatedBlocks(fileStr, 0L, size);
    }
    while (locatedBlocks.IsUnderConstruction());
    NUnit.Framework.Assert.AreEqual(size, locatedBlocks.GetFileLength());
    // make sure that the client can't write data anymore.
    try
    {
        stm.Write('b');
        stm.Hflush();
        NUnit.Framework.Assert.Fail("Should not be able to flush after we've lost the lease"
            );
    }
    catch (IOException e)
    {
        Log.Info("Expceted exception on write/hflush", e);
    }
    try
    {
        stm.Close();
        NUnit.Framework.Assert.Fail("Should not be able to close after we've lost the lease"
            );
    }
    catch (IOException e)
    {
        Log.Info("Expected exception on close", e);
    }
    // verify data
    AppendTestUtil.Log.Info("File size is good. Now validating sizes from datanodes..."
        );
    AppendTestUtil.CheckFullFile(dfs, filePath, size, buffer, fileStr);
}
/// <summary>
/// Verifies Hflush()/Close() behavior when the calling thread is interrupted:
/// either the call succeeds with interrupt status preserved, or it throws
/// ThreadInterruptedException and clears the interrupt, after which a retry
/// succeeds.
/// </summary>
public virtual void TestHFlushInterrupted()
{
    int DatanodeNum = 2;
    int fileLen = 6;
    byte[] fileContents = AppendTestUtil.InitBuffer(fileLen);
    Configuration conf = new HdfsConfiguration();
    Path p = new Path("/hflush-interrupted");
    System.Console.Out.WriteLine("p=" + p);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(DatanodeNum
        ).Build();
    try
    {
        DistributedFileSystem fs = cluster.GetFileSystem();
        // create a new file.
        FSDataOutputStream stm = AppendTestUtil.CreateFile(fs, p, DatanodeNum);
        stm.Write(fileContents, 0, 2);
        // interrupt ourselves just before the flush
        Sharpen.Thread.CurrentThread().Interrupt();
        try
        {
            stm.Hflush();
            // If we made it past the hflush(), then that means that the ack made it back
            // from the pipeline before we got to the wait() call. In that case we should
            // still have interrupted status.
            NUnit.Framework.Assert.IsTrue(Sharpen.Thread.Interrupted());
        }
        catch (ThreadInterruptedException)
        {
            System.Console.Out.WriteLine("Got expected exception during flush");
        }
        // in either path the interrupt status must now be cleared
        NUnit.Framework.Assert.IsFalse(Sharpen.Thread.Interrupted());
        // Try again to flush should succeed since we no longer have interrupt status
        stm.Hflush();
        // Write some more data and flush
        stm.Write(fileContents, 2, 2);
        stm.Hflush();
        // Write some data and close while interrupted
        stm.Write(fileContents, 4, 2);
        Sharpen.Thread.CurrentThread().Interrupt();
        try
        {
            stm.Close();
            // If we made it past the close(), then that means that the ack made it back
            // from the pipeline before we got to the wait() call. In that case we should
            // still have interrupted status.
            NUnit.Framework.Assert.IsTrue(Sharpen.Thread.Interrupted());
        }
        catch (ThreadInterruptedException)
        {
            System.Console.Out.WriteLine("Got expected exception during close");
            // If we got the exception, we shouldn't have interrupted status anymore.
            NUnit.Framework.Assert.IsFalse(Sharpen.Thread.Interrupted());
            // Now do a successful close.
            stm.Close();
        }
        // verify that entire file is good: only the first 4 bytes were hflushed
        // before the interrupted close, so only those 4 are checked here
        AppendTestUtil.CheckFullFile(fs, p, 4, fileContents, "Failed to deal with thread interruptions"
            , false);
    }
    finally
    {
        cluster.Shutdown();
    }
}
/// <summary>
/// Appends to a file several times with CreateFlag.NewBlock while one
/// datanode is stopped (bumping generation stamps), restarts that datanode
/// during a final under-construction append, then verifies the block count,
/// block sizes, and full file contents — both before and after a namenode
/// restart (to check that the editlog replays correctly).
/// </summary>
public virtual void TestMultiAppend2()
{
    Configuration conf = new HdfsConfiguration();
    conf.Set("dfs.client.block.write.replace-datanode-on-failure.enable", "false");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
    DistributedFileSystem fs = null;
    string hello = "hello\n";
    try
    {
        fs = cluster.GetFileSystem();
        Path path = new Path("/test");
        FSDataOutputStream @out = fs.Create(path);
        @out.WriteBytes(hello);
        @out.Close();
        // stop one datanode
        MiniDFSCluster.DataNodeProperties dnProp = cluster.StopDataNode(0);
        string dnAddress = dnProp.datanode.GetXferAddress().ToString();
        if (dnAddress.StartsWith("/"))
        {
            dnAddress = Sharpen.Runtime.Substring(dnAddress, 1);
        }
        // append again to bump genstamps
        for (int i = 0; i < 2; i++)
        {
            @out = fs.Append(path, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock), 4096,
                null);
            @out.WriteBytes(hello);
            @out.Close();
        }
        // re-open and make the block state as underconstruction
        @out = fs.Append(path, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock), 4096,
            null);
        cluster.RestartDataNode(dnProp, true);
        // wait till the block report comes
        Sharpen.Thread.Sleep(2000);
        @out.WriteBytes(hello);
        @out.Close();
        // check the block locations
        LocatedBlocks blocks = fs.GetClient().GetLocatedBlocks(path.ToString(), 0L);
        // since we append the file 3 time, we should be 4 blocks
        NUnit.Framework.Assert.AreEqual(4, blocks.GetLocatedBlocks().Count);
        foreach (LocatedBlock block in blocks.GetLocatedBlocks())
        {
            NUnit.Framework.Assert.AreEqual(hello.Length, block.GetBlockSize());
        }
        // expected content is "hello\n" repeated 4 times (1 create + 3 appends)
        StringBuilder sb = new StringBuilder();
        for (int i_1 = 0; i_1 < 4; i_1++)
        {
            sb.Append(hello);
        }
        byte[] content = Sharpen.Runtime.GetBytesForString(sb.ToString());
        AppendTestUtil.CheckFullFile(fs, path, content.Length, content, "Read /test");
        // restart namenode to make sure the editlog can be properly applied
        cluster.RestartNameNode(true);
        cluster.WaitActive();
        AppendTestUtil.CheckFullFile(fs, path, content.Length, content, "Read /test");
        blocks = fs.GetClient().GetLocatedBlocks(path.ToString(), 0L);
        // since we append the file 3 time, we should be 4 blocks
        NUnit.Framework.Assert.AreEqual(4, blocks.GetLocatedBlocks().Count);
        foreach (LocatedBlock block_1 in blocks.GetLocatedBlocks())
        {
            NUnit.Framework.Assert.AreEqual(hello.Length, block_1.GetBlockSize());
        }
    }
    finally
    {
        IOUtils.CloseStream(fs);
        cluster.Shutdown();
    }
}