public override void Run()
{
    // Writer-thread loop: writes an increasing counter to the stream and
    // hflushes after each write, sleeping sleepms between iterations, until
    // the shared "running" flag is cleared or the thread is interrupted.
    int i = 0;
    try
    {
        Sleep(sleepms);
        for (; running; i++)
        {
            Log.Info(GetName() + " writes " + i);
            @out.Write(i);
            @out.Hflush();
            Sleep(sleepms);
        }
    }
    catch (IOException e)
    {
        // A write/flush failure is fatal for the test. NOTE: this clause must
        // come BEFORE catch (Exception) — in the original ordering it was
        // unreachable (CS0160) because IOException derives from Exception,
        // so IO errors were silently logged instead of rethrown.
        throw new RuntimeException(GetName(), e);
    }
    catch (Exception e)
    {
        // Interruption (thread being stopped) simply ends the writer loop.
        Log.Info(GetName() + " interrupted:" + e);
    }
    finally
    {
        // Record how far the writer got before terminating.
        Log.Info(GetName() + " terminated: i=" + i);
    }
}
/// <summary>
/// Verifies that the NameNode enforces dfs.namenode.fs-limits.max-blocks-per-file:
/// writing up to the configured number of blocks succeeds, and writing one more
/// block fails with an IOException mentioning the limit.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestMaxBlocksPerFileLimit()
{
    Configuration conf = new HdfsConfiguration();
    // Make a small block size and a low limit so the test stays fast.
    long blockSize = 4096;
    long numBlocks = 2;
    conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, blockSize);
    conf.SetLong(DFSConfigKeys.DfsNamenodeMaxBlocksPerFileKey, numBlocks);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
    FileSystem fs = cluster.GetFileSystem();
    HdfsDataOutputStream fout = (HdfsDataOutputStream)fs.Create(new Path("/testmaxfilelimit"
        ));
    try
    {
        // Write exactly the maximum permitted number of blocks.
        fout.Write(new byte[(int)blockSize * (int)numBlocks]);
        fout.Hflush();
        // Try to write one more block — this must be rejected.
        try
        {
            fout.Write(new byte[1]);
            fout.Hflush();
            // NOTE(review): Debug.Assert is compiled out in Release builds, so
            // this failure path would be silent there — consider the test
            // framework's Assert.Fail instead; kept as-is to avoid a new
            // dependency.
            System.Diagnostics.Debug.Assert(false, "Expected IOException after writing too many blocks"
                );
        }
        catch (IOException e)
        {
            GenericTestUtils.AssertExceptionContains("File has reached the limit" + " on maximum number of"
                , e);
        }
    }
    finally
    {
        // Close the leaked output stream before tearing down the cluster.
        try
        {
            fout.Close();
        }
        catch (IOException)
        {
            // The stream is expected to be in a failed state after exceeding
            // the block limit; ignore so cluster shutdown still runs.
        }
        cluster.Shutdown();
    }
}