public virtual void TestWrite()
{
    MiniDFSCluster cluster = null;
    int numDataNodes = 2;
    Configuration conf = GetConf(numDataNodes);
    try
    {
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDataNodes).Build();
        cluster.WaitActive();
        NUnit.Framework.Assert.AreEqual(numDataNodes, cluster.GetDataNodes().Count);
        NameNode nn = cluster.GetNameNode();
        BlockManager bm = nn.GetNamesystem().GetBlockManager();
        BlockTokenSecretManager sm = bm.GetBlockTokenSecretManager();
        // set a short token lifetime (1 second)
        SecurityTestUtil.SetBlockTokenLifetime(sm, 1000L);
        Path fileToWrite = new Path(FileToWrite);
        FileSystem fs = cluster.GetFileSystem();
        FSDataOutputStream stm = WriteFile(fs, fileToWrite, (short)numDataNodes, BlockSize);
        // write a partial block
        int mid = rawData.Length - 1;
        stm.Write(rawData, 0, mid);
        stm.Hflush();
        // wait till the token used in stm expires
        Org.Apache.Hadoop.Security.Token.Token<BlockTokenIdentifier> token = DFSTestUtil.GetBlockToken(stm);
        while (!SecurityTestUtil.IsBlockTokenExpired(token))
        {
            try
            {
                Sharpen.Thread.Sleep(10);
            }
            catch (Exception)
            {
                // ignore interrupts while polling for token expiry
            }
        }
        // remove a datanode to force re-establishing the pipeline
        cluster.StopDataNode(0);
        // write the rest of the file
        stm.Write(rawData, mid, rawData.Length - mid);
        stm.Close();
        // check that the write was successful
        FSDataInputStream in4 = fs.Open(fileToWrite);
        NUnit.Framework.Assert.IsTrue(CheckFile1(in4));
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}