/// <summary>
/// Verifies that the block token secret managers of the two NameNodes never
/// report the same serial number, no matter where the serial counter is
/// positioned within the int range (zero, both extremes, and the midpoints).
/// </summary>
public virtual void EnsureSerialNumbersNeverOverlap()
{
    BlockTokenSecretManager btsm1 = cluster.GetNamesystem(0).GetBlockManager().GetBlockTokenSecretManager();
    BlockTokenSecretManager btsm2 = cluster.GetNamesystem(1).GetBlockManager().GetBlockTokenSecretManager();
    // The original repeated the same set/assert pair once per probe value;
    // a single loop over the boundary values expresses the same checks.
    int[] probes = { 0, int.MaxValue, int.MinValue, int.MaxValue / 2, int.MinValue / 2 };
    foreach (int serial in probes)
    {
        btsm1.SetSerialNo(serial);
        btsm2.SetSerialNo(serial);
        // Even when seeded with the same value, the two managers must end up
        // with distinct serial numbers so tokens from the two NNs cannot collide.
        NUnit.Framework.Assert.IsFalse(btsm1.GetSerialNoForTesting() == btsm2.GetSerialNoForTesting());
    }
}
/// <summary>
/// Fetches the NameNode's exported block keys and, when block tokens are
/// enabled, sets up a block token secret manager seeded with those keys plus
/// a background updater that keeps them in sync with the NameNode.
/// </summary>
/// <exception cref="System.IO.IOException"/>
public KeyManager(string blockpoolID, NamenodeProtocol namenode, bool encryptDataTransfer, Configuration conf)
{
    this.namenode = namenode;
    this.encryptDataTransfer = encryptDataTransfer;
    ExportedBlockKeys exportedKeys = namenode.GetBlockKeys();
    this.isBlockTokenEnabled = exportedKeys.IsBlockTokenEnabled();
    if (!this.isBlockTokenEnabled)
    {
        // Block tokens are disabled: nothing to manage and nothing to refresh.
        this.blockTokenSecretManager = null;
        this.blockKeyUpdater = null;
        return;
    }
    long keyUpdateInterval = exportedKeys.GetKeyUpdateInterval();
    long lifetime = exportedKeys.GetTokenLifetime();
    Log.Info("Block token params received from NN: update interval=" + StringUtils.FormatTime(keyUpdateInterval) + ", token lifetime=" + StringUtils.FormatTime(lifetime));
    string algorithm = conf.Get(DFSConfigKeys.DfsDataEncryptionAlgorithmKey);
    this.blockTokenSecretManager = new BlockTokenSecretManager(keyUpdateInterval, lifetime, blockpoolID, algorithm);
    this.blockTokenSecretManager.AddKeys(exportedKeys);
    // sync block keys with NN more frequently than NN updates its block keys
    this.blockKeyUpdater = new KeyManager.BlockKeyUpdater(this, keyUpdateInterval / 4);
    this.shouldRun = true;
}
/// <summary>
/// Shortens the given namesystem's block-key update interval and token
/// lifetime to 2 seconds and discards all existing block keys, so token
/// expiry can be exercised quickly in tests.
/// </summary>
private static void LowerKeyUpdateIntervalAndClearKeys(FSNamesystem namesystem)
{
    const int shortMillis = 2 * 1000;
    BlockTokenSecretManager secretManager = namesystem.GetBlockManager().GetBlockTokenSecretManager();
    secretManager.SetKeyUpdateIntervalForTesting(shortMillis);
    secretManager.SetTokenLifetime(shortMillis);
    secretManager.ClearAllKeysForTesting();
}
/// <summary>
/// Tests that a write pipeline can be re-established after the block token
/// cached in the output stream has expired: writes a partial block, waits
/// for the token to expire, stops a datanode to force pipeline recovery,
/// then finishes the file and verifies its contents.
/// </summary>
public virtual void TestWrite()
{
    MiniDFSCluster cluster = null;
    int numDataNodes = 2;
    Configuration conf = GetConf(numDataNodes);
    try
    {
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDataNodes).Build();
        cluster.WaitActive();
        NUnit.Framework.Assert.AreEqual(numDataNodes, cluster.GetDataNodes().Count);
        NameNode nn = cluster.GetNameNode();
        BlockManager bm = nn.GetNamesystem().GetBlockManager();
        BlockTokenSecretManager sm = bm.GetBlockTokenSecretManager();
        // set a short token lifetime (1 second) so expiry happens during the test
        SecurityTestUtil.SetBlockTokenLifetime(sm, 1000L);
        Path fileToWrite = new Path(FileToWrite);
        FileSystem fs = cluster.GetFileSystem();
        FSDataOutputStream stm = WriteFile(fs, fileToWrite, (short)numDataNodes, BlockSize);
        // write a partial block
        int mid = rawData.Length - 1;
        stm.Write(rawData, 0, mid);
        stm.Hflush();
        // wait till the block token used in stm expires
        Org.Apache.Hadoop.Security.Token.Token<BlockTokenIdentifier> token = DFSTestUtil.GetBlockToken(stm);
        while (!SecurityTestUtil.IsBlockTokenExpired(token))
        {
            try
            {
                Sharpen.Thread.Sleep(10);
            }
            catch (Exception)
            {
                // best-effort polling; interruption while sleeping is ignored
            }
        }
        // remove a datanode to force re-establishing pipeline
        cluster.StopDataNode(0);
        // write the rest of the file
        stm.Write(rawData, mid, rawData.Length - mid);
        stm.Close();
        // check if write is successful; close the reader afterwards -- the
        // original left in4 open, leaking the stream and its connections
        FSDataInputStream in4 = fs.Open(fileToWrite);
        try
        {
            NUnit.Framework.Assert.IsTrue(CheckFile1(in4));
        }
        finally
        {
            in4.Close();
        }
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/// <summary>
/// Tests that a long-lived client keeps working across encryption-key
/// expiry: writes plaintext data on an unencrypted cluster, restarts the
/// cluster with data-transfer encryption enabled and artificially short
/// key lifetimes, then re-reads the data both before and after the
/// encryption keys held by the client have expired.
/// </summary>
public virtual void TestLongLivedClient()
{
    MiniDFSCluster cluster = null;
    try
    {
        Configuration conf = new Configuration();
        cluster = new MiniDFSCluster.Builder(conf).Build();
        FileSystem fs = GetFileSystem(conf);
        WriteTestDataToFile(fs);
        NUnit.Framework.Assert.AreEqual(PlainText, DFSTestUtil.ReadFile(fs, TestPath));
        // remember the checksum so it can be compared after the encrypted restart
        FileChecksum checksum = fs.GetFileChecksum(TestPath);
        fs.Close();
        cluster.Shutdown();
        // restart the same cluster (no reformat) with encryption turned on
        SetEncryptionConfigKeys(conf);
        cluster = new MiniDFSCluster.Builder(conf).ManageDataDfsDirs(false).ManageNameDfsDirs(false).Format(false).StartupOption(HdfsServerConstants.StartupOption.Regular).Build();
        // shrink key update interval / token lifetime to 2s and drop old keys
        // so expiry can be observed within the test
        BlockTokenSecretManager btsm = cluster.GetNamesystem().GetBlockManager().GetBlockTokenSecretManager();
        btsm.SetKeyUpdateIntervalForTesting(2 * 1000);
        btsm.SetTokenLifetime(2 * 1000);
        btsm.ClearAllKeysForTesting();
        fs = GetFileSystem(conf);
        // reads with freshly-fetched encryption keys must succeed
        NUnit.Framework.Assert.AreEqual(PlainText, DFSTestUtil.ReadFile(fs, TestPath));
        NUnit.Framework.Assert.AreEqual(checksum, fs.GetFileChecksum(TestPath));
        // Sleep for 15 seconds, after which the encryption key will no longer be
        // valid. It needs to be a few multiples of the block token lifetime,
        // since several block tokens are valid at any given time (the current
        // and the last two, by default.)
        Log.Info("Sleeping so that encryption keys expire...");
        Sharpen.Thread.Sleep(15 * 1000);
        Log.Info("Done sleeping.");
        // the same client must transparently recover after key expiry
        NUnit.Framework.Assert.AreEqual(PlainText, DFSTestUtil.ReadFile(fs, TestPath));
        NUnit.Framework.Assert.AreEqual(checksum, fs.GetFileChecksum(TestPath));
        fs.Close();
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/// <summary>
/// Exercises block token handling on the READ path: expiry of tokens cached
/// in open input streams, transparent re-fetch of tokens by the DFSClient,
/// rejection of tokens with a wrong block ID or wrong access modes, and the
/// validity of cached tokens across datanode and namenode restarts (on both
/// the same and newly assigned ports).
/// </summary>
public virtual void TestRead()
{
    MiniDFSCluster cluster = null;
    int numDataNodes = 2;
    Configuration conf = GetConf(numDataNodes);
    try
    {
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDataNodes).Build();
        cluster.WaitActive();
        NUnit.Framework.Assert.AreEqual(numDataNodes, cluster.GetDataNodes().Count);
        NameNode nn = cluster.GetNameNode();
        NamenodeProtocols nnProto = nn.GetRpcServer();
        BlockManager bm = nn.GetNamesystem().GetBlockManager();
        BlockTokenSecretManager sm = bm.GetBlockTokenSecretManager();
        // set a short token lifetime (1 second) initially
        SecurityTestUtil.SetBlockTokenLifetime(sm, 1000L);
        Path fileToRead = new Path(FileToRead);
        FileSystem fs = cluster.GetFileSystem();
        CreateFile(fs, fileToRead);
        /*
         * setup for testing expiration handling of cached tokens
         */
        // read using blockSeekTo(). Acquired tokens are cached in in1
        FSDataInputStream in1 = fs.Open(fileToRead);
        NUnit.Framework.Assert.IsTrue(CheckFile1(in1));
        // read using blockSeekTo(). Acquired tokens are cached in in2
        FSDataInputStream in2 = fs.Open(fileToRead);
        NUnit.Framework.Assert.IsTrue(CheckFile1(in2));
        // read using fetchBlockByteRange(). Acquired tokens are cached in in3
        FSDataInputStream in3 = fs.Open(fileToRead);
        NUnit.Framework.Assert.IsTrue(CheckFile2(in3));
        /*
         * testing READ interface on DN using a BlockReader
         */
        DFSClient client = null;
        try
        {
            client = new DFSClient(new IPEndPoint("localhost", cluster.GetNameNodePort()), conf);
        }
        finally
        {
            if (client != null)
            {
                client.Close();
            }
        }
        IList<LocatedBlock> locatedBlocks = nnProto.GetBlockLocations(FileToRead, 0, FileSize).GetLocatedBlocks();
        LocatedBlock lblock = locatedBlocks[0];
        // first block
        Org.Apache.Hadoop.Security.Token.Token<BlockTokenIdentifier> myToken = lblock.GetBlockToken();
        // verify token is not expired
        NUnit.Framework.Assert.IsFalse(SecurityTestUtil.IsBlockTokenExpired(myToken));
        // read with valid token, should succeed
        TryRead(conf, lblock, true);
        /*
         * wait till myToken and all cached tokens in in1, in2 and in3 expire
         */
        while (!SecurityTestUtil.IsBlockTokenExpired(myToken))
        {
            try
            {
                Sharpen.Thread.Sleep(10);
            }
            catch (Exception)
            {
                // best-effort polling; interruption is ignored
            }
        }
        /*
         * continue testing READ interface on DN using a BlockReader
         */
        // verify token is expired
        NUnit.Framework.Assert.IsTrue(SecurityTestUtil.IsBlockTokenExpired(myToken));
        // read should fail
        TryRead(conf, lblock, false);
        // use a valid new token
        lblock.SetBlockToken(sm.GenerateToken(lblock.GetBlock(), EnumSet.Of(BlockTokenSecretManager.AccessMode.Read)));
        // read should succeed
        TryRead(conf, lblock, true);
        // use a token with wrong blockID
        ExtendedBlock wrongBlock = new ExtendedBlock(lblock.GetBlock().GetBlockPoolId(), lblock.GetBlock().GetBlockId() + 1);
        lblock.SetBlockToken(sm.GenerateToken(wrongBlock, EnumSet.Of(BlockTokenSecretManager.AccessMode.Read)));
        // read should fail
        TryRead(conf, lblock, false);
        // use a token with wrong access modes
        lblock.SetBlockToken(sm.GenerateToken(lblock.GetBlock(), EnumSet.Of(BlockTokenSecretManager.AccessMode.Write, BlockTokenSecretManager.AccessMode.Copy, BlockTokenSecretManager.AccessMode.Replace)));
        // read should fail
        TryRead(conf, lblock, false);
        // set a long token lifetime for future tokens
        SecurityTestUtil.SetBlockTokenLifetime(sm, 600 * 1000L);
        /*
         * testing that when cached tokens are expired, DFSClient will re-fetch
         * tokens transparently for READ.
         */
        // confirm all tokens cached in in1 are expired by now
        IList<LocatedBlock> lblocks = DFSTestUtil.GetAllBlocks(in1);
        foreach (LocatedBlock blk in lblocks)
        {
            NUnit.Framework.Assert.IsTrue(SecurityTestUtil.IsBlockTokenExpired(blk.GetBlockToken()));
        }
        // verify blockSeekTo() is able to re-fetch token transparently
        in1.Seek(0);
        NUnit.Framework.Assert.IsTrue(CheckFile1(in1));
        // confirm all tokens cached in in2 are expired by now
        IList<LocatedBlock> lblocks2 = DFSTestUtil.GetAllBlocks(in2);
        foreach (LocatedBlock blk_1 in lblocks2)
        {
            NUnit.Framework.Assert.IsTrue(SecurityTestUtil.IsBlockTokenExpired(blk_1.GetBlockToken()));
        }
        // verify blockSeekTo() is able to re-fetch token transparently (testing
        // via another interface method)
        NUnit.Framework.Assert.IsTrue(in2.SeekToNewSource(0));
        NUnit.Framework.Assert.IsTrue(CheckFile1(in2));
        // confirm all tokens cached in in3 are expired by now
        IList<LocatedBlock> lblocks3 = DFSTestUtil.GetAllBlocks(in3);
        foreach (LocatedBlock blk_2 in lblocks3)
        {
            NUnit.Framework.Assert.IsTrue(SecurityTestUtil.IsBlockTokenExpired(blk_2.GetBlockToken()));
        }
        // verify fetchBlockByteRange() is able to re-fetch token transparently
        NUnit.Framework.Assert.IsTrue(CheckFile2(in3));
        /*
         * testing that after datanodes are restarted on the same ports, cached
         * tokens should still work and there is no need to fetch new tokens from
         * namenode. This test should run while namenode is down (to make sure no
         * new tokens can be fetched from namenode).
         */
        // restart datanodes on the same ports that they currently use
        NUnit.Framework.Assert.IsTrue(cluster.RestartDataNodes(true));
        cluster.WaitActive();
        NUnit.Framework.Assert.AreEqual(numDataNodes, cluster.GetDataNodes().Count);
        cluster.ShutdownNameNode(0);
        // confirm tokens cached in in1 are still valid
        lblocks = DFSTestUtil.GetAllBlocks(in1);
        foreach (LocatedBlock blk_3 in lblocks)
        {
            NUnit.Framework.Assert.IsFalse(SecurityTestUtil.IsBlockTokenExpired(blk_3.GetBlockToken()));
        }
        // verify blockSeekTo() still works (forced to use cached tokens)
        in1.Seek(0);
        NUnit.Framework.Assert.IsTrue(CheckFile1(in1));
        // confirm tokens cached in in2 are still valid
        lblocks2 = DFSTestUtil.GetAllBlocks(in2);
        foreach (LocatedBlock blk_4 in lblocks2)
        {
            NUnit.Framework.Assert.IsFalse(SecurityTestUtil.IsBlockTokenExpired(blk_4.GetBlockToken()));
        }
        // verify blockSeekTo() still works (forced to use cached tokens)
        in2.SeekToNewSource(0);
        NUnit.Framework.Assert.IsTrue(CheckFile1(in2));
        // confirm tokens cached in in3 are still valid
        lblocks3 = DFSTestUtil.GetAllBlocks(in3);
        foreach (LocatedBlock blk_5 in lblocks3)
        {
            NUnit.Framework.Assert.IsFalse(SecurityTestUtil.IsBlockTokenExpired(blk_5.GetBlockToken()));
        }
        // verify fetchBlockByteRange() still works (forced to use cached tokens)
        NUnit.Framework.Assert.IsTrue(CheckFile2(in3));
        /*
         * testing that when namenode is restarted, cached tokens should still
         * work and there is no need to fetch new tokens from namenode. Like the
         * previous test, this test should also run while namenode is down. The
         * setup for this test depends on the previous test.
         */
        // restart the namenode and then shut it down for test
        cluster.RestartNameNode(0);
        cluster.ShutdownNameNode(0);
        // verify blockSeekTo() still works (forced to use cached tokens)
        in1.Seek(0);
        NUnit.Framework.Assert.IsTrue(CheckFile1(in1));
        // verify again blockSeekTo() still works (forced to use cached tokens)
        in2.SeekToNewSource(0);
        NUnit.Framework.Assert.IsTrue(CheckFile1(in2));
        // verify fetchBlockByteRange() still works (forced to use cached tokens)
        NUnit.Framework.Assert.IsTrue(CheckFile2(in3));
        /*
         * testing that after both namenode and datanodes got restarted (namenode
         * first, followed by datanodes), DFSClient can't access DN without
         * re-fetching tokens and is able to re-fetch tokens transparently. The
         * setup of this test depends on the previous test.
         */
        // restore the cluster and restart the datanodes for test
        cluster.RestartNameNode(0);
        NUnit.Framework.Assert.IsTrue(cluster.RestartDataNodes(true));
        cluster.WaitActive();
        NUnit.Framework.Assert.AreEqual(numDataNodes, cluster.GetDataNodes().Count);
        // shutdown namenode so that DFSClient can't get new tokens from namenode
        cluster.ShutdownNameNode(0);
        // verify blockSeekTo() fails (cached tokens become invalid)
        in1.Seek(0);
        NUnit.Framework.Assert.IsFalse(CheckFile1(in1));
        // verify fetchBlockByteRange() fails (cached tokens become invalid)
        NUnit.Framework.Assert.IsFalse(CheckFile2(in3));
        // restart the namenode to allow DFSClient to re-fetch tokens
        cluster.RestartNameNode(0);
        // verify blockSeekTo() works again (by transparently re-fetching
        // tokens from namenode)
        in1.Seek(0);
        NUnit.Framework.Assert.IsTrue(CheckFile1(in1));
        in2.SeekToNewSource(0);
        NUnit.Framework.Assert.IsTrue(CheckFile1(in2));
        // verify fetchBlockByteRange() works again (by transparently
        // re-fetching tokens from namenode)
        NUnit.Framework.Assert.IsTrue(CheckFile2(in3));
        /*
         * testing that when datanodes are restarted on different ports, DFSClient
         * is able to re-fetch tokens transparently to connect to them
         */
        // restart datanodes on newly assigned ports
        NUnit.Framework.Assert.IsTrue(cluster.RestartDataNodes(false));
        cluster.WaitActive();
        NUnit.Framework.Assert.AreEqual(numDataNodes, cluster.GetDataNodes().Count);
        // verify blockSeekTo() is able to re-fetch token transparently
        in1.Seek(0);
        NUnit.Framework.Assert.IsTrue(CheckFile1(in1));
        // verify blockSeekTo() is able to re-fetch token transparently
        in2.SeekToNewSource(0);
        NUnit.Framework.Assert.IsTrue(CheckFile1(in2));
        // verify fetchBlockByteRange() is able to re-fetch token transparently
        NUnit.Framework.Assert.IsTrue(CheckFile2(in3));
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}