/// <summary>
/// Verifies that "mover -p" restricts migration to the explicitly listed paths:
/// /foo (COLD) and /foo2 (WARM) are populated, policies are applied, and the
/// Mover is invoked with "-p /foo/bar /foo2" and must exit successfully.
/// </summary>
public virtual void TestMoveSpecificPaths()
{
    Log.Info("testMoveSpecificPaths");
    Path fooDir = new Path("/foo");
    Path fooBar = new Path(fooDir, "bar");
    Path foo2Dir = new Path("/foo2");
    Path foo2Bar2 = new Path(foo2Dir, "bar2");
    // Map each directory to the storage policy it should receive.
    IDictionary<Path, BlockStoragePolicy> policyMap = Maps.NewHashMap();
    policyMap[fooDir] = Cold;
    policyMap[foo2Dir] = Warm;
    TestStorageMover.NamespaceScheme nsScheme = new TestStorageMover.NamespaceScheme(
        Arrays.AsList(fooDir, foo2Dir), Arrays.AsList(fooBar, foo2Bar2),
        BlockSize, null, policyMap);
    TestStorageMover.ClusterScheme clusterScheme = new TestStorageMover.ClusterScheme(
        DefaultConf, NumDatanodes, Repl, GenStorageTypes(NumDatanodes), null);
    TestStorageMover.MigrationTest test =
        new TestStorageMover.MigrationTest(this, clusterScheme, nsScheme);
    test.SetupCluster();
    try
    {
        test.PrepareNamespace();
        test.SetStoragePolicy();
        // Resolve the "-p" arguments into a namenode -> path-list map, then run
        // the Mover over exactly those paths.
        IDictionary<URI, IList<Path>> map = Mover.Cli.GetNameNodePathsToMove(
            test.conf, "-p", "/foo/bar", "/foo2");
        int result = Org.Apache.Hadoop.Hdfs.Server.Mover.Mover.Run(map, test.conf);
        NUnit.Framework.Assert.AreEqual(ExitStatus.Success.GetExitCode(), result);
        // Allow block-movement reports to reach the namenode before verifying.
        Sharpen.Thread.Sleep(5000);
        test.Verify(true);
    }
    finally
    {
        test.ShutdownCluster();
    }
}
/// <summary>
/// Binds a migration test to the cluster and namespace schemes it will run
/// against. The test shares the cluster scheme's Configuration and uses the
/// suite-wide default block storage policies.
/// </summary>
internal MigrationTest(TestStorageMover _enclosing,
    TestStorageMover.ClusterScheme cScheme, TestStorageMover.NamespaceScheme nsScheme)
{
    this._enclosing = _enclosing;
    this.nsScheme = nsScheme;
    this.clusterScheme = cScheme;
    // The test's working configuration is the one the cluster was built with.
    this.conf = cScheme.conf;
    this.policies = TestStorageMover.DefaultPolicies;
}
/// <summary>
/// Exercises behavior when every ARCHIVE volume is full: raising replication
/// on a COLD file must create no new replicas (COLD has no DISK fallback),
/// while HOT file creation (DISK) and a cold-to-warm migration still succeed.
/// </summary>
public virtual void TestNoSpaceArchive()
{
    Log.Info("testNoSpaceArchive");
    TestStorageMover.PathPolicyMap pathPolicyMap = new TestStorageMover.PathPolicyMap(0);
    TestStorageMover.NamespaceScheme nsScheme = pathPolicyMap.NewNamespaceScheme();
    TestStorageMover.ClusterScheme clusterScheme = new TestStorageMover.ClusterScheme(
        DefaultConf, NumDatanodes, Repl, GenStorageTypes(NumDatanodes), null);
    TestStorageMover.MigrationTest test =
        new TestStorageMover.MigrationTest(this, clusterScheme, nsScheme);
    try
    {
        test.RunBasicTest(false);
        // Create two COLD files with replication 3 so ARCHIVE space gets used.
        short replication = 3;
        for (int i = 0; i < 2; i++)
        {
            Path coldFile = new Path(pathPolicyMap.cold, "file" + i);
            DFSTestUtil.CreateFile(test.dfs, coldFile, BlockSize, replication, 0L);
            WaitForAllReplicas(replication, coldFile, test.dfs);
        }
        // Mark every ARCHIVE volume full and report it via a heartbeat.
        foreach (DataNode dn in test.cluster.GetDataNodes())
        {
            SetVolumeFull(dn, StorageType.Archive);
            DataNodeTestUtils.TriggerHeartbeat(dn);
        }
        {
            // Raising replication must be a no-op: the COLD policy only allows
            // ARCHIVE storage, and no ARCHIVE space remains.
            Path file0 = new Path(pathPolicyMap.cold, "file0");
            TestStorageMover.Replication before = test.GetReplication(file0);
            NUnit.Framework.Assert.AreEqual(0, before.disk);
            short newReplication = (short)5;
            test.dfs.SetReplication(file0, newReplication);
            Sharpen.Thread.Sleep(10000);
            test.VerifyReplication(file0, 0, before.archive);
        }
        {
            // A HOT file targets DISK, which still has space, so creation works.
            Path hotFile = new Path(pathPolicyMap.hot, "foo");
            DFSTestUtil.CreateFile(test.dfs, hotFile, BlockSize, (short)3, 0L);
        }
        {
            // Rename a COLD file into the WARM tree; after migration its blocks
            // must satisfy the WARM policy.
            Path file1 = new Path(pathPolicyMap.cold, "file1");
            test.dfs.Rename(file1, pathPolicyMap.warm);
            test.Migrate();
            test.Verify(true);
        }
    }
    finally
    {
        test.ShutdownCluster();
    }
}
/// <summary>
/// Exercises fallback storage when every DISK volume is full: new replicas of
/// a HOT file land on ARCHIVE, COLD files are placed entirely on ARCHIVE both
/// at creation and after a replication bump, and a hot-to-warm migration
/// still completes.
/// </summary>
public virtual void TestNoSpaceDisk()
{
    Log.Info("testNoSpaceDisk");
    TestStorageMover.PathPolicyMap pathPolicyMap = new TestStorageMover.PathPolicyMap(0);
    TestStorageMover.NamespaceScheme nsScheme = pathPolicyMap.NewNamespaceScheme();
    Configuration conf = new Configuration(DefaultConf);
    TestStorageMover.ClusterScheme clusterScheme = new TestStorageMover.ClusterScheme(
        conf, NumDatanodes, Repl, GenStorageTypes(NumDatanodes), null);
    TestStorageMover.MigrationTest test =
        new TestStorageMover.MigrationTest(this, clusterScheme, nsScheme);
    try
    {
        test.RunBasicTest(false);
        // Create two HOT files with replication 3 so DISK space gets used.
        short replication = 3;
        for (int i = 0; i < 2; i++)
        {
            Path hotFile = new Path(pathPolicyMap.hot, "file" + i);
            DFSTestUtil.CreateFile(test.dfs, hotFile, BlockSize, replication, 0L);
            WaitForAllReplicas(replication, hotFile, test.dfs);
        }
        // Mark every DISK volume full and report it via a heartbeat.
        foreach (DataNode dn in test.cluster.GetDataNodes())
        {
            SetVolumeFull(dn, StorageType.Disk);
            DataNodeTestUtils.TriggerHeartbeat(dn);
        }
        // With DISK full, the extra replicas of a HOT file should fall back to
        // ARCHIVE storage.
        Path file0 = new Path(pathPolicyMap.hot, "file0");
        TestStorageMover.Replication before = test.GetReplication(file0);
        short newReplication = (short)5;
        test.dfs.SetReplication(file0, newReplication);
        Sharpen.Thread.Sleep(10000);
        test.VerifyReplication(file0, before.disk, newReplication - before.disk);
        // A new COLD file goes straight to ARCHIVE, both at creation and after
        // its replication is raised.
        Path coldFile = new Path(pathPolicyMap.cold, "foo");
        DFSTestUtil.CreateFile(test.dfs, coldFile, BlockSize, replication, 0L);
        test.VerifyReplication(coldFile, 0, replication);
        test.dfs.SetReplication(coldFile, newReplication);
        Sharpen.Thread.Sleep(10000);
        test.VerifyReplication(coldFile, 0, newReplication);
        // Rename a HOT file into the WARM tree and migrate; its blocks must
        // then satisfy the WARM policy.
        Path file1 = new Path(pathPolicyMap.hot, "file1");
        test.dfs.Rename(file1, pathPolicyMap.warm);
        test.Migrate();
        test.VerifyFile(new Path(pathPolicyMap.warm, "file1"), Warm.GetId());
    }
    finally
    {
        test.ShutdownCluster();
    }
}
/// <summary>
/// Applies the COLD policy to a two-block file at /foo and runs the standard
/// migrate-and-verify flow so its blocks move from DISK to ARCHIVE.
/// </summary>
public virtual void TestMigrateFileToArchival()
{
    Log.Info("testMigrateFileToArchival");
    Path foo = new Path("/foo");
    // /foo is a file here (no parent dirs created), carrying the COLD policy.
    IDictionary<Path, BlockStoragePolicy> policyMap = Maps.NewHashMap();
    policyMap[foo] = Cold;
    TestStorageMover.NamespaceScheme nsScheme = new TestStorageMover.NamespaceScheme(
        null, Arrays.AsList(foo), 2 * BlockSize, null, policyMap);
    TestStorageMover.ClusterScheme clusterScheme = new TestStorageMover.ClusterScheme(
        DefaultConf, NumDatanodes, Repl, GenStorageTypes(NumDatanodes), null);
    new TestStorageMover.MigrationTest(this, clusterScheme, nsScheme).RunBasicTest(true);
}
/// <summary>
/// Builds hot/warm/cold directory trees (3 files each), shuffles files between
/// them, and checks that a subsequent migration relocates every block onto the
/// storage required by its new directory's policy.
/// </summary>
public virtual void TestHotWarmColdDirs()
{
    Log.Info("testHotWarmColdDirs");
    TestStorageMover.PathPolicyMap pathPolicyMap = new TestStorageMover.PathPolicyMap(3);
    TestStorageMover.NamespaceScheme nsScheme = pathPolicyMap.NewNamespaceScheme();
    TestStorageMover.ClusterScheme clusterScheme = new TestStorageMover.ClusterScheme();
    TestStorageMover.MigrationTest test =
        new TestStorageMover.MigrationTest(this, clusterScheme, nsScheme);
    try
    {
        test.RunBasicTest(false);
        // Move files across the policy trees, then migrate and verify placement.
        pathPolicyMap.MoveAround(test.dfs);
        test.Migrate();
        test.Verify(true);
    }
    finally
    {
        test.ShutdownCluster();
    }
}
/// <summary>
/// Ensures the mover skips the under-construction last block of an open file:
/// /foo is set to COLD while /foo/bar is still being appended, migration runs,
/// and afterwards the writer can keep appending and the data stays readable.
/// </summary>
public virtual void TestMigrateOpenFileToArchival()
{
    Log.Info("testMigrateOpenFileToArchival");
    Path fooDir = new Path("/foo");
    IDictionary<Path, BlockStoragePolicy> policyMap = Maps.NewHashMap();
    policyMap[fooDir] = Cold;
    TestStorageMover.NamespaceScheme nsScheme = new TestStorageMover.NamespaceScheme(
        Arrays.AsList(fooDir), null, BlockSize, null, policyMap);
    TestStorageMover.ClusterScheme clusterScheme = new TestStorageMover.ClusterScheme(
        DefaultConf, NumDatanodes, Repl, GenStorageTypes(NumDatanodes), null);
    TestStorageMover.MigrationTest test =
        new TestStorageMover.MigrationTest(this, clusterScheme, nsScheme);
    test.SetupCluster();
    // Write a full block, then append and hsync so the file has an open,
    // under-construction block when the migration starts.
    Banner("writing to file /foo/bar");
    Path barFile = new Path(fooDir, "bar");
    DFSTestUtil.CreateFile(test.dfs, barFile, BlockSize, (short)1, 0L);
    FSDataOutputStream @out = test.dfs.Append(barFile);
    @out.WriteBytes("hello, ");
    ((DFSOutputStream)@out.GetWrappedStream()).Hsync();
    try
    {
        Banner("start data migration");
        // Set /foo to COLD and run the mover while the append stream is open.
        test.SetStoragePolicy();
        test.Migrate();
        // The under-construction block (the one starting at offset BlockSize)
        // must not have been migrated: still one block with one location.
        LocatedBlocks lbs = test.dfs.GetClient().GetLocatedBlocks(
            barFile.ToString(), BlockSize);
        Log.Info("Locations: " + lbs);
        IList<LocatedBlock> blks = lbs.GetLocatedBlocks();
        NUnit.Framework.Assert.AreEqual(1, blks.Count);
        NUnit.Framework.Assert.AreEqual(1, blks[0].GetLocations().Length);
        Banner("finish the migration, continue writing");
        // The open stream must remain usable after the migration completes.
        @out.WriteBytes("world!");
        ((DFSOutputStream)@out.GetWrappedStream()).Hsync();
        IOUtils.Cleanup(Log, @out);
        lbs = test.dfs.GetClient().GetLocatedBlocks(barFile.ToString(), BlockSize);
        Log.Info("Locations: " + lbs);
        blks = lbs.GetLocatedBlocks();
        NUnit.Framework.Assert.AreEqual(1, blks.Count);
        NUnit.Framework.Assert.AreEqual(1, blks[0].GetLocations().Length);
        Banner("finish writing, starting reading");
        // Read the 13 appended bytes back from offset BlockSize and check them.
        FSDataInputStream @in = test.dfs.Open(barFile);
        byte[] buf = new byte[13];
        @in.ReadFully(BlockSize, buf, 0, buf.Length);
        IOUtils.Cleanup(Log, @in);
        NUnit.Framework.Assert.AreEqual("hello, world!",
            Sharpen.Runtime.GetStringForBytes(buf));
    }
    finally
    {
        test.ShutdownCluster();
    }
}