static void BlankTest(string[] args)
{
    if (args.Length <= 1 || !System.IO.File.Exists(args[1]))
    {
        throw new Exception("Expected path to DFS.xml");
    }
    string dfsxmlpath = args[1];
    string dfsxmlpathbackup = dfsxmlpath + "$" + Guid.NewGuid().ToString();
    string masterdir;
    {
        System.IO.FileInfo fi = new System.IO.FileInfo(dfsxmlpath);
        masterdir = fi.DirectoryName; // Directory's full path.
    }
    Surrogate.SetNewMetaLocation(masterdir);
    dfs dc = dfs.ReadDfsConfig_unlocked(dfsxmlpath);
    string masterhost = System.Net.Dns.GetHostName();
    string[] allmachines;
    {
        string[] sl = dc.Slaves.SlaveList.Split(';');
        List<string> aml = new List<string>(sl.Length + 1);
        aml.Add(masterhost);
        foreach (string slave in sl)
        {
            if (0 != string.Compare(IPAddressUtil.GetName(slave), IPAddressUtil.GetName(masterhost), StringComparison.OrdinalIgnoreCase))
            {
                aml.Add(slave);
            }
        }
        allmachines = aml.ToArray();
    }
    {
        // Test logic:
    }
    Console.WriteLine("[PASSED] - " + string.Join(" ", args));
}
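// Regression test: starts an exec job that sleeps forever on a worker thread, looks up its JID
// with "qizmt ps", issues "Qizmt kill", and verifies the job was terminated without ever completing.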
static void Kill(string[] args) { if (args.Length <= 1 || !System.IO.File.Exists(args[1])) { throw new Exception("Expected path to DFS.xml"); } string dfsxmlpath = args[1]; string masterdir; { System.IO.FileInfo fi = new System.IO.FileInfo(dfsxmlpath); masterdir = fi.DirectoryName; // Directory's full path. } Surrogate.SetNewMetaLocation(masterdir); dfs dc = dfs.ReadDfsConfig_unlocked(dfsxmlpath); string masterhost = System.Net.Dns.GetHostName(); string[] allmachines; { string[] sl = dc.Slaves.SlaveList.Split(';'); List <string> aml = new List <string>(sl.Length + 1); aml.Add(masterhost); foreach (string slave in sl) { if (0 != string.Compare(IPAddressUtil.GetName(slave), IPAddressUtil.GetName(masterhost), StringComparison.OrdinalIgnoreCase)) { aml.Add(slave); } } allmachines = aml.ToArray(); } { Console.WriteLine("Ensure cluster is perfectly healthy..."); EnsurePerfectQizmtHealtha(); Console.WriteLine("Run job in one thread, kill from another..."); //System.Threading.Thread.Sleep(1000 * 8); string exectempdir = @"\\" + System.Net.Dns.GetHostName() + @"\C$\temp\qizmt\regression_test_Kill-" + Guid.NewGuid().ToString(); if (!System.IO.Directory.Exists(exectempdir)) { System.IO.Directory.CreateDirectory(exectempdir); } string execfp = exectempdir + @"\exec{3EAE6884-28BB-4340-8F94-6E9421B68C92}"; System.IO.File.WriteAllText(execfp, (@"<SourceCode> <Jobs> <Job Name=`exec{3EAE6884-28BB-4340-8F94-6E9421B68C92}_Preprocessing 1`> <IOSettings> <JobType>local</JobType> </IOSettings> <Local> <![CDATA[ public virtual void Local() { // Sleep forever so that kill will take it down. System.Threading.Thread.Sleep(System.Threading.Timeout.Infinite); } ]]> </Local> </Job> <Job Name=`exec{3EAE6884-28BB-4340-8F94-6E9421B68C92}_Preprocessing 2`> <IOSettings> <JobType>local</JobType> </IOSettings> <Local> <![CDATA[ public virtual void Local() { Qizmt_Log(`{DC46FA81-A69F-4d46-9A30-54869168916B}`); } ]]> </Local> </Job> </Jobs> </SourceCode> ").Replace('`', '"')); Exec.Shell("Qizmt importdir " + exectempdir); try { try { System.IO.File.Delete(execfp); System.IO.Directory.Delete(exectempdir); } catch { } bool execdone = false; bool execx = false; System.Threading.Thread execthread = new System.Threading.Thread( new System.Threading.ThreadStart( delegate() { try { Console.WriteLine(" Running exec..."); string output = Exec.Shell("Qizmt exec exec{3EAE6884-28BB-4340-8F94-6E9421B68C92}"); Console.WriteLine("exec output: {0}", output.Trim()); if (-1 != output.IndexOf("{DC46FA81-A69F-4d46-9A30-54869168916B}")) { lock (typeof(Program)) { execx = true; } } } catch (Exception e) { Console.Error.WriteLine("Warning: exec exception: {0}", e.ToString()); } lock (typeof(Program)) { execdone = true; } })); execthread.Start(); // Wait a few seconds to give the exec a chance to get started. System.Threading.Thread.Sleep(1000 * 5); lock (typeof(Program)) { if (execx) { throw new Exception("exec completed; problem with test"); } if (execdone) { throw new Exception("exec finished too early, did not get a chance to call kill"); } } int execjid = 0; string execsjid = "0"; string psexecline = "N/A"; { foreach (string psln in Exec.Shell("qizmt ps").Split('\n')) { if (-1 != psln.IndexOf("exec{3EAE6884-28BB-4340-8F94-6E9421B68C92}")) { psexecline = psln.Trim(); { int isp = psexecline.IndexOf(' '); if (-1 != isp) { execsjid = psexecline.Substring(0, isp); } } break; } } } if (!int.TryParse(execsjid, out execjid) || execjid < 1) { throw new Exception("JID for job not valid: " + execsjid); } execsjid = execjid.ToString(); // Normalize. 
Console.WriteLine(" Running kill... ({0})", psexecline); Exec.Shell("Qizmt kill " + execsjid); lock (typeof(Program)) { if (execx) { throw new Exception("exec completed; problem with test"); } if (!execdone) { throw new Exception("kill completed but exec has not yet returned"); } } // Wait a couple seconds to give the services a chance to come back up. System.Threading.Thread.Sleep(1000 * 2); } finally { try { Exec.Shell("Qizmt del exec{3EAE6884-28BB-4340-8F94-6E9421B68C92}"); } catch { } } Console.WriteLine("[PASSED] - " + string.Join(" ", args)); } }
static void CacheWithRedundancy(string[] args) { if (args.Length <= 1 || !System.IO.File.Exists(args[1])) { throw new Exception("Expected path to DFS.xml"); } string dfsxmlpath = args[1]; string dfsxmlpathbackup = dfsxmlpath + "$" + Guid.NewGuid().ToString(); string masterdir; { System.IO.FileInfo fi = new System.IO.FileInfo(dfsxmlpath); masterdir = fi.DirectoryName; // Directory's full path. } Surrogate.SetNewMetaLocation(masterdir); dfs olddfs = dfs.ReadDfsConfig_unlocked(dfsxmlpath); string masterhost = System.Net.Dns.GetHostName(); string[] allmachines; { string[] sl = olddfs.Slaves.SlaveList.Split(';'); List <string> aml = new List <string>(sl.Length + 1); aml.Add(masterhost); foreach (string slave in sl) { if (0 != string.Compare(IPAddressUtil.GetName(slave), IPAddressUtil.GetName(masterhost), StringComparison.OrdinalIgnoreCase)) { aml.Add(slave); } } allmachines = aml.ToArray(); } Console.WriteLine("Backing up DFS.xml to: {0} ...", dfsxmlpathbackup); try { System.IO.File.Delete(dfsxmlpathbackup); } catch { } System.IO.File.Move(dfsxmlpath, dfsxmlpathbackup); try { { // Note: added replication. Console.WriteLine("Formatting DFS with Replication for test..."); Exec.Shell("Qizmt @format Machines=" + string.Join(",", allmachines) + " Replication=2"); } { // Test logic: Console.WriteLine("Ensure the cluster is perfectly healthy..."); EnsurePerfectQizmtHealtha(); Console.WriteLine("Adding files to DFS..."); Exec.Shell("Qizmt wordgen Cacher_input 10MB 100"); Console.WriteLine("Generating cache files..."); string cachertempdir = @"\\" + System.Net.Dns.GetHostName() + @"\C$\temp\qizmt\regression_test_Cacher-" + Guid.NewGuid().ToString(); if (!System.IO.Directory.Exists(cachertempdir)) { System.IO.Directory.CreateDirectory(cachertempdir); } string cacherfp = cachertempdir + @"\Cacher"; System.IO.File.WriteAllText(cacherfp, (@"<?xml version=`1.0` encoding=`utf-8`?> <SourceCode> <Jobs> <Job Name=`Cacher`> <Delta> <Name>Cacher_cache</Name> <DFSInput>dfs://Cacher_input</DFSInput> </Delta> <IOSettings> <JobType>mapreduce</JobType> <KeyLength>100</KeyLength> <DFSInput></DFSInput> <DFSOutput>dfs://Cacher_output</DFSOutput> <KeyMajor>8</KeyMajor> <OutputMethod>grouped</OutputMethod> </IOSettings> <MapReduce> <Map> <![CDATA[ public virtual void Map(ByteSlice line, MapOutput output) { output.Add(line, ByteSlice.Prepare()); } ]]> </Map> <Reduce> <![CDATA[ public override void Reduce(ByteSlice key, RandomAccessEntries values, RandomAccessOutput output) { for(int i = 0; i < values.Length; i++) { output.Add(key); } } ]]> </Reduce> </MapReduce> </Job> </Jobs> </SourceCode> ").Replace('`', '"')); Exec.Shell("Qizmt importdir " + cachertempdir); try { System.IO.File.Delete(cacherfp); System.IO.Directory.Delete(cachertempdir); } catch { } Exec.Shell("Qizmt exec Cacher"); // Creates cache file Cacher_cache string cacher_output_sum = DfsSum("Cacher_output"); string cacher_output_md5 = DfsSum("md5", "Cacher_output"); if (cacher_output_sum != DfsSum("Cacher_input")) { throw new Exception("Output file does not have same checksum as input file"); } Exec.Shell("Qizmt del Cacher_output"); Console.WriteLine("Ensure the cluster is perfectly healthy..."); EnsurePerfectQizmtHealtha(); Console.WriteLine("Validate cache files..."); Exec.Shell("Qizmt exec Cacher"); // Uses existing cache file Cacher_cache if (cacher_output_sum != DfsSum("Cacher_output")) { throw new Exception("Output file not the same when using cache (sum)"); } if (cacher_output_md5 != DfsSum("md5", "Cacher_output")) { throw new Exception("Output file not 
the same when using cache (md5)"); } Exec.Shell("Qizmt del Cacher_output"); Console.WriteLine("Ensure the cluster is perfectly healthy..."); EnsurePerfectQizmtHealtha(); } Console.WriteLine("[PASSED] - " + string.Join(" ", args)); } finally { Console.WriteLine("Restoring DFS.xml backup..."); // Note: these are safe; the try/finally only wraps the new dfs. try { Exec.Shell("Qizmt del *"); } catch { } try { // Delete temp dfs.xml, it's being replaced with the good one. System.IO.File.Delete(dfsxmlpath); } catch { } System.IO.File.Move(dfsxmlpathbackup, dfsxmlpath); } }
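// Regression test: runs a job whose ReduceInitialize throws; verifies the exec fails with the
// exception thrown from ReduceInitialize rather than a broken-protocol FormatException.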
static void ReduceInitializeThrow(string[] args) { if (args.Length <= 1 || !System.IO.File.Exists(args[1])) { throw new Exception("Expected path to DFS.xml"); } string dfsxmlpath = args[1]; string dfsxmlpathbackup = dfsxmlpath + "$" + Guid.NewGuid().ToString(); string masterdir; { System.IO.FileInfo fi = new System.IO.FileInfo(dfsxmlpath); masterdir = fi.DirectoryName; // Directory's full path. } Surrogate.SetNewMetaLocation(masterdir); dfs dc = dfs.ReadDfsConfig_unlocked(dfsxmlpath); string masterhost = System.Net.Dns.GetHostName(); string[] allmachines; { string[] sl = dc.Slaves.SlaveList.Split(';'); List <string> aml = new List <string>(sl.Length + 1); aml.Add(masterhost); foreach (string slave in sl) { if (0 != string.Compare(IPAddressUtil.GetName(slave), IPAddressUtil.GetName(masterhost), StringComparison.OrdinalIgnoreCase)) { aml.Add(slave); } } allmachines = aml.ToArray(); } { // Test logic: { string exectempdir = @"\\" + System.Net.Dns.GetHostName() + @"\C$\temp\qizmt\regression_test_ReduceInitializeThrow-" + Guid.NewGuid().ToString(); if (!System.IO.Directory.Exists(exectempdir)) { System.IO.Directory.CreateDirectory(exectempdir); } string execfp = exectempdir + @"\baderrordup{2E829DB6-E853-4fc2-B0B4-82ADAF637312}"; System.IO.File.WriteAllText(execfp, (@"<?xml version=`1.0` encoding=`utf-8`?> <SourceCode> <Jobs> <Job Name=`PrepJob` Custodian=`` email=``> <IOSettings> <JobType>local</JobType> </IOSettings> <Local> <![CDATA[ public virtual void Local() { Shell(@`Qizmt -dfs del baderrordup{2E829DB6-E853-4fc2-B0B4-82ADAF637312}_*.txt`,true); // Clean previous run. } ]]> </Local> </Job> <Job description=`Load sample data` Name=`baderrordup{2E829DB6-E853-4fc2-B0B4-82ADAF637312}_LoadData` Custodian=`` email=``> <IOSettings> <JobType>remote</JobType> <DFS_IO> <DFSReader></DFSReader> <DFSWriter>dfs://baderrordup{2E829DB6-E853-4fc2-B0B4-82ADAF637312}_Input.txt</DFSWriter> </DFS_IO> </IOSettings> <Remote> <![CDATA[ public virtual void Remote(RemoteInputStream dfsinput, RemoteOutputStream dfsoutput) { dfsoutput.WriteLine(@` MySpace is for everyone: Friends who want to talk Online Single people who want to meet other Singles Matchmakers who want to connect their friends with other friends Families who want to keep in touch--map your Family Tree Business people and co-workers interested in networking Classmates and study partners Anyone looking for long lost friends! `); } ]]> </Remote> </Job> <Job Name=`baderrordup{2E829DB6-E853-4fc2-B0B4-82ADAF637312}` Custodian=`` email=``> <IOSettings> <JobType>mapreduce</JobType> <KeyLength>16</KeyLength> <DFSInput>dfs://baderrordup{2E829DB6-E853-4fc2-B0B4-82ADAF637312}_Input.txt</DFSInput> <DFSOutput>dfs://baderrordup{2E829DB6-E853-4fc2-B0B4-82ADAF637312}_Output.txt</DFSOutput> </IOSettings> <MapReduce> <Map> <![CDATA[ public virtual void Map(ByteSlice line, MapOutput output) { mstring sLine= mstring.Prepare(line); mstringarray parts = sLine.SplitM(' '); for(int i=0; i < parts.Length; i++) { mstring word = parts[i]; if(word.Length > 0 && word.Length <= 16) // Word cannot be longer than the KeyLength! 
{ output.Add(word.ToLowerM(), mstring.Prepare(1)); } } } ]]> </Map> <ReduceInitialize> <![CDATA[ public void ReduceInitialize() { Qizmt_Log(`ReduceInitialize {3AF77789-4D38-4fde-B5DD-DC5115A909F5}`); throw new Exception(`Exception from ReduceInitialize {D5C5B240-537E-4b80-AD70-6463F632EE7B}`); } ]]> </ReduceInitialize> <Reduce> <![CDATA[ public override void Reduce(ByteSlice key, ByteSliceList values, ReduceOutput output) { mstring sLine = mstring.Prepare(UnpadKey(key)); sLine = sLine.AppendM(',').AppendM(values.Length); output.Add(sLine); } ]]> </Reduce> <ReduceFinalize> <![CDATA[ public void ReduceFinalize() { Qizmt_Log(`ReduceFinalize {3C677456-22C5-46cd-A1E7-47383274C0C5}`); } ]]> </ReduceFinalize> </MapReduce> </Job> </Jobs> </SourceCode> ").Replace('`', '"')); Exec.Shell("Qizmt importdir " + exectempdir); try { System.IO.File.Delete(execfp); System.IO.Directory.Delete(exectempdir); } catch { } // Run it twice: first suppressing errors to get stdout, then again to get stderr exception. string output = Exec.Shell("Qizmt exec baderrordup{2E829DB6-E853-4fc2-B0B4-82ADAF637312}", true); try { Exec.Shell("Qizmt exec baderrordup{2E829DB6-E853-4fc2-B0B4-82ADAF637312}"); throw new Exception("<<< Job completed without errors; this is wrong! >>>"); } catch (Exception e) { string err = e.ToString(); string badstr = "System.FormatException: Expected 16 hex digits, got \"-000000000000000\""; if (-1 != err.IndexOf(badstr) || -1 != output.IndexOf(badstr)) { throw new Exception("Test failed: broken protocol!", e); } if (-1 == err.IndexOf("{D5C5B240-537E-4b80-AD70-6463F632EE7B}")) { throw new Exception("Test failed: did not get expected exception from exec", e); } } finally { Exec.Shell("Qizmt del baderrordup{2E829DB6-E853-4fc2-B0B4-82ADAF637312}*"); } } } Console.WriteLine("[PASSED] - " + string.Join(" ", args)); }
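// Regression test: formats with Replication=3, runs a sort job on a healthy cluster, then disrupts
// two machines (block files renamed on one, DistributedObjects service stopped on another) and
// verifies the job still produces identical output before repairing the cluster.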
static void ReplicationFailover(string[] args) { if (args.Length <= 1 || !System.IO.File.Exists(args[1])) { throw new Exception("Expected path to DFS.xml"); } string dfsxmlpath = args[1]; string dfsxmlpathbackup = dfsxmlpath + "$" + Guid.NewGuid().ToString(); string masterdir; { System.IO.FileInfo fi = new System.IO.FileInfo(dfsxmlpath); masterdir = fi.DirectoryName; // Directory's full path. } Surrogate.SetNewMetaLocation(masterdir); dfs olddfs = dfs.ReadDfsConfig_unlocked(dfsxmlpath); string masterhost = System.Net.Dns.GetHostName(); string[] allmachines; { string[] sl = olddfs.Slaves.SlaveList.Split(';'); List <string> aml = new List <string>(sl.Length + 1); aml.Add(masterhost); foreach (string slave in sl) { if (0 != string.Compare(IPAddressUtil.GetName(slave), IPAddressUtil.GetName(masterhost), StringComparison.OrdinalIgnoreCase)) { aml.Add(slave); } } allmachines = aml.ToArray(); } Console.WriteLine("Backing up DFS.xml to: {0} ...", dfsxmlpathbackup); try { System.IO.File.Delete(dfsxmlpathbackup); } catch { } System.IO.File.Move(dfsxmlpath, dfsxmlpathbackup); try { { // Note: added replication and much lower DataNodeBaseSize! Console.WriteLine("Formatting DFS with Replication=3 for test..."); Exec.Shell("Qizmt @format Machines=" + string.Join(",", allmachines) + " Replication=3 DataNodeBaseSize=1048576"); } { // Test logic: if (allmachines.Length < 3) { throw new Exception("This test needs a cluster of at least 3 machines!"); } long XBYTES = (long)4194304 * (long)allmachines.Length; Console.WriteLine("Generating data..."); Console.Write(" "); Exec.Shell("Qizmt gen data{476D6FE8-D645-41cc-83A1-3AB5E2DE23E7} " + (XBYTES / 4).ToString()); Console.Write("25%"); Exec.Shell("Qizmt gen data{61136275-16EC-4ff9-84CE-ACC967550181} " + (XBYTES / 4).ToString()); Console.Write("..50%"); Exec.Shell("Qizmt gen data{C76F6C06-EFC8-4808-B214-DB4D167171EB} " + (XBYTES / 2).ToString()); Console.Write("..100%"); Console.WriteLine(); Console.WriteLine("Ensure the cluster is perfectly healthy..."); EnsurePerfectQizmtHealtha(); Console.WriteLine("Running job on healthy cluster..."); string exec_md5; { string exectempdir = @"\\" + System.Net.Dns.GetHostName() + @"\C$\temp\qizmt\regression_test_ReplicationFailover-" + Guid.NewGuid().ToString(); if (!System.IO.Directory.Exists(exectempdir)) { System.IO.Directory.CreateDirectory(exectempdir); } string execfp = exectempdir + @"\exec{FA19CAB0-5225-4cc8-8728-9BFC3A1B834C}"; System.IO.File.WriteAllText(execfp, (@"<?xml version=`1.0` encoding=`utf-8`?> <SourceCode> <Jobs> <Job Name=`exec{FA19CAB0-5225-4cc8-8728-9BFC3A1B834C}`> <IOSettings> <JobType>mapreduce</JobType> <KeyLength>100</KeyLength> <DFSInput>dfs://data{*}</DFSInput> <DFSOutput>dfs://output{04454992-E2CD-4342-AEEB-1D0607B32D84}</DFSOutput> <KeyMajor>8</KeyMajor> <OutputMethod>sorted</OutputMethod> </IOSettings> <MapReduce> <Map> <![CDATA[ public virtual void Map(ByteSlice line, MapOutput output) { output.Add(line, ByteSlice.Prepare()); } ]]> </Map> <Reduce> <![CDATA[ public override void Reduce(ByteSlice key, RandomAccessEntries values, RandomAccessOutput output) { for(int i = 0; i < values.Length; i++) { output.Add(key); } } ]]> </Reduce> </MapReduce> </Job> </Jobs> </SourceCode> ").Replace('`', '"')); Exec.Shell("Qizmt importdir " + exectempdir); try { System.IO.File.Delete(execfp); System.IO.Directory.Delete(exectempdir); } catch { } Exec.Shell("Qizmt exec exec{FA19CAB0-5225-4cc8-8728-9BFC3A1B834C}"); exec_md5 = DfsSum("md5", "output{04454992-E2CD-4342-AEEB-1D0607B32D84}"); Exec.Shell("Qizmt del 
output{04454992-E2CD-4342-AEEB-1D0607B32D84}"); } try { Console.WriteLine("Disrupting 2 machines..."); { string badmachine = allmachines[allmachines.Length - 1]; Console.WriteLine(" Bad disk on {0}", badmachine); string netpath = Surrogate.NetworkPathForHost(badmachine); foreach (System.IO.FileInfo fi in (new System.IO.DirectoryInfo(netpath)).GetFiles("zd.*.zd")) { if (!fi.Name.StartsWith("zd.!.")) { System.IO.File.Move(fi.FullName, fi.DirectoryName + @"\zd.!." + fi.Name.Substring(3)); } } } { string badmachine = allmachines[allmachines.Length - 2]; Console.WriteLine(" Bad network connection on {0}", badmachine); Exec.Shell(@"sc \\" + badmachine + @" stop DistributedObjects"); } Console.WriteLine("Ensure the cluster is NOT perfectly healthy..."); { bool healthy; try { EnsurePerfectQizmtHealtha(); healthy = true; } catch { healthy = false; } if (healthy) { throw new Exception("Cluster is still healthy"); } } Console.WriteLine("Running job on unhealthy cluster..."); { try { Exec.Shell("Qizmt exec exec{FA19CAB0-5225-4cc8-8728-9BFC3A1B834C}"); } catch { // Replication will output a warning and throw an exception, // so we need to ignore that exception. // The MD5 check will ensure it ran fine. } string new_exec_md5 = DfsSum("md5", "output{04454992-E2CD-4342-AEEB-1D0607B32D84}"); Exec.Shell("Qizmt del output{04454992-E2CD-4342-AEEB-1D0607B32D84}"); if (new_exec_md5 != exec_md5) { throw new Exception("Output files from before and after disrupting cluster do not match"); } } } finally { { Console.WriteLine("Repairing disrupted disk"); string badmachine = allmachines[allmachines.Length - 1]; string netpath = Surrogate.NetworkPathForHost(badmachine); foreach (System.IO.FileInfo fi in (new System.IO.DirectoryInfo(netpath)).GetFiles("zd.!.*.zd")) { System.IO.File.Move(fi.FullName, fi.DirectoryName + @"\zd." + fi.Name.Substring(5)); } } } } Console.WriteLine("[PASSED] - " + string.Join(" ", args)); } finally { Console.WriteLine("Restoring DFS.xml backup..."); // Note: these are safe; the try/finally only wraps the new dfs. try { Exec.Shell("Qizmt del *"); } catch { } try { // Delete temp dfs.xml, it's being replaced with the good one. System.IO.File.Delete(dfsxmlpath); } catch { } System.IO.File.Move(dfsxmlpathbackup, dfsxmlpath); { // Note: killall issued to fix disrupted machines Console.WriteLine("Running killall to repair"); Exec.Shell("Qizmt killall -f"); } } }
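// Regression test: verifies that "Qizmt metapath" points at the same DFS.xml contents as the
// surrogate's internal path and, when supplied, the command-line path.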
static void MetaPath(string[] args) { { string thisservicedir = Surrogate.FetchServiceNetworkPath(System.Net.Dns.GetHostName()); string master = Surrogate.LocateMasterHost(thisservicedir); Surrogate.SetNewMasterHost(master); Surrogate.SetNewMetaLocation(thisservicedir); } // NetworkPathForHost works here due to the above. string internalpath = Surrogate.NetworkPathForHost(Surrogate.MasterHost) + @"\" + dfs.DFSXMLNAME; Console.WriteLine("Internal DFS.xml path: {0}", internalpath); string dfsxmlpath; if (args.Length <= 1 || !System.IO.File.Exists(args[1])) { //throw new Exception("Expected path to DFS.xml"); dfsxmlpath = null; } else { dfsxmlpath = args[1]; Console.WriteLine("Command-line path: {0}", dfsxmlpath); } string metapath = Exec.Shell("Qizmt metapath").Trim(); Console.WriteLine("Qizmt metapath path: {0}", metapath); Console.WriteLine("Comparing..."); if (null != dfsxmlpath) { if (!System.IO.File.Exists(dfsxmlpath)) { throw new Exception("Command-line path does not exist: " + dfsxmlpath); } if (!System.IO.File.Exists(metapath)) { throw new Exception("metapath path does not exist: " + metapath); } if (System.IO.File.ReadAllText(dfsxmlpath) != System.IO.File.ReadAllText(metapath)) { throw new Exception("metapath failure: command-line and metapath are not the same"); } } if (!System.IO.File.Exists(internalpath)) { throw new Exception("Internal path does not exist: " + internalpath); } if (!System.IO.File.Exists(metapath)) { throw new Exception("metapath path does not exist: " + metapath); } if (System.IO.File.ReadAllText(internalpath) != System.IO.File.ReadAllText(metapath)) { throw new Exception("metapath failure: internal path and metapath are not the same"); } Console.WriteLine("[PASSED] - " + string.Join(" ", args)); }
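// Regression test: formats a cluster with the surrogate either participating ("incluster") or not
// ("isolated"), removes the surrogate with removemachine, points slave.dat at the new master, and
// verifies the file listing and a test job's md5 are unchanged. Restores the original DFS.xml afterward.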
static void RemoveSurrogate(string[] args)
{
    if (args.Length <= 1 || !System.IO.File.Exists(args[1]))
    {
        throw new Exception("Expected path to DFS.xml");
    }
    string dfsxmlpath = args[1];
    string dfsxmlpathbackup = dfsxmlpath + "$" + Guid.NewGuid().ToString();
    if (args.Length <= 2)
    {
        throw new Exception("Expected: incluster or isolated");
    }
    bool incluster;
    if (args[2] == "incluster")
    {
        incluster = true;
    }
    else if (args[2] == "isolated")
    {
        incluster = false;
    }
    else
    {
        throw new Exception("Expected: incluster or isolated");
    }
    string masterdir;
    {
        System.IO.FileInfo fi = new System.IO.FileInfo(dfsxmlpath);
        masterdir = fi.DirectoryName; // Directory's full path.
    }
    Surrogate.SetNewMetaLocation(masterdir);
    string masterslavedat = masterdir + @"\slave.dat";
    dfs olddfs = dfs.ReadDfsConfig_unlocked(dfsxmlpath);
    string masterhost = System.Net.Dns.GetHostName();
    List<string> otherhosts = new List<string>(); // Non-surrogate machines, regardless of whether the surrogate participates in the cluster.
    foreach (string slave in olddfs.Slaves.SlaveList.Split(';'))
    {
        if (0 != string.Compare(IPAddressUtil.GetName(slave), IPAddressUtil.GetName(masterhost), StringComparison.OrdinalIgnoreCase))
        {
            otherhosts.Add(slave);
        }
    }
    string newmaster = otherhosts[0];
    Console.WriteLine("Backing up DFS.xml to: {0} ...", dfsxmlpathbackup);
    try { System.IO.File.Delete(dfsxmlpathbackup); } catch { }
    System.IO.File.Move(dfsxmlpath, dfsxmlpathbackup);
    try
    {
        StringBuilder sbmachines = new StringBuilder(1000);
        if (incluster)
        {
            sbmachines.Append(masterhost);
        }
        foreach (string host in otherhosts)
        {
            if (0 != sbmachines.Length)
            {
                sbmachines.Append(',');
            }
            sbmachines.Append(host);
        }
        Console.WriteLine("Formatting DFS for test...");
        Exec.Shell("Qizmt @format Machines=" + sbmachines.ToString());
        Console.WriteLine("Adding some files to DFS...");
        Console.Write(" ");
        Exec.Shell("Qizmt bingen 1MB 1MB 50");
        Console.Write("10%");
        Exec.Shell("Qizmt examples");
        Console.Write("..15%");
        Exec.Shell("Qizmt wordgen 10MB 10MB 100");
        Console.Write("..50%");
        Exec.Shell("Qizmt asciigen 50MB 50MB 500");
        Console.Write("..100%");
        Console.WriteLine();
        int ls_output_linecount = Exec.Shell("Qizmt ls").Split('\n').Length;
        Console.WriteLine("Ensure the cluster is perfectly healthy...");
        EnsurePerfectQizmtHealtha();
        Console.WriteLine("Run test job, save output...");
        string md5_10MB_output = Exec.Shell("Qizmt md5 10MB");
        Console.WriteLine("Removing Surrogate (removemachine {0}) ...", masterhost);
        Console.WriteLine(Exec.Shell("Qizmt removemachine " + masterhost));
        Console.WriteLine("Interface with new surrogate...");
        System.IO.File.WriteAllText(masterslavedat, "master=" + newmaster + Environment.NewLine);
        {
            // Not comparing contents because of the free disk space line.
            int new_ls_output_linecount = Exec.Shell("Qizmt ls").Split('\n').Length;
            if (ls_output_linecount != new_ls_output_linecount)
            {
                throw new Exception("Cluster does not contain the same files as before removemachine " + masterdir + ", or problem issuing commands on new surrogate");
            }
        }
        Console.WriteLine("Ensure the cluster is perfectly healthy...");
        EnsurePerfectQizmtHealtha();
        Console.WriteLine("Run test job, confirm output...");
        if (md5_10MB_output != Exec.Shell("Qizmt md5 10MB"))
        {
            throw new Exception("Test job output does not match previous run");
        }
        Console.WriteLine("[PASSED] - " + string.Join(" ", args));
    }
    finally
    {
        Console.WriteLine("Restoring DFS.xml backup...");
        // Note: these are safe; the try/finally only wraps the new dfs.
        try { Exec.Shell("Qizmt del *"); } catch { }
        try { System.IO.File.Delete(masterslavedat); } catch { }
        for (int si = 0; si < otherhosts.Count; si++)
        {
            try
            {
                System.IO.File.Delete(Surrogate.NetworkPathForHost(otherhosts[si]) + @"\slave.dat");
                // Deleting dfs.xml should go last because it'll usually fail.
                System.IO.File.Delete(Surrogate.NetworkPathForHost(otherhosts[si]) + @"\dfs.xml");
            }
            catch { }
        }
        try { System.IO.File.Delete(dfsxmlpath); } catch { }
        try
        {
            // Reformat the cluster so stuff like slave.dat is correct...
            Exec.Shell("Qizmt @format Machines=" + string.Join(",", otherhosts.ToArray()));
        }
        catch (Exception exf)
        {
            Console.Error.WriteLine("Problem during reformat, there may be an issue with the cluster: {0}", exf);
        }
        try
        {
            // Delete the dfs.xml just written, it's being replaced with the good one.
            System.IO.File.Delete(dfsxmlpath);
        }
        catch { }
        System.IO.File.Move(dfsxmlpathbackup, dfsxmlpath);
    }
}
static void RemoveMachine(string[] args) { bool two2one = -1 != args[0].IndexOf("2to1", StringComparison.OrdinalIgnoreCase); bool three2two = -1 != args[0].IndexOf("3to2", StringComparison.OrdinalIgnoreCase); if (args.Length <= 1 || !System.IO.File.Exists(args[1])) { throw new Exception("Expected path to DFS.xml"); } string dfsxmlpath = args[1]; string dfsxmlpathbackup = dfsxmlpath + "$" + Guid.NewGuid().ToString(); string masterdir; { System.IO.FileInfo fi = new System.IO.FileInfo(dfsxmlpath); masterdir = fi.DirectoryName; // Directory's full path. } Surrogate.SetNewMetaLocation(masterdir); string masterslavedat = masterdir + @"\slave.dat"; dfs olddfs = dfs.ReadDfsConfig_unlocked(dfsxmlpath); int iarg = 2; string sreplication = "1"; if (args.Length > iarg) { if (args[iarg].StartsWith("#")) { sreplication = args[iarg++].Substring(1); } } string masterhost = System.Net.Dns.GetHostName(); string[] allmachines; { string[] sl = olddfs.Slaves.SlaveList.Split(';'); List <string> aml = new List <string>(sl.Length + 1); aml.Add(masterhost); foreach (string slave in sl) { if (0 != string.Compare(IPAddressUtil.GetName(slave), IPAddressUtil.GetName(masterhost), StringComparison.OrdinalIgnoreCase)) { aml.Add(slave); } } allmachines = aml.ToArray(); } if (two2one) { if (allmachines.Length < 2) { throw new Exception("Need >= 2 machines for 2to1"); } allmachines = new string[] { allmachines[0], allmachines[1] }; } if (three2two) { if (allmachines.Length < 3) { throw new Exception("Need >= 3 machines for 3to2"); } allmachines = new string[] { allmachines[0], allmachines[1], allmachines[2] }; } if (allmachines.Length < 2) { throw new Exception("Cluster needs at least 2 machines for this test"); } Console.WriteLine("Backing up DFS.xml to: {0} ...", dfsxmlpathbackup); try { System.IO.File.Delete(dfsxmlpathbackup); } catch { } System.IO.File.Move(dfsxmlpath, dfsxmlpathbackup); try { Console.WriteLine("Formatting DFS for test..."); { string fmtcmd = "Qizmt @format Machines=" + string.Join(",", allmachines) + " Replication=" + sreplication; Console.WriteLine(" {0}", fmtcmd); Exec.Shell(fmtcmd); } Console.WriteLine("Adding some files to DFS..."); Console.Write(" "); Exec.Shell("Qizmt bingen 1MB 1MB 50"); Console.Write("10%"); Exec.Shell("Qizmt examples"); Console.Write("..15%"); Exec.Shell("Qizmt wordgen 10MB 10MB 100"); Console.Write("..50%"); Exec.Shell("Qizmt asciigen 50MB 50MB 500"); Console.Write("..100%"); Console.WriteLine(); int ls_output_linecount = Exec.Shell("Qizmt ls").Split('\n').Length; Console.WriteLine("Ensure the cluster is perfectly healthy..."); EnsurePerfectQizmtHealtha(); Console.WriteLine("Run test job, save output..."); string md5_10MB_output = Exec.Shell("Qizmt md5 10MB"); string rmachine = allmachines[allmachines.Length - 1]; Console.WriteLine("Removing machine (removemachine {0}) ...", rmachine); Console.WriteLine(Exec.Shell("Qizmt removemachine " + rmachine)); Console.WriteLine("Ensure the cluster is perfectly healthy..."); EnsurePerfectQizmtHealtha(); Console.WriteLine("Run test job, confirm output..."); if (md5_10MB_output != Exec.Shell("Qizmt md5 10MB")) { throw new Exception("Test job output does not match previous run"); } Console.WriteLine("[PASSED] - " + string.Join(" ", args)); } finally { Console.WriteLine("Restoring DFS.xml backup..."); // Note: these are safe; the try/finally only wraps the new dfs. try { Exec.Shell("Qizmt del *"); } catch { } try { // Delete temp dfs.xml, it's being replaced with the good one. 
System.IO.File.Delete(dfsxmlpath); } catch { } System.IO.File.Move(dfsxmlpathbackup, dfsxmlpath); } }
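// Regression test: imports a directory of many small job files with "Qizmt importdirmt" and
// verifies every one of them appears in "Qizmt ls", i.e. no concurrent DFS updates were lost.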
static void DfsUpdateStressTest(string[] args) { if (args.Length <= 1 || !System.IO.File.Exists(args[1])) { throw new Exception("Expected path to DFS.xml"); } string dfsxmlpath = args[1]; string masterdir; { System.IO.FileInfo fi = new System.IO.FileInfo(dfsxmlpath); masterdir = fi.DirectoryName; // Directory's full path. } Surrogate.SetNewMetaLocation(masterdir); dfs dc = dfs.ReadDfsConfig_unlocked(dfsxmlpath); string masterhost = System.Net.Dns.GetHostName(); string[] allmachines; { string[] sl = dc.Slaves.SlaveList.Split(';'); List <string> aml = new List <string>(sl.Length + 1); aml.Add(masterhost); foreach (string slave in sl) { if (0 != string.Compare(IPAddressUtil.GetName(slave), IPAddressUtil.GetName(masterhost), StringComparison.OrdinalIgnoreCase)) { aml.Add(slave); } } allmachines = aml.ToArray(); } { Console.WriteLine("Ensure cluster is perfectly healthy..."); EnsurePerfectQizmtHealtha(); Console.WriteLine("Stressing DFS updates..."); //System.Threading.Thread.Sleep(1000 * 8); string exectempdir = @"\\" + System.Net.Dns.GetHostName() + @"\C$\temp\qizmt\regression_test_DfsUpdateStressTest-" + Guid.NewGuid().ToString(); if (!System.IO.Directory.Exists(exectempdir)) { System.IO.Directory.CreateDirectory(exectempdir); } string execfp = exectempdir + @"\exec{8B8F731B-3BEC-4e99-B08F-BDEB81525172}"; const int NUMBER_OF_JOBS = 50; // <--STRESS-NUMBER-- for (int njob = 0; njob < NUMBER_OF_JOBS; njob++) { System.IO.File.WriteAllText(execfp + njob.ToString(), (@"<SourceCode><Jobs></Jobs></SourceCode>").Replace('`', '"')); } try { Exec.Shell("Qizmt importdirmt " + exectempdir); Console.WriteLine("Confirming updates..."); { string lsoutput = Exec.Shell("Qizmt ls"); int njobs = 0; for (int i = 0; ;) { if (njobs == NUMBER_OF_JOBS) { break; // Good! } i = lsoutput.IndexOf("{8B8F731B-3BEC-4e99-B08F-BDEB81525172}", i); if (-1 == i) { throw new Exception("Not all updates to DFS were written (only found " + njobs.ToString() + " jobs imported, expected " + NUMBER_OF_JOBS.ToString() + ")"); } i += "{8B8F731B-3BEC-4e99-B08F-BDEB81525172}".Length; njobs++; } } } finally { try { for (int njob = 0; njob < NUMBER_OF_JOBS; njob++) { System.IO.File.Delete(execfp + njob.ToString()); } System.IO.Directory.Delete(exectempdir); } catch { } try { Exec.Shell("Qizmt del exec{8B8F731B-3BEC-4e99-B08F-BDEB81525172}*"); } catch { } } Console.WriteLine("[PASSED] - " + string.Join(" ", args)); } }
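// Regression test: populates an unreplicated cluster (optionally with extra data and cache files),
// raises the replication factor with "Qizmt replicationupdate", and verifies health, replication,
// and an unchanged file listing.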
static void EnableReplication(string[] args) { if (args.Length <= 1 || !System.IO.File.Exists(args[1])) { throw new Exception("Expected path to DFS.xml"); } bool withcache = -1 != args[0].IndexOf("withcache", StringComparison.OrdinalIgnoreCase); string dfsxmlpath = args[1]; string dfsxmlpathbackup = dfsxmlpath + "$" + Guid.NewGuid().ToString(); long bytes_to_add = 0; if (args.Length > 2) { // To-do: ParseCapacity. bytes_to_add = long.Parse(args[2]); if (bytes_to_add < 0) { throw new Exception("Invalid bytes-to-add (" + bytes_to_add.ToString() + " bytes)"); } if (bytes_to_add < 1048576) { throw new Exception("bytes-to-add must be at least 1 MB"); } } int num_files = 0; if (args.Length > 3) { num_files = int.Parse(args[3]); if (num_files < 0 || num_files > bytes_to_add / 20) { throw new Exception("Invalid #files"); } } string masterdir; { System.IO.FileInfo fi = new System.IO.FileInfo(dfsxmlpath); masterdir = fi.DirectoryName; // Directory's full path. } Surrogate.SetNewMetaLocation(masterdir); dfs olddfs = dfs.ReadDfsConfig_unlocked(dfsxmlpath); string masterhost = System.Net.Dns.GetHostName(); string[] allmachines; { string[] sl = olddfs.Slaves.SlaveList.Split(';'); List <string> aml = new List <string>(sl.Length + 1); aml.Add(masterhost); foreach (string slave in sl) { if (0 != string.Compare(IPAddressUtil.GetName(slave), IPAddressUtil.GetName(masterhost), StringComparison.OrdinalIgnoreCase)) { aml.Add(slave); } } allmachines = aml.ToArray(); } Console.WriteLine("Backing up DFS.xml to: {0} ...", dfsxmlpathbackup); try { System.IO.File.Delete(dfsxmlpathbackup); } catch { } System.IO.File.Move(dfsxmlpath, dfsxmlpathbackup); try { { Console.WriteLine("Formatting DFS for test..."); Exec.Shell("Qizmt @format Machines=" + string.Join(",", allmachines)); } { // Test logic: Console.WriteLine("Adding some files to DFS..."); Console.Write(" "); Exec.Shell("Qizmt bingen 1MB 1MB 50"); Console.Write("10%"); Exec.Shell("Qizmt examples"); Console.Write("..15%"); Exec.Shell("Qizmt wordgen 10MB 10MB 100"); // Note: also used by Cacher. Console.Write("..50%"); Exec.Shell("Qizmt asciigen 50MB 50MB 500"); Console.Write("..100%"); Console.WriteLine(); if (bytes_to_add > 0) { Console.WriteLine("Adding {0} bytes as requested (bytes-to-add)...", bytes_to_add); long bta10 = bytes_to_add / 10; Console.Write(" "); Exec.Shell("Qizmt gen bta10-" + Guid.NewGuid().ToString() + " " + bta10.ToString()); Console.Write("10%"); Exec.Shell("Qizmt gen bta20-" + Guid.NewGuid().ToString() + " " + (bta10 * 2).ToString()); Console.Write("..30%"); { long totsz = (bta10 * 3); if (num_files > 1) { long onesz = totsz / num_files; //for (int inf = 0; inf < num_files; inf++) MySpace.DataMining.Threading.ThreadTools.Parallel( new Action <int>( delegate(int inf) { Exec.Shell("Qizmt gen bta30." 
+ inf.ToString() + "-" + Guid.NewGuid().ToString() + " " + onesz.ToString()); }), num_files, 15); } else { Exec.Shell("Qizmt gen bta30-" + Guid.NewGuid().ToString() + " " + totsz.ToString()); Console.Write("..60%"); } } Exec.Shell("Qizmt gen bta40-" + Guid.NewGuid().ToString() + " " + (bta10 * 4).ToString()); Console.Write("..100%"); Console.WriteLine(); } if (withcache) { Console.WriteLine("Generating cache files..."); string cachertempdir = @"\\" + System.Net.Dns.GetHostName() + @"\C$\temp\qizmt\regression_test_Cacher-" + Guid.NewGuid().ToString(); if (!System.IO.Directory.Exists(cachertempdir)) { System.IO.Directory.CreateDirectory(cachertempdir); } string cacherfp = cachertempdir + @"\Cacher"; System.IO.File.WriteAllText(cacherfp, (@"<?xml version=`1.0` encoding=`utf-8`?> <SourceCode> <Jobs> <Job Name=`Cacher`> <Delta> <Name>Cacher_cache</Name> <DFSInput>dfs://10MB</DFSInput> </Delta> <IOSettings> <JobType>mapreduce</JobType> <KeyLength>100</KeyLength> <DFSInput></DFSInput> <DFSOutput>dfs://Cacher_output</DFSOutput> <KeyMajor>8</KeyMajor> <OutputMethod>grouped</OutputMethod> </IOSettings> <MapReduce> <Map> <![CDATA[ public virtual void Map(ByteSlice line, MapOutput output) { output.Add(line, ByteSlice.Prepare()); } ]]> </Map> <Reduce> <![CDATA[ public override void Reduce(ByteSlice key, RandomAccessEntries values, RandomAccessOutput output) { for(int i = 0; i < values.Length; i++) { output.Add(key); } } ]]> </Reduce> </MapReduce> </Job> </Jobs> </SourceCode> ").Replace('`', '"')); Exec.Shell("Qizmt importdir " + cachertempdir); try { System.IO.File.Delete(cacherfp); System.IO.Directory.Delete(cachertempdir); } catch { } Exec.Shell("Qizmt exec Cacher"); // Creates cache file Cacher_cache Exec.Shell("Qizmt del Cacher_output"); } Console.WriteLine("Ensure the cluster is perfectly healthy..."); EnsurePerfectQizmtHealtha(); string ls_output = Exec.Shell("Qizmt ls"); int ls_output_linecount = ls_output.Split('\n').Length; Console.WriteLine("*** ls output before replication:"); Console.WriteLine(ls_output); Console.WriteLine("Updating Replication Factor..."); const int replicationfactor = 2; Console.WriteLine(Exec.Shell("Qizmt replicationupdate " + replicationfactor.ToString())); Console.WriteLine("Ensure the cluster is perfectly healthy..."); EnsurePerfectQizmtHealtha(); if (withcache) { Console.WriteLine("Validate cache files..."); Exec.Shell("Qizmt exec Cacher"); // Uses existing cache file Cacher_cache Exec.Shell("Qizmt del Cacher_output"); } Console.WriteLine("Ensure data is replicated..."); EnsureReplication(dfsxmlpath, replicationfactor); { // Not comparing contents because of the free disk space line. string new_ls_output = Exec.Shell("Qizmt ls"); Console.WriteLine("*** ls output after replication:"); Console.WriteLine(new_ls_output); int new_ls_output_linecount = new_ls_output.Split('\n').Length; if (ls_output_linecount != new_ls_output_linecount) { throw new Exception("Cluster does not contain the same files as before replication"); } } } Console.WriteLine("[PASSED] - " + string.Join(" ", args)); } finally { Console.WriteLine("Restoring DFS.xml backup..."); // Note: these are safe; the try/finally only wraps the new dfs. try { Exec.Shell("Qizmt del *"); } catch { } try { // Delete temp dfs.xml, it's being replaced with the good one. System.IO.File.Delete(dfsxmlpath); } catch { } System.IO.File.Move(dfsxmlpathbackup, dfsxmlpath); } }
static void NewDFS(string[] args) { if (args.Length <= 1 || !System.IO.File.Exists(args[1])) { throw new Exception("Expected path to DFS.xml"); } string dfsxmlpath = args[1]; string dfsxmlpathbackup = dfsxmlpath + "$" + Guid.NewGuid().ToString(); string masterdir; { System.IO.FileInfo fi = new System.IO.FileInfo(dfsxmlpath); masterdir = fi.DirectoryName; // Directory's full path. } Surrogate.SetNewMetaLocation(masterdir); dfs olddfs = dfs.ReadDfsConfig_unlocked(dfsxmlpath); string masterhost = System.Net.Dns.GetHostName(); string[] allmachines; { string[] sl = olddfs.Slaves.SlaveList.Split(';'); List <string> aml = new List <string>(sl.Length + 1); aml.Add(masterhost); foreach (string slave in sl) { if (0 != string.Compare(IPAddressUtil.GetName(slave), IPAddressUtil.GetName(masterhost), StringComparison.OrdinalIgnoreCase)) { aml.Add(slave); } } allmachines = aml.ToArray(); } Console.WriteLine("Backing up DFS.xml to: {0} ...", dfsxmlpathbackup); try { System.IO.File.Delete(dfsxmlpathbackup); } catch { } System.IO.File.Move(dfsxmlpath, dfsxmlpathbackup); try { { Console.WriteLine("Formatting DFS for test..."); Exec.Shell("Qizmt @format Machines=" + string.Join(",", allmachines)); } { // Test logic: } Console.WriteLine("[PASSED] - " + string.Join(" ", args)); } finally { Console.WriteLine("Restoring DFS.xml backup..."); // Note: these are safe; the try/finally only wraps the new dfs. try { Exec.Shell("Qizmt del *"); } catch { } try { // Delete temp dfs.xml, it's being replaced with the good one. System.IO.File.Delete(dfsxmlpath); } catch { } System.IO.File.Move(dfsxmlpathbackup, dfsxmlpath); } }
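// Regression test: runs the same delta-cache mapreduce job with grouped and with sorted output,
// renaming an input file into and out of scope between runs, and verifies both output checksums match.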
static void SortedCache(string[] args) { if (args.Length <= 1 || !System.IO.File.Exists(args[1])) { throw new Exception("Expected path to DFS.xml"); } string dfsxmlpath = args[1]; string dfsxmlpathbackup = dfsxmlpath + "$" + Guid.NewGuid().ToString(); string masterdir; { System.IO.FileInfo fi = new System.IO.FileInfo(dfsxmlpath); masterdir = fi.DirectoryName; // Directory's full path. } Surrogate.SetNewMetaLocation(masterdir); dfs dc = dfs.ReadDfsConfig_unlocked(dfsxmlpath); string masterhost = System.Net.Dns.GetHostName(); string[] allmachines; { string[] sl = dc.Slaves.SlaveList.Split(';'); List <string> aml = new List <string>(sl.Length + 1); aml.Add(masterhost); foreach (string slave in sl) { if (0 != string.Compare(IPAddressUtil.GetName(slave), IPAddressUtil.GetName(masterhost), StringComparison.OrdinalIgnoreCase)) { aml.Add(slave); } } allmachines = aml.ToArray(); } { // Test logic: string fguid = "{" + Guid.NewGuid().ToString() + "}"; string jobfn = "regression_test_SortedCache-" + Guid.NewGuid().ToString(); SortedCacheCleanup(jobfn); // Cleanup previous run. try { { Console.WriteLine("Generating data and jobs..."); string exectempdir = @"\\" + System.Net.Dns.GetHostName() + @"\C$\temp\qizmt\regression_test_SortedCache" + fguid; if (!System.IO.Directory.Exists(exectempdir)) { System.IO.Directory.CreateDirectory(exectempdir); } string execfp = exectempdir + @"\" + jobfn; string scguid = Guid.NewGuid().ToString(); string ECODE = (@"<SourceCode> <Jobs> <Job Name=`CS` Custodian=`Chris Miller` Email=``> <Delta> <Name>{D7D3A6FE-8472-4320-9144-486E436D4542}CS_cache</Name> <DFSInput>{D7D3A6FE-8472-4320-9144-486E436D4542}a.txt;dfs://{D7D3A6FE-8472-4320-9144-486E436D4542}b?.txt</DFSInput> </Delta> <IOSettings> <JobType>mapreduce</JobType> <KeyLength>1</KeyLength> <DFSInput></DFSInput> <DFSOutput>dfs://{D7D3A6FE-8472-4320-9144-486E436D4542}CS_Output.txt</DFSOutput> <OutputMethod>{9235036E-4A47-4ee5-985F-F19D2F2DE85C}</OutputMethod> </IOSettings> <MapReduce> <Map> <![CDATA[ List<byte> foo = new List<byte>(); List<byte> bar = new List<byte>(); public virtual void Map(ByteSlice line, MapOutput output) { foo.Clear(); bar.Clear(); foo.Add((byte)('A' + line[0] % 16)); // A-F only. 
bar.Add(line[1]); output.Add(ByteSlice.Prepare(foo), ByteSlice.Prepare(bar)); } ]]> </Map> <Reduce> <![CDATA[ public override void Reduce(ByteSlice key, ByteSliceList values, ReduceOutput output) { long result = 0; while(values.MoveNext()) { ByteSlice v = values.Current; result += v[0]; } mstring ms = mstring.Prepare(); ms.AppendM((char)key[0]); ms.AppendM(result); output.Add(ms); } ]]> </Reduce> </MapReduce> </Job> </Jobs> </SourceCode> ").Replace('`', '"'); System.IO.File.WriteAllText(execfp + ".grouped", ECODE.Replace("{9235036E-4A47-4ee5-985F-F19D2F2DE85C}", "grouped")); System.IO.File.WriteAllText(execfp + ".sorted", ECODE.Replace("{9235036E-4A47-4ee5-985F-F19D2F2DE85C}", "sorted")); Exec.Shell("Qizmt importdir " + exectempdir); try { System.IO.File.Delete(execfp + ".grouped"); System.IO.File.Delete(execfp + ".sorted"); System.IO.Directory.Delete(exectempdir); } catch { } Exec.Shell("Qizmt asciigen {D7D3A6FE-8472-4320-9144-486E436D4542}a.txt 16KB 2B"); Exec.Shell("Qizmt asciigen {D7D3A6FE-8472-4320-9144-486E436D4542}b1.txt 8KB 2B"); Exec.Shell("Qizmt asciigen {D7D3A6FE-8472-4320-9144-486E436D4542}b2.txt_ 8KB 2B"); } string checksum_grouped = ""; { Console.WriteLine("Running grouped job..."); Exec.Shell("Qizmt del {D7D3A6FE-8472-4320-9144-486E436D4542}CS_cache"); Exec.Shell("Qizmt del {D7D3A6FE-8472-4320-9144-486E436D4542}CS_Output.txt"); Exec.Shell("Qizmt exec " + jobfn + ".grouped"); Exec.Shell("Qizmt rename {D7D3A6FE-8472-4320-9144-486E436D4542}b2.txt_ {D7D3A6FE-8472-4320-9144-486E436D4542}b2.txt"); Exec.Shell("Qizmt del {D7D3A6FE-8472-4320-9144-486E436D4542}CS_Output.txt"); Exec.Shell("Qizmt exec " + jobfn + ".grouped"); Exec.Shell("Qizmt rename {D7D3A6FE-8472-4320-9144-486E436D4542}b2.txt {D7D3A6FE-8472-4320-9144-486E436D4542}b2.txt_"); checksum_grouped = DfsSum("Sum2", "{D7D3A6FE-8472-4320-9144-486E436D4542}CS_Output.txt"); Console.WriteLine(" checksum2 = {0}", checksum_grouped); } string checksum_sorted = ""; { Console.WriteLine("Running sorted job..."); Exec.Shell("Qizmt del {D7D3A6FE-8472-4320-9144-486E436D4542}CS_cache"); Exec.Shell("Qizmt del {D7D3A6FE-8472-4320-9144-486E436D4542}CS_Output.txt"); Exec.Shell("Qizmt exec " + jobfn + ".sorted"); Exec.Shell("Qizmt rename {D7D3A6FE-8472-4320-9144-486E436D4542}b2.txt_ {D7D3A6FE-8472-4320-9144-486E436D4542}b2.txt"); Exec.Shell("Qizmt del {D7D3A6FE-8472-4320-9144-486E436D4542}CS_Output.txt"); Exec.Shell("Qizmt exec " + jobfn + ".sorted"); Exec.Shell("Qizmt rename {D7D3A6FE-8472-4320-9144-486E436D4542}b2.txt {D7D3A6FE-8472-4320-9144-486E436D4542}b2.txt_"); checksum_sorted = DfsSum("Sum2", "{D7D3A6FE-8472-4320-9144-486E436D4542}CS_Output.txt"); Console.WriteLine(" checksum2 = {0}", checksum_sorted); } if (checksum_grouped != checksum_sorted) { throw new Exception("Checksums do not match; sort with cache test failed!"); } } finally { SortedCacheCleanup(jobfn); } } Console.WriteLine("[PASSED] - " + string.Join(" ", args)); }
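// Regression test: runs a mapreduce job using the sort method named by args[0], writes each reduce
// key's first character to per-machine temp files, and then verifies the key ranges are ordered
// across machines (optionally pausing before verification).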
static void RangeSort(string[] args) { string sortmethod = args[0]; if (args.Length <= 1 || !System.IO.File.Exists(args[1])) { throw new Exception("Expected path to DFS.xml"); } string dfsxmlpath = args[1]; // Checking for arg[2]==pause later... string masterdir; { System.IO.FileInfo fi = new System.IO.FileInfo(dfsxmlpath); masterdir = fi.DirectoryName; // Directory's full path. } Surrogate.SetNewMetaLocation(masterdir); dfs dc = dfs.ReadDfsConfig_unlocked(dfsxmlpath); string masterhost = System.Net.Dns.GetHostName(); string[] slaves = dc.Slaves.SlaveList.Split(';'); string[] allmachines; { List <string> aml = new List <string>(slaves.Length + 1); aml.Add(masterhost); foreach (string slave in slaves) { if (0 != string.Compare(IPAddressUtil.GetName(slave), IPAddressUtil.GetName(masterhost), StringComparison.OrdinalIgnoreCase)) { aml.Add(slave); } } allmachines = aml.ToArray(); } string pausefile = ""; if (args.Length > 2 && "pause" == args[2]) { string pausedir = @"\\" + masterhost + @"\c$\temp\qizmt"; try { System.IO.Directory.CreateDirectory(pausedir); } catch { } pausefile = pausedir + @"\" + sortmethod + @"-pause.txt"; System.IO.File.WriteAllText(pausefile, "Delete this file to un-pause..." + Environment.NewLine); Console.WriteLine(); Console.WriteLine("Delete the file '{0}' to un-pause...", pausefile); Console.WriteLine(); } { Console.WriteLine("Ensure cluster is perfectly healthy..."); EnsurePerfectQizmtHealtha(); { string fguid = "{" + Guid.NewGuid().ToString() + "}"; // Generate some data to operate on. Console.WriteLine("Generating data..."); // Note: this test depends on wordgen, and wordgen lines always starting with uppercase! long gensize = 1048576 * dc.Blocks.SortedTotalCount; // 1MB * #processes string gencmd = "wordgen"; int keymajor = 8; if (-1 != sortmethod.IndexOf("hash", StringComparison.OrdinalIgnoreCase)) { //gencmd = "bingen"; // Will write crazy files to c:\temp keymajor = 2; } Exec.Shell("Qizmt " + gencmd + " data" + fguid + " " + gensize.ToString()); try { string exectempdir = @"\\" + System.Net.Dns.GetHostName() + @"\C$\temp\qizmt\regression_test_" + sortmethod + @"-" + Guid.NewGuid().ToString(); if (!System.IO.Directory.Exists(exectempdir)) { System.IO.Directory.CreateDirectory(exectempdir); } string execfp = exectempdir + @"\exec" + fguid; // Note: using c:\temp instead of IOUtils.GetTempDirectory() in the following test // because I can't get the IOUtils.GetTempDirectory() for other machines. string scguid = Guid.NewGuid().ToString(); System.IO.File.WriteAllText(execfp, (@"<?xml version=`1.0` encoding=`utf-8`?> <SourceCode> <Jobs> <Job Name=`exec" + fguid + @"`> <IOSettings> <JobType>mapreduce</JobType> <KeyLength>100</KeyLength> <DFSInput>dfs://data" + fguid + @"</DFSInput> <DFSOutput>dfs://output" + fguid + @"</DFSOutput> <KeyMajor>" + keymajor.ToString() + @"</KeyMajor> <OutputMethod>" + sortmethod + @"</OutputMethod> <Setting name=`Subprocess_TotalPrime` value=`0` /> <!-- Don't use grouped. --> <Setting name=`Subprocess_SortedTotalCount` value=`" + slaves.Length.ToString() + @"` /> <!-- ^ One process per participating machine. 
--> </IOSettings> <MapReduce> <Map> <![CDATA[ public virtual void Map(ByteSlice line, MapOutput output) { output.Add(line, ByteSlice.Prepare()); } ]]> </Map> <ReduceInitialize><![CDATA[ public virtual void ReduceInitialize() { } ]]></ReduceInitialize> <Reduce> <![CDATA[ string dir = null; Dictionary<char, System.IO.StreamWriter> files = new Dictionary<char, System.IO.StreamWriter>(); public override void Reduce(ByteSlice key, ByteSliceList values, ReduceOutput output) { if(null == dir) { dir = @`\\` + Qizmt_MachineHost + @`\c$\temp\qizmt\" + sortmethod + @"-" + scguid + @"`; if(!System.IO.Directory.Exists(dir)) { System.IO.Directory.CreateDirectory(dir); } } System.IO.StreamWriter stmw; if(!files.ContainsKey((char)key[0])) { stmw = new System.IO.StreamWriter(dir + @`\` + (char)key[0] + `.txt`, true); // append=true files[(char)key[0]] = stmw; } stmw = files[(char)key[0]]; stmw.WriteLine(key.ToString()); } ]]> </Reduce> <ReduceFinalize><![CDATA[ public virtual void ReduceFinalize() { foreach(KeyValuePair<char, System.IO.StreamWriter> kvp in files) { kvp.Value.Close(); } } ]]></ReduceFinalize> </MapReduce> </Job> <Job Name=`Verify Sort Range` > <IOSettings> <JobType>local</JobType> </IOSettings> <Local> <![CDATA[ readonly string[] slaves = `" + dc.Slaves.SlaveList + @"`.Split(';'); readonly string pausefile = @`" + pausefile + @"`; public virtual void Local() { if(!string.IsNullOrEmpty(pausefile)) { bool bb = false; while(System.IO.File.Exists(pausefile)) { if(!bb) { bb = true; try { System.IO.File.AppendAllText(pausefile, `Ready!` + Environment.NewLine); } catch { } } System.Threading.Thread.Sleep(1000); } } bool failed = false; char bound = '\0'; foreach(string slave in slaves) { string dir = @`\\` + slave + @`\c$\temp\qizmt\" + sortmethod + @"-" + scguid + @"`; try { char thishighest = '\0'; foreach(System.IO.FileInfo fi in (new System.IO.DirectoryInfo(dir).GetFiles())) { char c = fi.Name[0]; if(c < bound) { failed = true; throw new Exception(`Data is not range sorted (" + sortmethod + @") starting on machine ` + slave + ` (Error 1FC8AB58-4DBD-4d56-9587-96312F9A5886)`); } if(c > thishighest) { thishighest = c; } fi.Delete(); } if(thishighest > bound) { bound = thishighest; } System.IO.Directory.Delete(dir); } catch(Exception e) { Qizmt_Log(`Exception: ` + e.ToString()); } } if(!failed && bound > '\0') { Qizmt_Log(`Success! (OK 55D106EA-AD09-4503-96BA-387795EDEECB)`); } } ]]> </Local> </Job> </Jobs> </SourceCode> ").Replace('`', '"')); Exec.Shell("Qizmt importdir " + exectempdir); try { System.IO.File.Delete(execfp); System.IO.Directory.Delete(exectempdir); } catch { } try { Console.WriteLine("Running " + sortmethod + " job..."); string output = Exec.Shell("Qizmt exec exec" + fguid); Console.WriteLine(output.Trim()); if (-1 == output.IndexOf("55D106EA-AD09-4503-96BA-387795EDEECB")) { throw new Exception("Sort range order verification (" + sortmethod + ") did not succeed"); } } finally { Exec.Shell("Qizmt del output" + fguid); Exec.Shell("Qizmt del exec" + fguid); } } finally { Exec.Shell("Qizmt del data" + fguid); } } Console.WriteLine("[PASSED] - " + string.Join(" ", args)); } }
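// Regression test: plants dummy/leaked files on the surrogate, deletes a critical slave executable
// on another machine, runs "aelight deploy", and verifies the deploy restores the cluster without
// propagating the leaked files and that a test job's output is unchanged.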
static void Deploy(string[] args) { if (args.Length <= 1 || !System.IO.File.Exists(args[1])) { throw new Exception("Expected path to DFS.xml"); } string dfsxmlpath = args[1]; string masterdir; { System.IO.FileInfo fi = new System.IO.FileInfo(dfsxmlpath); masterdir = fi.DirectoryName; // Directory's full path. } Surrogate.SetNewMetaLocation(masterdir); dfs dc = dfs.ReadDfsConfig_unlocked(dfsxmlpath); string masterhost = System.Net.Dns.GetHostName(); string[] allmachines; { string[] sl = dc.Slaves.SlaveList.Split(';'); List <string> aml = new List <string>(sl.Length + 1); aml.Add(masterhost); foreach (string slave in sl) { if (0 != string.Compare(IPAddressUtil.GetName(slave), IPAddressUtil.GetName(masterhost), StringComparison.OrdinalIgnoreCase)) { aml.Add(slave); } } allmachines = aml.ToArray(); } { Console.WriteLine("Ensure cluster is perfectly healthy..."); EnsurePerfectQizmtHealtha(); // Run a job... string exec_md5; { // Generate some data to operate on. Exec.Shell("Qizmt gen data{AE7E8F7E-AE48-40e7-B5B2-7E07E39B46F9} " + 1048576.ToString()); string exectempdir = @"\\" + System.Net.Dns.GetHostName() + @"\C$\temp\qizmt\regression_test_Deploy-" + Guid.NewGuid().ToString(); if (!System.IO.Directory.Exists(exectempdir)) { System.IO.Directory.CreateDirectory(exectempdir); } string execfp = exectempdir + @"\exec{07E2B469-80F9-4776-908F-E504A906E3B6}"; System.IO.File.WriteAllText(execfp, (@"<?xml version=`1.0` encoding=`utf-8`?> <SourceCode> <Jobs> <Job Name=`exec{07E2B469-80F9-4776-908F-E504A906E3B6}`> <IOSettings> <JobType>mapreduce</JobType> <KeyLength>100</KeyLength> <DFSInput>dfs://data{*}</DFSInput> <DFSOutput>dfs://output{A785E7D1-9017-45fe-9E07-57695192A5DC}</DFSOutput> <KeyMajor>8</KeyMajor> <OutputMethod>sorted</OutputMethod> </IOSettings> <MapReduce> <Map> <![CDATA[ public virtual void Map(ByteSlice line, MapOutput output) { output.Add(line, ByteSlice.Prepare()); } ]]> </Map> <Reduce> <![CDATA[ public override void Reduce(ByteSlice key, ByteSliceList values, ReduceOutput output) { while(values.MoveNext()) { output.Add(key); } } ]]> </Reduce> </MapReduce> </Job> </Jobs> </SourceCode> ").Replace('`', '"')); Exec.Shell("Qizmt importdir " + exectempdir); try { System.IO.File.Delete(execfp); System.IO.Directory.Delete(exectempdir); } catch { } Exec.Shell("Qizmt exec exec{07E2B469-80F9-4776-908F-E504A906E3B6}"); exec_md5 = DfsSum("md5", "output{A785E7D1-9017-45fe-9E07-57695192A5DC}"); Exec.Shell("Qizmt del output{A785E7D1-9017-45fe-9E07-57695192A5DC}"); } try { const string TEMP_DLLS_PATTERN = "temp_????????-????-????-????-????????????.dll"; // Prepare to detect leaked DLLs: string lmachine = allmachines[allmachines.Length - 1]; string[] dummyleaknames = new string[] { TEMP_DLLS_PATTERN.Replace('?', 'x'), //"dummy1D48A66FD2EF41e3B6266C06D320A17D.dll", //"dummy1D48A66FD2EF41e3B6266C06D320A17D.exe" }; try { // Delete leaked DLLs on lmachine... foreach (string fn in System.IO.Directory.GetFiles(Surrogate.NetworkPathForHost(lmachine), TEMP_DLLS_PATTERN)) { System.IO.File.Delete(fn); } // Delete planted files from lmachine... //foreach (string host in allmachines) { string host = lmachine; string netdir = Surrogate.NetworkPathForHost(host); foreach (string dummyleakname in dummyleaknames) { try { System.IO.File.Delete(netdir + @"\" + dummyleakname); } catch { } } } // Plant some new leaked files on surrogate... 
foreach (string dummyleakname in dummyleaknames) { System.IO.File.WriteAllText(masterdir + @"\" + dummyleakname, "Dummy file for deploy leak detector" + Environment.NewLine); } } catch (Exception e) { lmachine = null; throw new Exception("Failed to prepare for deploy leak detector", e); } { Console.WriteLine("Deleting critical files across cluster to ensure deploy will succeed..."); int nfailed = 0; string failreason = ""; //foreach (string host in allmachines) if (allmachines.Length > 1) // Important; can't delete slave.exe on surrogate or it can't deploy it. { string host = allmachines[allmachines.Length - 1]; try { string netdir = Surrogate.NetworkPathForHost(host); System.IO.File.Delete(netdir + @"\MySpace.DataMining.DistributedObjects.DistributedObjectsSlave.exe"); } catch (Exception fe) { nfailed++; failreason = fe.ToString(); } } if (nfailed > 0) { Console.WriteLine("Warning: {0} files failed to be deleted; {0}", failreason); } } try { Console.WriteLine("Deploying..."); Exec.Shell("aelight deploy"); System.Threading.Thread.Sleep(1000 * 5); // Wait a bit for the services to come back up. } catch (Exception e) { Console.Error.WriteLine(e.ToString()); Console.Error.WriteLine(" WARNING: cluster may be in a bad state; may need to reinstall"); throw; } Console.WriteLine("Ensuring deploy succeeded..."); Console.WriteLine("(Note: if this hangs indefinitely, deploy failed and need to reinstall)"); if (lmachine != null) { //foreach (string host in allmachines) { string host = lmachine; string netdir = Surrogate.NetworkPathForHost(host); foreach (string dummyleakname in dummyleaknames) { { string fp = netdir + @"\" + dummyleakname; if (System.IO.File.Exists(fp)) { throw new Exception("Deployed dummy/leaked file: " + fp); } } } } { string[] leaks = System.IO.Directory.GetFiles(Surrogate.NetworkPathForHost(lmachine), TEMP_DLLS_PATTERN); if (leaks.Length > 0) { throw new Exception("Deployed leaked dll: " + leaks[0] + " (" + leaks.Length.ToString() + " in total)"); } } // Delete the planted dummy files from surrogate! foreach (string dummyleakname in dummyleaknames) { System.IO.File.Delete(masterdir + @"\" + dummyleakname); } } Console.WriteLine("Ensure cluster is perfectly healthy..."); EnsurePerfectQizmtHealtha(); // Re-run job, confirm good... { Exec.Shell("Qizmt exec exec{07E2B469-80F9-4776-908F-E504A906E3B6}"); string new_exec_md5 = DfsSum("md5", "output{A785E7D1-9017-45fe-9E07-57695192A5DC}"); Exec.Shell("Qizmt del output{A785E7D1-9017-45fe-9E07-57695192A5DC}"); if (new_exec_md5 != exec_md5) { throw new Exception("Output files from before and after deploy do not match"); } } } finally { try { Console.WriteLine("Cleaning temporary test data..."); Exec.Shell("Qizmt del exec{07E2B469-80F9-4776-908F-E504A906E3B6}"); Exec.Shell("Qizmt del data{AE7E8F7E-AE48-40e7-B5B2-7E07E39B46F9}"); Exec.Shell("Qizmt del output{A785E7D1-9017-45fe-9E07-57695192A5DC}"); } catch { } } Console.WriteLine("[PASSED] - " + string.Join(" ", args)); } }
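// Regression test: raises the replication factor to 2 on generated data, then attempts an
// impossibly large replication factor and verifies the command fails with an exception.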
static void ReplicationChecks(string[] args) { if (args.Length <= 1 || !System.IO.File.Exists(args[1])) { throw new Exception("Expected path to DFS.xml"); } string dfsxmlpath = args[1]; string dfsxmlpathbackup = dfsxmlpath + "$" + Guid.NewGuid().ToString(); string masterdir; { System.IO.FileInfo fi = new System.IO.FileInfo(dfsxmlpath); masterdir = fi.DirectoryName; // Directory's full path. } Surrogate.SetNewMetaLocation(masterdir); dfs olddfs = dfs.ReadDfsConfig_unlocked(dfsxmlpath); string masterhost = System.Net.Dns.GetHostName(); string[] allmachines; { string[] sl = olddfs.Slaves.SlaveList.Split(';'); List <string> aml = new List <string>(sl.Length + 1); aml.Add(masterhost); foreach (string slave in sl) { if (0 != string.Compare(IPAddressUtil.GetName(slave), IPAddressUtil.GetName(masterhost), StringComparison.OrdinalIgnoreCase)) { aml.Add(slave); } } allmachines = aml.ToArray(); } Console.WriteLine("Backing up DFS.xml to: {0} ...", dfsxmlpathbackup); try { System.IO.File.Delete(dfsxmlpathbackup); } catch { } System.IO.File.Move(dfsxmlpath, dfsxmlpathbackup); try { { Console.WriteLine("Formatting DFS for test..."); Exec.Shell("Qizmt @format Machines=" + string.Join(",", allmachines)); } { // Test logic: { long XBYTES = (long)4194304 * (long)allmachines.Length; Console.WriteLine("Generating data..."); Console.Write(" "); Exec.Shell("Qizmt gen data{476D6FE8-D645-41cc-83A1-3AB5E2DE23E7} " + (XBYTES / 4).ToString()); Console.Write("25%"); Exec.Shell("Qizmt gen data{61136275-16EC-4ff9-84CE-ACC967550181} " + (XBYTES / 4).ToString()); Console.Write("..50%"); Exec.Shell("Qizmt gen data{C76F6C06-EFC8-4808-B214-DB4D167171EB} " + (XBYTES / 2).ToString()); Console.Write("..100%"); Console.WriteLine(); } Console.WriteLine("Ensure the cluster is perfectly healthy..."); EnsurePerfectQizmtHealtha(); Console.WriteLine("Raising replication factor to 2..."); Exec.Shell("Qizmt replicationupdate 2"); { Console.WriteLine("Raising replication factor too high (ensure fail)..."); bool ok = false; System.Threading.Thread thd = new System.Threading.Thread( new System.Threading.ThreadStart( delegate { try { Exec.Shell("Qizmt replicationupdate 999999999"); } catch (Exception e) { ok = true; Console.WriteLine("Got exception as expected: {0}", e.Message); } })); thd.Start(); if (!thd.Join(1000 * 10)) { thd.Abort(); } if (!ok) { throw new Exception("Test failed: expected exception"); } } } Console.WriteLine("[PASSED] - " + string.Join(" ", args)); } finally { Console.WriteLine("Restoring DFS.xml backup..."); // Note: these are safe; the try/finally only wraps the new dfs. try { Exec.Shell("Qizmt del *"); } catch { } try { // Delete temp dfs.xml, it's being replaced with the good one. System.IO.File.Delete(dfsxmlpath); } catch { } System.IO.File.Move(dfsxmlpathbackup, dfsxmlpath); } }