// ExecOneRemote runs a job's Remote entry point across the cluster: one process per
// IOSettings/DFS_IO entry, distributed over the healthy slave machines.
public static void ExecOneRemote(SourceCode.Job cfgj, string[] ExecArgs, bool verbose, bool verbosereplication)
{
    if (verbose)
    {
        Console.WriteLine("[{0}] [Remote: {1}]", System.DateTime.Now.ToString(), cfgj.NarrativeName);
    }
    string logname = Surrogate.SafeTextPath(cfgj.NarrativeName) + "_" + Guid.NewGuid().ToString() + ".j" + sjid + "_log.txt";
    //System.Threading.Thread.Sleep(8000);
    /*if (cfgj.IOSettings.DFS_IOs == null || cfgj.IOSettings.DFS_IOs.Length == 0)
    {
        Console.Error.WriteLine("One or more IOSettings/DFS_IO needed in configuration for 'remote'");
        return;
    }*/
    // Could provide BlockID here, which is just the n-th DFS_IO entry.
    //cfgj.Remote
    dfs dc = LoadDfsConfig();
    string[] slaves = dc.Slaves.SlaveList.Split(',', ';');
    if (dc.Slaves.SlaveList.Length == 0 || slaves.Length < 1)
    {
        throw new Exception("SlaveList expected in " + dfs.DFSXMLNAME);
    }
    if (dc.Replication > 1)
    {
        string[] slavesbefore = slaves;
        slaves = ExcludeUnhealthySlaveMachines(slaves, true).ToArray();
        if (slavesbefore.Length - slaves.Length >= dc.Replication)
        {
            throw new Exception("Not enough healthy machines to run job (hit replication count)");
        }
    }
    if (cfgj.IOSettings.DFS_IO_Multis != null)
    {
        cfgj.ExpandDFSIOMultis(slaves.Length, MySpace.DataMining.DistributedObjects.MemoryUtils.NumberOfProcessors);
    }
    Dictionary<string, int> slaveIDs = new Dictionary<string, int>();
    for (int si = 0; si < slaves.Length; si++)
    {
        slaveIDs.Add(slaves[si].ToUpper(), si);
    }
    bool aborting = false;
    try
    {
        List<RemoteBlockInfo> blocks = new List<RemoteBlockInfo>(cfgj.IOSettings.DFS_IOs.Length);
        if (verbose)
        {
            Console.WriteLine("{0} processes on {1} machines:", cfgj.IOSettings.DFS_IOs.Length, slaves.Length);
        }
        List<string> outputdfsdirs = new List<string>(slaves.Length);
        {
            for (int i = 0; i < slaves.Length; i++)
            {
                try
                {
                    outputdfsdirs.Add(NetworkPathForHost(slaves[i]));
                }
                catch (Exception e)
                {
                    Console.Error.WriteLine(" {0}", e.Message);
                }
            }
        }
        string slaveconfigxml = "";
        {
            // Serialize the DFS config and extract just the <slave> element for the slaves.
            System.Xml.XmlDocument pdoc = new System.Xml.XmlDocument();
            {
                System.IO.MemoryStream ms = new System.IO.MemoryStream();
                System.Xml.Serialization.XmlSerializer xs = new System.Xml.Serialization.XmlSerializer(typeof(dfs));
                xs.Serialize(ms, dc);
                ms.Seek(0, System.IO.SeekOrigin.Begin);
                pdoc.Load(ms);
            }
            string xml = pdoc.DocumentElement.SelectSingleNode("./slave").OuterXml;
            //System.Threading.Thread.Sleep(8000);
            slaveconfigxml = xml;
        }
        {
            // Temporary:
            for (int si = 0; si < slaves.Length; si++)
            {
                System.Threading.Mutex m = new System.Threading.Mutex(false, "AEL_SC_" + slaves[si]);
                try
                {
                    m.WaitOne();
                }
                catch (System.Threading.AbandonedMutexException)
                {
                }
                try
                {
                    System.IO.File.WriteAllText(NetworkPathForHost(slaves[si]) + @"\slaveconfig.j" + sjid + ".xml", slaveconfigxml);
                }
                catch
                {
                }
                finally
                {
                    m.ReleaseMutex();
                    m.Close();
                }
            }
        }
        int nextslave = (new Random(DateTime.Now.Millisecond / 2 + System.Diagnostics.Process.GetCurrentProcess().Id / 2)).Next() % slaves.Length;
        int hosttypes = 0;
        List<int> outputrecordlengths = new List<int>();
        List<int> inputrecordlengths = new List<int>();
        for (int BlockID = 0; BlockID < cfgj.IOSettings.DFS_IOs.Length; BlockID++)
        {
            int slaveHostID = 0;
            RemoteBlockInfo bi = new RemoteBlockInfo();
            bi.sampledist = dc.DataNodeBaseSize / dc.DataNodeSamples;
            bi.BlockID = BlockID;
            bi.blockcount = cfgj.IOSettings.DFS_IOs.Length;
            if (string.IsNullOrEmpty(cfgj.IOSettings.DFS_IOs[BlockID].Host))
            {
                if (0 != hosttypes && 1 != hosttypes)
                {
                    throw new Exception("DFS_IO/Host tag must be specified for all or none");
                }
                hosttypes = 1;
                bi.SlaveHost = slaves[nextslave];
                slaveHostID = nextslave;
                bi.explicithost = false;
            }
            else
            {
                if (0 != hosttypes && 2 != hosttypes)
                {
                    throw new Exception("DFS_IO/Host tag must be specified for all or none");
                }
                hosttypes = 2;
                bi.SlaveHost = cfgj.IOSettings.DFS_IOs[BlockID].Host;
                slaveHostID = slaveIDs[bi.SlaveHost.ToUpper()];
                bi.explicithost = true;
            }
            bi.ExecArgs = ExecArgs;
            if (++nextslave >= slaves.Length)
            {
                nextslave = 0;
            }
            bi.logname = logname;
            bi.outputdfsdirs = outputdfsdirs;
            bi.slaves = slaves;
            bi.baseoutputfilesize = dc.DataNodeBaseSize;
            bi.cfgj = cfgj;
            bi.DFSWriter = cfgj.IOSettings.DFS_IOs[BlockID].DFSWriter.Trim();
            bi.Meta = cfgj.IOSettings.DFS_IOs[BlockID].Meta;
            List<string> dfswriters = new List<string>();
            if (bi.DFSWriter.Length > 0)
            {
                string[] writers = bi.DFSWriter.Split(';');
                for (int wi = 0; wi < writers.Length; wi++)
                {
                    string thiswriter = writers[wi].Trim();
                    if (thiswriter.Length == 0)
                    {
                        continue;
                    }
                    int ic = thiswriter.IndexOf('@');
                    int reclen = -1;
                    if (-1 != ic)
                    {
                        try
                        {
                            reclen = Surrogate.GetRecordSize(thiswriter.Substring(ic + 1));
                            thiswriter = thiswriter.Substring(0, ic);
                        }
                        catch (FormatException e)
                        {
                            Console.Error.WriteLine("Error: remote output record length error: {0} ({1})", thiswriter, e.Message);
                            SetFailure();
                            return;
                        }
                        catch (OverflowException e)
                        {
                            Console.Error.WriteLine("Error: remote output record length error: {0} ({1})", thiswriter, e.Message);
                            SetFailure();
                            return;
                        }
                    }
                    string outfn = thiswriter;
                    if (outfn.StartsWith(@"dfs://", StringComparison.OrdinalIgnoreCase))
                    {
                        outfn = outfn.Substring(6);
                    }
                    string reason = "";
                    if (dfs.IsBadFilename(outfn, out reason))
                    {
                        Console.Error.WriteLine("Invalid output file: {0}", reason);
                        return;
                    }
                    if (null != DfsFindAny(dc, outfn))
                    {
                        Console.Error.WriteLine("Error: output file already exists in DFS: {0}", outfn);
                        return;
                    }
                    dfswriters.Add(thiswriter);
                    outputrecordlengths.Add(reclen);
                }
            }
            else
            {
                dfswriters.Add("");
                outputrecordlengths.Add(-1);
            }
            bi.DFSWriters = dfswriters;
            bi.verbose = verbose;
            bi.rem = new MySpace.DataMining.DistributedObjects5.Remote(cfgj.NarrativeName + "_remote");
            bi.rem.CookRetries = dc.slave.CookRetries;
            bi.rem.CookTimeout = dc.slave.CookTimeout;
            bi.rem.DfsSampleDistance = bi.sampledist;
            bi.rem.CompressFileOutput = dc.slave.CompressDfsChunks;
            bi.rem.LocalCompile = true;
            bi.rem.OutputStartingPoint = slaveHostID;
            bi.rem.CompilerOptions = cfgj.IOSettings.CompilerOptions;
            bi.rem.CompilerVersion = cfgj.IOSettings.CompilerVersion;
            if (cfgj.AssemblyReferencesCount > 0)
            {
                cfgj.AddAssemblyReferences(bi.rem.CompilerAssemblyReferences, Surrogate.NetworkPathForHost(dc.Slaves.GetFirstSlave()));
            }
            if (cfgj.OpenCVExtension != null)
            {
                bi.rem.AddOpenCVExtension();
            }
            if (cfgj.MemCache != null)
            {
                bi.rem.AddMemCacheExtension();
            }
            if (cfgj.Unsafe != null)
            {
                bi.rem.AddUnsafe();
            }
            {
                List<dfs.DfsFile.FileNode> nodes = new List<dfs.DfsFile.FileNode>();
                List<string> mapfileswithnodes = null;
                List<int> nodesoffsets = null;
                IList<string> mapfiles = SplitInputPaths(dc, cfgj.IOSettings.DFS_IOs[BlockID].DFSReader);
                if (mapfiles.Count > 0)
                {
                    mapfileswithnodes = new List<string>(mapfiles.Count);
                    nodesoffsets = new List<int>(mapfiles.Count);
                }
                for (int i = 0; i < mapfiles.Count; i++)
                {
                    string dp = mapfiles[i].Trim();
                    int inreclen = -1;
                    if (0 != dp.Length) // Allow empty entry where input isn't wanted.
{ if (dp.StartsWith("dfs://", StringComparison.OrdinalIgnoreCase)) { dp = dp.Substring(6); } { int ic = dp.IndexOf('@'); if (-1 != ic) { try { inreclen = Surrogate.GetRecordSize(dp.Substring(ic + 1)); dp = dp.Substring(0, ic); } catch (FormatException e) { Console.Error.WriteLine("Error: remote input record length error: {0} ({1})", dp, e.Message); SetFailure(); return; } catch (OverflowException e) { Console.Error.WriteLine("Error: remote input record length error: {0} ({1})", dp, e.Message); SetFailure(); return; } } } dfs.DfsFile df; if (inreclen > 0 || inreclen == -2) { df = DfsFind(dc, dp, DfsFileTypes.BINARY_RECT); if (null != df && inreclen != df.RecordLength) { Console.Error.WriteLine("Error: remote input file does not have expected record length of {0}: {1}@{2}", inreclen, dp, df.RecordLength); SetFailure(); return; } } else { df = DfsFind(dc, dp); } if (null == df) { //throw new Exception("Remote input file not found in DFS: " + dp); Console.Error.WriteLine("Remote input file not found in DFS: {0}", dp); return; } if (df.Nodes.Count > 0) { mapfileswithnodes.Add(dp); nodesoffsets.Add(nodes.Count); inputrecordlengths.Add(inreclen); nodes.AddRange(df.Nodes); } } } bi.dfsinputpaths = new List <string>(nodes.Count); //MapNodesToNetworkPaths(nodes, bi.dfsinputpaths); dfs.MapNodesToNetworkStarPaths(nodes, bi.dfsinputpaths); bi.dfsinputfilenames = mapfileswithnodes; bi.dfsinputnodesoffsets = nodesoffsets; } blocks.Add(bi); bi.thread = new System.Threading.Thread(new System.Threading.ThreadStart(bi.threadproc)); bi.thread.Name = "RemoteJobBlock" + bi.BlockID; } MySpace.DataMining.DistributedObjects.StaticGlobals.DSpace_InputRecordLength = inputrecordlengths.Count > 0 ? inputrecordlengths[0] : -1; MySpace.DataMining.DistributedObjects.StaticGlobals.DSpace_OutputRecordLength = outputrecordlengths.Count > 0 ? outputrecordlengths[0] : -1; // Need to start threads separately due to StaticGlobals being updated. for (int BlockID = 0; BlockID < cfgj.IOSettings.DFS_IOs.Length; BlockID++) { RemoteBlockInfo bi = blocks[BlockID]; bi.rem.InputRecordLength = MySpace.DataMining.DistributedObjects.StaticGlobals.DSpace_InputRecordLength; bi.rem.InputRecordLengths = inputrecordlengths; bi.rem.OutputRecordLength = MySpace.DataMining.DistributedObjects.StaticGlobals.DSpace_OutputRecordLength; bi.rem.OutputRecordLengths = outputrecordlengths; AELight_StartTraceThread(bi.thread); } for (int BlockID = 0; BlockID < blocks.Count; BlockID++) { AELight_JoinTraceThread(blocks[BlockID].thread); blocks[BlockID].rem.Close(); if (blocks[BlockID].blockfail) { Console.Error.WriteLine("BlockID {0} on host '{1}' did not complete successfully", BlockID, (blocks[BlockID].SlaveHost != null) ? blocks[BlockID].SlaveHost : "<null>"); continue; } } List <string> dfsnames = new List <string>(); List <string> dfsnamesreplicating = new List <string>(); // Reload DFS config to make sure changes since starting get rolled in, and make sure the output file wasn't created in that time... using (LockDfsMutex()) // Needed: change between load & save should be atomic. 
        {
            dc = LoadDfsConfig();
            for (int BlockID = 0; BlockID < blocks.Count; BlockID++)
            {
                if (blocks[BlockID].blockfail)
                {
                    continue;
                }
                {
                    bool anyoutput = false;
                    bool nonemptyoutputpath = false;
                    for (int oi = 0; oi < blocks[BlockID].DFSWriters.Count; oi++)
                    {
                        string dfswriter = blocks[BlockID].DFSWriters[oi];
                        if (string.IsNullOrEmpty(dfswriter))
                        {
                            if (blocks[BlockID].outputdfsnodeses[oi].Count > 0)
                            {
                                Console.Error.WriteLine("Output data detected with no DFSWriter specified");
                            }
                        }
                        else
                        {
                            {
                                if (null != DfsFind(dc, dfswriter))
                                {
                                    Console.Error.WriteLine("Error: output file was created during job: {0}", dfswriter);
                                    continue;
                                }
                                string dfspath = dfswriter;
                                {
                                    nonemptyoutputpath = true;
                                    dfs.DfsFile df = new dfs.DfsFile();
                                    if (blocks[BlockID].rem.OutputRecordLengths[oi] > 0)
                                    {
                                        df.XFileType = DfsFileTypes.BINARY_RECT + "@" + blocks[BlockID].rem.OutputRecordLengths[oi].ToString();
                                    }
                                    else if (blocks[BlockID].rem.OutputRecordLengths[oi] == -2)
                                    {
                                        df.XFileType = DfsFileTypes.BINARY_RECT + "@?";
                                    }
                                    df.Nodes = new List<dfs.DfsFile.FileNode>();
                                    df.Size = -1; // Preset
                                    if (dfspath.StartsWith("dfs://", StringComparison.OrdinalIgnoreCase))
                                    {
                                        dfspath = dfspath.Substring(6);
                                    }
                                    string dfspathreplicating = ".$" + dfspath + ".$replicating-" + Guid.NewGuid().ToString();
                                    if (null != dc.FindAny(dfspathreplicating))
                                    {
                                        Console.Error.WriteLine("Error: file exists: file put into DFS from another location during job: " + dfspathreplicating);
                                        SetFailure();
                                        return;
                                    }
                                    dfsnames.Add(dfspath);
                                    dfsnamesreplicating.Add(dfspathreplicating);
                                    df.Name = dfspathreplicating;
                                    bool anybad = false;
                                    long totalsize = 0;
                                    {
                                        int i = BlockID;
                                        for (int j = 0; j < blocks[i].outputdfsnodeses[oi].Count; j++)
                                        {
                                            dfs.DfsFile.FileNode fn = new dfs.DfsFile.FileNode();
                                            fn.Host = blocks[i].slaves[(blocks[i].rem.OutputStartingPoint + j) % blocks[i].slaves.Count];
                                            fn.Name = blocks[i].outputdfsnodeses[oi][j];
                                            df.Nodes.Add(fn);
                                            fn.Length = -1; // Preset
                                            fn.Position = -1; // Preset
                                            if (anybad)
                                            {
                                                continue;
                                            }
                                            fn.Length = blocks[i].outputsizeses[oi][j];
                                            fn.Position = totalsize; // Position must be set before totalsize updated!
                                            if (blocks[i].outputdfsnodeses[oi].Count != blocks[i].outputsizeses[oi].Count)
                                            {
                                                anybad = true;
                                                continue;
                                            }
                                            totalsize += blocks[i].outputsizeses[oi][j];
                                        }
                                    }
                                    if (!anybad)
                                    {
                                        df.Size = totalsize;
                                    }
                                    if (totalsize != 0)
                                    {
                                        anyoutput = true;
                                    }
                                    // Always add the file to DFS, even if blank!
                                    dc.Files.Add(df);
                                }
                            }
                        }
                    }
                    if (!anyoutput && verbose && nonemptyoutputpath)
                    {
                        Console.Write(" (no DFS output) ");
                        ConsoleFlush();
                    }
                }
            }
            UpdateDfsXml(dc);
        }
        ReplicationPhase(verbosereplication, blocks.Count, slaves, dfsnamesreplicating);
        using (LockDfsMutex()) // Needed: change between load & save should be atomic.
        {
            dc = LoadDfsConfig(); // Reload in case of change or user modifications.
            for (int nfile = 0; nfile < dfsnames.Count; nfile++)
            {
                string dfspath = dfsnames[nfile];
                string dfspathreplicating = dfsnamesreplicating[nfile];
                {
                    dfs.DfsFile dfu = dc.FindAny(dfspathreplicating);
                    if (null != dfu)
                    {
                        if (null != DfsFindAny(dc, dfspath))
                        {
                            Console.Error.WriteLine("Error: file exists: file put into DFS from another location during job");
                            SetFailure();
                            continue;
                        }
                        dfu.Name = dfspath;
                    }
                }
            }
            UpdateDfsXml(dc);
        }
        if (verbose)
        {
            Console.WriteLine(); // Line after output chars.
        }
    }
    catch (System.Threading.ThreadAbortException)
    {
        aborting = true;
    }
    finally
    {
        {
            // Clean up the per-slave config files written at job start.
            for (int si = 0; si < slaves.Length; si++)
            {
                System.Threading.Mutex m = new System.Threading.Mutex(false, "AEL_SC_" + slaves[si]);
                try
                {
                    m.WaitOne();
                }
                catch (System.Threading.AbandonedMutexException)
                {
                }
                try
                {
                    System.IO.File.Delete(NetworkPathForHost(slaves[si]) + @"\slaveconfig.j" + sjid + ".xml");
                }
                catch
                {
                }
                finally
                {
                    m.ReleaseMutex();
                    m.Close();
                }
            }
        }
        if (!aborting)
        {
            CheckUserLogs(slaves, logname);
        }
    }
    if (verbose)
    {
        Console.WriteLine();
        Console.WriteLine("[{0}] Done", System.DateTime.Now.ToString());
        for (int i = 0; i < cfgj.IOSettings.DFS_IOs.Length; i++)
        {
            Console.WriteLine("Output: {0}", cfgj.IOSettings.DFS_IOs[i].DFSWriter);
        }
    }
}
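
// ExecOneLocal runs a job's Local entry point as a single process (BlockID 0) against
// the machine named by IOSettings/LocalHost, falling back to the first slave in the
// DFS configuration. The generated driver source below defines the DSpace_*/Qizmt_*
// constants and logging helpers that the user's Local code compiles against.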
public static void ExecOneLocal(SourceCode.Job cfgj, string[] ExecArgs, bool verbose)
{
    if (verbose)
    {
        Console.WriteLine("[{0}] [Local: {1}]", System.DateTime.Now.ToString(), cfgj.NarrativeName);
    }
    int BlockID = 0;
    string SlaveIP = null;
    string logname = Surrogate.SafeTextPath(cfgj.NarrativeName) + "_" + Guid.NewGuid().ToString() + ".j" + sjid + "_log.txt";
    bool aborting = false;
    try
    {
        dfs dc = LoadDfsConfig();
        string firstslave = dc.Slaves.GetFirstSlave();
        string SlaveHost = cfgj.IOSettings.LocalHost;
        if (SlaveHost == null || SlaveHost.Length == 0)
        {
            SlaveHost = firstslave;
        }
        SlaveIP = IPAddressUtil.GetIPv4Address(SlaveHost);
        MySpace.DataMining.DistributedObjects5.Remote rem = new MySpace.DataMining.DistributedObjects5.Remote(cfgj.NarrativeName + "_local");
        rem.OutputStartingPoint = BlockID;
        rem.LocalCompile = true;
        rem.CompilerOptions = cfgj.IOSettings.CompilerOptions;
        rem.CompilerVersion = cfgj.IOSettings.CompilerVersion;
        if (cfgj.OpenCVExtension != null)
        {
            rem.AddOpenCVExtension();
        }
        if (cfgj.MemCache != null)
        {
            rem.AddMemCacheExtension();
        }
        if (cfgj.Unsafe != null)
        {
            rem.AddUnsafe();
        }
        if (cfgj.AssemblyReferencesCount > 0)
        {
            cfgj.AddAssemblyReferences(rem.CompilerAssemblyReferences, Surrogate.NetworkPathForHost(firstslave));
        }
        rem.SetJID(jid, CurrentJobFileName + " Local: " + cfgj.NarrativeName);
        rem.AddBlock(SlaveHost + @"|" + (cfgj.ForceStandardError != null ? "&" : "") + logname + @"|slaveid=0");
        rem.Open();
        // Driver source prepended to the user's Local code; backticks stand in for
        // double-quotes and are swapped by the Replace('`', '"') at the end.
        string codectx = (@"
    public const int DSpace_BlockID = " + BlockID.ToString() + @";
    public const int DSpace_ProcessID = DSpace_BlockID;
    public const int Qizmt_ProcessID = DSpace_ProcessID;
    public const int DSpace_BlocksTotalCount = 1;
    public const int DSpace_ProcessCount = DSpace_BlocksTotalCount;
    public const int Qizmt_ProcessCount = DSpace_ProcessCount;
    public const string DSpace_SlaveHost = `" + SlaveHost + @"`;
    public const string DSpace_MachineHost = DSpace_SlaveHost;
    public const string Qizmt_MachineHost = DSpace_MachineHost;
    public const string DSpace_SlaveIP = `" + SlaveIP + @"`;
    public const string DSpace_MachineIP = DSpace_SlaveIP;
    public const string Qizmt_MachineIP = DSpace_MachineIP;
    public static readonly string[] DSpace_ExecArgs = new string[] { " + ExecArgsCode(ExecArgs) + @" };
    public static readonly string[] Qizmt_ExecArgs = DSpace_ExecArgs;
    public const string DSpace_ExecDir = @`" + System.Environment.CurrentDirectory + @"`;
    public const string Qizmt_ExecDir = DSpace_ExecDir;

    static string Shell(string line, bool suppresserrors)
    {
        return MySpace.DataMining.DistributedObjects.Exec.Shell(line, suppresserrors);
    }

    static string Shell(string line)
    {
        return MySpace.DataMining.DistributedObjects.Exec.Shell(line, false);
    }

    const string _userlogname = `" + logname + @"`;
    static System.Threading.Mutex _logmutex = new System.Threading.Mutex(false, `distobjlog`);
    private static int userlogsremain = " + AELight.maxuserlogs.ToString() + @";

    public static void Qizmt_Log(string line) { DSpace_Log(line); }
    public static void DSpace_Log(string line)
    {
        if(--userlogsremain < 0)
        {
            return;
        }
        try
        {
            _logmutex.WaitOne();
        }
        catch (System.Threading.AbandonedMutexException)
        {
        }
        try
        {
            using (System.IO.StreamWriter fstm = System.IO.File.AppendText(_userlogname))
            {
                fstm.WriteLine(`{0}`, line);
            }
        }
        finally
        {
            _logmutex.ReleaseMutex();
        }
    }

    public static void Qizmt_LogResult(string line, bool passed) { DSpace_LogResult(line, passed); }
    public static void DSpace_LogResult(string name, bool passed)
    {
        if(passed)
        {
            DSpace_Log(`[\u00012PASSED\u00010] - ` + name);
        }
        else
        {
            DSpace_Log(`[\u00014FAILED\u00010] - ` + name);
        }
    }
").Replace('`', '"') + CommonDynamicCsCode;
        rem.LocalExec(codectx + cfgj.Local, cfgj.Usings);
        rem.Close();
        if (verbose)
        {
            Console.Write('*');
            ConsoleFlush();
        }
    }
    catch (System.Threading.ThreadAbortException)
    {
        aborting = true;
    }
    finally
    {
        if (!aborting)
        {
            CheckUserLogs(new string[] { SlaveIP }, logname);
        }
    }
    if (verbose)
    {
        Console.WriteLine();
        Console.WriteLine("[{0}] Done", System.DateTime.Now.ToString());
    }
}
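
// MemCacheCommand dispatches the "memcache" sub-commands: create, delete (del/rm),
// flush (commit), release (rollback), load, and info (information). Arguments are
// key=value pairs parsed by EachArgument. Illustrative invocations (the values are
// hypothetical; segment= is parsed by ParseCapacity and must end up a multiple of
// the row length):
//   memcache create name=mc1 schema=<schema> segment=67108864
//   memcache flush name=mc1
//   memcache rollback name=mc1 -f
//   memcache info name=mc1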
public static void MemCacheCommand(string[] args)
{
    if (args.Length < 1)
    {
        Console.Error.WriteLine("Expected memcache sub-command");
        SetFailure();
        return;
    }
    string act = args[0].ToLower();
    switch (act)
    {
        case "create":
            {
                string mcname = null;
                string mcschema = null;
                int mcsegsize = -1;
                EachArgument(args, 1, new Action<string, string>(
                    delegate(string key, string value)
                    {
                        key = key.ToLower();
                        switch (key)
                        {
                            case "name":
                                mcname = value;
                                break;
                            case "schema":
                                mcschema = value;
                                break;
                            case "segment":
                            case "segsize":
                            case "segmentsize":
                                mcsegsize = ParseCapacity(value);
                                break;
                        }
                    }));
                if (string.IsNullOrEmpty(mcname))
                {
                    Console.Error.WriteLine("Expected name=<MemCacheName>");
                    SetFailure();
                    return;
                }
                if (string.IsNullOrEmpty(mcschema))
                {
                    Console.Error.WriteLine("Expected schema=<schema>");
                    SetFailure();
                    return;
                }
                if (-1 != mcsegsize && mcsegsize < 1024)
                {
                    Console.Error.WriteLine("Error: segment={0} is too small", mcsegsize);
                    SetFailure();
                    return;
                }
                if (mcname.StartsWith("dfs://", StringComparison.OrdinalIgnoreCase))
                {
                    mcname = mcname.Substring(6);
                }
                {
                    string reason;
                    if (dfs.IsBadFilename(mcname, out reason))
                    {
                        Console.Error.WriteLine("MemCache cannot be named '{0}': {1}", mcname, reason);
                        SetFailure();
                        return;
                    }
                }
                dfs.DfsFile.ConfigMemCache cmc = new dfs.DfsFile.ConfigMemCache();
                cmc.MetaFileName = "mcm." + Surrogate.SafeTextPath(mcname) + ".mcm";
                cmc.Schema = mcschema;
                List<int> offsets = new List<int>();
                cmc.RowLength = Surrogate.GetRecordInfo(mcschema, out cmc.KeyOffset, out cmc.KeyLength, offsets);
                /*if (0 == cmc.KeyOffset
                    && cmc.RowLength == cmc.KeyLength
                    && -1 == mcschema.IndexOf('['))
                {
                    Console.WriteLine("Note: no key was specified, the key is the entire row");
                }*/
                if (-1 == mcsegsize)
                {
                    // Default segment size: 64 MiB, rounded down to a multiple of the row length.
                    const int defsegsize = 0x400 * 0x400 * 64;
                    cmc.SegmentSize = defsegsize - (defsegsize % cmc.RowLength);
                }
                else
                {
                    if (0 != (mcsegsize % cmc.RowLength))
                    {
                        Console.Error.WriteLine("Segment size must be a multiple of the row length");
                        Console.Error.WriteLine("Nearest segment size is {0} bytes", mcsegsize - (mcsegsize % cmc.RowLength));
                        SetFailure();
                        return;
                    }
                    cmc.SegmentSize = mcsegsize;
                }
                {
                    StringBuilder sbFieldOffsets = new StringBuilder();
                    foreach (int offset in offsets)
                    {
                        if (sbFieldOffsets.Length != 0)
                        {
                            sbFieldOffsets.Append(',');
                        }
                        sbFieldOffsets.Append(offset);
                    }
                    cmc.FieldOffsets = sbFieldOffsets.ToString();
                }
                dfs.DfsFile df = new dfs.DfsFile();
                df.Nodes = new List<dfs.DfsFile.FileNode>(0);
                df.MemCache = cmc;
                df.Name = mcname;
                df.XFileType = DfsFileTypes.BINARY_RECT + "@" + cmc.RowLength;
                df.Size = 0;
                dfs dc = LoadDfsConfig();
                {
                    dfs.DfsFile df2 = dc.FindAny(df.Name);
                    if (null != df2)
                    {
                        Console.Error.WriteLine("Error: a file named '{0}' already exists", df2.Name);
                        SetFailure();
                        return;
                    }
                }
                {
                    string startmeta = GetMemCacheMetaFileHeader(df);
                    string[] slaves = dc.Slaves.SlaveList.Split(';');
                    int totalworkercount = dc.Blocks.TotalCount; // Subprocess_TotalPrime
                    StringBuilder[] permachine = new StringBuilder[slaves.Length];
                    //byte[] HEADER = new byte[4];
                    //MySpace.DataMining.DistributedObjects.Entry.ToBytes(4, HEADER, 0);
                    for (int i = 0; i < permachine.Length; i++)
                    {
                        permachine[i] = new StringBuilder(256);
                    }
                    {
                        int si = -1;
                        for (int workerid = 0; workerid < totalworkercount; workerid++)
                        {
                            if (++si >= slaves.Length)
                            {
                                si = 0;
                            }
                            StringBuilder sb = permachine[si];
                            sb.AppendFormat("##{1}:{0}", Environment.NewLine, workerid);
                            // There are no segments yet, but write a dummy one for bookkeeping.
foreach (char snc in "MemCache_" + mcname + "_empty") { sb.Append(snc); } { sb.Append(' '); /* * StringBuilder newchunkpath = new StringBuilder(100); * newchunkpath.Append(Surrogate.NetworkPathForHost(slaves[si])); * newchunkpath.Append('\\'); * */ // Make up a data node chunk name. foreach (char ch in MakeMemCacheChunkName(mcname, workerid)) { //newchunkpath.Append(ch); sb.Append(ch); } // Write the empty chunk. //System.IO.File.WriteAllBytes(newchunkpath.ToString(), HEADER); } //if (IsLastSegment) // true { sb.Append(' '); string shexlen = string.Format("{0:x8}", 0); // Zero-length! for (int i = 0; i < shexlen.Length; i++) { sb.Append(shexlen[i]); } } sb.AppendLine(); } } for (int si = 0; si < slaves.Length; si++) { string slave = slaves[si]; string fp = Surrogate.NetworkPathForHost(slave) + @"\" + cmc.MetaFileName; using (System.IO.StreamWriter sw = new System.IO.StreamWriter(fp)) { sw.Write(startmeta); sw.Write(permachine[si].ToString()); } } } using (LockDfsMutex()) { dc = LoadDfsConfig(); // Load again in update lock. { dfs.DfsFile df2 = dc.FindAny(df.Name); if (null != df2) { Console.Error.WriteLine("Error: a file named '{0}' already exists", df2.Name); SetFailure(); return; } } dc.Files.Add(df); UpdateDfsXml(dc); } try { // Need to commit it so that the empty chunks are in the metadata for bookkeeping. // This has to be done after actually adding it to dfsxml. MemCacheFlush(mcname); } catch (Exception e) { try { MemCacheDelete(mcname, false); } catch { } Console.Error.WriteLine("Error: unable to commit newly created MemCache '{0}'; because:{1}{2}", mcname, Environment.NewLine, e.ToString()); SetFailure(); return; } Console.WriteLine("Successfully created MemCache '{0}'", mcname); } break; case "delete": case "del": case "rm": { string mcname = null; EachArgument(args, 1, new Action <string, string>( delegate(string key, string value) { key = key.ToLower(); switch (key) { case "name": mcname = value; break; } })); if (string.IsNullOrEmpty(mcname)) { Console.Error.WriteLine("Expected name=<MemCacheName>"); SetFailure(); return; } MemCacheDelete(mcname, true); } break; case "flush": case "commit": { string mcname = null; EachArgument(args, 1, new Action <string, string>( delegate(string key, string value) { key = key.ToLower(); switch (key) { case "name": mcname = value; break; } })); if (string.IsNullOrEmpty(mcname)) { Console.Error.WriteLine("Expected name=<MemCacheName>"); SetFailure(); return; } try { MemCacheFlush(mcname); Console.WriteLine("Done"); } catch (Exception e) { Console.WriteLine(" Commit was unsuccessful because: {0}", e.Message); Console.WriteLine(); Console.Error.WriteLine(e.ToString()); SetFailure(); return; } } break; case "release": case "rollback": { string mcname = null; bool force = false; EachArgument(args, 1, new Action <string, string>( delegate(string key, string value) { key = key.ToLower(); switch (key) { case "name": mcname = value; break; case "-f": force = true; break; } })); if (string.IsNullOrEmpty(mcname)) { Console.Error.WriteLine("Expected name=<MemCacheName>"); SetFailure(); return; } try { MemCacheRelease(mcname, force); Console.WriteLine("Done"); } catch (Exception e) { string exception = e.ToString(); if (-1 != exception.IndexOf("MemCacheWarning")) { Console.WriteLine("Warning: " + exception); } else { Console.Error.WriteLine(exception); string ioe = "InvalidOperationException:"; if (!force && -1 != exception.IndexOf(ioe)) { try { string emsg = exception.Substring(exception.IndexOf(ioe) + ioe.Length) .Split('\r', '\n')[0].Trim(); 
                                System.Threading.Thread.Sleep(100);
                                Console.WriteLine();
                                Console.WriteLine(emsg);
                                System.Threading.Thread.Sleep(100);
                            }
                            catch
                            {
                            }
                            Console.Error.WriteLine("Use rollback -f followed by killall to force rollback");
                        }
                        SetFailure();
                        return;
                    }
                }
            }
            break;

        case "load":
            {
                string mcname = null;
                EachArgument(args, 1, new Action<string, string>(
                    delegate(string key, string value)
                    {
                        key = key.ToLower();
                        switch (key)
                        {
                            case "name":
                                mcname = value;
                                break;
                        }
                    }));
                if (string.IsNullOrEmpty(mcname))
                {
                    Console.Error.WriteLine("Expected name=<MemCacheName>");
                    SetFailure();
                    return;
                }
                MemCacheLoad(mcname);
                Console.WriteLine("Done");
            }
            break;

        case "info":
        case "information":
            {
                string mcname = null;
                EachArgument(args, 1, new Action<string, string>(
                    delegate(string key, string value)
                    {
                        key = key.ToLower();
                        switch (key)
                        {
                            case "name":
                                mcname = value;
                                break;
                        }
                    }));
                if (string.IsNullOrEmpty(mcname))
                {
                    Console.Error.WriteLine("Expected name=<MemCacheName>");
                    SetFailure();
                    return;
                }
                if (mcname.StartsWith("dfs://", StringComparison.OrdinalIgnoreCase))
                {
                    mcname = mcname.Substring(6);
                }
                dfs dc = LoadDfsConfig();
                dfs.DfsFile df = dc.FindAny(mcname);
                if (null == df || df.MemCache == null)
                {
                    Console.Error.WriteLine("Error: '{0}' is not a MemCache", (null == df ? mcname : df.Name));
                    SetFailure();
                    return;
                }
                Console.WriteLine(" MemCache: {0}", df.Name);
                Console.WriteLine(" Segment size: {0} ({1})", GetFriendlyByteSize(df.MemCache.SegmentSize), df.MemCache.SegmentSize);
                Console.WriteLine(" Schema: {0}", df.MemCache.Schema);
                Console.WriteLine(" Row Length: {0}", df.MemCache.RowLength);
                Console.WriteLine(" Key Offset: {0}", df.MemCache.KeyOffset);
                Console.WriteLine(" Key Length: {0}", df.MemCache.KeyLength);
            }
            break;

        default:
            Console.Error.WriteLine("No such sub-command for memcache: {0}", act);
            SetFailure();
            return;
    }
}
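
// ExecOneTest compiles and runs a job's Test entry point entirely in-process:
// the generated driver source is compiled via CompilePluginSource, and the resulting
// IRemote implementation is invoked directly with OnRemote() rather than being shipped
// to a slave. The \u0001-prefixed sequences in DSpace_LogResult appear to be console
// color escapes consumed by the log display.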
public static void ExecOneTest(SourceCode.Job cfgj, string[] ExecArgs, bool verbose)
{
    if (verbose)
    {
        Console.WriteLine("[{0}] [Test: {1}]", System.DateTime.Now.ToString(), cfgj.NarrativeName);
    }
    //string SlaveIP = IPAddressUtil.GetIPv4Address(cfgj.IOSettings.LocalHost);
    int BlockID = 0;
    string logname = Surrogate.SafeTextPath(cfgj.NarrativeName) + "_" + Guid.NewGuid().ToString() + ".j" + sjid + "_log.txt";
    try
    {
        string outputguid = Guid.NewGuid().ToString();
        TestRemote dobj = new TestRemote(cfgj.NarrativeName + "_test");
        string outputfilename = outputguid + ".local";
        dobj.SetJID(jid, CurrentJobFileName + " Test: " + cfgj.NarrativeName);
        dobj.AddBlock(@"127.0.0.1|" + outputfilename + @".log|slaveid=0");
        string codectx = (@"
    public const int DSpace_BlockID = 0;
    public const int DSpace_ProcessID = DSpace_BlockID;
    public const int Qizmt_ProcessID = DSpace_ProcessID;
    public const int DSpace_BlocksTotalCount = 1;
    public const int DSpace_ProcessCount = DSpace_BlocksTotalCount;
    public const int Qizmt_ProcessCount = DSpace_ProcessCount;
    public const string DSpace_SlaveHost = `localhost`;
    public const string DSpace_MachineHost = DSpace_SlaveHost;
    public const string Qizmt_MachineHost = DSpace_MachineHost;
    public const string DSpace_SlaveIP = `127.0.0.1`;
    public const string DSpace_MachineIP = DSpace_SlaveIP;
    public const string Qizmt_MachineIP = DSpace_MachineIP;
    public static readonly string[] DSpace_ExecArgs = new string[] { " + ExecArgsCode(ExecArgs) + @" };
    public static readonly string[] Qizmt_ExecArgs = DSpace_ExecArgs;
    public const string DSpace_ExecDir = @`" + System.Environment.CurrentDirectory + @"`;
    public const string Qizmt_ExecDir = DSpace_ExecDir;

    static string Shell(string line, bool suppresserrors)
    {
        return MySpace.DataMining.DistributedObjects.Exec.Shell(line, suppresserrors);
    }

    static string Shell(string line)
    {
        return MySpace.DataMining.DistributedObjects.Exec.Shell(line, false);
    }

    public static void Qizmt_Log(string line) { DSpace_Log(line); }
    public static void DSpace_Log(string line) { Console.WriteLine(line); }
    public void Qizmt_LogResult(string line, bool passed) { DSpace_LogResult(line, passed); }
    public void DSpace_LogResult(string name, bool passed)
    {
        if(passed)
        {
            DSpace_Log(`[\u00012PASSED\u00010] - ` + name);
        }
        else
        {
            DSpace_Log(`[\u00014FAILED\u00010] - ` + name);
        }
    }
").Replace('`', '"') + MySpace.DataMining.DistributedObjects.CommonCs.CommonDynamicCsCode;
        dobj.LocalExec(codectx + "\r\n" + cfgj.Test, cfgj.Usings, "Test");
        string fullsource = dobj.RemoteSource;
        System.Reflection.Assembly asm = null;
        try
        {
            dobj.CompilePluginSource(fullsource, true, ref asm);
        }
        catch (BadImageFormatException)
        {
        }
        // dobj.RemoteClassName is of type IRemote
        MySpace.DataMining.DistributedObjects.IRemote iface = _LoadPluginInterface<MySpace.DataMining.DistributedObjects.IRemote>(asm, dobj.RemoteClassName);
        iface.OnRemote(); // !
        if (verbose)
        {
            Console.Write('*');
            ConsoleFlush();
        }
    }
    finally
    {
        //CheckUserLogs(new string[] { SlaveIP }, logname);
    }
    if (verbose)
    {
        Console.WriteLine();
        Console.WriteLine("[{0}] Done", System.DateTime.Now.ToString());
    }
}
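
// Minimal sketch of how these entry points relate (hypothetical caller; the actual
// job-type dispatch lives elsewhere in AELight):
//   SourceCode.Job cfgj = ...;   // one job from the parsed jobs file
//   string[] xargs = ...;        // arguments following the job name
//   ExecOneRemote(cfgj, xargs, true, true);  // Remote job: one process per DFS_IO entry
//   ExecOneLocal(cfgj, xargs, true);         // Local job: single process on one host
//   ExecOneTest(cfgj, xargs, true);          // Test job: compiled and run in-process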