public DfsStream(string dfsfile, bool PreserveOrder, bool MachineLock)
{
    if (MachineLock)
    {
        this.Mutex = new System.Threading.Mutex(false, "DfsStream{24A86864-EED6-4680-AB0E-3BDE97262339}");
        this.Mutex.WaitOne();
    }
    ReplicateStartIndex = StaticGlobals.Qizmt_BlockID;
    surrogatedir = Surrogate.NetworkPathForHost(Surrogate.MasterHost);
    dfs dc = dfs.ReadDfsConfig_unlocked(surrogatedir + @"\" + dfs.DFSXMLNAME);
    this.RetryTimeout = dc.slave.CookTimeout;
    this.RetryCount = dc.slave.CookRetries;
    dfs.DfsFile df = dc.FindAny(dfsfile);
    if (null == df)
    {
        throw new System.IO.FileNotFoundException("DFS file '" + dfsfile + "' not found", dfsfile);
    }
    if (0 != string.Compare(DfsFileTypes.NORMAL, df.Type, StringComparison.OrdinalIgnoreCase)
        && 0 != string.Compare(DfsFileTypes.BINARY_RECT, df.Type, StringComparison.OrdinalIgnoreCase))
    {
        throw new InvalidOperationException("DFS file '" + df.Name + "' cannot be opened because file is of type " + df.Type);
    }
    this.reclen = df.RecordLength;
    nodes = df.Nodes.ToArray();
    if (!PreserveOrder)
    {
        // Shuffle the chunk order so concurrent readers don't all hit the same hosts first.
        Random rnd = new Random(unchecked(
            System.Threading.Thread.CurrentThread.ManagedThreadId
            + DateTime.Now.Millisecond * 351
            + ReplicateStartIndex
            + nodes.Length * 6131));
        for (int i = 0; i < nodes.Length; i++)
        {
            int ridx = rnd.Next(0, nodes.Length);
            dfs.DfsFile.FileNode tmpnode = nodes[i];
            nodes[i] = nodes[ridx];
            nodes[ridx] = tmpnode;
        }
    }
}
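// Usage sketch (illustrative, not part of the original source): opening a DFS
// file for reading. The file name "somefile" is hypothetical, and the read
// loop assumes DfsStream derives from System.IO.Stream and exposes Read; that
// is an assumption from the class name, not confirmed by this excerpt.
// PreserveOrder=false shuffles chunk order; MachineLock=true serializes
// access per machine via the named mutex above.
/*
using (DfsStream stm = new DfsStream("somefile", false, true))
{
    byte[] buf = new byte[0x4000];
    int cb;
    while ((cb = stm.Read(buf, 0, buf.Length)) > 0)
    {
        // Consume cb bytes...
    }
}
*/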
public static void MemCacheLoad(string mcname)
{
    if (mcname.StartsWith("dfs://", StringComparison.OrdinalIgnoreCase))
    {
        mcname = mcname.Substring(6);
    }
    dfs dc = LoadDfsConfig();
    dfs.DfsFile df = dc.FindAny(mcname);
    if (null == df || df.MemCache == null)
    {
        Console.Error.WriteLine("Error: '{0}' is not a MemCache", (null == df ? mcname : df.Name));
        SetFailure();
        return;
    }
    _MemCacheLoad(df);
}
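// Usage sketch (illustrative only): MemCacheLoad accepts the cache name with
// or without the dfs:// prefix; "MyMemCache" is a hypothetical name.
/*
MemCacheLoad("dfs://MyMemCache"); // prefix is stripped internally
MemCacheLoad("MyMemCache");       // equivalent
*/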
public static void ExecOneRemote(SourceCode.Job cfgj, string[] ExecArgs, bool verbose, bool verbosereplication)
{
    if (verbose)
    {
        Console.WriteLine("[{0}] [Remote: {2}]", System.DateTime.Now.ToString(), System.DateTime.Now.Millisecond, cfgj.NarrativeName);
    }
    string logname = Surrogate.SafeTextPath(cfgj.NarrativeName) + "_" + Guid.NewGuid().ToString() + ".j" + sjid + "_log.txt";
    //System.Threading.Thread.Sleep(8000);
    /*
    if (cfgj.IOSettings.DFS_IOs == null || cfgj.IOSettings.DFS_IOs.Length == 0)
    {
        Console.Error.WriteLine("One or more IOSettings/DFS_IO needed in configuration for 'remote'");
        return;
    }
    */
    // Could provide BlockID here, which is just the n-th DFS_IO entry.
    //cfgj.Remote
    dfs dc = LoadDfsConfig();
    string[] slaves = dc.Slaves.SlaveList.Split(',', ';');
    if (dc.Slaves.SlaveList.Length == 0 || slaves.Length < 1)
    {
        throw new Exception("SlaveList expected in " + dfs.DFSXMLNAME);
    }
    if (dc.Replication > 1)
    {
        string[] slavesbefore = slaves;
        slaves = ExcludeUnhealthySlaveMachines(slaves, true).ToArray();
        if (slavesbefore.Length - slaves.Length >= dc.Replication)
        {
            throw new Exception("Not enough healthy machines to run job (hit replication count)");
        }
    }
    if (cfgj.IOSettings.DFS_IO_Multis != null)
    {
        cfgj.ExpandDFSIOMultis(slaves.Length, MySpace.DataMining.DistributedObjects.MemoryUtils.NumberOfProcessors);
    }
    Dictionary<string, int> slaveIDs = new Dictionary<string, int>();
    for (int si = 0; si < slaves.Length; si++)
    {
        slaveIDs.Add(slaves[si].ToUpper(), si);
    }
    bool aborting = false;
    try
    {
        List<RemoteBlockInfo> blocks = new List<RemoteBlockInfo>(cfgj.IOSettings.DFS_IOs.Length);
        if (verbose)
        {
            Console.WriteLine("{0} processes on {1} machines:", cfgj.IOSettings.DFS_IOs.Length, slaves.Length);
        }
        List<string> outputdfsdirs = new List<string>(slaves.Length);
        {
            for (int i = 0; i < slaves.Length; i++)
            {
                try
                {
                    outputdfsdirs.Add(NetworkPathForHost(slaves[i]));
                }
                catch (Exception e)
                {
                    Console.Error.WriteLine(" {0}", e.Message);
                }
            }
        }
        string slaveconfigxml = "";
        {
            System.Xml.XmlDocument pdoc = new System.Xml.XmlDocument();
            {
                System.IO.MemoryStream ms = new System.IO.MemoryStream();
                System.Xml.Serialization.XmlSerializer xs = new System.Xml.Serialization.XmlSerializer(typeof(dfs));
                xs.Serialize(ms, dc);
                ms.Seek(0, System.IO.SeekOrigin.Begin);
                pdoc.Load(ms);
            }
            string xml = pdoc.DocumentElement.SelectSingleNode("./slave").OuterXml;
            //System.Threading.Thread.Sleep(8000);
            slaveconfigxml = xml;
        }
        {
            // Temporary: push the slave config XML to every machine, one writer at a time.
            for (int si = 0; si < slaves.Length; si++)
            {
                System.Threading.Mutex m = new System.Threading.Mutex(false, "AEL_SC_" + slaves[si]);
                try
                {
                    m.WaitOne();
                }
                catch (System.Threading.AbandonedMutexException)
                {
                }
                try
                {
                    System.IO.File.WriteAllText(NetworkPathForHost(slaves[si]) + @"\slaveconfig.j" + sjid + ".xml", slaveconfigxml);
                }
                catch
                {
                }
                finally
                {
                    m.ReleaseMutex();
                    m.Close();
                }
            }
        }
        int nextslave = (new Random(DateTime.Now.Millisecond / 2 + System.Diagnostics.Process.GetCurrentProcess().Id / 2)).Next() % slaves.Length;
        int hosttypes = 0;
        List<int> outputrecordlengths = new List<int>();
        List<int> inputrecordlengths = new List<int>();
        for (int BlockID = 0; BlockID < cfgj.IOSettings.DFS_IOs.Length; BlockID++)
        {
            int slaveHostID = 0;
            RemoteBlockInfo bi = new RemoteBlockInfo();
            bi.sampledist = dc.DataNodeBaseSize / dc.DataNodeSamples;
            bi.BlockID = BlockID;
            bi.blockcount = cfgj.IOSettings.DFS_IOs.Length;
            if (string.IsNullOrEmpty(cfgj.IOSettings.DFS_IOs[BlockID].Host))
            {
                if (0 != hosttypes && 1 != hosttypes)
                {
                    throw new Exception("DFS_IO/Host tag must be specified for all or none");
                }
                hosttypes = 1;
                bi.SlaveHost = slaves[nextslave];
                slaveHostID = nextslave;
                bi.explicithost = false;
            }
            else
            {
                if (0 != hosttypes && 2 != hosttypes)
                {
                    throw new Exception("DFS_IO/Host tag must be specified for all or none");
                }
                hosttypes = 2;
                bi.SlaveHost = cfgj.IOSettings.DFS_IOs[BlockID].Host;
                slaveHostID = slaveIDs[bi.SlaveHost.ToUpper()];
                bi.explicithost = true;
            }
            bi.ExecArgs = ExecArgs;
            if (++nextslave >= slaves.Length)
            {
                nextslave = 0;
            }
            bi.logname = logname;
            bi.outputdfsdirs = outputdfsdirs;
            bi.slaves = slaves;
            bi.baseoutputfilesize = dc.DataNodeBaseSize;
            bi.cfgj = cfgj;
            bi.DFSWriter = cfgj.IOSettings.DFS_IOs[BlockID].DFSWriter.Trim();
            bi.Meta = cfgj.IOSettings.DFS_IOs[BlockID].Meta;
            List<string> dfswriters = new List<string>();
            if (bi.DFSWriter.Length > 0)
            {
                string[] writers = bi.DFSWriter.Split(';');
                for (int wi = 0; wi < writers.Length; wi++)
                {
                    string thiswriter = writers[wi].Trim();
                    if (thiswriter.Length == 0)
                    {
                        continue;
                    }
                    int ic = thiswriter.IndexOf('@');
                    int reclen = -1;
                    if (-1 != ic)
                    {
                        try
                        {
                            reclen = Surrogate.GetRecordSize(thiswriter.Substring(ic + 1));
                            thiswriter = thiswriter.Substring(0, ic);
                        }
                        catch (FormatException e)
                        {
                            Console.Error.WriteLine("Error: remote output record length error: {0} ({1})", thiswriter, e.Message);
                            SetFailure();
                            return;
                        }
                        catch (OverflowException e)
                        {
                            Console.Error.WriteLine("Error: remote output record length error: {0} ({1})", thiswriter, e.Message);
                            SetFailure();
                            return;
                        }
                    }
                    string outfn = thiswriter;
                    if (outfn.StartsWith(@"dfs://", StringComparison.OrdinalIgnoreCase))
                    {
                        outfn = outfn.Substring(6);
                    }
                    string reason = "";
                    if (dfs.IsBadFilename(outfn, out reason))
                    {
                        Console.Error.WriteLine("Invalid output file: {0}", reason);
                        return;
                    }
                    if (null != DfsFindAny(dc, outfn))
                    {
                        Console.Error.WriteLine("Error: output file already exists in DFS: {0}", outfn);
                        return;
                    }
                    dfswriters.Add(thiswriter);
                    outputrecordlengths.Add(reclen);
                }
            }
            else
            {
                dfswriters.Add("");
                outputrecordlengths.Add(-1);
            }
            bi.DFSWriters = dfswriters;
            bi.verbose = verbose;
            bi.rem = new MySpace.DataMining.DistributedObjects5.Remote(cfgj.NarrativeName + "_remote");
            bi.rem.CookRetries = dc.slave.CookRetries;
            bi.rem.CookTimeout = dc.slave.CookTimeout;
            bi.rem.DfsSampleDistance = bi.sampledist;
            bi.rem.CompressFileOutput = dc.slave.CompressDfsChunks;
            bi.rem.LocalCompile = true;
            bi.rem.OutputStartingPoint = slaveHostID;
            bi.rem.CompilerOptions = cfgj.IOSettings.CompilerOptions;
            bi.rem.CompilerVersion = cfgj.IOSettings.CompilerVersion;
            if (cfgj.AssemblyReferencesCount > 0)
            {
                cfgj.AddAssemblyReferences(bi.rem.CompilerAssemblyReferences, Surrogate.NetworkPathForHost(dc.Slaves.GetFirstSlave()));
            }
            if (cfgj.OpenCVExtension != null)
            {
                bi.rem.AddOpenCVExtension();
            }
            if (cfgj.MemCache != null)
            {
                bi.rem.AddMemCacheExtension();
            }
            if (cfgj.Unsafe != null)
            {
                bi.rem.AddUnsafe();
            }
            {
                List<dfs.DfsFile.FileNode> nodes = new List<dfs.DfsFile.FileNode>();
                List<string> mapfileswithnodes = null;
                List<int> nodesoffsets = null;
                IList<string> mapfiles = SplitInputPaths(dc, cfgj.IOSettings.DFS_IOs[BlockID].DFSReader);
                if (mapfiles.Count > 0)
                {
                    mapfileswithnodes = new List<string>(mapfiles.Count);
                    nodesoffsets = new List<int>(mapfiles.Count);
                }
                for (int i = 0; i < mapfiles.Count; i++)
                {
                    string dp = mapfiles[i].Trim();
                    int inreclen = -1;
                    if (0 != dp.Length) // Allow empty entry where input isn't wanted.
                    {
                        if (dp.StartsWith("dfs://", StringComparison.OrdinalIgnoreCase))
                        {
                            dp = dp.Substring(6);
                        }
                        {
                            int ic = dp.IndexOf('@');
                            if (-1 != ic)
                            {
                                try
                                {
                                    inreclen = Surrogate.GetRecordSize(dp.Substring(ic + 1));
                                    dp = dp.Substring(0, ic);
                                }
                                catch (FormatException e)
                                {
                                    Console.Error.WriteLine("Error: remote input record length error: {0} ({1})", dp, e.Message);
                                    SetFailure();
                                    return;
                                }
                                catch (OverflowException e)
                                {
                                    Console.Error.WriteLine("Error: remote input record length error: {0} ({1})", dp, e.Message);
                                    SetFailure();
                                    return;
                                }
                            }
                        }
                        dfs.DfsFile df;
                        if (inreclen > 0 || inreclen == -2)
                        {
                            df = DfsFind(dc, dp, DfsFileTypes.BINARY_RECT);
                            if (null != df && inreclen != df.RecordLength)
                            {
                                Console.Error.WriteLine("Error: remote input file does not have expected record length of {0}: {1}@{2}", inreclen, dp, df.RecordLength);
                                SetFailure();
                                return;
                            }
                        }
                        else
                        {
                            df = DfsFind(dc, dp);
                        }
                        if (null == df)
                        {
                            //throw new Exception("Remote input file not found in DFS: " + dp);
                            Console.Error.WriteLine("Remote input file not found in DFS: {0}", dp);
                            return;
                        }
                        if (df.Nodes.Count > 0)
                        {
                            mapfileswithnodes.Add(dp);
                            nodesoffsets.Add(nodes.Count);
                            inputrecordlengths.Add(inreclen);
                            nodes.AddRange(df.Nodes);
                        }
                    }
                }
                bi.dfsinputpaths = new List<string>(nodes.Count);
                //MapNodesToNetworkPaths(nodes, bi.dfsinputpaths);
                dfs.MapNodesToNetworkStarPaths(nodes, bi.dfsinputpaths);
                bi.dfsinputfilenames = mapfileswithnodes;
                bi.dfsinputnodesoffsets = nodesoffsets;
            }
            blocks.Add(bi);
            bi.thread = new System.Threading.Thread(new System.Threading.ThreadStart(bi.threadproc));
            bi.thread.Name = "RemoteJobBlock" + bi.BlockID;
        }
        MySpace.DataMining.DistributedObjects.StaticGlobals.DSpace_InputRecordLength = inputrecordlengths.Count > 0 ? inputrecordlengths[0] : -1;
        MySpace.DataMining.DistributedObjects.StaticGlobals.DSpace_OutputRecordLength = outputrecordlengths.Count > 0 ? outputrecordlengths[0] : -1;
        // Need to start threads separately due to StaticGlobals being updated.
        for (int BlockID = 0; BlockID < cfgj.IOSettings.DFS_IOs.Length; BlockID++)
        {
            RemoteBlockInfo bi = blocks[BlockID];
            bi.rem.InputRecordLength = MySpace.DataMining.DistributedObjects.StaticGlobals.DSpace_InputRecordLength;
            bi.rem.InputRecordLengths = inputrecordlengths;
            bi.rem.OutputRecordLength = MySpace.DataMining.DistributedObjects.StaticGlobals.DSpace_OutputRecordLength;
            bi.rem.OutputRecordLengths = outputrecordlengths;
            AELight_StartTraceThread(bi.thread);
        }
        for (int BlockID = 0; BlockID < blocks.Count; BlockID++)
        {
            AELight_JoinTraceThread(blocks[BlockID].thread);
            blocks[BlockID].rem.Close();
            if (blocks[BlockID].blockfail)
            {
                Console.Error.WriteLine("BlockID {0} on host '{1}' did not complete successfully", BlockID, (blocks[BlockID].SlaveHost != null) ? blocks[BlockID].SlaveHost : "<null>");
                continue;
            }
        }
        List<string> dfsnames = new List<string>();
        List<string> dfsnamesreplicating = new List<string>();
        // Reload DFS config to make sure changes since starting get rolled in,
        // and make sure the output file wasn't created in that time...
        using (LockDfsMutex()) // Needed: change between load & save should be atomic.
        {
            dc = LoadDfsConfig();
            for (int BlockID = 0; BlockID < blocks.Count; BlockID++)
            {
                if (blocks[BlockID].blockfail)
                {
                    continue;
                }
                {
                    bool anyoutput = false;
                    bool nonemptyoutputpath = false;
                    for (int oi = 0; oi < blocks[BlockID].DFSWriters.Count; oi++)
                    {
                        string dfswriter = blocks[BlockID].DFSWriters[oi];
                        if (string.IsNullOrEmpty(dfswriter))
                        {
                            if (blocks[BlockID].outputdfsnodeses[oi].Count > 0)
                            {
                                Console.Error.WriteLine("Output data detected with no DFSWriter specified");
                            }
                        }
                        else
                        {
                            {
                                if (null != DfsFind(dc, dfswriter))
                                {
                                    Console.Error.WriteLine("Error: output file was created during job: {0}", dfswriter);
                                    continue;
                                }
                                string dfspath = dfswriter;
                                {
                                    nonemptyoutputpath = true;
                                    dfs.DfsFile df = new dfs.DfsFile();
                                    if (blocks[BlockID].rem.OutputRecordLengths[oi] > 0)
                                    {
                                        df.XFileType = DfsFileTypes.BINARY_RECT + "@" + blocks[BlockID].rem.OutputRecordLengths[oi].ToString();
                                    }
                                    else if (blocks[BlockID].rem.OutputRecordLengths[oi] == -2)
                                    {
                                        df.XFileType = DfsFileTypes.BINARY_RECT + "@?";
                                    }
                                    df.Nodes = new List<dfs.DfsFile.FileNode>();
                                    df.Size = -1; // Preset
                                    if (dfspath.StartsWith("dfs://", StringComparison.OrdinalIgnoreCase))
                                    {
                                        dfspath = dfspath.Substring(6);
                                    }
                                    string dfspathreplicating = ".$" + dfspath + ".$replicating-" + Guid.NewGuid().ToString();
                                    if (null != dc.FindAny(dfspathreplicating))
                                    {
                                        Console.Error.WriteLine("Error: file exists: file put into DFS from another location during job: " + dfspathreplicating);
                                        SetFailure();
                                        return;
                                    }
                                    dfsnames.Add(dfspath);
                                    dfsnamesreplicating.Add(dfspathreplicating);
                                    df.Name = dfspathreplicating;
                                    bool anybad = false;
                                    long totalsize = 0;
                                    {
                                        int i = BlockID;
                                        for (int j = 0; j < blocks[i].outputdfsnodeses[oi].Count; j++)
                                        {
                                            dfs.DfsFile.FileNode fn = new dfs.DfsFile.FileNode();
                                            fn.Host = blocks[i].slaves[(blocks[i].rem.OutputStartingPoint + j) % blocks[i].slaves.Count];
                                            fn.Name = blocks[i].outputdfsnodeses[oi][j];
                                            df.Nodes.Add(fn);
                                            fn.Length = -1; // Preset
                                            fn.Position = -1; // Preset
                                            if (anybad)
                                            {
                                                continue;
                                            }
                                            fn.Length = blocks[i].outputsizeses[oi][j];
                                            fn.Position = totalsize; // Position must be set before totalsize updated!
                                            if (blocks[i].outputdfsnodeses[oi].Count != blocks[i].outputsizeses[oi].Count)
                                            {
                                                anybad = true;
                                                continue;
                                            }
                                            totalsize += blocks[i].outputsizeses[oi][j];
                                        }
                                    }
                                    if (!anybad)
                                    {
                                        df.Size = totalsize;
                                    }
                                    if (totalsize != 0)
                                    {
                                        anyoutput = true;
                                    }
                                    // Always add the file to DFS, even if blank!
                                    dc.Files.Add(df);
                                }
                            }
                        }
                    }
                    if (!anyoutput && verbose && nonemptyoutputpath)
                    {
                        Console.Write(" (no DFS output) ");
                        ConsoleFlush();
                    }
                }
            }
            UpdateDfsXml(dc);
        }
        ReplicationPhase(verbosereplication, blocks.Count, slaves, dfsnamesreplicating);
        using (LockDfsMutex()) // Needed: change between load & save should be atomic.
        {
            dc = LoadDfsConfig(); // Reload in case of change or user modifications.
            for (int nfile = 0; nfile < dfsnames.Count; nfile++)
            {
                string dfspath = dfsnames[nfile];
                string dfspathreplicating = dfsnamesreplicating[nfile];
                {
                    dfs.DfsFile dfu = dc.FindAny(dfspathreplicating);
                    if (null != dfu)
                    {
                        if (null != DfsFindAny(dc, dfspath))
                        {
                            Console.Error.WriteLine("Error: file exists: file put into DFS from another location during job");
                            SetFailure();
                            continue;
                        }
                        dfu.Name = dfspath;
                    }
                }
            }
            UpdateDfsXml(dc);
        }
        if (verbose)
        {
            Console.WriteLine(); // Line after output chars.
        }
    }
    catch (System.Threading.ThreadAbortException)
    {
        aborting = true;
    }
    finally
    {
        {
            // Remove the per-slave config files written at job start.
            for (int si = 0; si < slaves.Length; si++)
            {
                System.Threading.Mutex m = new System.Threading.Mutex(false, "AEL_SC_" + slaves[si]);
                try
                {
                    m.WaitOne();
                }
                catch (System.Threading.AbandonedMutexException)
                {
                }
                try
                {
                    System.IO.File.Delete(NetworkPathForHost(slaves[si]) + @"\slaveconfig.j" + sjid + ".xml");
                }
                catch
                {
                }
                finally
                {
                    m.ReleaseMutex();
                    m.Close();
                }
            }
        }
        if (!aborting)
        {
            CheckUserLogs(slaves, logname);
        }
    }
    if (verbose)
    {
        Console.WriteLine();
        Console.WriteLine("[{0}] Done", System.DateTime.Now.ToString(), System.DateTime.Now.Millisecond);
        for (int i = 0; i < cfgj.IOSettings.DFS_IOs.Length; i++)
        {
            Console.WriteLine("Output: {0}", cfgj.IOSettings.DFS_IOs[i].DFSWriter);
        }
    }
}
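// Call sketch (illustrative): ExecOneRemote runs one thread per DFS_IO block,
// round-robining blocks over healthy slaves unless each DFS_IO names a Host.
// The job-config loader and argument values below are hypothetical; only the
// ExecOneRemote signature itself comes from the code above.
/*
SourceCode.Job cfgj = LoadJobConfig("jobs.xml"); // hypothetical loader
ExecOneRemote(cfgj, new string[] { "arg0" }, true, false);
*/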
public static dfs.DfsFile DfsFindAny(dfs dc, string dfspath)
{
    return dc.FindAny(dfspath);
}
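// Usage sketch (illustrative): DfsFindAny returns null when the path does not
// resolve to any DFS file, so callers must null-check; "output1" is made up.
/*
dfs dcx = LoadDfsConfig();
if (null != DfsFindAny(dcx, "output1"))
{
    Console.Error.WriteLine("Error: output file already exists in DFS: output1");
}
*/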
public static void MemCacheFlush(string mcname)
{
    if (mcname.StartsWith("dfs://", StringComparison.OrdinalIgnoreCase))
    {
        mcname = mcname.Substring(6);
    }
    dfs dc = LoadDfsConfig();
    dfs.DfsFile df = dc.FindAny(mcname);
    if (null == df || df.MemCache == null)
    {
        Console.Error.WriteLine("Error: '{0}' is not a MemCache", (null == df ? mcname : df.Name));
        SetFailure();
        return;
    }
    string tempdfsname = mcname + Guid.NewGuid() + dfs.TEMP_FILE_MARKER;
    string[] slaves = dc.Slaves.SlaveList.Split(';');
    long dfsfilesize = 0;
    {
        string tempfp = "bp-mcflush" + Guid.NewGuid() + ".tmp";
        try
        {
            using (System.IO.StreamWriter sw = new System.IO.StreamWriter(tempfp))
            {
                //foreach (string slave in slaves)
                MySpace.DataMining.Threading.ThreadTools<string>.Parallel(
                    new Action<string>(
                    delegate(string slave)
                    {
                        System.Net.Sockets.NetworkStream nstm = Surrogate.ConnectService(slave);
                        nstm.WriteByte((byte)'C');
                        nstm.WriteByte((byte)'f');
                        XContent.SendXContent(nstm, df.Name);
                        int ich = nstm.ReadByte();
                        if ('+' != ich)
                        {
                            string errmsg = null;
                            if ('-' == ich)
                            {
                                try
                                {
                                    errmsg = XContent.ReceiveXString(nstm, null);
                                }
                                catch
                                {
                                }
                            }
                            if (null != errmsg)
                            {
                                throw new Exception("Error received from DO service during MemCache commit: " + errmsg);
                            }
                            throw new Exception("Did not receive a success signal from DO service during MemCache commit");
                        }
                        // flushinfos: chunk name, chunk size (without header)
                        string[] flushinfos = XContent.ReceiveXString(nstm, null).Split(
                            new char[] { '\r', '\n' }, StringSplitOptions.RemoveEmptyEntries);
                        foreach (string flushinfo in flushinfos)
                        {
                            lock (slaves)
                            {
                                sw.WriteLine("{0} {1}", slave, flushinfo);
                                dfsfilesize += int.Parse(flushinfo.Split(' ')[1]);
                            }
                        }
                    }), slaves, slaves.Length);
            }
            DfsBulkPut(new string[] { tempfp, tempdfsname, "rbin@" + df.RecordLength });
        }
        finally
        {
            try
            {
                System.IO.File.Delete(tempfp);
            }
            catch
            {
            }
        }
    }
    Dictionary<string, bool> newchunknames = new Dictionary<string, bool>(StringComparer.OrdinalIgnoreCase);
    using (LockDfsMutex())
    {
        dc = LoadDfsConfig();
        dfs.DfsFile df2 = dc.FindAny(tempdfsname);
        if (null == df2)
        {
            throw new Exception("DEBUG: Temp DFS file not found: " + tempdfsname);
        }
        for (int i = 0; i < dc.Files.Count; i++)
        {
            if (0 == string.Compare(dc.Files[i].Name, mcname, true))
            {
                dc.Files.RemoveAt(i);
                break;
            }
        }
        foreach (dfs.DfsFile.FileNode fn in df2.Nodes)
        {
            newchunknames[fn.Name] = true;
        }
        df2.MemCache = df.MemCache;
        df2.Size = dfsfilesize;
        df2.Name = df.Name;
        UpdateDfsXml(dc);
    }
    {
        // Just kill the old chunks, not the MemCache stuff.
        List<string> delfnodes = new List<string>();
        {
            // Collect file node paths.
            for (int dn = 0; dn < df.Nodes.Count; dn++)
            {
                if (newchunknames.ContainsKey(df.Nodes[dn].Name))
                {
                    continue;
                }
                foreach (string chost in df.Nodes[dn].Host.Split(';'))
                {
                    delfnodes.Add(NetworkPathForHost(chost) + @"\" + df.Nodes[dn].Name);
                }
            }
        }
        _KillDataFileChunksInternal_unlocked_mt(delfnodes);
    }
}
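// Protocol note (derived from the method above): each slave's DO service is
// sent the bytes 'C' 'f' followed by the cache name; '+' acknowledges, '-'
// carries an error string. Caller-side sketch with a hypothetical cache name:
/*
try
{
    MemCacheFlush("MyMemCache"); // commits segments and swaps in the new chunks
}
catch (Exception e)
{
    Console.Error.WriteLine("MemCache commit failed: {0}", e.Message);
}
*/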
public static void MemCacheCommand(string[] args)
{
    if (args.Length < 1)
    {
        Console.Error.WriteLine("Expected memcache sub-command");
        SetFailure();
        return;
    }
    string act = args[0].ToLower();
    switch (act)
    {
        case "create":
            {
                string mcname = null;
                string mcschema = null;
                int mcsegsize = -1;
                EachArgument(args, 1,
                    new Action<string, string>(
                    delegate(string key, string value)
                    {
                        key = key.ToLower();
                        switch (key)
                        {
                            case "name":
                                mcname = value;
                                break;
                            case "schema":
                                mcschema = value;
                                break;
                            case "segment":
                            case "segsize":
                            case "segmentsize":
                                mcsegsize = ParseCapacity(value);
                                break;
                        }
                    }));
                if (string.IsNullOrEmpty(mcname))
                {
                    Console.Error.WriteLine("Expected name=<MemCacheName>");
                    SetFailure();
                    return;
                }
                if (string.IsNullOrEmpty(mcschema))
                {
                    Console.Error.WriteLine("Expected schema=<schema>");
                    SetFailure();
                    return;
                }
                if (-1 != mcsegsize && mcsegsize < 1024)
                {
                    Console.Error.WriteLine("Error: segment={0} is too small", mcsegsize);
                    SetFailure();
                    return;
                }
                if (mcname.StartsWith("dfs://", StringComparison.OrdinalIgnoreCase))
                {
                    mcname = mcname.Substring(6);
                }
                {
                    string reason;
                    if (dfs.IsBadFilename(mcname, out reason))
                    {
                        Console.Error.WriteLine("MemCache cannot be named '{0}': {1}", mcname, reason);
                        SetFailure();
                        return;
                    }
                }
                dfs.DfsFile.ConfigMemCache cmc = new dfs.DfsFile.ConfigMemCache();
                cmc.MetaFileName = "mcm." + Surrogate.SafeTextPath(mcname) + ".mcm";
                cmc.Schema = mcschema;
                List<int> offsets = new List<int>();
                cmc.RowLength = Surrogate.GetRecordInfo(mcschema, out cmc.KeyOffset, out cmc.KeyLength, offsets);
                /*
                if (0 == cmc.KeyOffset
                    && cmc.RowLength == cmc.KeyLength
                    && -1 == mcschema.IndexOf('['))
                {
                    Console.WriteLine("Note: no key was specified, the key is the entire row");
                }
                */
                if (-1 == mcsegsize)
                {
                    // Default segment size: 64 MiB, rounded down to a multiple of the row length.
                    const int defsegsize = 0x400 * 0x400 * 64;
                    cmc.SegmentSize = defsegsize - (defsegsize % cmc.RowLength);
                }
                else
                {
                    if (0 != (mcsegsize % cmc.RowLength))
                    {
                        Console.Error.WriteLine("Segment size must be a multiple of the row length");
                        Console.Error.WriteLine("Nearest segment size is {0} bytes", mcsegsize - (mcsegsize % cmc.RowLength));
                        SetFailure();
                        return;
                    }
                    cmc.SegmentSize = mcsegsize;
                }
                {
                    StringBuilder sbFieldOffsets = new StringBuilder();
                    foreach (int offset in offsets)
                    {
                        if (sbFieldOffsets.Length != 0)
                        {
                            sbFieldOffsets.Append(',');
                        }
                        sbFieldOffsets.Append(offset);
                    }
                    cmc.FieldOffsets = sbFieldOffsets.ToString();
                }
                dfs.DfsFile df = new dfs.DfsFile();
                df.Nodes = new List<dfs.DfsFile.FileNode>(0);
                df.MemCache = cmc;
                df.Name = mcname;
                df.XFileType = DfsFileTypes.BINARY_RECT + "@" + cmc.RowLength;
                df.Size = 0;
                dfs dc = LoadDfsConfig();
                {
                    dfs.DfsFile df2 = dc.FindAny(df.Name);
                    if (null != df2)
                    {
                        Console.Error.WriteLine("Error: a file named '{0}' already exists", df2.Name);
                        SetFailure();
                        return;
                    }
                }
                {
                    string startmeta = GetMemCacheMetaFileHeader(df);
                    string[] slaves = dc.Slaves.SlaveList.Split(';');
                    int totalworkercount = dc.Blocks.TotalCount; // Subprocess_TotalPrime
                    StringBuilder[] permachine = new StringBuilder[slaves.Length];
                    //byte[] HEADER = new byte[4];
                    //MySpace.DataMining.DistributedObjects.Entry.ToBytes(4, HEADER, 0);
                    for (int i = 0; i < permachine.Length; i++)
                    {
                        permachine[i] = new StringBuilder(256);
                    }
                    {
                        int si = -1;
                        for (int workerid = 0; workerid < totalworkercount; workerid++)
                        {
                            if (++si >= slaves.Length)
                            {
                                si = 0;
                            }
                            StringBuilder sb = permachine[si];
                            sb.AppendFormat("##{1}:{0}", Environment.NewLine, workerid);
                            // There are no segments yet, but write a dummy one for bookkeeping.
                            foreach (char snc in "MemCache_" + mcname + "_empty")
                            {
                                sb.Append(snc);
                            }
                            {
                                sb.Append(' ');
                                /*
                                StringBuilder newchunkpath = new StringBuilder(100);
                                newchunkpath.Append(Surrogate.NetworkPathForHost(slaves[si]));
                                newchunkpath.Append('\\');
                                */
                                // Make up a data node chunk name.
                                foreach (char ch in MakeMemCacheChunkName(mcname, workerid))
                                {
                                    //newchunkpath.Append(ch);
                                    sb.Append(ch);
                                }
                                // Write the empty chunk.
                                //System.IO.File.WriteAllBytes(newchunkpath.ToString(), HEADER);
                            }
                            //if (IsLastSegment) // true
                            {
                                sb.Append(' ');
                                string shexlen = string.Format("{0:x8}", 0); // Zero-length!
                                for (int i = 0; i < shexlen.Length; i++)
                                {
                                    sb.Append(shexlen[i]);
                                }
                            }
                            sb.AppendLine();
                        }
                    }
                    for (int si = 0; si < slaves.Length; si++)
                    {
                        string slave = slaves[si];
                        string fp = Surrogate.NetworkPathForHost(slave) + @"\" + cmc.MetaFileName;
                        using (System.IO.StreamWriter sw = new System.IO.StreamWriter(fp))
                        {
                            sw.Write(startmeta);
                            sw.Write(permachine[si].ToString());
                        }
                    }
                }
                using (LockDfsMutex())
                {
                    dc = LoadDfsConfig(); // Load again in update lock.
                    {
                        dfs.DfsFile df2 = dc.FindAny(df.Name);
                        if (null != df2)
                        {
                            Console.Error.WriteLine("Error: a file named '{0}' already exists", df2.Name);
                            SetFailure();
                            return;
                        }
                    }
                    dc.Files.Add(df);
                    UpdateDfsXml(dc);
                }
                try
                {
                    // Need to commit it so that the empty chunks are in the metadata for bookkeeping.
                    // This has to be done after actually adding it to dfsxml.
                    MemCacheFlush(mcname);
                }
                catch (Exception e)
                {
                    try
                    {
                        MemCacheDelete(mcname, false);
                    }
                    catch
                    {
                    }
                    Console.Error.WriteLine("Error: unable to commit newly created MemCache '{0}'; because:{1}{2}", mcname, Environment.NewLine, e.ToString());
                    SetFailure();
                    return;
                }
                Console.WriteLine("Successfully created MemCache '{0}'", mcname);
            }
            break;

        case "delete":
        case "del":
        case "rm":
            {
                string mcname = null;
                EachArgument(args, 1,
                    new Action<string, string>(
                    delegate(string key, string value)
                    {
                        key = key.ToLower();
                        switch (key)
                        {
                            case "name":
                                mcname = value;
                                break;
                        }
                    }));
                if (string.IsNullOrEmpty(mcname))
                {
                    Console.Error.WriteLine("Expected name=<MemCacheName>");
                    SetFailure();
                    return;
                }
                MemCacheDelete(mcname, true);
            }
            break;

        case "flush":
        case "commit":
            {
                string mcname = null;
                EachArgument(args, 1,
                    new Action<string, string>(
                    delegate(string key, string value)
                    {
                        key = key.ToLower();
                        switch (key)
                        {
                            case "name":
                                mcname = value;
                                break;
                        }
                    }));
                if (string.IsNullOrEmpty(mcname))
                {
                    Console.Error.WriteLine("Expected name=<MemCacheName>");
                    SetFailure();
                    return;
                }
                try
                {
                    MemCacheFlush(mcname);
                    Console.WriteLine("Done");
                }
                catch (Exception e)
                {
                    Console.WriteLine(" Commit was unsuccessful because: {0}", e.Message);
                    Console.WriteLine();
                    Console.Error.WriteLine(e.ToString());
                    SetFailure();
                    return;
                }
            }
            break;

        case "release":
        case "rollback":
            {
                string mcname = null;
                bool force = false;
                EachArgument(args, 1,
                    new Action<string, string>(
                    delegate(string key, string value)
                    {
                        key = key.ToLower();
                        switch (key)
                        {
                            case "name":
                                mcname = value;
                                break;
                            case "-f":
                                force = true;
                                break;
                        }
                    }));
                if (string.IsNullOrEmpty(mcname))
                {
                    Console.Error.WriteLine("Expected name=<MemCacheName>");
                    SetFailure();
                    return;
                }
                try
                {
                    MemCacheRelease(mcname, force);
                    Console.WriteLine("Done");
                }
                catch (Exception e)
                {
                    string exception = e.ToString();
                    if (-1 != exception.IndexOf("MemCacheWarning"))
                    {
                        Console.WriteLine("Warning: " + exception);
                    }
                    else
                    {
                        Console.Error.WriteLine(exception);
                        string ioe = "InvalidOperationException:";
                        if (!force && -1 != exception.IndexOf(ioe))
                        {
                            try
                            {
                                string emsg = exception.Substring(exception.IndexOf(ioe) + ioe.Length)
                                    .Split('\r', '\n')[0].Trim();
                                System.Threading.Thread.Sleep(100);
                                Console.WriteLine();
                                Console.WriteLine("{0}{2}{1}", false ? "\u00014" : "", false ? "\u00010" : "", emsg);
                                System.Threading.Thread.Sleep(100);
                            }
                            catch
                            {
                            }
                            Console.Error.WriteLine("Use rollback -f followed by killall to force rollback");
                        }
                        SetFailure();
                        return;
                    }
                }
            }
            break;

        case "load":
            {
                string mcname = null;
                EachArgument(args, 1,
                    new Action<string, string>(
                    delegate(string key, string value)
                    {
                        key = key.ToLower();
                        switch (key)
                        {
                            case "name":
                                mcname = value;
                                break;
                        }
                    }));
                if (string.IsNullOrEmpty(mcname))
                {
                    Console.Error.WriteLine("Expected name=<MemCacheName>");
                    SetFailure();
                    return;
                }
                MemCacheLoad(mcname);
                Console.WriteLine("Done");
            }
            break;

        case "info":
        case "information":
            {
                string mcname = null;
                EachArgument(args, 1,
                    new Action<string, string>(
                    delegate(string key, string value)
                    {
                        key = key.ToLower();
                        switch (key)
                        {
                            case "name":
                                mcname = value;
                                break;
                        }
                    }));
                if (string.IsNullOrEmpty(mcname))
                {
                    Console.Error.WriteLine("Expected name=<MemCacheName>");
                    SetFailure();
                    return;
                }
                if (mcname.StartsWith("dfs://", StringComparison.OrdinalIgnoreCase))
                {
                    mcname = mcname.Substring(6);
                }
                dfs dc = LoadDfsConfig();
                dfs.DfsFile df = dc.FindAny(mcname);
                if (null == df || df.MemCache == null)
                {
                    Console.Error.WriteLine("Error: '{0}' is not a MemCache", (null == df ? mcname : df.Name));
                    SetFailure();
                    return;
                }
                Console.WriteLine(" MemCache: {0}", df.Name);
                Console.WriteLine(" Segment size: {0} ({1})", GetFriendlyByteSize(df.MemCache.SegmentSize), df.MemCache.SegmentSize);
                Console.WriteLine(" Schema: {0}", df.MemCache.Schema);
                Console.WriteLine(" Row Length: {0}", df.MemCache.RowLength);
                Console.WriteLine(" Key Offset: {0}", df.MemCache.KeyOffset);
                Console.WriteLine(" Key Length: {0}", df.MemCache.KeyLength);
            }
            break;

        default:
            Console.Error.WriteLine("No such sub-command for memcache: {0}", act);
            SetFailure();
            return;
    }
}
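// Dispatch sketch (illustrative): how the sub-commands above might be invoked.
// The cache name, schema string, and "64MB" capacity format are assumptions
// for illustration; the actual schema syntax is whatever Surrogate.GetRecordInfo
// accepts, and the capacity format is whatever ParseCapacity accepts.
/*
MemCacheCommand(new string[] { "create", "name=MyMemCache", "schema=Key:long,Value:char(24)", "segsize=64MB" });
MemCacheCommand(new string[] { "info", "name=MyMemCache" });
MemCacheCommand(new string[] { "flush", "name=MyMemCache" });
MemCacheCommand(new string[] { "delete", "name=MyMemCache" });
*/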