static void Dfs(string cmd, string[] args) { //using (LockDfsMutex()) { switch (cmd) { case "delete": case "del": case "rm": case "delmt": case "delst": if (!dfs.DfsConfigExists(DFSXMLPATH)) { Console.Error.WriteLine("DFS not setup; use: {0} format", appname); SetFailure(); return; } if (args.Length < 1) { Console.Error.WriteLine("Error: {0} command needs argument: <path|wildcard>", cmd); SetFailure(); return; } if (string.Compare(cmd, "delst", true) == 0) { DfsDelete(args[0]); //single threaded. } else { DfsDeleteMt(args[0], true); } break; case "head": { if (!dfs.DfsConfigExists(DFSXMLPATH)) { Console.Error.WriteLine("DFS not setup; use: {0} format", appname); SetFailure(); return; } if (args.Length < 1 || '-' == args[0][0]) { Console.Error.WriteLine("Error: dfs head command needs argument: <dfsfile>"); SetFailure(); return; } string[] specs; if (args[0].StartsWith("dfs://", StringComparison.OrdinalIgnoreCase)) { specs = args[0].Substring(6).Split(':'); } else { specs = args[0].Split(':'); } string RecordInfo = null; int RecordLength = -1; { int iat = specs[0].IndexOf('@'); if (-1 != iat) { RecordInfo = specs[0].Substring(iat + 1); specs[0] = specs[0].Substring(0, iat); } } dfs dc = LoadDfsConfig(); dfs.DfsFile df = DfsFindAny(dc, specs[0]); if (null == df) { Console.Error.WriteLine("File not found in DFS: {0}", specs[0]); SetFailure(); return; } if (0 == string.Compare(DfsFileTypes.NORMAL, df.Type, StringComparison.OrdinalIgnoreCase)) { if (null != RecordInfo) { Console.Error.WriteLine("Cannot specify record information '{0}'" + " for non- rectangular binary DFS file '{1}'", RecordInfo, df.Name); SetFailure(); return; } } else if (0 == string.Compare(DfsFileTypes.BINARY_RECT, df.Type, StringComparison.OrdinalIgnoreCase)) { if (string.IsNullOrEmpty(RecordInfo)) { Console.Error.WriteLine("DFS file '{0}' is not of expected type", specs[0]); Console.WriteLine("Must specify <dfsfile>@<record-types> for a rectangular binary DFS file"); SetFailure(); return; } 
RecordLength = df.RecordLength; } else { Console.Error.WriteLine("DFS file '{0}' is not of expected type", specs[0]); SetFailure(); return; } if (df.Nodes.Count > 0) { string shost = ""; if (specs.Length >= 2 && specs[1].Length > 0) { shost = IPAddressUtil.GetName(specs[1]); } string partspec = ""; bool foundpart = true; if (specs.Length >= 3) { partspec = specs[2]; foundpart = false; } int lc = 10; if (args.Length >= 2) { lc = int.Parse(args[1]); if (lc <= 0) { throw new FormatException("Line count makes no sense"); } } const int MAX_SIZE_PER_RECEIVE = 0x400 * 64; byte[] fbuf = new byte[MAX_SIZE_PER_RECEIVE]; foreach (dfs.DfsFile.FileNode node in df.Nodes) { if (partspec.Length > 0) { if (0 == string.Compare(node.Name, partspec, StringComparison.OrdinalIgnoreCase)) { if (ListContainsHost(node.Host, shost)) { // Good!.. foundpart = true; } else { ConsoleFlush(); Console.Error.WriteLine(" Specified data-node chunk does not reside on specified host"); SetFailure(); return; } } else { continue; } } //string netpath = NetworkPathForHost(node.Host.Split(';')[0]) + @"\" + node.Name; using (System.IO.Stream _fc = new DfsFileNodeStream(node, true, System.IO.FileMode.Open, System.IO.FileAccess.Read, System.IO.FileShare.Read, FILE_BUFFER_SIZE)) { System.IO.Stream fc = _fc; if (1 == dc.slave.CompressDfsChunks) { fc = new System.IO.Compression.GZipStream(_fc, System.IO.Compression.CompressionMode.Decompress); } { int xread = StreamReadLoop(fc, fbuf, 4); if (4 == xread) { int hlen = MySpace.DataMining.DistributedObjects.Entry.BytesToInt(fbuf); StreamReadExact(fc, fbuf, hlen - 4); } } if (null == RecordInfo) { using (System.IO.StreamReader sr = new System.IO.StreamReader(fc)) { for (; lc > 0; lc--) { string s = sr.ReadLine(); if (null == s) { break; } Console.WriteLine(s); } } } else { byte[] recbuf = new byte[RecordLength]; string[] types = RecordInfo.Split(','); List<int> typesizes = new List<int>(types.Length); int totalsizerequested = 0; { foreach (Surrogate.RecordInfo ri in 
Surrogate.EachRecord(RecordInfo)) { if (ri.InKey) { throw new Exception("Unexpected [ found in record information"); } totalsizerequested += ri.Size; if (totalsizerequested <= RecordLength) { typesizes.Add(ri.Size); } } } { for (; lc > 0; lc--) { if (RecordLength != StreamReadLoop(fc, recbuf, RecordLength)) { break; } int offset = 0; for (int it = 0; it < typesizes.Count; offset += typesizes[it++]) { if (0 != it) { Console.Write(' '); } if (char.IsDigit(types[it][0])) { Console.Write("<{0}>", typesizes[it]); } else if (0 == string.Compare("int", types[it], StringComparison.OrdinalIgnoreCase) || 0 == string.Compare("int32", types[it], StringComparison.OrdinalIgnoreCase) || 0 == string.Compare("nInt", types[it], StringComparison.OrdinalIgnoreCase) ) { bool isnullable = (char.ToLowerInvariant(types[it][0]) == 'n'); MySpace.DataMining.DistributedObjects.recordset rs; if (isnullable) { if (0 != recbuf[0]) { Console.Write("NULL"); continue; } else { rs = MySpace.DataMining.DistributedObjects.recordset.Prepare( MySpace.DataMining.DistributedObjects.ByteSlice.Prepare( recbuf, offset + 1, typesizes[it] - 1)); } } else { rs = MySpace.DataMining.DistributedObjects.recordset.Prepare( MySpace.DataMining.DistributedObjects.ByteSlice.Prepare( recbuf, offset, typesizes[it])); } int val = rs.GetInt(); if (isnullable) { val = (int)MySpace.DataMining.DistributedObjects.Entry.ToUInt32(val); } Console.Write(val); } else if (0 == string.Compare("long", types[it], StringComparison.OrdinalIgnoreCase) || 0 == string.Compare("int64", types[it], StringComparison.OrdinalIgnoreCase) || 0 == string.Compare("nLong", types[it], StringComparison.OrdinalIgnoreCase) ) { bool isnullable = (char.ToLowerInvariant(types[it][0]) == 'n'); MySpace.DataMining.DistributedObjects.recordset rs; if (isnullable) { if (0 != recbuf[0]) { Console.Write("NULL"); continue; } else { rs = MySpace.DataMining.DistributedObjects.recordset.Prepare( MySpace.DataMining.DistributedObjects.ByteSlice.Prepare( recbuf, offset + 
1, typesizes[it] - 1)); } } else { rs = MySpace.DataMining.DistributedObjects.recordset.Prepare( MySpace.DataMining.DistributedObjects.ByteSlice.Prepare( recbuf, offset, typesizes[it])); } long val = rs.GetLong(); if (isnullable) { val = (long)MySpace.DataMining.DistributedObjects.Entry.ToUInt64(val); } Console.Write(val); } else if (0 == string.Compare("nDateTime", types[it], StringComparison.OrdinalIgnoreCase) ) { bool isnullable = (char.ToLowerInvariant(types[it][0]) == 'n'); MySpace.DataMining.DistributedObjects.recordset rs; if (isnullable) { if (0 != recbuf[0]) { Console.Write("NULL"); continue; } else { rs = MySpace.DataMining.DistributedObjects.recordset.Prepare( MySpace.DataMining.DistributedObjects.ByteSlice.Prepare( recbuf, offset + 1, typesizes[it] - 1)); } } else { rs = MySpace.DataMining.DistributedObjects.recordset.Prepare( MySpace.DataMining.DistributedObjects.ByteSlice.Prepare( recbuf, offset, typesizes[it])); } try { DateTime dtval = rs.GetDateTime(); string val = "\"" + dtval + "\""; Console.Write(val); } catch { Console.Write("<error>"); } } else if (0 == string.Compare("double", types[it], StringComparison.OrdinalIgnoreCase) || 0 == string.Compare("nDouble", types[it], StringComparison.OrdinalIgnoreCase) ) { bool isnullable = (char.ToLowerInvariant(types[it][0]) == 'n'); MySpace.DataMining.DistributedObjects.recordset rs; if (isnullable) { if (0 != recbuf[0]) { Console.Write("NULL"); continue; } else { rs = MySpace.DataMining.DistributedObjects.recordset.Prepare( MySpace.DataMining.DistributedObjects.ByteSlice.Prepare( recbuf, offset + 1, typesizes[it] - 1)); } } else { rs = MySpace.DataMining.DistributedObjects.recordset.Prepare( MySpace.DataMining.DistributedObjects.ByteSlice.Prepare( recbuf, offset, typesizes[it])); } double val = Math.Round(rs.GetDouble(), 4); Console.Write(val); } else if (types[it].StartsWith("nChar", StringComparison.OrdinalIgnoreCase) ) { bool isnullable = (char.ToLowerInvariant(types[it][0]) == 'n'); 
MySpace.DataMining.DistributedObjects.recordset rs; if (isnullable) { if (0 != recbuf[0]) { Console.Write("NULL"); continue; } else { rs = MySpace.DataMining.DistributedObjects.recordset.Prepare( MySpace.DataMining.DistributedObjects.ByteSlice.Prepare( recbuf, offset + 1, typesizes[it] - 1)); } } else { throw new NotSupportedException(); } try { byte[] sbbuf = new byte[rs.Length]; rs.GetBytes(sbbuf, 0, sbbuf.Length); string s = Encoding.Unicode.GetString(sbbuf).TrimEnd('\0'); string val = "\"" + s.Replace("\\", "\\\\").Replace("\"", "\\\"") + "\""; Console.Write(val); } catch { Console.Write("<error>"); } } else { // Unhandled type: Console.Write("<{0}>", types[it].ToUpper()); } } if (typesizes.Count > types.Length) { if (typesizes.Count > 0) { Console.Write(' '); } Console.Write("<clipped>"); } /*else if(totalsizerequested < RecordLength) { if (typesizes.Count > 0) { Console.Write(' '); } Console.Write("<{0}>", RecordLength - totalsizerequested); }*/ Console.WriteLine(); } } } } if (partspec.Length > 0) { break; } if (lc <= 0) { break; } } if (lc > 0) { ConsoleFlush(); if (!foundpart) { Console.Error.WriteLine(" Specified data-node chunk not found"); } else { if (partspec.Length > 0) { Console.Error.WriteLine(" Hit end of specified data-node chunk"); } else { //Console.Error.WriteLine(" Hit end of file"); } } } } else { Console.Error.WriteLine(" No data-node chunks"); } } break; case "rename": case "ren": case "move": case "mv": if (!dfs.DfsConfigExists(DFSXMLPATH)) { Console.Error.WriteLine("DFS not setup; use: {0} format", appname); SetFailure(); return; } if (args.Length < 2) { Console.Error.WriteLine("Error: dfs rename command needs arguments: <dfspath> <dfspath>"); SetFailure(); return; } DfsRename(args); break; case "swap": if (!dfs.DfsConfigExists(DFSXMLPATH)) { Console.Error.WriteLine("DFS not setup; use: {0} format", appname); SetFailure(); return; } if (args.Length < 2) { Console.Error.WriteLine("Error: dfs swap command needs arguments: <dfspath> 
<dfspath>"); SetFailure(); return; } DfsSwap(args); break; case "countparts": { if (args.Length == 0) { Console.Error.WriteLine("Error: countparts command needs argument: <dfspath>"); SetFailure(); return; } if (!dfs.DfsConfigExists(DFSXMLPATH)) { Console.Error.WriteLine("DFS not setup; use: {0} format", appname); SetFailure(); return; } string dfsfilename = args[0]; dfs dc = LoadDfsConfig(); dfs.DfsFile df = DfsFindAny(dc, dfsfilename); if (null == df) { Console.Error.WriteLine("No such file: {0}", dfsfilename); return; } if (0 == string.Compare(df.Type, DfsFileTypes.NORMAL, StringComparison.OrdinalIgnoreCase) || 0 == string.Compare(df.Type, DfsFileTypes.BINARY_RECT, StringComparison.OrdinalIgnoreCase)) { Console.WriteLine(df.Nodes.Count); } else { Console.Error.WriteLine("countparts not supported for file of type '{0}'", df.Type); } } break; case "filesize": { if (args.Length == 0) { Console.Error.WriteLine("Error: filesize command needs argument: <dfspath>"); SetFailure(); return; } if (!dfs.DfsConfigExists(DFSXMLPATH)) { Console.Error.WriteLine("DFS not setup; use: {0} format", appname); SetFailure(); return; } string dfsfilename = args[0]; dfs dc = LoadDfsConfig(); dfs.DfsFile df = DfsFindAny(dc, dfsfilename); if (null == df) { Console.Error.WriteLine("No such file: {0}", dfsfilename); SetFailure(); return; } if (0 == string.Compare(df.Type, DfsFileTypes.NORMAL, StringComparison.OrdinalIgnoreCase) || 0 == string.Compare(df.Type, DfsFileTypes.BINARY_RECT, StringComparison.OrdinalIgnoreCase)) { Console.WriteLine(df.Size); // Byte count. Console.WriteLine(Surrogate.GetFriendlyByteSize(df.Size)); // Friendly size. 
} else { Console.Error.WriteLine("filesize not supported for file of type '{0}'", df.Type); SetFailure(); return; } } break; case "ls": case "dir": if (!dfs.DfsConfigExists(DFSXMLPATH)) { Console.Error.WriteLine("DFS not setup; use: {0} format", appname); SetFailure(); return; } { int iarg = 0; bool showhidden = false; if (args.Length > iarg) { if ("-h" == args[iarg]) { iarg++; showhidden = true; } } bool filterspecified = args.Length > iarg; string filter = filterspecified ? args[iarg++] : "*"; if (filter.StartsWith("dfs://", StringComparison.OrdinalIgnoreCase)) { filter = filter.Substring(6); } string srex = Surrogate.WildcardRegexString(filter); System.Text.RegularExpressions.Regex rex = new System.Text.RegularExpressions.Regex(srex, System.Text.RegularExpressions.RegexOptions.IgnoreCase); if (!filterspecified) // Only show [metadata] info if no specific filter. { long dcsize = dfs.GetDfsConfigSize(DFSXMLPATH); string colorcode = "", endcolorcode = ""; ConsoleColor oldcolor = Console.ForegroundColor; Console.ForegroundColor = ConsoleColor.Cyan; if (isdspace) { colorcode = "\u00012"; endcolorcode = "\u00010"; } Console.Write(" {0}{1,-40}{2} ", colorcode, "[metadata]", endcolorcode); Console.WriteLine("{0,10}", GetFriendlyByteSize(dcsize)); Console.ForegroundColor = oldcolor; } long totalbytes = 0; dfs dc = LoadDfsConfig(); string about = ""; int matchedCount = 0; for (int i = 0; i < dc.Files.Count; i++) { if (rex.IsMatch(dc.Files[i].Name)) { bool isnormalfile = 0 == string.Compare(dc.Files[i].Type, DfsFileTypes.NORMAL); bool iszballfile = 0 == string.Compare(dc.Files[i].Type, DfsFileTypes.DELTA); bool isjobsfile = 0 == string.Compare(dc.Files[i].Type, DfsFileTypes.JOB); bool isdllfile = 0 == string.Compare(dc.Files[i].Type, DfsFileTypes.DLL); bool istbl = false; int RecordLength = dc.Files[i].RecordLength; if (RecordLength > 0 || RecordLength == -2) { isnormalfile = true; // For most purposes here it's the same. 
} /*if (isnormalfile && dc.Files[i].Name.EndsWith(".tbl", StringComparison.OrdinalIgnoreCase)) { istbl = true; }*/ string ssize = " "; if (isnormalfile || isdllfile) // jobs file doesn't update the file size yet! { ssize = GetFriendlyByteSize(dc.Files[i].Size); if (dc.Files[i].Size < 0) { ssize = "?"; } } if (dc.Files[i].Size >= 0) { totalbytes += dc.Files[i].Size; } else { about = "~"; } ConsoleColor oldcolor = ConsoleColor.Gray; // ... string colorcode = "", endcolorcode = ""; int morespace = 0; if (iszballfile) { oldcolor = Console.ForegroundColor; Console.ForegroundColor = ConsoleColor.Cyan; if (isdspace) { colorcode = "\u00012"; endcolorcode = "\u00010"; } } else if (isjobsfile) { if (isdspace) { colorcode = "\u00013"; endcolorcode = "\u00010"; } } else if (isdllfile || istbl) { oldcolor = Console.ForegroundColor; Console.ForegroundColor = ConsoleColor.Cyan; if (isdspace) { colorcode = "\u00012[\u00010"; endcolorcode = "\u00012]\u00010"; } Console.ForegroundColor = oldcolor; morespace += 2; } if (RecordLength > 0 || RecordLength == -2) { string srl = RecordLength > 0 ? 
RecordLength.ToString() : "?"; endcolorcode += "\u00012@" + srl + "\u00010"; morespace += 1 + srl.Length; } if (dc.Files[i].MemCache != null) { endcolorcode += " \u00012mc\u00010"; morespace += 3; } { int iddx = dc.Files[i].Name.IndexOf(".$"); if (-1 != iddx) { iddx = dc.Files[i].Name.IndexOf(".$", iddx + 2); if (-1 != iddx) { if (showhidden) { Console.ForegroundColor = ConsoleColor.Red; colorcode = "\u00014"; endcolorcode = "\u00010"; } else { continue; } } } } if (isdllfile || istbl || RecordLength > 0 || RecordLength == -2) { Console.Write(" {0}{1}{2} ", colorcode, dc.Files[i].Name, endcolorcode); int spacelen = 40 - dc.Files[i].Name.Length - morespace; if (spacelen > 0) { Console.Write(new string(' ', spacelen)); } } else { Console.Write(" {0}{1,-40}{2} ", colorcode, dc.Files[i].Name, endcolorcode); } if (iszballfile || isjobsfile) { Console.ForegroundColor = oldcolor; } Console.Write("{0,10}", ssize); if (isnormalfile) { Console.Write(" ({0} parts)", dc.Files[i].Nodes.Count); } Console.WriteLine(); matchedCount++; } } Console.WriteLine(" {0} Distributed Files", matchedCount); Console.WriteLine(" {0}{1} Used (data files)", about, GetFriendlyByteSize(totalbytes)); { long freespace = 0; long freemin = long.MaxValue; int replicationFactor = dc.Replication; string[] fslaves = dc.Slaves.SlaveList.Split(';'); //for (int fsi = 0; fsi < fslaves.Length; fsi++) MySpace.DataMining.Threading.ThreadTools<string>.Parallel( new Action<string>( delegate(string fslave) { //string fslave = fslaves[fsi]; try { long x = (long)GetDiskFreeBytes(Surrogate.NetworkPathForHost(fslave)); if (replicationFactor > 1) { x = x / replicationFactor; } lock (fslaves) { if (x < freemin) { freemin = x; } freespace += x; } } catch (Exception e) { LogOutputToFile("Error while calculating DFS disk spage usage: " + e.ToString()); /*if (!dc.IsFailoverEnabled) { throw; }*/ } } ), fslaves, fslaves.Length); Console.WriteLine(" {0} Free ({1} node avg; {2} node min)", GetFriendlyByteSize(freespace), 
GetFriendlyByteSize(freespace / fslaves.Length), GetFriendlyByteSize((freemin == long.MaxValue) ? 0 : freemin) ); } } break; case "copy": case "cp": if (args.Length < 2) { Console.Error.WriteLine("Error: dfs copy command needs arguments: <from-path> <to-path>"); SetFailure(); return; } { bool isdfs0 = args[0].StartsWith("dfs://", StringComparison.OrdinalIgnoreCase); bool isdfs1 = args[1].StartsWith("dfs://", StringComparison.OrdinalIgnoreCase); if (isdfs0 && isdfs1) { Console.Error.WriteLine("Error: dfs copy DFS-to-DFS not supported yet"); SetFailure(); return; } if (!isdfs0 && !isdfs1) { //Console.Error.WriteLine("Error: dfs copy local-to-local not supported"); Console.Error.WriteLine("Error: dfs copy must contain at least one dfs://"); SetFailure(); return; } if (isdfs0) { DfsGet(args); } else //if (isdfs1) { DfsPut(args); } } break; case "get": DfsGet(args); break; case "getbinary": DfsGetBinary(args); break; case "put": DfsPut(args); break; case "fput": DfsFPut(args); break; case "fget": DfsFGet(args); break; case "copyto": DfsCopyTo(args); break; case "putbinary": DfsPutBinary(args); break; case "bulkget": DfsBulkGet(args); break; case "bulkput": DfsBulkPut(args); break; case "shuffle": DfsShuffle(args); break; case "getjobs": if (args.Length < 1) { Console.Error.WriteLine("Argument expected: <localpath.dj>"); SetFailure(); return; } if (new System.IO.DirectoryInfo(args[0]).Exists) { Console.Error.WriteLine("Argument cannot be a directory. 
Argument expected: <localpath.dj>"); SetFailure(); return; } EnsureNetworkPath(args[0]); //using (LockDfsMutex()) { dfs dc = LoadDfsConfig(); int count = 0; using (System.IO.StreamWriter sw = new System.IO.StreamWriter(args[0])) { for (int i = 0; i < dc.Files.Count; i++) { dfs.DfsFile f = dc.Files[i]; if (0 == string.Compare(f.Type, DfsFileTypes.JOB, StringComparison.OrdinalIgnoreCase)) { try { if (f.Nodes.Count < 1) { throw new Exception("Error: -exec jobs file not in correct jobs DFS format"); } dfs.DfsFile.FileNode fn = dc.Files[i].Nodes[0]; string content = System.IO.File.ReadAllText(Surrogate.NetworkPathForHost(fn.Host.Split(';')[0]) + @"\" + fn.Name); sw.Write(f.Name); sw.Write('\0'); sw.Write(f.Type); sw.Write('\0'); sw.Write(content); sw.Write('\0'); count++; } catch (Exception e) { Console.Error.WriteLine("Unable to get job '{0}': {1}", f.Name, e.Message); } } } } Console.WriteLine("Saved {0} jobs files to jobs archive '{1}'", count, args[0]); } break; case "putjobs": if (args.Length < 1) { Console.Error.WriteLine("Argument expected: <localpath.dj>"); SetFailure(); return; } EnsureNetworkPath(args[0]); { string[] segs = System.IO.File.ReadAllText(args[0]).Split('\0'); // 0: name, 1: type, 2: content, etc. 
int count = 0; for (int si = 0; si + 2 < segs.Length; si += 3) { try { string fname = segs[si + 0]; string ftype = segs[si + 1]; string fcontent = segs[si + 2]; if (0 != string.Compare(ftype, DfsFileTypes.JOB, StringComparison.OrdinalIgnoreCase)) { throw new Exception("File '" + fname + "' is of type '" + ftype + "', not of expected type '" + DfsFileTypes.JOB + "'"); } else { if (!DfsPutJobsFileContent(fname, fcontent)) { throw new Exception("Unable to write job '" + fname + "' to DFS; ensure that the file does not already exist in DFS"); } } Console.WriteLine(" {0}", fname); count++; } catch (Exception e) { Console.Error.WriteLine("Problem importing job: {0}", e.Message); } } Console.WriteLine("Done importing {0} jobs files into DFS", count); } break; case "combine": // Note: datanode chunk file header keeps the old file offset. { //System.Threading.Thread.Sleep(8000); using (LockDfsMutex()) { dfs dc = LoadDfsConfig(); List<string> inputs = new List<string>(); string outfn = null; bool nextoutfn = false; foreach (string arg in args) { if (nextoutfn) { if (null != outfn) { throw new Exception("Too many output files"); } outfn = arg; } else { if ("+" == arg) { nextoutfn = true; } else if (arg.Length > 0 && '+' == arg[0]) { if (null != outfn) { throw new Exception("Too many output files"); } outfn = arg.Substring(1); } else { inputs.AddRange(SplitInputPaths(dc, arg)); } } } if (0 == inputs.Count) { Console.Error.WriteLine("No input files to combine"); SetFailure(); return; } bool outisin = false; if (null == outfn) { outfn = inputs[inputs.Count - 1]; outisin = true; } if (outfn.StartsWith("dfs://", StringComparison.OrdinalIgnoreCase)) { outfn = outfn.Substring(6); } string reason = ""; if (dfs.IsBadFilename(outfn, out reason)) { Console.Error.WriteLine("Invalid output file: {0}", reason); SetFailure(); return; } if (null != DfsFindAny(dc, outfn)) { if (outisin) { if (!QuietMode && InteractiveMode) { Console.Write("The specified file already exists in DFS; overwrite? 
"); ConsoleFlush(); for (; ; ) { string s = Console.ReadLine(); char ch = '\0'; if (0 != s.Length) { ch = char.ToUpper(s[0]); } if ('N' == ch) { Console.WriteLine(" Aborted by user"); return; } else if ('Y' == ch) { break; // ! } else { Console.Write("Overwrite, yes or no? "); ConsoleFlush(); } } } } else { Console.Error.WriteLine("Output file for combine already exists: {0}", outfn); SetFailure(); return; } } //if (verbose) { //Console.WriteLine("Combining {0} input files into file '{1}'", inputs.Count, outfn); } { dfs.DfsFile dfout = new dfs.DfsFile(); dfout.Nodes = new List<dfs.DfsFile.FileNode>(inputs.Count * 32); dfout.Name = outfn; dfout.Size = 0; int RecordLength = int.MinValue; for (int i = 0; i < inputs.Count; i++) { dfs.DfsFile df = DfsFindAny(dc, inputs[i]); if (null == df) { Console.Error.WriteLine("Combine error: input file '{0}' does not exist in DFS or was included more than once", inputs[i]); SetFailure(); return; } if (0 != string.Compare(df.Type, DfsFileTypes.NORMAL, StringComparison.OrdinalIgnoreCase) && 0 != string.Compare(df.Type, DfsFileTypes.BINARY_RECT, StringComparison.OrdinalIgnoreCase)) { Console.Error.WriteLine("DFS file '{0}' is not of expected type", df.Name); SetFailure(); return; } { int reclen = df.RecordLength; if (int.MinValue != RecordLength && reclen != RecordLength) { Console.Error.WriteLine("Error: Record lengths of all input files must match; DFS file '{0}' has record length of {1}, expected record length of {2}", df.Name, (-1 == reclen) ? "<none>" : reclen.ToString(), (-1 == RecordLength) ? "<none>" : RecordLength.ToString()); SetFailure(); return; } RecordLength = reclen; #if DEBUG if (int.MinValue == RecordLength) { throw new Exception("DEBUG: (int.MinValue == RecordLength) after first file"); } #endif } int j = dfout.Nodes.Count; dfout.Nodes.AddRange(df.Nodes); for (; j < dfout.Nodes.Count; j++) { dfout.Nodes[j].Position = dfout.Size; // ! dfout.Size += dfout.Nodes[j].Length; // ! 
} dc.Files.Remove(df); // Ok since a failure will bail this out entirely, since the next DFS read re-loads. } if (RecordLength > 0) { dfout.XFileType = DfsFileTypes.BINARY_RECT + "@" + RecordLength.ToString(); } dc.Files.Add(dfout); UpdateDfsXml(dc); // ! //if (verbose) { Console.WriteLine("Combined {0} input files into file '{1}' of resulting size {2}", inputs.Count, outfn, GetFriendlyByteSize(dfout.Size)); } } } } break; case "info": case "information": { dfs dc = LoadDfsConfig(); if (null == dc) { Console.Error.WriteLine(" No " + dfs.DFSXMLNAME); SetFailure(); } else { string[] slaves = dc.Slaves.SlaveList.Split(',', ';'); bool mt = false; bool shortname = true; List<string> largs = new List<string>(); for (int i = 0; i < args.Length; i++) { string arg = args[i].ToLower(); switch(arg) { case "-mt": mt = true; break; case "-s": shortname = true; break; default: largs.Add(arg); break; } } if (largs.Count == 0) { if (mt) { Dictionary<string, string> netpaths = new Dictionary<string, string>(slaves.Length); Dictionary<string, string> hostnames = new Dictionary<string, string>(slaves.Length); Dictionary<string, long> freesp = new Dictionary<string, long>(slaves.Length); MySpace.DataMining.Threading.ThreadTools<string>.Parallel( new Action<string>(delegate(string host) { host = host.ToUpper(); lock (hostnames) { if (shortname) { hostnames[host] = host; } else { hostnames[host] = IPAddressUtil.GetName(host); } string np = Surrogate.NetworkPathForHost(host); netpaths[host] = np; freesp[host] = (long)GetDiskFreeBytes(np); } }), slaves, slaves.Length); { string dfsfmt = "machines="; for (int i = 0; i < slaves.Length; i++) { if (i != 0) { dfsfmt += ","; } dfsfmt += hostnames[slaves[i].ToUpper()]; } dfsfmt += " processes=" + dc.Blocks.TotalCount.ToString(); if (dc.DataNodeBaseSize != dfs.DataNodeBaseSize_default) { dfsfmt += " datanodebasesize=" + dc.DataNodeBaseSize.ToString(); } Console.WriteLine("[DFS information]"); Console.WriteLine(" Format: {0}", dfsfmt); 
Console.WriteLine(" Files: {0}", dc.Files.Count); } MySpace.DataMining.Threading.ThreadTools<string>.Parallel( new Action<string>(delegate(string host) { host = host.ToUpper(); try { string netpath = netpaths[host]; System.IO.DirectoryInfo netdi = new System.IO.DirectoryInfo(netpath); long zdcount = 0; long zdsizes = 0; foreach (System.IO.FileInfo fi in (netdi).GetFiles("zd*.zd")) { zdcount++; zdsizes += fi.Length; } long sbcount = 0; long sbsizes = 0; foreach (System.IO.FileInfo fi in (netdi).GetFiles(GetSnowballFilesWildcard("*"))) { sbcount++; sbsizes += fi.Length; } long diskfree = freesp[host]; lock (hostnames) { Console.WriteLine(" {0}:", hostnames[host]); Console.WriteLine(" {0} data file parts", zdcount); Console.WriteLine(" {0} cache parts", sbcount); Console.WriteLine(" {0} total used", GetFriendlyByteSize(zdsizes + sbsizes)); Console.WriteLine(" {0} free", GetFriendlyByteSize(diskfree)); } } catch { string reason; if (Surrogate.IsHealthySlaveMachine(host, out reason)) { reason = "cannot query"; } lock (hostnames) { Console.WriteLine(" Error: {0}", reason); } } }), slaves, slaves.Length); } else { string[] netpaths = new string[slaves.Length]; Dictionary<int, long> freesp = new Dictionary<int, long>(); Console.WriteLine("[DFS information]"); { string dfsfmt = "machines="; for (int i = 0; i < slaves.Length; i++) { if (i != 0) { dfsfmt += ","; } dfsfmt += shortname ? 
slaves[i].ToUpper() : IPAddressUtil.GetName(slaves[i]); string np = Surrogate.NetworkPathForHost(slaves[i]); netpaths[i] = np; freesp.Add(i, (long)GetDiskFreeBytes(np)); } dfsfmt += " processes=" + dc.Blocks.TotalCount.ToString(); if (dc.DataNodeBaseSize != dfs.DataNodeBaseSize_default) { dfsfmt += " datanodebasesize=" + dc.DataNodeBaseSize.ToString(); } Console.WriteLine(" Format: {0}", dfsfmt); } Console.WriteLine(" Files: {0}", dc.Files.Count); List<KeyValuePair<int, long>> sfreesp = new List<KeyValuePair<int, long>>(freesp); sfreesp.Sort( delegate(KeyValuePair<int, long> firstPair, KeyValuePair<int, long> nextPair) { return -firstPair.Value.CompareTo(nextPair.Value); } ); foreach (KeyValuePair<int, long> item in sfreesp) { int si = item.Key; Console.WriteLine(" {0}:", shortname ? slaves[si].ToUpper() : IPAddressUtil.GetName(slaves[si])); try { string netpath = netpaths[si]; System.IO.DirectoryInfo netdi = new System.IO.DirectoryInfo(netpath); long zdcount = 0; long zdsizes = 0; foreach (System.IO.FileInfo fi in (netdi).GetFiles("zd*.zd")) { zdcount++; zdsizes += fi.Length; } long sbcount = 0; long sbsizes = 0; foreach (System.IO.FileInfo fi in (netdi).GetFiles(GetSnowballFilesWildcard("*"))) { sbcount++; sbsizes += fi.Length; } long diskfree = item.Value; Console.WriteLine(" {0} data file parts", zdcount); Console.WriteLine(" {0} cache parts", sbcount); Console.WriteLine(" {0} total used", GetFriendlyByteSize(zdsizes + sbsizes)); Console.WriteLine(" {0} free", GetFriendlyByteSize(diskfree)); } catch { string reason; if (Surrogate.IsHealthySlaveMachine(slaves[si], out reason)) { reason = "cannot query"; } Console.WriteLine(" Error: {0}", reason); } } } } else { if (-1 != largs[0].IndexOf(':')) { string[] specs = largs[0].Split(':'); // <file>:<host> dfs.DfsFile df = DfsFindAny(dc, specs[0]); if (null == df) { Console.Error.WriteLine(" No such file: {0}", specs[0]); } else if (0 == string.Compare(df.Type, DfsFileTypes.NORMAL, true) || 0 == string.Compare(df.Type, 
DfsFileTypes.BINARY_RECT, true)) { bool HasSamples = df.RecordLength < 1; Console.WriteLine("[DFS file information]"); Console.WriteLine(" DFS File: {0}", df.Name); string shost = ""; { Console.WriteLine(" Host: {0}", specs[1].ToUpper()); shost = IPAddressUtil.GetName(specs[1]); } /*string partspec = ""; if (specs.Length >= 3) { partspec = specs[2]; }*/ { foreach (dfs.DfsFile.FileNode fn in df.Nodes) { int replindex = 0; if (shost.Length > 0) { { string[] fnxshosts = fn.Host.Split(';'); for (int i = 0; i < fnxshosts.Length; i++) { string fnshost = IPAddressUtil.GetName(fnxshosts[i]); if (0 == string.Compare(shost, fnshost, StringComparison.OrdinalIgnoreCase)) { replindex = i + 1; break; } } if (replindex < 1) { continue; } } } /*if (partspec.Length > 0) { if (0 != string.Compare(partspec, fn.Name, StringComparison.OrdinalIgnoreCase)) { continue; } }*/ try { if (HasSamples) { Console.WriteLine(" {0} [{3}] ({1} data; {2} samples)", fn.Name, GetFriendlyByteSize((new System.IO.FileInfo(dfs.MapNodeToNetworkPath(fn))).Length), GetFriendlyByteSize((new System.IO.FileInfo(dfs.MapNodeToNetworkPath(fn, true))).Length), replindex ); } else { Console.WriteLine(" {0} [{2}] ({1} data)", fn.Name, GetFriendlyByteSize((new System.IO.FileInfo(dfs.MapNodeToNetworkPath(fn))).Length), replindex ); } } catch (Exception e) { LogOutputToFile(e.ToString()); Console.WriteLine(" {0}", fn.Name); } } } } else { Console.Error.WriteLine("DFS file '{0}' is not of expected type", df.Type); SetFailure(); return; } } else { dfs.DfsFile df = DfsFindAny(dc, largs[0]); if (null == df) { Console.Error.WriteLine(" No such file: {0}", largs[0]); } else { Console.WriteLine("[DFS file information]"); if (0 == string.Compare(df.Type, DfsFileTypes.NORMAL, true) || 0 == string.Compare(df.Type, DfsFileTypes.BINARY_RECT, true)) { bool HasSamples = df.RecordLength < 1 && df.RecordLength != -2; Console.WriteLine(" DFS File: {0}", df.Name); int RecordLength = df.RecordLength; if (RecordLength > 0 || RecordLength == 
-2) { Console.WriteLine(" Record Length: {0}", (RecordLength > 0 ? RecordLength.ToString() : "?")); } Console.WriteLine(" Size: {0} ({1})", GetFriendlyByteSize(df.Size), df.Size); if (HasSamples) { long samplesize = 0; MySpace.DataMining.Threading.ThreadTools<dfs.DfsFile.FileNode>.Parallel( new Action<dfs.DfsFile.FileNode>(delegate(dfs.DfsFile.FileNode fn) { try { System.IO.FileInfo fi = new System.IO.FileInfo(dfs.MapNodeToNetworkPath(fn, true)); int ss = (int)fi.Length; System.Threading.Interlocked.Add(ref samplesize, ss); } catch { } }), df.Nodes, slaves.Length); string avg = "0"; if (df.Nodes.Count > 0) { avg = GetFriendlyByteSize(samplesize / df.Nodes.Count); } Console.WriteLine(" Sample Size: {0} ({1} avg)", GetFriendlyByteSize(samplesize), avg); } Console.WriteLine(" Parts: {0}", df.Nodes.Count); { Dictionary<string, int> partsonhosts = new Dictionary<string, int>(); Dictionary<string, long> zdsizeonhosts = new Dictionary<string, long>(); Dictionary<string, long> zsasizeonhosts = new Dictionary<string, long>(); for (int i = 0; i < df.Nodes.Count; i++) { int value; long zdsize; long zsasize; string[] xkeys = df.Nodes[i].Host.Split(';'); for (int ik = 0; ik < xkeys.Length; ik++) { string key = xkeys[ik].ToUpper(); if (partsonhosts.ContainsKey(key)) { value = partsonhosts[key]; zdsize = zdsizeonhosts[key]; zsasize = zsasizeonhosts[key]; } else { value = 0; zdsize = 0; zsasize = 0; } value++; try { zdsize += (new System.IO.FileInfo(dfs.MapNodeToNetworkPath(df.Nodes[i]))).Length; } catch { } if (HasSamples) { try { zsasize += (new System.IO.FileInfo(dfs.MapNodeToNetworkPath(df.Nodes[i], true))).Length; } catch { } } partsonhosts[key] = value; zdsizeonhosts[key] = zdsize; zsasizeonhosts[key] = zsasize; } } foreach (KeyValuePair<string, int> kvp in partsonhosts) { if (HasSamples) { Console.WriteLine(" {0} chunks on {1} ({2} data; {3} samples)", kvp.Value, kvp.Key, GetFriendlyByteSize(zdsizeonhosts[kvp.Key]), GetFriendlyByteSize(zsasizeonhosts[kvp.Key]) ); } else { 
Console.WriteLine(" {0} chunks on {1} ({2} data)", kvp.Value, kvp.Key, GetFriendlyByteSize(zdsizeonhosts[kvp.Key]) ); } } } } else if (0 == string.Compare(df.Type, "zsb", true)) { Console.WriteLine(" DFS Delta: {0}", df.Name); long sbsz = 0; int sbparts = 0; { string fnwc = GetSnowballFilesWildcard(df.Name); //string[] slaves = dc.Slaves.SlaveList.Split(',', ';'); try { for (int si = 0; si < slaves.Length; si++) { string netpath = Surrogate.NetworkPathForHost(slaves[si]); foreach (System.IO.FileInfo fi in (new System.IO.DirectoryInfo(netpath)).GetFiles(fnwc)) { sbparts++; sbsz += fi.Length; } } } catch { sbparts = -1; sbsz = -1; } } Console.WriteLine(" Size: {0} ({1})", (sbsz >= 0) ? GetFriendlyByteSize(sbsz) : "?", (sbsz >= 0) ? sbsz.ToString() : "?"); Console.WriteLine(" Parts: {0}", (sbparts >= 0) ? sbparts.ToString() : "?"); Console.WriteLine(" Cached Inputs: {0}", df.Nodes.Count); ConsoleColor oldcolor = ConsoleColor.Gray; string colorcode = ""; string nodeName = ""; foreach (dfs.DfsFile.FileNode fn in df.Nodes) { Console.ForegroundColor = oldcolor; Console.Write(" Input:"); if (fn.Name.StartsWith(invalidatedCacheToken)) { nodeName = fn.Name.Substring(invalidatedCacheToken.Length + 1); Console.ForegroundColor = ConsoleColor.DarkGray; if (isdspace) { colorcode = "\u00015"; } } else { nodeName = fn.Name; Console.ForegroundColor = oldcolor; colorcode = ""; } Console.WriteLine("{0}{1}{2}", colorcode, nodeName, colorcode.Length != 0 ? 
"\u00010" : ""); } Console.ForegroundColor = oldcolor; } else { Console.Error.WriteLine(" No info for file of type '{0}'", df.Type); } } } } } } break; case "partinfo": { if (args.Length < 1) { Console.Error.WriteLine("qizmt partinfo <partname>"); SetFailure(); } else { string nodename = args[0]; dfs dc = LoadDfsConfig(); string ownerfilename = null; dfs.DfsFile.FileNode fn = DfsFindFileNode(dc, nodename, out ownerfilename); if (fn == null) { Console.WriteLine("Part not found in dfs.xml"); } else { Console.WriteLine(); Console.WriteLine("Owner file name: {0}", ownerfilename); Console.WriteLine(); Console.WriteLine("Part paths in metadata:"); Console.WriteLine(); string[] nhosts = fn.Host.Split(';'); for (int hi = 0; hi < nhosts.Length; hi++) { Console.WriteLine(NetworkPathForHost(nhosts[hi]) + @"\" + fn.Name); Console.WriteLine(); } Console.WriteLine(); Console.WriteLine("Part paths in physical files:"); Console.WriteLine(); ConsoleColor oldcolor = Console.ForegroundColor; string colorcode = "\u00014"; string endcolorcode = "\u00010"; for (int hi = 0; hi < nhosts.Length; hi++) { string ppath = NetworkPathForHost(nhosts[hi]) + @"\" + fn.Name; if (!System.IO.File.Exists(ppath)) { Console.ForegroundColor = ConsoleColor.Red; Console.WriteLine("{0}{1} does not exist{2}", colorcode, ppath, endcolorcode); Console.ForegroundColor = oldcolor; } else { Console.WriteLine(ppath); } Console.WriteLine(); } } } } break; case "delchunk": { if (args.Length < 2) { Console.Error.WriteLine("qizmt delchunk <chunkname> <host>"); SetFailure(); return; } else { string nodename = args[0]; string delhost = args[1]; dfs.DfsFile.FileNode fn = null; bool metaremoved = false; using (LockDfsMutex()) { dfs dc = LoadDfsConfig(); string ownerfilename = null; fn = DfsFindFileNode(dc, nodename, out ownerfilename); if (fn == null) { Console.WriteLine("Part not found in dfs.xml"); return; } else { string[] nhosts = fn.Host.Split(';'); string goodhosts = ""; for (int hi = 0; hi < nhosts.Length; hi++) { 
if (string.Compare(nhosts[hi], delhost, true) != 0) { if (goodhosts.Length > 0) { goodhosts += ';'; } goodhosts += nhosts[hi]; } else { metaremoved = true; } } if (goodhosts.Length > 0) { fn.Host = goodhosts; } else { //remove this node all together dfs.DfsFile df = DfsFindAny(dc, ownerfilename); if (df == null) { Console.Error.WriteLine("Cannot locate owner file."); return; } else { long filesize = 0; List<dfs.DfsFile.FileNode> goodnodes = new List<dfs.DfsFile.FileNode>(df.Nodes.Count - 1); for (int ni = 0; ni < df.Nodes.Count; ni++) { dfs.DfsFile.FileNode thisnode = df.Nodes[ni]; if (string.Compare(thisnode.Name, nodename, true) != 0) { goodnodes.Add(thisnode); thisnode.Position = filesize; filesize += thisnode.Length; } } df.Size = filesize; df.Nodes = goodnodes; } } UpdateDfsXml(dc); } } bool physicalfileremoved = false; try { string ppath = NetworkPathForHost(delhost) + @"\" + fn.Name; if (System.IO.File.Exists(ppath) || System.IO.File.Exists(ppath + ".zsa")) { System.IO.File.Delete(ppath); System.IO.File.Delete(ppath + ".zsa"); physicalfileremoved = true; } } catch { } Console.WriteLine("Chunk deleted successfully from host:"); if (metaremoved) { Console.WriteLine("Metadata removed"); } if (physicalfileremoved) { Console.WriteLine("Physical file deleted"); } } } break; case "\u0040format": case "format": { EnterAdminCmd(); bool verify = false; if (args.Length == 1 && (0 == string.Compare(args[0], "vacuum", true) || 0 == string.Compare(args[0], "vacuum=true", true))) { Console.Error.WriteLine("Use: {0} killall", appname); } else // Normal format... 
{ int blockcount = -1; int sortedblockcount = -1; string[] slavelist = null; int datanodebasesize = 0; int zmapblockcount = 0; int zblockcount = 0; //int zblockaddbuffersize = 0; //int zblockreadbuffersize = 0; int filebuffersizeoverride = 0; byte compresszmapblocks = 127; byte compressdfschunks = 127; int numargs = 0; int replication = 0; ulong btreeCapSize = 0; int logexechistory = 0; int cooktimeout = -1; int cookretries = -1; bool mt = false; bool metaonly = false; string metabackuplocation = null; int failovertimeout = -1; int failoverdocheck = -1; foreach (string arg in args) { string optname = "", optvalue = ""; { int oi = arg.IndexOf('='); if (-1 == oi) { optname = arg; optvalue = ""; } else { optname = arg.Substring(0, oi); optvalue = arg.Substring(oi + 1); } } numargs++; switch (optname.ToLower()) { case "blocks": case "processes": case "groupedprocesses": blockcount = int.Parse(optvalue); break; case "sortedprocesses": sortedblockcount = int.Parse(optvalue); break; case "slaves": case "machines": if (optvalue[0] == '@') { slavelist = Surrogate.GetHostsFromFile(optvalue.Substring(1)); } else { slavelist = optvalue.Split(';', ','); } break; case "replication": case "replicationfactor": replication = int.Parse(optvalue); break; case "datanodebasesize": datanodebasesize = ParseCapacity(optvalue); break; case "zmapblockcount": zmapblockcount = int.Parse(optvalue); break; case "zblockcount": zblockcount = int.Parse(optvalue); break; case "zblockaddbuffersize": //zblockaddbuffersize = ParseCapacity(optvalue); Console.Error.WriteLine("zblockaddbuffersize no longer supported, use FileBufferSizeOverride"); SetFailure(); return; case "zblockreadbuffersize": //zblockreadbuffersize = ParseCapacity(optvalue); Console.Error.WriteLine("zblockreadbuffersize no longer supported, use FileBufferSizeOverride"); SetFailure(); return; case "filebuffersizeoverride": filebuffersizeoverride = ParseCapacity(optvalue); break; case "compresszmapblocks": switch (optvalue.ToLower()) { 
case "true": case "1": compresszmapblocks = 1; break; case "false": case "0": compresszmapblocks = 0; break; default: throw new Exception("Unknown value for 'compresszmapblocks'"); } break; case "compressdfschunks": switch (optvalue.ToLower()) { case "true": case "1": compressdfschunks = 1; break; case "false": case "0": compressdfschunks = 0; break; default: throw new Exception("Unknown value for 'compressdfschunks'"); } break; case "btreecapsize": btreeCapSize = (ulong)AELight.ParseLongCapacity((optvalue)); break; case "logexechistory": logexechistory = int.Parse(optvalue); break; case "vacuum": Console.Error.WriteLine("Error: 'vacuum' cannot be used with other options", arg); return; case "?": numargs--; break; case "verify": switch (optvalue.ToLower()) { case "true": case "1": verify = true; break; case "false": case "0": verify = false; break; default: throw new Exception("Unknown value for 'Verify'"); } break; case "cooktimeout": cooktimeout = int.Parse(optvalue); break; case "cookretries": cookretries = int.Parse(optvalue); break; case "multithreaded": switch (optvalue.ToLower()) { case "true": case "1": mt = true; break; case "false": case "0": mt = false; break; default: throw new Exception("Unknown value for 'Multithreaded'"); } break; case "metaonly": switch (optvalue.ToLower()) { case "true": case "1": metaonly = true; break; case "false": case "0": metaonly = false; break; default: throw new Exception("Unknown value for 'MetaOnly'"); } break; case "metabackuplocation": case "metabackup": metabackuplocation = optvalue; break; case "failovertimeout": failovertimeout = int.Parse(optvalue); break; case "failoverdocheck": failoverdocheck = int.Parse(optvalue); break; default: Console.Error.WriteLine("Error: unknown option for dfs format: {0}", arg); return; } } if (0 == numargs) { Console.Error.WriteLine("Format arguments:"); Console.Error.WriteLine(" Machines=<host1>[,<host2>...]"); Console.Error.WriteLine(" [Processes=<num>]"); //Console.Error.WriteLine(" 
[SortedProcesses=<num>]"); Console.Error.WriteLine(" [Replication=<num>]"); Console.Error.WriteLine(" [DataNodeBaseSize=<size>]"); //Console.Error.WriteLine(" [ZMapBlockCount=<count>]"); Console.Error.WriteLine(" [ZBlockCount=<size>]"); //Console.Error.WriteLine(" [ZBlockAddBufferSize=<size>]"); //Console.Error.WriteLine(" [ZBlockReadBufferSize=<size>]"); Console.Error.WriteLine(" [FileBufferSizeOverride=<size>]"); Console.Error.WriteLine(" [CompressZMapBlocks=<bool>]"); Console.Error.WriteLine(" [CompressDfsChunks=<bool>]"); Console.Error.WriteLine(" [LogExecHistory=<num>]"); Console.Error.WriteLine(" [BTreeCapSize=<size>]"); Console.Error.WriteLine(" [CookTimeout=<ms>]"); Console.Error.WriteLine(" [CookRetries=<num>]"); Console.Error.WriteLine(" [MetaBackupLocation=<dir>]"); Console.Error.WriteLine(" [Verify=<bool>]"); Console.Error.WriteLine(" [Multithreaded=<bool>]"); Console.Error.WriteLine(" [MetaOnly=<bool>]"); return; } if (null == slavelist) { Console.Error.WriteLine("Error: \"Machines=<host1>[,<host2>...]\" expected"); SetFailure(); return; } { Dictionary<string, bool> alls = new Dictionary<string, bool>(slavelist.Length); foreach (string ss in slavelist) { string coolss = IPAddressUtil.GetName(ss); if (alls.ContainsKey(coolss)) { Console.Error.WriteLine("host in there twice {0} lol", ss); SetFailure(); return; } alls.Add(coolss, true); } } if (verify) { string[] sl = new string[1]; bool vOK = true; foreach (string s in slavelist) { sl[0] = s; if (!VerifyHostPermissions(sl)) { Console.Error.WriteLine("Ensure the Windows service is installed and running on '{0}'", s); vOK = false; } } if (vOK) { Console.WriteLine("All machines are verified."); } else { Console.WriteLine(); Console.Error.WriteLine("Unable to format."); SetFailure(); return; } } if (dfs.DfsConfigExists(DFSXMLPATH, 1)) { Console.WriteLine("DFS exists; reformatting..."); Console.WriteLine("Consider running killall after format"); if (!metaonly) { } } else { } try { 
System.IO.File.Delete(DFSXMLPATH); } catch { } { dfs dc = new dfs(); dc.InitNew(); string sslavelist = ""; { StringBuilder sb = new StringBuilder(); for (int i = 0; i < slavelist.Length; i++) { if (sb.Length != 0) { sb.Append(';'); } sb.Append(slavelist[i].Trim()); } sslavelist = sb.ToString(); } dc.Slaves.SlaveList = sslavelist; dc.Blocks = new dfs.ConfigBlocks(); if (blockcount <= 0) { blockcount = NearestPrimeGE(slavelist.Length * Surrogate.NumberOfProcessors); } dc.Blocks.TotalCount = blockcount; if (sortedblockcount <= 0) { sortedblockcount = slavelist.Length * Surrogate.NumberOfProcessors; } dc.Blocks.SortedTotalCount = sortedblockcount; if (datanodebasesize > 0) { dc.DataNodeBaseSize = datanodebasesize; } if (zmapblockcount > 0) { dc.slave.zblocks.count = zmapblockcount; } if (replication > 0) { if (replication > slavelist.Length) { Console.Error.WriteLine("Cannot format with replication factor higher than the number of machines in the cluster (replication {0} > {1} machines)", replication, slavelist.Length); SetFailure(); return; } dc.Replication = replication; } if (btreeCapSize > 0) { dc.BTreeCapSize = btreeCapSize; } if (logexechistory > 0) { dc.LogExecHistory = logexechistory; } if (zblockcount > 0) { dc.slave.zblocks.count = zblockcount; } /*if (zblockaddbuffersize > 0) { dc.slave.zblocks.addbuffersize = zblockaddbuffersize; }*/ /*if (zblockreadbuffersize > 0) { dc.slave.zblocks.readbuffersize = zblockreadbuffersize; }*/ if (filebuffersizeoverride > 0) { dc.slave.FileBufferSizeOverride = filebuffersizeoverride; } if (127 != compressdfschunks) { dc.slave.CompressDfsChunks = compressdfschunks; } if (127 != compressdfschunks) { dc.slave.CompressZMapBlocks = compresszmapblocks; } if (cooktimeout >= 0) { dc.slave.CookTimeout = cooktimeout; } if (cookretries >= 0) { dc.slave.CookRetries = cookretries; } if (failovertimeout >= 0) { dc.FailoverTimeout = failovertimeout; } if (failoverdocheck >= 0) { dc.FailoverDoCheck = failoverdocheck; } try { if (null != 
metabackuplocation) { if (string.Empty == metabackuplocation) { dc.MetaBackup = ""; //Console.WriteLine("MetaBackupLocation explicitly disabled"); } else { if (metabackuplocation.StartsWith(@"\\")) { dc.MetaBackup = metabackuplocation; } else // If not a network path, make it one (relative to current-machine/surrogate). { // Using GetHostName here becuase during format, the current machine is the surrogate. dc.MetaBackup = Surrogate.LocalPathToNetworkPath(metabackuplocation, System.Net.Dns.GetHostName()); } try { EnsureMetaBackupLocation(dc); } catch { dc.MetaBackup = ""; throw; } foreach (string fn in System.IO.Directory.GetFiles(dc.GetMetaBackupLocation())) { System.IO.File.Delete(fn); } } } } catch (Exception e) { LogOutputToFile(e.ToString()); Console.Error.WriteLine(e.Message); } //Delete dfsxml from slaves. { string[] slaves = dc.Slaves.SlaveList.Split(';'); string self = GetSelfHost(slaves); foreach (string slave in slaves) { if (self != slave) { string dfsxmlpath = Surrogate.NetworkPathForHost(slave) + "\\" + dfs.DFSXMLNAME; // Not using dfs.DfsConfigExists() here because we're testing all slaves. if (System.IO.File.Exists(dfsxmlpath)) { System.IO.File.Delete(dfsxmlpath); } } } } UpdateDfsXml(dc); try { // Ensure master isn't an old slave. System.IO.File.Delete(AELight_Dir + @"\slave.dat"); } catch { } foreach (string slave in dc.Slaves.SlaveList.Split(';')) { WriteSlaveDat(slave); } Console.WriteLine("DFS setup: {0} processes on {1} machines", dc.Blocks.TotalCount, slavelist.Length); } } } break; case "invalidate": if (!dfs.DfsConfigExists(DFSXMLPATH)) { Console.Error.WriteLine("DFS not setup; use: {0} format", appname); SetFailure(); return; } if (args.Length < 2) { Console.Error.WriteLine("Error: dfs invalidate command needs arguments: <cacheName> <fileNodeName>"); SetFailure(); return; } DfsInvalidateCachedFileNode(args); break; default: Console.Error.WriteLine("Unrecognized DFS command: " + cmd); SetFailure(); return; } } }
internal static void EnsureMetaBackupLocation(dfs dc)
{
    // Validate the configured meta-backup location: it must be a rooted
    // network share that we can write to, and it must not alias the
    // primary meta-data storage directory (AELight_Dir).
    string backupDir = dc.GetMetaBackupLocation();
    if (!backupDir.StartsWith(@"\\"))
    {
        throw new Exception("Must supply a network path for new metabackup location");
    }
    if (!System.IO.Path.IsPathRooted(backupDir))
    {
        throw new Exception("MetaBackupLocation must be an absolute path");
    }
    // Best effort: the directory may already exist, and a failure here will
    // surface as a write failure on the probe below anyway.
    try
    {
        System.IO.Directory.CreateDirectory(backupDir);
    }
    catch
    {
    }
    // Probe writability with a uniquely named scratch file.
    string probeName = @"mbl-" + Guid.NewGuid().ToString() + @".test";
    string probePath = backupDir + @"\" + probeName;
    try
    {
        System.IO.File.WriteAllText(probePath, "MetaBackupLocation write test" + Environment.NewLine);
    }
    catch (Exception e)
    {
        throw new Exception("Unable to write to MetaBackupLocation " + backupDir, e);
    }
    // If the probe file is also visible in AELight_Dir, the backup location
    // points at the primary store, which would defeat the backup entirely.
    bool aliasesPrimaryStore = System.IO.File.Exists(AELight_Dir + @"\" + probeName);
    try
    {
        System.IO.File.Delete(probePath);
    }
    catch
    {
    }
    if (aliasesPrimaryStore)
    {
        throw new Exception("MetaBackupLocation is the same as the normal meta-data storage location: " + backupDir);
    }
}
// Convenience overload: persist the given DFS configuration to the default
// dfs.xml path, mirroring it to the configured meta-backup location.
protected static void UpdateDfsXml(dfs dc)
{
    UpdateDfsXml(dc, DFSXMLPATH, dc.GetMetaBackupLocation());
}