public static void InsertFromFile(string fn, ElasticClient es, Dictionary<byte, EasyRocksDb> rdbs)
{
    // File names follow the fixed "yyyy-MM-dd" pattern, so parse with the
    // invariant culture rather than the current one.
    var date = DateTime.ParseExact(Path.GetFileNameWithoutExtension(fn), "yyyy-MM-dd", CultureInfo.InvariantCulture);
    var items = new Dictionary<string, CsvEntry>();

    // Step 1: Parse CSV file
    using (var fs = new FileStream(fn, FileMode.Open, FileAccess.Read, FileShare.Read))
    using (var reader = new StreamReader(fs, Encoding.UTF8))
    {
        reader.ReadLine(); // Skip header

        while (!reader.EndOfStream)
        {
            try
            {
                var item = CsvEntry.Step1_ReadLine(reader.ReadLine());
                if (item.Frequency > 0)
                {
                    items.Add(item.Key, item);
                }
            }
            catch
            {
                // Ignore malformed lines (and duplicate keys, which make Add throw).
            }
        }
    }

    Insert(es, rdbs, items, date);
}
public static void InsertFromWebRequest(string data, DateTime date, ElasticClient es, Dictionary<byte, EasyRocksDb> rdbs)
{
    var items = new Dictionary<string, CsvEntry>();

    // Step 1: Parse CSV file
    using (var fs = new MemoryStream(Convert.FromBase64String(data)))
    using (var gzip = new GZipStream(fs, CompressionMode.Decompress))
    using (var reader = new StreamReader(gzip, Encoding.UTF8))
    {
        reader.ReadLine(); // Skip header

        while (!reader.EndOfStream)
        {
            try
            {
                var item = CsvEntry.Step1_ReadLine(reader.ReadLine());
                if (item.Frequency > 0)
                {
                    items.Add(item.Key, item);
                }
            }
            catch
            {
                // ignore
            }
        }
    }

    Insert(es, rdbs, items, date);
}
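The two loaders share the same parsing step; only the transport differs. A minimal sketch of a compatible encoder for the InsertFromWebRequest payload (UTF-8 CSV text, gzip-compressed, then base64-encoded); the CsvPayload class and Encode method are hypothetical names, not part of the original code:

using System;
using System.IO;
using System.IO.Compression;
using System.Text;

public static class CsvPayload
{
    // Produce the base64(gzip(UTF-8 csv)) string that InsertFromWebRequest decodes.
    public static string Encode(string csvText)
    {
        using (var buffer = new MemoryStream())
        {
            using (var gzip = new GZipStream(buffer, CompressionMode.Compress, leaveOpen: true))
            {
                byte[] bytes = Encoding.UTF8.GetBytes(csvText);
                gzip.Write(bytes, 0, bytes.Length);
            }
            return Convert.ToBase64String(buffer.ToArray());
        }
    }
}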
private void AddMissingFields(CsvEntry entry, List<string> missingFieldNames)
{
    foreach (var missingFieldName in missingFieldNames)
    {
        entry.AddValue(missingFieldName, null, false);
    }
}
protected override void ReadEntry(CsvEntry entry)
{
    var info = new PropDropItemInfo();
    info.Type = entry.ReadInt();
    info.ItemClass = entry.ReadInt();
    info.Amount = entry.ReadUShort();
    info.Chance = entry.ReadFloat();

    var ii = AuraData.ItemDb.Find(info.ItemClass);
    if (ii == null)
    {
        throw new Exception(string.Format("Unknown item id '{0}'.", info.ItemClass));
    }

    if (info.Amount > ii.StackMax)
    {
        info.Amount = ii.StackMax;
    }

    // The file contains PropDropItemInfo; here we organize it into PropDropData structs.
    if (!this.Entries.ContainsKey(info.Type))
    {
        this.Entries.Add(info.Type, new PropDropData(info.Type));
    }

    this.Entries[info.Type].Items.Add(info);
}
protected override void ReadEntry(CsvEntry entry)
{
    // Read everything first, we might need it for multiple regions.
    var regions = entry.ReadStringList();
    var type = (WeatherInfoType)entry.ReadByte();

    var values = new List<float>();
    while (!entry.End)
    {
        values.Add(entry.ReadFloat());
    }

    // Every type has at least 1 value.
    if (values.Count < 1)
    {
        throw new CsvDatabaseWarningException("Too few values.");
    }

    foreach (var region in regions)
    {
        var info = new WeatherData();
        info.Region = Convert.ToInt32(region);
        info.Type = type;
        info.Values = values;

        this.Entries[info.Region] = info;
    }
}
public CsvEntry Clone()
{
    // ToArray already returns a fresh array; Clone makes the copy explicit.
    string[] array = (string[])strings.ToArray().Clone();
    return new CsvEntry(array);
}
protected override void ReadEntry(CsvEntry entry)
{
    var info = new MapData();
    info.Id = entry.ReadInt();
    info.Name = entry.ReadString();

    this.Entries[info.Name] = info;
}
protected override void ReadEntry(CsvEntry entry)
{
    var info = new SpeedData();
    info.Ident = entry.ReadString();
    info.Speed = entry.ReadFloat();

    this.Entries[info.Ident] = info;
}
public XDocument Convert()
{
    if (FilePath == null)
    {
        throw new ArgumentNullException(nameof(FilePath));
    }

    // Read CSV entries.
    var lines = File.ReadAllLines(FilePath, Encoding.Default);
    var headers = lines.First().Split(Settings.Separator.ToCharArray()).Select(it => it.Trim('"')).ToArray();
    var skip = 1;
    if (!Settings.Header)
    {
        // No header row: use column indices as keys and parse every line.
        headers = Enumerable.Range(0, headers.Length).Select(it => it.ToString()).ToArray();
        skip = 0;
    }

    var csvEntries = new List<CsvEntry>();
    foreach (var line in lines.Skip(skip))
    {
        var values = line.Split(Settings.Separator.ToCharArray());
        var csvEntry = new CsvEntry();
        for (var i = 0; i < headers.Length; ++i)
        {
            var key = Settings.HeaderToKey(headers[i]) ?? headers[i];
            var value = values[i].Trim('"').Replace(@"\n", Environment.NewLine);
            csvEntry.Add(key, value);
        }
        csvEntries.Add(csvEntry);
    }

    // Write to XML.
    var doc = new XDocument();
    var root = new XElement(Settings.RootNodeName);
    foreach (var csvEntry in csvEntries)
    {
        var newEntry = new XElement(Settings.EntryNodeName);
        newEntry.Add(csvEntry.Select(it => new XElement("String", new XElement("Key", it.Key), new XElement("Value", it.Value))));
        root.Add(newEntry);
    }
    doc.Add(root);

    System.Diagnostics.Debug.WriteLine(doc);
    return doc;
}
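The XML shape Convert produces is easiest to see on a tiny input. Assuming a comma separator, a header row, an identity HeaderToKey, and that Settings.RootNodeName and Settings.EntryNodeName are "Root" and "Entry" (the actual values live in Settings and are not shown here), the file

Name,Greeting
"Alice","Hello"

converts to

<Root>
  <Entry>
    <String>
      <Key>Name</Key>
      <Value>Alice</Value>
    </String>
    <String>
      <Key>Greeting</Key>
      <Value>Hello</Value>
    </String>
  </Entry>
</Root>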
public CsvEntry ParseLine(string l)
{
    var retVal = new CsvEntry();
    if (l != null)
    {
        // Split with StringSplitOptions.None: RemoveEmptyEntries would shift the
        // column positions whenever a field is empty.
        var split = l.Split(new[] { "," }, StringSplitOptions.None);
        retVal.Machine = split[2];
        retVal.Ms = int.Parse(split[4]);
        retVal.Step = int.Parse(split[3]);
        retVal.Time = DateTime.Parse(split[0]);
    }
    return retVal;
}
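For reference, a hypothetical input line (the real log format is not shown in the source) and the columns ParseLine picks out of it; parser stands for whatever instance hosts the method:

// "2021-06-01T12:00:00,run-42,BUILD01,3,250" maps as:
//   split[0] -> Time    (timestamp)
//   split[2] -> Machine ("BUILD01")
//   split[3] -> Step    (3)
//   split[4] -> Ms      (250)
// split[1] is ignored.
var entry = parser.ParseLine("2021-06-01T12:00:00,run-42,BUILD01,3,250");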
protected override void ReadEntry(CsvEntry entry)
{
    // Replace previous values if there is more than 1 line.
    this.Entries = new List<ExpData>(entry.Count);

    while (!entry.End)
    {
        var info = new ExpData();
        info.Level = (entry.Pointer + 1);
        info.Exp = entry.ReadInt();
        this.Entries.Add(info);
    }
}
public void TestAddDimension()
{
    var key = "key";
    var value = "10.0";

    var entry = new CsvEntry();
    entry.AddValue(key, value, true);

    Assert.AreEqual(0, entry.MeasureValues.Count);
    Assert.AreEqual(1, entry.DimensionValues.Count);
    Assert.IsTrue(entry.DimensionValues.ContainsKey(key));
    Assert.AreEqual(value, entry.DimensionValues[key]);
}
void init(Buf? data, Encoding? encoding)
{
    if (encoding == null)
    {
        encoding = defaultEncoding;
    }

    // Detect and strip a byte-order mark, preferring the encoding it implies.
    int bomSize = 0;
    Encoding? enc2 = null;
    if (data != null)
    {
        enc2 = Str.CheckBOM(data.ByteData, out bomSize);
    }
    if (bomSize >= 1)
    {
        data = new Buf(Util.RemoveStartByteArray(data!.ByteData, bomSize));
    }
    if (enc2 != null)
    {
        encoding = enc2;
    }
    this.encoding = encoding;

    entryList = new List<CsvEntry>();

    if (data != null)
    {
        MemoryStream ms = new MemoryStream(data.ByteData);
        StreamReader sr = new StreamReader(ms, this.encoding);

        // Read the buffer line by line, splitting each line on commas.
        while (true)
        {
            string? s = sr.ReadLine();
            if (s == null)
            {
                break;
            }

            char[] sep = { ',' };
            string[] strings = s.Trim().Split(sep, StringSplitOptions.None);
            Add(new CsvEntry(strings));
        }
    }
}
public MailUser(CsvEntry e)
{
    this.MailAddress = e[0];
    this.Company = e[1];
    this.FullName = e[2];
    this.Language = CoreLanguageList.GetLanguageClassByName(e[3]);

    if (e.Count >= 5)
    {
        string pStr = e[4];
        this.ParamList = MailUtil.StrToParamList(pStr);
    }

    normalize();
}
public void TestToString()
{
    var key1 = "key1";
    var value1 = "10.0";
    var key2 = "key2";
    var value2 = "value2";

    var entry = new CsvEntry();
    entry.AddValue(key1, value1, true);
    entry.AddValue(key2, value2, false);

    var expectedString = "10.0,value2";
    Assert.AreEqual(expectedString, entry.ToString());
}
int sortInternal(CsvEntry e1, CsvEntry e2)
{
    // Use the custom comparison delegate when one is configured,
    // otherwise fall back to the values' natural IComparable ordering.
    if (csvCompareMethod != null)
    {
        object o1 = e1.Convert(csvCompareType, csvCompareIndex);
        object o2 = e2.Convert(csvCompareType, csvCompareIndex);
        return csvCompareMethod(o1, o2) * (csvCompareReverse ? -1 : 1);
    }
    else
    {
        IComparable o1 = (IComparable)e1.Convert(csvCompareType, csvCompareIndex);
        IComparable o2 = (IComparable)e2.Convert(csvCompareType, csvCompareIndex);
        return o1.CompareTo(o2) * (csvCompareReverse ? -1 : 1);
    }
}
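The signature matches Comparison&lt;CsvEntry&gt;, so it plugs straight into List&lt;T&gt;.Sort; a minimal usage sketch, assuming entryList is the List&lt;CsvEntry&gt; the surrounding class maintains:

// Sort the loaded entries using the configured column, type, and direction.
entryList.Sort(sortInternal);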
public static List<CsvEntry> PopulateCsvEntries(this CsvFileContent csvFileContent, string[] parsedEntries)
{
    csvFileContent.CsvEntries = new List<CsvEntry>();
    csvFileContent.ParentsWithChildrenAdjecencyList = new Dictionary<string, HashSet<WeightedChildNode>>();

    // parsedEntries is a flat array in which each row occupies TotalNumberOfColumns
    // slots; start one full row in (presumably skipping the header row) and advance
    // a row at a time.
    for (int index = CsvEntryIndex.TotalNumberOfColumns;
         index <= parsedEntries.Length - CsvEntryIndex.TotalNumberOfColumns;
         index += CsvEntryIndex.TotalNumberOfColumns)
    {
        var csvEntry = new CsvEntry
        {
            Child = parsedEntries[index + CsvEntryIndex.ChildIndex],
            Parent = parsedEntries[index + CsvEntryIndex.ParentIndex],
            Quantity = Convert.ToInt32(parsedEntries[index + CsvEntryIndex.QuantityIndex])
        };
        csvFileContent.CsvEntries.Add(csvEntry);
    }

    return csvFileContent.CsvEntries;
}
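To make the index arithmetic concrete, a hypothetical layout; the actual CsvEntryIndex constants are not shown in the source, so assume TotalNumberOfColumns == 3, ChildIndex == 0, ParentIndex == 1, QuantityIndex == 2:

// parsedEntries = { "Child", "Parent", "Quantity",   // row 0: header, skipped
//                   "wheel", "car",    "4",          // row 1: starts at index 3
//                   "door",  "car",    "2" };        // row 2: starts at index 6
// Each iteration reads one row's Child, Parent, and Quantity.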
protected override void ReadEntry(CsvEntry entry)
{
    var info = new DropData();
    info.ItemId = entry.ReadInt(0);
    info.Chance = entry.ReadFloat(1);

    // Clamp the drop chance to the valid 0-100 range.
    if (info.Chance > 100)
    {
        info.Chance = 100;
    }
    else if (info.Chance < 0)
    {
        info.Chance = 0;
    }

    this.Entries.Add(info);
}
public CsvData ReadFile(string fileName)
{
    var csvData = new CsvData();
    var csvEntries = new List<CsvEntry>();
    var csvReaderConfiguration = new Configuration() { Delimiter = CSV_DELIMITER };

    using (var streamReader = File.OpenText(fileName))
    using (var csvReader = new CsvHelper.CsvReader(streamReader, csvReaderConfiguration))
    {
        csvReader.Read();
        csvReader.ReadHeader();
        var header = csvReader.Context.HeaderRecord;

        while (csvReader.Read())
        {
            var csvEntry = new CsvEntry();
            foreach (var name in header)
            {
                bool isDimension = dimensionColumns.Contains(name);
                var value = csvReader.GetField(name);
                csvEntry.AddValue(name, value, isDimension);
            }
            csvEntries.Add(csvEntry);
        }

        csvData.FieldNames = header;
    }

    csvData.Entries = csvEntries;
    return csvData;
}
protected override void ReadEntry(CsvEntry entry)
{
    var info = new StatsLevelUpData();
    info.Age = entry.ReadByte();
    info.Race = entry.ReadUShort();
    info.AP = entry.ReadShort();
    info.Life = entry.ReadFloat();
    info.Mana = entry.ReadFloat();
    info.Stamina = entry.ReadFloat();
    info.Str = entry.ReadFloat();
    info.Int = entry.ReadFloat();
    info.Dex = entry.ReadFloat();
    info.Will = entry.ReadFloat();
    info.Luck = entry.ReadFloat();

    if (!this.Entries.ContainsKey(info.Race))
    {
        this.Entries[info.Race] = new Dictionary<int, StatsLevelUpData>();
    }

    this.Entries[info.Race][info.Age] = info;
}
private static object GetCsvEntryByRequiredKey(string requiredKey, RuntimeCsvRepresentation rcr)
{
    rcr.RemoveHeaderWhitespaceAndDetermineIfRequired();
    int requiredIndex = rcr.GetRequiredIndex();

    int startingRow;
    int count;
    GetStartingAndCount(rcr, requiredIndex, requiredKey, out startingRow, out count);

    // Returns null when no row matches the required key.
    CsvEntry csvEntry = null;
    if (startingRow != -1)
    {
        csvEntry = new CsvEntry();
        csvEntry.RuntimeCsvRepresentation = rcr;
        csvEntry.Count = count;
        csvEntry.StartIndex = startingRow;
    }

    return csvEntry;
}
protected override void ReadEntry(CsvEntry entry)
{
    var info = new ShamalaData();
    info.Id = entry.ReadInt();
    info.Name = entry.ReadString();
    info.Category = entry.ReadString();
    info.Rank = entry.ReadByte();
    info.Rate = entry.ReadFloat();
    info.Required = entry.ReadByte();
    info.Size = entry.ReadFloat();
    info.Color1 = entry.ReadUIntHex();
    info.Color2 = entry.ReadUIntHex();
    info.Color3 = entry.ReadUIntHex();

    var races = entry.ReadStringList();
    foreach (var race in races)
    {
        info.Races.Add(Convert.ToInt32(race));
    }

    this.Entries[info.Id] = info;
}
public CsvEntry ToCsvEntry()
{
    return new CsvEntry(this.MailAddress, this.Company, this.FullName, this.Language.Name, MailUtil.ParamListToStr(this.ParamList));
}
public void Add(CsvEntry e)
{
    entryList.Add(e);
}
public void AddCsvEntry(params string[] columns)
{
    CsvEntry logEntry = new CsvEntry(columns);
    _csvLogs.Add(logEntry);
}
string MergeStringTables(string oldLocalizedCsv, string newBaseCsv, string outputFileName)
{
    // Use CsvHelper to convert the two csvs to lists so they are easy to work with.
    List<CsvEntry> oldEntries = new List<CsvEntry>();
    List<CsvEntry> newEntries = new List<CsvEntry>();

    using (var oldReader = new StringReader(oldLocalizedCsv))
    using (var newReader = new StringReader(newBaseCsv))
    {
        var oldParser = new CsvHelper.CsvReader(oldReader, new CsvHelper.Configuration.Configuration(CultureInfo.InvariantCulture));
        var newParser = new CsvHelper.CsvReader(newReader, new CsvHelper.Configuration.Configuration(CultureInfo.InvariantCulture));

        oldParser.Read();
        newParser.Read();
        oldParser.ReadHeader();
        newParser.ReadHeader();

        while (oldParser.Read())
        {
            oldEntries.Add(new CsvEntry
            {
                id = oldParser.GetField("id"),
                text = oldParser.GetField("text"),
                file = oldParser.GetField("file"),
                node = oldParser.GetField("node"),
                lineNumber = oldParser.GetField<int>("lineNumber"),
            });
        }

        while (newParser.Read())
        {
            newEntries.Add(new CsvEntry
            {
                id = newParser.GetField("id"),
                text = newParser.GetField("text"),
                file = newParser.GetField("file"),
                node = newParser.GetField("node"),
                lineNumber = newParser.GetField<int>("lineNumber"),
            });
        }
    }

    // This is where we merge the two string tables. Here's what's happening:
    // The strategy exploits the fact that the two string tables look alike.
    // The algorithm goes through the string tables side by side. Imagine two fingers
    // running through the entries. At each line there are four scenarios we test for:
    // Scenario 1: The lines match (matching tags).
    // Scenario 2: The line in the new string table exists in the old string table but has been moved from somewhere else.
    // Scenario 3: The line in the new string table is completely new (no line tag in the old string table matches it).
    // Scenario 4: The line in the old string table has been deleted (no line tag in the new string table matches it).
    //
    // Go line by line:
    // 1. If line tags are the same: add old localized with new line number and node. Increase index of both. (s1: matching lines)
    // 2. Else if line tags are different:
    //    a. Search forward in the old string table for that line tag.
    //       i.  If we find it: add old localized with new line number and node. Remove from old. Increase new index. (s2: old line moved)
    //       ii. If we don't find it: search forward in the new string table for that line tag.
    //           I.  If we find it: add the one new line we are on. Increase new index. (s3: line is new)
    //           II. If we don't find it: ignore the line. Increase old index. (s4: line has been deleted)
    int oldIndex = 0;
    int newIndex = 0;
    List<CsvEntry> mergedEntries = new List<CsvEntry>();

    // Mark new lines as new so they are easy to spot.
    string newlineMarker = " (((NEW LINE)))";

    while (true)
    {
        // If no more entries in old: add the rest of the new entries and break.
        if (oldEntries.Count <= oldIndex)
        {
            for (int i = newIndex; i < newEntries.Count; i++)
            {
                CsvEntry entry = newEntries[i];
                entry.text += newlineMarker;
                mergedEntries.Add(entry);
            }
            break;
        }

        // If no more entries in new: all additional old entries must have been deleted, so break.
        if (newEntries.Count <= newIndex)
        {
            break;
        }

        // 1. If line tags are the same: add old localized with new line number. Increase index of both.
        if (oldEntries[oldIndex].id == newEntries[newIndex].id)
        {
            CsvEntry entry = oldEntries[oldIndex];
            entry.lineNumber = newEntries[newIndex].lineNumber;
            entry.node = newEntries[newIndex].node;
            mergedEntries.Add(entry);
            oldIndex++;
            newIndex++;
            continue;
        }
        // 2. Else if line tags are different:
        else
        {
            // a. Search forward in the old string table for that line tag.
            bool didFindInOld = false;
            for (int i = oldIndex + 1; i < oldEntries.Count; i++)
            {
                // i. If we find it: add old localized with new line number. Remove from old. Increase index of new. (old line moved)
                if (oldEntries[i].id == newEntries[newIndex].id)
                {
                    CsvEntry entry = oldEntries[i];
                    entry.lineNumber = newEntries[newIndex].lineNumber;
                    entry.node = newEntries[newIndex].node;
                    mergedEntries.Add(entry);
                    oldEntries.RemoveAt(i);
                    didFindInOld = true;
                    newIndex++;
                    break;
                }
            }
            if (didFindInOld)
            {
                continue;
            }

            // ii. If we don't find it: search forward in the new string table for that line tag.
            bool didFindInNew = false;
            for (int i = newIndex + 1; i < newEntries.Count; i++)
            {
                // I. If we find it: add the one new line we are on. Increase index of new. (line is new)
                if (oldEntries[oldIndex].id == newEntries[i].id)
                {
                    CsvEntry entry = newEntries[newIndex];
                    entry.text += newlineMarker;
                    mergedEntries.Add(entry);
                    newIndex++;
                    didFindInNew = true;
                    break;
                }
            }

            // II. If we don't find it: ignore the line. Increase index of old. (line has been deleted)
            if (!didFindInNew)
            {
                oldIndex++;
            }
        }
    }

    // Entries are not necessarily added in the correct order and have to be sorted.
    mergedEntries.Sort((a, b) => a.lineNumber.CompareTo(b.lineNumber));

    // Create the new csv file.
    using (var memoryStream = new MemoryStream())
    using (var textWriter = new StreamWriter(memoryStream))
    {
        // Generate the localised .csv file.
        var csv = new CsvHelper.CsvWriter(textWriter, new CsvHelper.Configuration.Configuration(CultureInfo.InvariantCulture));
        var lines = mergedEntries.Select(x => new
        {
            id = x.id,
            text = x.text,
            file = outputFileName,
            node = x.node,
            lineNumber = x.lineNumber
        });
        csv.WriteRecords(lines);
        textWriter.Flush();
        memoryStream.Position = 0;

        using (var reader = new StreamReader(memoryStream))
        {
            return reader.ReadToEnd();
        }
    }
}
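A small worked example of the merge with hypothetical inputs (the header matches the columns the method reads); line:1 exercises scenario 1, line:2 scenario 4, and line:3 scenario 3:

string oldCsv =
    "id,text,file,node,lineNumber\n" +
    "line:1,Bonjour,Old.yarn,Start,1\n" +   // kept: same tag exists in new (scenario 1)
    "line:2,Au revoir,Old.yarn,Start,2\n";  // dropped: tag gone from new (scenario 4)
string newCsv =
    "id,text,file,node,lineNumber\n" +
    "line:1,Hello,New.yarn,Start,1\n" +
    "line:3,Welcome,New.yarn,Start,2\n";    // added with the " (((NEW LINE)))" marker (scenario 3)

// Result: "Bonjour" survives with the new line number and node, "Au revoir"
// disappears, and "Welcome (((NEW LINE)))" is flagged for the translator.
string merged = MergeStringTables(oldCsv, newCsv, "New.yarn");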
public Entry MapToDomain(CsvEntry csvEntry)
{
    return this.Map<CsvEntry, Entry>(csvEntry);
}