private bool disposedValue = false; // Guards against redundant Dispose calls.

/// <summary>
/// Releases resources held by the logger, persisting the in-memory
/// <c>LogDocument</c> to <c>OutputPath</c> before releasing the reference.
/// </summary>
/// <param name="disposing">
/// True when called from <c>Dispose()</c>; false when called from a finalizer
/// (in which case managed state must not be touched).
/// </param>
void Dispose(bool disposing)
{
    if (disposedValue)
    {
        return;
    }

    if (disposing)
    {
        try
        {
            // Guard against a null or empty path: the original check
            // (OutputPath != string.Empty) let null through, and FileStream
            // would then throw from inside Dispose — a pattern to avoid.
            if (LogDocument != null && !string.IsNullOrEmpty(OutputPath))
            {
                // The using block disposes (and therefore flushes) the
                // stream; the explicit Flush/Dispose calls that used to sit
                // inside it were redundant.
                using (FileStream fs = new FileStream(OutputPath, FileMode.Create))
                {
                    LogDocument.Save(fs);
                }
            }
        }
        finally
        {
            // Drop the reference so the document can be collected and no
            // further writes are attempted on a disposed logger.
            LogDocument = null;
        }
    }

    disposedValue = true;
}
/// <summary>
/// Persists a new <see cref="LogDocument"/> and returns a 201 Created
/// response whose Location header points at the GetLoggerDocument action.
/// </summary>
/// <param name="logDocument">The document to store.</param>
/// <returns>The created document wrapped in a CreatedAtAction result.</returns>
public async Task<ActionResult<LogDocument>> PostLogDocument(LogDocument logDocument)
{
    _context.LogDocuments.Add(logDocument);
    await _context.SaveChangesAsync();

    var routeValues = new { id = logDocument.Id };
    return CreatedAtAction(nameof(GetLoggerDocument), routeValues, logDocument);
}
/// <summary>
/// Persists a <see cref="Log"/> as a <see cref="LogDocument"/>: creates a new
/// document when the log carries no id, otherwise updates the existing one.
/// </summary>
/// <param name="log">The log to save; its messages replace the document's.</param>
/// <exception cref="InvalidOperationException">
/// Thrown when an id is supplied but no matching document exists in the store.
/// </exception>
public void SaveLog(Log log)
{
    using (var ds = Util.Unity.Instance.Resolve<ILogDataService<LogDocument>>())
    {
        if (string.IsNullOrWhiteSpace(log.Id))
        {
            // New log: generate an id and map all fields across.
            var document = new LogDocument()
            {
                Id = Guid.NewGuid().ToString(),
                Created = log.CreatedOn,
                Description = log.LogDescription,
                Name = log.Name,
                LastModified = log.LastModified
            };
            document.Messages = log.Messages.Select(d => new LogDocument.LogMessage
            {
                Data = d.Data,
                TimeStamp = d.TimeStamp,
                DetailLevel = d.DetailLevel
            }).ToArray();
            ds.Create(document);
        }
        else
        {
            var document = ds.Find(log.Id);
            if (document == null)
            {
                // The original dereferenced the Find result unconditionally,
                // producing a bare NullReferenceException when the id was
                // stale; fail with a diagnosable error instead.
                throw new InvalidOperationException(
                    $"No log document found with id '{log.Id}'.");
            }

            // Existing log: replace the message set and persist.
            document.Messages = log.Messages.Select(d => new LogDocument.LogMessage
            {
                Data = d.Data,
                TimeStamp = d.TimeStamp,
                DetailLevel = d.DetailLevel
            }).ToArray();
            ds.Update(document);
        }
    }
}
/// <summary>
/// End-to-end exercise of the Elasticsearch full-text service: create a
/// fresh index, add 500 documents, fetch one back raw, then run a wildcard
/// search and check the hit count.
/// </summary>
public void CRUD()
{
    // NOTE(review): 9300 is Elasticsearch's transport port; the HTTP API is
    // conventionally served on 9200 — confirm against the local cluster.
    var pool = new SingleNodeConnectionPool(new Uri("http://localhost:9300"));
    var connection = new HttpConnection();
    var connectionSettings =
        new ConnectionSettings(pool, connection, (serializer, settings) =>
        {
            //return new MyFirstCustomJsonNetSerializer(serializer, settings);
            return JsonNetSerializer.Default(serializer, settings);
        })
        .DisableAutomaticProxyDetection()
        .EnableHttpCompression()
        .DisableDirectStreaming()
        .PrettyJson()
        .RequestTimeout(TimeSpan.FromMinutes(2));

    var client = new ElasticClient(connectionSettings);
    var service = new ElasticFullTextService(client);

    // Fresh index per run so repeated test runs do not interfere.
    var indexName = Guid.NewGuid().ToString();
    service.CreateIndex(indexName);

    LogDocument doc = null;
    for (int i = 0; i < 500; i++)
    {
        doc = new LogDocument()
        {
            Body = $"My first document into index, position is number{i}"
        };
        service.AddDocument(indexName, doc);
    }

    // xUnit's Assert.Equal takes (expected, actual); the original reversed
    // the arguments, which yields misleading failure messages.
    var item = service.GetDocumentRaw(indexName, doc.Id.ToString());
    Assert.Equal(doc.Id.ToString(), item["Id"]);

    // "number1*" matches number1, number10-19 and number100-199:
    // 1 + 10 + 100 = 111 of the 500 documents.
    var items = service.SearchDocumentsRaw(indexName, "number1*", 0, 140);
    Assert.Equal(111, items.Count);
}
/// <summary>
/// Write the XML log entry.
/// Loads (or creates) the XML log file at <c>OutputPath</c>, appends one
/// serialized <c>logEntry</c> element, bumps the <c>entryCount</c> attribute,
/// rotates the log when it exceeds <c>MaxLogRows</c>, and saves the file.
/// </summary>
/// <typeparam name="TState">State type supplied by the logging framework.</typeparam>
/// <param name="logLevel">Severity of this entry; entries below <c>this.LogLevel</c> are skipped.</param>
/// <param name="eventId">Framework event id passed through to the entry data.</param>
/// <param name="state">Opaque state object for the formatter.</param>
/// <param name="exception">Optional exception to record.</param>
/// <param name="formatter">Renders state + exception to the message text.</param>
void ILogger.Log<TState>(LogLevel logLevel, EventId eventId, TState state, Exception exception, Func<TState, Exception, string> formatter)
{
    XmlElement xEleNew = null;
    XmlNode xLogEntries = null;
    XmlAttribute xa = null;
    long rowCount = 0;

    // only log at the specified level and below
    if ((int)this.LogLevel <= (int)logLevel && logLevel != LogLevel.None)
    {
        // Make sure requests do not bump into each other.
        // NOTE(review): this check-then-set on Busy is not atomic — two
        // threads can both observe Busy == false and proceed concurrently.
        // A lock/SemaphoreSlim would be safer; confirm intended threading model.
        while (Busy == true)
        {
            System.Threading.Thread.Sleep(250);
        }
        Busy = true;

        try
        {
            LogDocument = this.LoadOrCreateLogDocument(OutputPath);

            // Get row information from file: find the <logEntries> root node
            // and read its running entry count.
            xLogEntries = (from XmlNode xn in LogDocument.ChildNodes where xn.Name == "logEntries" select xn).First();
            rowCount = Convert.ToInt64(xLogEntries.Attributes.GetNamedItem(@"entryCount").Value);

            if (rowCount > MaxLogRows)
            {
                this.LogRotate();
                // Re-read the root node and count: LogRotate replaced the document.
                xLogEntries = null;
                xLogEntries = (from XmlNode xn in LogDocument.ChildNodes where xn.Name == "logEntries" select xn).First();
                rowCount = Convert.ToInt64(xLogEntries.Attributes.GetNamedItem(@"entryCount").Value);
            }

            // Write the element items: a <logEntry id="N"> element where N is
            // the current row count.
            xEleNew = LogDocument.CreateElement(@"logEntry");
            xa = LogDocument.CreateAttribute("id");
            xa.Value = rowCount.ToString();
            xEleNew.Attributes.Append(xa);

            // Create the object data and serialise it into the new element.
            LogEntryData le = this.Create_EntryData(logLevel, eventId, state, exception, formatter);
            using (XMLSerializer serializer = new XMLSerializer())
            {
                xEleNew.InnerXml += serializer.Serialize(le.GetType(), le, @"logEntry").InnerXml;
            }

            // Bump the stored entry count and append the new entry.
            rowCount++;
            xLogEntries.Attributes.GetNamedItem(@"entryCount").Value = rowCount.ToString();
            xLogEntries.AppendChild(xEleNew);

            try
            {
                using (FileStream fs = new FileStream(OutputPath, FileMode.OpenOrCreate, FileAccess.Write))
                {
                    LogDocument.Save(fs);
                    // Cleardown items to prevent file lock.
                    fs.Flush();
                    fs.Dispose();
                }
            }
            finally
            {
                // Drop the document reference after every write attempt.
                LogDocument = null;
            }
        }
        catch (Exception ex)
        {
            // NOTE(review): all logging failures are silently swallowed here —
            // a faulty OutputPath or malformed file loses entries with no trace.
            // TODO own exceptions are being logged!
            // throw new LoggerException(@"Something went wrong while logging using " + this.GetType().ToString() + " see inner exception for details.", ex);
        }
        finally
        {
            // Release the (advisory) busy flag and clear locals.
            Busy = false;
            xEleNew = null;
            xLogEntries = null;
            xa = null;
        }
    }
}
/// <summary>
/// For every tracklog document, finds the nearest suburb within 50 metres of
/// its location and records a <see cref="LogDocument"/> for each mismatch
/// between the tracklog address and the matched suburb name, then recreates
/// the "log" index and bulk-indexes the mismatches into it.
/// </summary>
public async Task Test()
{
    string indexName = "address4";

    var cumulativ = new Stopwatch();
    cumulativ.Start();

    var searchResults2 = _elasticClient.Search<TracklogDocument>(s => s.From(0).Size(10000)
        .Index("tracklog")
        .Query(q => q.MatchAll()));

    var logList = new List<LogDocument>();
    foreach (var val in searchResults2.Documents)
    {
        var watch = new Stopwatch();
        watch.Start();

        // Suburbs within 50 m of the tracklog point, nearest first.
        // The sort lambda originally reused the parameter name 's', shadowing
        // the enclosing Search lambda's parameter (compiler error CS0136).
        var geoResult = _elasticClient.Search<SuburbDocument>(search => search.From(0).Size(10000)
            .Index(indexName)
            .Query(query => query
                .Bool(b => b.Filter(filter => filter
                    .GeoDistance(geo => geo
                        .Field(f => f.Location)
                        .Distance(50, Nest.DistanceUnit.Meters)
                        .Location(new GeoCoordinate(val.Location.Latitude, val.Location.Longitude))))))
            .Sort(sort => sort
                .GeoDistance(g => g
                    .Field(p => p.Location)
                    .DistanceType(GeoDistanceType.Arc)
                    .Order(SortOrder.Ascending)
                    .Unit(DistanceUnit.Kilometers)
                    // NOTE(review): the same coordinate was supplied twice as
                    // sort points; kept for identical requests — confirm intent.
                    .Points(new GeoCoordinate(val.Location.Latitude, val.Location.Longitude),
                            new GeoCoordinate(val.Location.Latitude, val.Location.Longitude)))));

        watch.Stop();
        var elapsedMs = watch.ElapsedMilliseconds;

        // Only log mismatches: a suburb was found but its name differs from
        // the tracklog's recorded address.
        var nearest = geoResult.Documents.FirstOrDefault();
        if (geoResult.Documents.Any() && val.Address != nearest?.Name)
        {
            logList.Add(new LogDocument
            {
                ElapsedMilliseconds = elapsedMs,
                SuburbName = nearest?.Name,
                TracklogAddres = val.Address,
                Location = new GeoCoordinate(val.Location.Latitude, val.Location.Longitude),
                TracklogId = val.TracklogId
            });
        }
    }

    cumulativ.Stop();
    _logger.LogInformation($"Vrijeame trajanje u milisekundama = {cumulativ.ElapsedMilliseconds}");

    // Recreate the "log" index from scratch so repeated runs start clean.
    var index = await _elasticClient.Indices.ExistsAsync("log");
    if (index.Exists)
    {
        await _elasticClient.Indices.DeleteAsync("log");
    }

    var createResult = await _elasticClient.Indices.CreateAsync("log", c => c
        .Settings(s => s
            .Analysis(a => a
                .AddSearchAnalyzer()))
        .Map<LogDocument>(m => m.AutoMap()));

    await _elasticClient
        .BulkAsync(b => b
            .Index("log")
            .CreateMany(logList));
}
/// <summary>
/// Maps a persisted <see cref="LogDocument"/> onto its domain
/// <see cref="Log"/> entity.
/// </summary>
/// <param name="log">The stored document to convert.</param>
/// <returns>A <see cref="Log"/> built from the document's entity, status and time.</returns>
public static Log AsEntity(this LogDocument log)
{
    var entity = log.Entity;
    return new Log(entity.Id, entity.DisplayName, log.Status, log.Time);
}