public void when_serializing_logentries_can_serialize_valid_bulk_request_format()
{
    // Note: serializing the entries as a plain JSON array would not be a valid message for an
    // Elasticsearch bulk operation; the serializer must emit the bulk (action line / source line) format
    var actual = new ElasticsearchEventEntrySerializer("logstash", "slab", "instance", true)
        .Serialize(new[] { CreateEventEntry(), CreateEventEntry() });

    Assert.IsNotNull(actual);
    Assert.IsTrue(this.IsValidBulkMessage(actual));
}
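// The IsValidBulkMessage helper used throughout these tests is not shown in this section.
// Below is a minimal, hypothetical sketch of what such a validator might check, assuming the
// serializer emits the bulk NDJSON format: alternating action and source lines, each of which
// is a standalone JSON object, with a trailing newline after the last line.
private bool IsValidBulkMessage(string message)
{
    // Bulk requests must end with a newline
    if (string.IsNullOrEmpty(message) || !message.EndsWith("\n"))
    {
        return false;
    }

    // Index operations come in pairs: an action line followed by a source line
    var lines = message.TrimEnd('\n').Split('\n');
    if (lines.Length == 0 || lines.Length % 2 != 0)
    {
        return false;
    }

    // Every line must parse as a JSON object on its own
    foreach (var line in lines)
    {
        try
        {
            JObject.Parse(line);
        }
        catch (Exception)
        {
            return false;
        }
    }

    return true;
}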
public void when_serializing_a_log_entry_then_object_can_serialize()
{
    var payload = new Dictionary<string, object> { { "msg", "the message" }, { "date", DateTime.UtcNow } };
    var logObject = EventEntryTestHelper.Create(
        timestamp: DateTimeOffset.UtcNow,
        payloadNames: payload.Keys,
        payload: payload.Values);

    var actual = new ElasticsearchEventEntrySerializer("logstash", "slab", "instance", true).Serialize(new[] { logObject });

    Assert.IsNotNull(actual);
    Assert.IsTrue(this.IsValidBulkMessage(actual));
}
public void when_serializing_a_log_entry_without_flattened_payload_then_payload_nested()
{
    var payload = new Dictionary<string, object> { { "msg", "the message" }, { "date", DateTime.UtcNow } };
    var logObject = EventEntryTestHelper.Create(
        timestamp: DateTimeOffset.UtcNow,
        payloadNames: payload.Keys,
        payload: payload.Values);

    var actual = new ElasticsearchEventEntrySerializer("logstash", "slab", "instance", false).Serialize(new[] { logObject });

    // The second line of the bulk message is the serialized source document
    var serializedEntry = actual.Split('\n')[1];
    var jsonObject = JObject.Parse(serializedEntry);

    Assert.IsNotNull(jsonObject["Payload"]["msg"]);
    Assert.IsNotNull(jsonObject["Payload"]["date"]);
    Assert.IsNotNull(actual);
    Assert.IsTrue(this.IsValidBulkMessage(actual));
}
public void when_serializing_a_log_with_jsonprops_entry_then_json_merged()
{
    var payload = new Dictionary<string, object>
    {
        { "msg", "the message" },
        { "date", DateTime.UtcNow },
        { "_jsonPayload", "{\"test\": \"value\", \"test2\": \"value2\"}" }
    };
    var logObject = EventEntryTestHelper.Create(
        timestamp: DateTimeOffset.UtcNow,
        payloadNames: payload.Keys,
        payload: payload.Values);

    var actual = new ElasticsearchEventEntrySerializer("logstash", "slab", "instance", true).Serialize(new[] { logObject });

    // Make sure the merged entry is still valid JSON; JObject.Parse throws if it is not
    JObject.Parse(actual.Split('\n')[1]);

    Assert.IsNotNull(actual);
    Assert.IsTrue(this.IsValidBulkMessage(actual));
}
public void when_serializing_with_global_context_then_object_can_serialize()
{
    var ctx = new Dictionary<string, string> { { "TestCtx1", "TestCtxValue1" }, { "TestCtx2", "TestCtxValue2" } };
    var payload = new Dictionary<string, object> { { "msg", "the message" }, { "date", DateTime.UtcNow } };
    var logObject = EventEntryTestHelper.Create(
        timestamp: DateTimeOffset.UtcNow,
        payloadNames: payload.Keys,
        payload: payload.Values);

    var actual = new ElasticsearchEventEntrySerializer("logstash", "slab", "instance", true, ctx).Serialize(new[] { logObject });
    Debug.WriteLine(actual);

    Assert.IsNotNull(actual);
    Assert.IsTrue(this.IsValidBulkMessage(actual));
}
public void when_serializing_a_log_entry_with_activityid_then_activityid_serialized()
{
    var payload = new Dictionary<string, object> { { "msg", "the message" }, { "date", DateTime.UtcNow } };
    var logObject = EventEntryTestHelper.Create(
        timestamp: DateTimeOffset.UtcNow,
        payloadNames: payload.Keys,
        payload: payload.Values,
        activityId: Guid.NewGuid(),
        relatedActivityId: Guid.NewGuid());

    var actual = new ElasticsearchEventEntrySerializer("logstash", "slab", "instance", true).Serialize(new[] { logObject });

    var serializedEntry = actual.Split('\n')[1];
    var jsonObject = JObject.Parse(serializedEntry);

    // Compare against the token's string value; comparing a string to a JToken directly always fails
    Assert.AreEqual(logObject.ActivityId.ToString(), jsonObject["ActivityId"].Value<string>());
    Assert.AreEqual(logObject.RelatedActivityId.ToString(), jsonObject["RelatedActivityId"].Value<string>());
    Assert.IsNotNull(actual);
    Assert.IsTrue(this.IsValidBulkMessage(actual));
}
/// <summary>
/// Initializes a new instance of the <see cref="ElasticsearchSink"/> class with the specified instance name, connection string, and index.
/// </summary>
/// <param name="instanceName">The name of the instance originating the entries.</param>
/// <param name="connectionString">The connection string for the Elasticsearch endpoint.</param>
/// <param name="index">Index name prefix formatted as index-{0:yyyy.MM.dd}.</param>
/// <param name="type">Elasticsearch entry type.</param>
/// <param name="flattenPayload">Flatten the payload collection when serializing event entries.</param>
/// <param name="bufferInterval">The buffering interval to wait for events to accumulate before sending them to Elasticsearch.</param>
/// <param name="bufferingCount">The buffering event entry count to wait for before sending events to Elasticsearch.</param>
/// <param name="maxBufferSize">The maximum number of entries that can be buffered while the sink is sending to Elasticsearch before it starts dropping entries.</param>
/// <param name="onCompletedTimeout">Defines a timeout interval for flushing the entries after an <see cref="OnCompleted"/> call is received and before disposing the sink.
/// This means that if the timeout period elapses, some event entries will be dropped and not sent to the store. Normally, calling <see cref="IDisposable.Dispose"/> on
/// the <see cref="Microsoft.Diagnostics.Tracing.EventListener"/> will block until all the entries are flushed or the interval elapses.
/// If <see langword="null"/> is specified, then the call will block indefinitely until the flush operation finishes.</param>
/// <param name="jsonGlobalContextExtension">A JSON-encoded key/value set of global environment parameters to be included in each log entry.</param>
public ElasticsearchSink(string instanceName, string connectionString, string index, string type, bool? flattenPayload,
    TimeSpan bufferInterval, int bufferingCount, int maxBufferSize, TimeSpan onCompletedTimeout, string jsonGlobalContextExtension = null)
{
    Guard.ArgumentNotNullOrEmpty(instanceName, "instanceName");
    Guard.ArgumentNotNullOrEmpty(connectionString, "connectionString");
    Guard.ArgumentNotNullOrEmpty(index, "index");
    Guard.ArgumentNotNullOrEmpty(type, "type");
    Guard.ArgumentIsValidTimeout(onCompletedTimeout, "onCompletedTimeout");
    Guard.ArgumentGreaterOrEqualThan(0, bufferingCount, "bufferingCount");

    // Elasticsearch index names may not contain uppercase letters, whitespace, or the characters \/*?",<>|
    string converted = ElasticsearchEventEntrySerializer.GetIndexName(index, DateTime.UtcNow);
    if (Regex.IsMatch(converted, "[\\\\/*?\",<>|\\sA-Z]"))
    {
        throw new ArgumentException(Resource.InvalidElasticsearchIndexNameError, "index");
    }

    this.onCompletedTimeout = onCompletedTimeout;
    this.instanceName = instanceName;
    this.flattenPayload = flattenPayload ?? true;
    this.elasticsearchUrl = new Uri(new Uri(connectionString), BulkServiceOperationPath);
    this.index = index;
    this.type = type;

    var sinkId = string.Format(CultureInfo.InvariantCulture, "ElasticsearchSink ({0})", instanceName);
    bufferedPublisher = new BufferedEventPublisher<EventEntry>(
        sinkId, this.PublishEventsAsync, bufferInterval, bufferingCount, maxBufferSize, this.cancellationTokenSource.Token);

    this._jsonGlobalContextExtension = !string.IsNullOrEmpty(jsonGlobalContextExtension)
        ? JsonConvert.DeserializeObject<Dictionary<string, string>>(jsonGlobalContextExtension)
        : null;
}
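// A hypothetical construction example (the endpoint URL, index prefix, and buffering values
// below are placeholders, not taken from this repository):
//
//   var sink = new ElasticsearchSink(
//       instanceName: "instance",
//       connectionString: "http://localhost:9200",   // base Elasticsearch endpoint; BulkServiceOperationPath is appended to it
//       index: "logstash",                           // expands to e.g. "logstash-2015.06.30" via GetIndexName; must be lowercase
//       type: "slab",
//       flattenPayload: true,
//       bufferInterval: TimeSpan.FromSeconds(15),
//       bufferingCount: 500,
//       maxBufferSize: 30000,
//       onCompletedTimeout: TimeSpan.FromSeconds(5));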
public void when_serializing_a_log_entry_then_object_can_serialize_process_and_thread_id()
{
    var payload = new Dictionary<string, object> { { "msg", "the message" }, { "date", DateTime.UtcNow } };
    var logObject = EventEntryTestHelper.Create(
        timestamp: DateTimeOffset.UtcNow,
        payloadNames: payload.Keys,
        payload: payload.Values,
        processId: 300,
        threadId: 500);

    var actual = new ElasticsearchEventEntrySerializer("logstash", "slab", "instance", true).Serialize(new[] { logObject });

    Assert.IsNotNull(actual);
    Assert.IsTrue(this.IsValidBulkMessage(actual));

    var serializedEntry = actual.Split('\n')[1];
    var jsonObject = JObject.Parse(serializedEntry);

    // Compare against the token's typed value; comparing an int to a JToken directly always fails
    Assert.AreEqual(300, jsonObject["ProcessId"].Value<int>());
    Assert.AreEqual(500, jsonObject["ThreadId"].Value<int>());
}
internal async Task<int> PublishEventsAsync(IList<EventEntry> collection)
{
    try
    {
        string logMessages;
        using (var serializer = new ElasticsearchEventEntrySerializer(this.index, this.type, this.instanceName, this.flattenPayload, this._jsonGlobalContextExtension))
        {
            logMessages = serializer.Serialize(collection);
        }

        var content = new StringContent(logMessages);
        content.Headers.ContentType = new MediaTypeHeaderValue("application/json");

        var response = await client.PostAsync(this.elasticsearchUrl, content, cancellationTokenSource.Token).ConfigureAwait(false);

        // If the request did not succeed
        if (response.StatusCode != HttpStatusCode.OK)
        {
            // Check the response for a 400 Bad Request
            if (response.StatusCode == HttpStatusCode.BadRequest)
            {
                var messagesDiscarded = collection.Count();

                var errorContent = await response.Content.ReadAsStringAsync().ConfigureAwait(false);
                string serverErrorMessage;

                // Try to parse the server error message
                try
                {
                    var errorObject = JObject.Parse(errorContent);
                    serverErrorMessage = errorObject["error"].Value<string>();
                }
                catch (Exception)
                {
                    // If for some reason we cannot extract the server error message, log the entire response
                    serverErrorMessage = errorContent;
                }

                // We are unable to write the batch of event entries - possibly a poison message.
                // Discarding events is undesirable, but we cannot let a single malformed event prevent others from being written.
                // We might want to consider falling back to writing entries individually here.
                SemanticLoggingEventSource.Log.CustomSinkUnhandledFault(string.Format(
                    "Elasticsearch sink unhandled exception; {0} messages discarded with server error message {1}",
                    messagesDiscarded, serverErrorMessage));

                return messagesDiscarded;
            }

            // Any other failure leaves the messages in the buffer to be retried
            return 0;
        }

        var responseString = await response.Content.ReadAsStringAsync().ConfigureAwait(false);
        var responseObject = JObject.Parse(responseString);
        var items = responseObject["items"] as JArray;

        // If the response returned an items collection
        if (items != null)
        {
            // NOTE: This only works with Elasticsearch 1.0.
            // Alternatively, we could query ES as part of initialization to check results, or fall back to trying <1.0 parsing.
            // We should also consider logging errors for individual entries.
            //return items.Count(t => t["create"]["status"].Value<int>().Equals(201));
            int count = items.Count();
            return count;

            // Pre-1.0 Elasticsearch
            // return items.Count(t => t["create"]["ok"].Value<bool>().Equals(true));
        }

        return 0;
    }
    catch (OperationCanceledException)
    {
        return 0;
    }
    catch (Exception ex)
    {
        // Although log-and-rethrow is generally considered an anti-pattern, this is not logged upstream and we have context here
        SemanticLoggingEventSource.Log.CustomSinkUnhandledFault(ex.ToString());
        throw;
    }
}
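// A hypothetical sketch (not part of the sink) of the per-entry fallback the comment above
// alludes to: on a 400, retry each entry as its own single-entry batch so one malformed
// event cannot poison the whole batch. PublishEntriesIndividuallyAsync is an illustrative
// name, not an existing member.
private async Task<int> PublishEntriesIndividuallyAsync(IList<EventEntry> collection)
{
    var published = 0;
    foreach (var entry in collection)
    {
        // Each call sends a one-entry batch; a malformed entry is discarded alone
        published += await this.PublishEventsAsync(new[] { entry }).ConfigureAwait(false);
    }

    return published;
}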
internal async Task<int> PublishEventsAsync(IList<EventEntry> collection)
{
    try
    {
        string logMessages;
        using (var serializer = new ElasticsearchEventEntrySerializer(this.index, this.type, this.instanceName, this.flattenPayload, this._jsonGlobalContextExtension))
        {
            logMessages = serializer.Serialize(collection);
        }

        var content = new StringContent(logMessages);
        content.Headers.ContentType = new MediaTypeHeaderValue("application/json");

        // Build the basic authorization header if credentials were supplied
        if (this.userName != null && this.password != null)
        {
            var byteArray = Encoding.ASCII.GetBytes(this.userName + ":" + this.password);
            this.client.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue("Basic", Convert.ToBase64String(byteArray));
        }

        var response = await this.client.PostAsync(this.elasticsearchUrl, content, this.cancellationTokenSource.Token).ConfigureAwait(false);

        // If the request did not succeed
        if (response.StatusCode != HttpStatusCode.OK)
        {
            // Check the response for a 400 Bad Request
            if (response.StatusCode == HttpStatusCode.BadRequest)
            {
                var messagesDiscarded = collection.Count();

                var errorContent = await response.Content.ReadAsStringAsync().ConfigureAwait(false);
                string serverErrorMessage;

                // Try to parse the server error message
                try
                {
                    var errorObject = JObject.Parse(errorContent);
                    serverErrorMessage = errorObject["error"].Value<string>();
                }
                catch (Exception)
                {
                    // If for some reason we cannot extract the server error message, log the entire response
                    serverErrorMessage = errorContent;
                }

                // We are unable to write the batch of event entries - possibly a poison message.
                // Discarding events is undesirable, but we cannot let a single malformed event prevent others from being written.
                // We might want to consider falling back to writing entries individually here.
                SemanticLoggingEventSource.Log.CustomSinkUnhandledFault(string.Format(
                    "Elasticsearch sink unhandled exception; {0} messages discarded with server error message {1}",
                    messagesDiscarded, serverErrorMessage));

                return messagesDiscarded;
            }

            // Any other failure leaves the messages in the buffer to be retried
            return 0;
        }

        var responseString = await response.Content.ReadAsStringAsync().ConfigureAwait(false);
        var responseObject = JObject.Parse(responseString);
        var items = responseObject["items"] as JArray;

        // If the response returned an items collection
        if (items != null)
        {
            // NOTE: This only works with Elasticsearch 1.0.
            // Alternatively, we could query ES as part of initialization to check results, or fall back to trying <1.0 parsing.
            // We should also consider logging errors for individual entries.
            if (this.elasticsearchUrl.Port == 80 || this.elasticsearchUrl.Port == 443)
            {
                return items.Count(t => t["index"]["status"].Value<int>().Equals(201));
            }
            else if (this.elasticsearchUrl.Port == 9200 || this.elasticsearchUrl.Port == 9243)
            {
                //return items.Count(t => t["create"]["status"].Value<int>().Equals(201));
                // Temporary fix: ES 2.3.1 returns "create" for bulk index; ES 5.0.2 and ES 5.1.1 return "index" for bulk index.
                // TODO: Introduce a version-specific constant here.
                return items.Count(t => t["index"]["status"].Value<int>().Equals(201));
            }

            // Pre-1.0 Elasticsearch
            // return items.Count(t => t["create"]["ok"].Value<bool>().Equals(true));
        }

        return 0;
    }
    catch (OperationCanceledException)
    {
        return 0;
    }
    catch (Exception ex)
    {
        // Although log-and-rethrow is generally considered an anti-pattern, this is not logged upstream and we have context here
        Debug.WriteLine("{0} \n {1} \n {2} \n ", ex.Message, ex.InnerException, ex.StackTrace);
        SemanticLoggingEventSource.Log.CustomSinkUnhandledFault(ex.ToString());
        throw;
    }
}
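// A hypothetical sketch of the initialization-time version check the TODO above suggests,
// instead of inferring response shape from the port number: GET on the node's root endpoint
// returns cluster metadata whose version.number field indicates whether the bulk response
// uses "create" or "index" for bulk index operations. GetServerVersionAsync is an
// illustrative name, not an existing member.
private async Task<string> GetServerVersionAsync()
{
    // The root endpoint of an Elasticsearch node returns metadata including version.number
    var response = await this.client.GetAsync(new Uri(this.elasticsearchUrl, "/")).ConfigureAwait(false);
    response.EnsureSuccessStatusCode();

    var body = JObject.Parse(await response.Content.ReadAsStringAsync().ConfigureAwait(false));
    return body["version"]["number"].Value<string>();
}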