/**[[bulkall-observable]]
 * ==== Multiple documents with `BulkAllObservable` helper
 *
 * Using the `BulkAllObservable` helper allows you to focus on the overall objective of indexing, without having to
 * concern yourself with retry, backoff or chunking mechanics.
 * Multiple documents can be indexed using the `BulkAll` method and the `Wait()` extension method.
 *
 * This helper exposes functionality to automatically retry and back off in the event of an indexing failure,
 * and to control the number of documents indexed in a single HTTP request. In the example below, each request contains
 * 1000 documents, chunked from the original input. For a large number of documents this could result in many HTTP
 * requests, each containing 1000 documents (the last request may contain fewer, depending on the total number).
 *
 * The helper lazily enumerates the provided `IEnumerable<T>` of documents, allowing you to index a large number of documents easily.
 */
public void BulkDocumentsWithObservableHelper()
{
	// hide
	var people = new[]
	{
		new Person { Id = 1, FirstName = "Martijn", LastName = "Laarman" },
		new Person { Id = 2, FirstName = "Stuart", LastName = "Cam" },
		new Person { Id = 3, FirstName = "Russ", LastName = "Cam" }
		// snip
	};

	var bulkAllObservable = _client.BulkAll(people, b => b
		.Index("people")
		.BackOffTime("30s") //<1> how long to wait between retries
		.BackOffRetries(2) //<2> how many retries are attempted if a failure occurs
		.RefreshOnCompleted()
		.MaxDegreeOfParallelism(Environment.ProcessorCount)
		.Size(1000) // <3> items per bulk request
	)
	.Wait(TimeSpan.FromMinutes(15), next => //<4> perform the indexing and wait up to 15 minutes; whilst the BulkAll calls are asynchronous, this is a blocking operation
	{
		// do something e.g. write number of pages to console
	});
}
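/**
 * If you need per-page progress reporting or custom error handling rather than blocking with `Wait()`,
 * you can subscribe to the observable directly. The following is a minimal sketch, assuming the
 * `BulkAllObserver` type exposed by the client's BulkAll helper and a `Page` property on the per-request
 * response; `_client` and the `people` collection are reused from the example above.
 */
public void BulkDocumentsWithManualSubscription()
{
	// hide
	var people = new[]
	{
		new Person { Id = 1, FirstName = "Martijn", LastName = "Laarman" }
		// snip
	};

	var bulkAllObservable = _client.BulkAll(people, b => b
		.Index("people")
		.BackOffTime("30s")
		.BackOffRetries(2)
		.Size(1000)
	);

	var waitHandle = new ManualResetEvent(false);

	var observer = new BulkAllObserver(
		onNext: response => Console.WriteLine($"Indexed page {response.Page}"), // called for each successful bulk request
		onError: e =>
		{
			Console.WriteLine($"BulkAll failed: {e.Message}"); // called if retries are exhausted or the error is unrecoverable
			waitHandle.Set();
		},
		onCompleted: () => waitHandle.Set() // called once all documents have been indexed
	);

	bulkAllObservable.Subscribe(observer);
	waitHandle.WaitOne(); // block until indexing completes or errors
}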
public async Task BadBulkRequestFeedsToOnError()
{
	var index = CreateIndexName();
	var documents = await CreateIndexAndReturnDocuments(index);
	var seenPages = 0;
	var badUris = new[] { new Uri("http://test.example:9201"), new Uri("http://test.example:9202") };
	var pool = new StaticNodePool(badUris); // a pool of unreachable nodes, so every bulk request will fail
	var badClient = new ElasticsearchClient(new ElasticsearchClientSettings(pool));
	var observableBulk = badClient.BulkAll(documents, f => f
		.MaxDegreeOfParallelism(8)
		.BackOffTime(TimeSpan.FromSeconds(10))
		.BackOffRetries(2)
		.Size(Size)
		.RefreshOnCompleted()
		.Index(index)
	);

	Exception ex = null;
	var handle = new ManualResetEvent(false);
	using (observableBulk.Subscribe(
		b => Interlocked.Increment(ref seenPages), // onNext: count each successfully indexed page
		e =>
		{
			ex = e; // onError: capture the exception and unblock the test
			handle.Set();
		},
		() => handle.Set() // onCompleted
	))
	{
		handle.WaitOne(TimeSpan.FromSeconds(60));

		seenPages.Should().Be(0); // no page can succeed against unreachable nodes
		var clientException = ex.Should().NotBeNull().And.BeOfType<TransportException>().Subject;
		clientException.Message.Should().StartWith("BulkAll halted after attempted bulk failed over all the active nodes");
	}
}
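/**
 * For comparison, the same halt condition can be observed with the blocking `Wait()` extension instead of a
 * manual subscription. This is a minimal sketch under the assumption that `Wait()` rethrows the error that
 * terminated the observable; `observableBulk` is the instance constructed in the test above.
 */
public void HandleBulkAllFailureWithWait()
{
	try
	{
		// blocks until indexing completes, the timeout elapses, or the observable errors
		observableBulk.Wait(TimeSpan.FromSeconds(60), b => { /* per-page callback */ });
	}
	catch (TransportException e)
	{
		Console.WriteLine($"BulkAll halted: {e.Message}");
	}
}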