private RemoteBulkInsertOperation GetBulkInsertOperation()
{
    // Lazily start the first chunk; Task.FromResult(0) acts as an already
    // completed "previous disposal" task.
    if (current == null)
        return current = CreateBulkInsertOperation(Task.FromResult(0));

    // Keep writing into the current chunk while both limits hold. A
    // MaxChunkVolumeInBytes of zero or less means "no volume limit".
    var chunkOptions = options.ChunkedBulkInsertOptions;
    var underDocumentLimit = processedItemsInCurrentOperation < chunkOptions.MaxDocumentsPerChunk;
    var underVolumeLimit = chunkOptions.MaxChunkVolumeInBytes <= 0 ||
                           currentChunkSize < chunkOptions.MaxChunkVolumeInBytes;
    if (underDocumentLimit && underVolumeLimit)
        return current;

    // Chunk is full. If the previous chunk's disposal has not completed yet,
    // block here until it has, so at most one disposal is in flight — this
    // bounds both memory use and concurrency.
    if (previousTask != null)
        previousTask.ConfigureAwait(false).GetAwaiter().GetResult();

    previousTask = current.DisposeAsync();
    currentChunkSize = 0;
    processedItemsInCurrentOperation = 0;
    current = CreateBulkInsertOperation(previousTask);
    return current;
}
public void Write(string id, RavenJObject metadata, RavenJObject data)
{
    // Fetch (or roll over to) the operation backing the current chunk,
    // then delegate the actual write to it.
    var operation = GetBulkInsertOperation();
    current = operation;
    operation.Write(id, metadata, data);
    processedItemsInCurrentOperation += 1;
}
public ChunkedRemoteBulkInsertOperation(BulkInsertOptions options, AsyncServerClient client, IDatabaseChanges changes)
{
    // All dependencies must be captured before the first chunk operation
    // is created below, since GetBulkInsertOperation reads them.
    this.client = client;
    this.changes = changes;
    this.options = options;

    currentChunkSize = 0;
    current = GetBulkInsertOperation();
}
public void CanCreateAndDisposeUsingBulk()
{
    using (var store = NewRemoteDocumentStore())
    {
        // Wrap the operation in a using block instead of calling Dispose()
        // manually, so it is disposed even if an exception is thrown before
        // the explicit Dispose call would run.
        using (new RemoteBulkInsertOperation(new BulkInsertOptions(), (AsyncServerClient)store.AsyncDatabaseCommands, store.Changes()))
        {
        }
    }
}
private RemoteBulkInsertOperation CreateBulkInsertOperation()
{
    // Build a fresh remote operation and forward progress reports to any
    // subscriber registered on this wrapper.
    var operation = new RemoteBulkInsertOperation(options, client, changes);

    var report = Report;
    if (report != null)
        operation.Report += report;

    return operation;
}
public async Task WaitForLastTaskToFinish()
{
    // Nothing to wait for once disposed, or when no chunk is in flight.
    if (disposed || current == null)
        return;

    await current.DisposeAsync().ConfigureAwait(false);
    current = null;
}
public void CanInsertSingleDocument()
{
    using (var store = NewRemoteDocumentStore())
    {
        // using guarantees disposal (and therefore the final flush) even if
        // Write throws; the original only disposed on the success path.
        using (var bulkInsertOperation = new RemoteBulkInsertOperation(new BulkInsertOptions(), (ServerClient)store.DatabaseCommands))
        {
            bulkInsertOperation.Write("test", new RavenJObject(), new RavenJObject { { "test", "passed" } });
        }

        Assert.Equal("passed", store.DatabaseCommands.Get("test").DataAsJson.Value<string>("test"));
    }
}
public void CanBulkInsertWithApiKey()
{
    // Arrange: a remote store with authentication enabled.
    using (var store = NewRemoteDocumentStore(enableAuthentication: true))
    {
        // Act: write one document through the bulk-insert path.
        using (var op = new RemoteBulkInsertOperation(new BulkInsertOptions(), (AsyncServerClient)store.AsyncDatabaseCommands, store.Changes()))
        {
            op.Write("items/1", new RavenJObject(), new RavenJObject());
        }

        // Assert: the document is retrievable after the operation flushes.
        var stored = store.DatabaseCommands.Get("items/1");
        Assert.NotNull(stored);
    }
}
public void Write(string id, RavenJObject metadata, RavenJObject data, int? dataSize)
{
    // Get the operation for the current chunk (rolling over if full) and
    // delegate the write to it.
    current = GetBulkInsertOperation();
    current.Write(id, metadata, data, dataSize);

    // Track approximate chunk volume only when a size limit is configured.
    // NOTE(review): accounting uses GetRoughSize(data) rather than the
    // dataSize hint passed in — confirm that is intended.
    if (documentSizeInChunkLimit.HasValue)
    {
        documentSizeInChunk += DocumentHelpers.GetRoughSize(data);
    }

    processedItemsInCurrentOperation++;
}
public void Write(string id, RavenJObject metadata, RavenJObject data, int? dataSize)
{
    // Resolve the operation backing the current chunk, then delegate.
    var operation = GetBulkInsertOperation();
    current = operation;
    operation.Write(id, metadata, data, dataSize);

    // Size estimation only matters when a byte limit is in effect (> 0).
    var volumeLimited = options.ChunkedBulkInsertOptions.MaxChunkVolumeInBytes > 0;
    if (volumeLimited)
        currentChunkSize += DocumentHelpers.GetRoughSize(data);

    processedItemsInCurrentOperation++;
}
public void CanBulkInsertWithWindowsAuth()
{
    // NOTE(review): the store is created without enabling authentication —
    // confirm this actually exercises the Windows-auth path the name implies.
    using (var store = NewRemoteDocumentStore())
    {
        using (var op = new RemoteBulkInsertOperation(new BulkInsertOptions(), (ServerClient)store.DatabaseCommands, store.Changes()))
        {
            op.Write("items/1", new RavenJObject(), new RavenJObject());
        }

        var doc = store.DatabaseCommands.Get("items/1");
        Assert.NotNull(doc);
    }
}
private RemoteBulkInsertOperation GetBulkInsertOperation()
{
    // First use: start the initial chunk.
    if (current == null)
    {
        current = CreateBulkInsertOperation();
        return current;
    }

    // Still room in the current chunk — keep using it.
    if (processedItemsInCurrentOperation < chunkSize)
        return current;

    // Chunk is full: queue its asynchronous disposal and roll over.
    processedItemsInCurrentOperation = 0;
    tasks.Add(current.DisposeAsync());
    current = CreateBulkInsertOperation();
    return current;
}
public void Write(string id, RavenJObject metadata, RavenJObject data, int? dataSize)
{
    // Roll over to a new chunk first if limits were hit, then delegate.
    current = GetBulkInsertOperation();
    current.Write(id, metadata, data, dataSize);

    // Only estimate document size when a chunk byte limit is configured.
    if (options.ChunkedBulkInsertOptions.MaxChunkVolumeInBytes > 0)
        currentChunkSize += DocumentHelpers.GetRoughSize(data);

    processedItemsInCurrentOperation++;
}
public void CanHandleReferenceChecking()
{
    using (var store = NewRemoteDocumentStore())
    {
        // Enable reference checking and verify a write goes through cleanly.
        var options = new BulkInsertOptions
        {
            CheckReferencesInIndexes = true
        };

        using (var op = new RemoteBulkInsertOperation(options, (AsyncServerClient)store.AsyncDatabaseCommands, store.Changes()))
        {
            op.Write("items/1", new RavenJObject(), new RavenJObject());
        }
    }
}
public void CanBulkInsertWithApiKey()
{
    using (var store = NewRemoteDocumentStore())
    {
        // Removed the leftover WaitForUserToContinueTheTest() debugging call:
        // it halts the automated run waiting for manual interaction.
        using (var op = new RemoteBulkInsertOperation(new BulkInsertOptions(), (ServerClient)store.DatabaseCommands))
        {
            op.Write("items/1", new RavenJObject(), new RavenJObject());
        }

        Assert.NotNull(store.DatabaseCommands.Get("items/1"));
    }
}
private RemoteBulkInsertOperation GetBulkInsertOperation()
{
    // First use: start the initial chunk.
    if (current == null)
        return current = CreateBulkInsertOperation();

    // Keep filling the current chunk while no size limit is configured, or
    // the accumulated rough document size is still under it.
    var withinSizeLimit = documentSizeInChunkLimit.HasValue == false ||
                          documentSizeInChunk < documentSizeInChunkLimit.Value;
    if (withinSizeLimit)
        return current;

    // Roll over: reset counters, queue asynchronous disposal of the full
    // chunk, and start a fresh one.
    documentSizeInChunk = 0;
    processedItemsInCurrentOperation = 0;
    tasks.Add(current.DisposeAsync());
    return current = CreateBulkInsertOperation();
}
public void CanHandleUpdates()
{
    using (var store = NewRemoteDocumentStore())
    {
        // First insert creates the document.
        using (var op = new RemoteBulkInsertOperation(new BulkInsertOptions(), (AsyncServerClient)store.AsyncDatabaseCommands, store.Changes()))
        {
            op.Write("items/1", new RavenJObject(), new RavenJObject());
        }

        // Second insert targets the same id, permitted via OverwriteExisting.
        var overwriteOptions = new BulkInsertOptions { OverwriteExisting = true };
        using (var op = new RemoteBulkInsertOperation(overwriteOptions, (AsyncServerClient)store.AsyncDatabaseCommands, store.Changes()))
        {
            op.Write("items/1", new RavenJObject(), new RavenJObject());
        }
    }
}
public void CanHandleUpdates()
{
    using (var store = NewRemoteDocumentStore())
    {
        // First insert creates the document.
        using (var op = new RemoteBulkInsertOperation(new BulkInsertOptions(), (ServerClient)store.DatabaseCommands))
        {
            op.Write("items/1", new RavenJObject(), new RavenJObject());
        }

        // Re-inserting the same id succeeds when CheckForUpdates is enabled.
        var updateOptions = new BulkInsertOptions { CheckForUpdates = true };
        using (var op = new RemoteBulkInsertOperation(updateOptions, (ServerClient)store.DatabaseCommands))
        {
            op.Write("items/1", new RavenJObject(), new RavenJObject());
        }
    }
}
private RemoteBulkInsertOperation CreateBulkInsertOperation(Task<int> disposeAsync)
{
    // Guid.Empty means "no pre-assigned operation id": pass null so the
    // remote operation generates its own.
    var existingOperationId = OperationId == Guid.Empty ? (Guid?)null : OperationId;

    var operation = new RemoteBulkInsertOperation(options, client, changes, disposeAsync, existingOperationId);

    // Forward progress reports to any subscriber on this wrapper.
    var report = Report;
    if (report != null)
        operation.Report += report;

    return operation;
}
private RemoteBulkInsertOperation GetBulkInsertOperation()
{
    // Lazily start the first chunk; Task.FromResult(0) acts as an already
    // completed "previous disposal" task.
    if (current == null)
        return current = CreateBulkInsertOperation(Task.FromResult(0));

    // Stay on the current chunk while the document count is under chunkSize
    // and the accumulated rough size is under the optional byte limit.
    var underCountLimit = processedItemsInCurrentOperation < chunkSize;
    var underSizeLimit = documentSizeInChunkLimit.HasValue == false ||
                         documentSizeInChunk < documentSizeInChunkLimit.Value;
    if (underCountLimit && underSizeLimit)
        return current;

    // If the previous chunk's disposal has not completed yet, block until it
    // has, so at most one disposal is in flight — bounding both memory use
    // and concurrency.
    if (previousTask != null)
        previousTask.ConfigureAwait(false).GetAwaiter().GetResult();

    previousTask = current.DisposeAsync();
    documentSizeInChunk = 0;
    processedItemsInCurrentOperation = 0;
    current = CreateBulkInsertOperation(previousTask);
    return current;
}
private RemoteBulkInsertOperation GetBulkInsertOperation()
{
    // Lazily start the first chunk; Task.FromResult(0) stands in for an
    // already completed "previous disposal" task.
    if (current == null)
        return current = CreateBulkInsertOperation(Task.FromResult(0));

    // Keep the current chunk while the per-chunk document count is under
    // MaxDocumentsPerChunk AND (when a byte limit > 0 is configured) the
    // accumulated volume is under MaxChunkVolumeInBytes.
    if (processedItemsInCurrentOperation < options.ChunkedBulkInsertOptions.MaxDocumentsPerChunk)
        if (options.ChunkedBulkInsertOptions.MaxChunkVolumeInBytes <= 0 || currentChunkSize < options.ChunkedBulkInsertOptions.MaxChunkVolumeInBytes)
            return current;

    // if we haven't flushed the previous one yet, we will force
    // a disposal of both the previous one and the one before, to avoid
    // consuming a lot of memory, and to avoid having _too_ much concurrency.
    if (previousTask != null)
    {
        // Deliberate synchronous wait: block the writer until the older
        // chunk has finished flushing.
        previousTask.ConfigureAwait(false).GetAwaiter().GetResult();
    }
    previousTask = current.DisposeAsync();
    currentChunkSize = 0;
    processedItemsInCurrentOperation = 0;
    current = CreateBulkInsertOperation(previousTask);
    return current;
}
public void CanInsertSeveralDocumentsInSeveralBatches()
{
    using (var store = NewRemoteDocumentStore())
    {
        // BatchSize = 2 forces the three writes to span more than one batch.
        // using guarantees disposal (and the final flush) even if a Write
        // throws; the original only disposed on the success path.
        using (var bulkInsertOperation = new RemoteBulkInsertOperation(new BulkInsertOptions { BatchSize = 2 }, (AsyncServerClient)store.AsyncDatabaseCommands, store.Changes()))
        {
            bulkInsertOperation.Write("one", new RavenJObject(), new RavenJObject { { "test", "passed" } });
            bulkInsertOperation.Write("two", new RavenJObject(), new RavenJObject { { "test", "passed" } });
            bulkInsertOperation.Write("three", new RavenJObject(), new RavenJObject { { "test", "passed" } });
        }

        Assert.Equal("passed", store.DatabaseCommands.Get("one").DataAsJson.Value<string>("test"));
        Assert.Equal("passed", store.DatabaseCommands.Get("two").DataAsJson.Value<string>("test"));
        Assert.Equal("passed", store.DatabaseCommands.Get("three").DataAsJson.Value<string>("test"));
    }
}
private RemoteBulkInsertOperation CreateBulkInsertOperation(Task<int> disposeAsync)
{
    // Reuse the assigned operation id unless it is Guid.Empty, which means
    // "let the remote operation generate one".
    Guid? existingOperationId = null;
    if (OperationId != Guid.Empty)
        existingOperationId = OperationId;

    var operation = new RemoteBulkInsertOperation(options, client, changes, disposeAsync, existingOperationId);

    // Wire progress reporting through to this wrapper's subscribers.
    var report = Report;
    if (report != null)
        operation.Report += report;

    return operation;
}
private RemoteBulkInsertOperation CreateBulkInsertOperation(Task<int> disposeAsync)
{
    // New chunk operation, chained to the disposal of the previous one.
    var operation = new RemoteBulkInsertOperation(options, client, changes, disposeAsync);

    // Forward progress reports to any subscriber on this wrapper.
    var report = Report;
    if (report != null)
        operation.Report += report;

    return operation;
}