Example #1
        public void When_setting_the_same_value_on_the_key_twice_will_not_create_new_version()
        {
            using (var table = new PersistentHashTable(testDatabase))
            {
                table.Initialize();

                table.Batch(actions =>
                {
                    PutResult version1 = actions.Put(new PutRequest
                    {
                        Key            = "abc1",
                        ParentVersions = new ValueVersion[0],
                        Bytes          = new byte[] { 6 }
                    });

                    var version2 = actions.Put(new PutRequest
                    {
                        Key            = "abc1",
                        ParentVersions = new[] { version1.Version },
                        Bytes          = new byte[] { 6 }
                    });

                    Assert.False(version2.ConflictExists);
                    Assert.Equal(version1.Version.InstanceId, version2.Version.InstanceId);
                    Assert.Equal(version1.Version.Number, version2.Version.Number);
                });
            }
        }
Example #2
        public void Writing_identical_data_to_existing_value_will_not_create_conflict()
        {
            using (var table = new PersistentHashTable(testDatabase))
            {
                table.Initialize();

                table.Batch(actions =>
                {
                    PutResult put1 = actions.Put(new PutRequest
                    {
                        Key            = "test",
                        ParentVersions = new ValueVersion[0],
                        Bytes          = new byte[] { 1 }
                    });
                    PutResult put2 = actions.Put(new PutRequest
                    {
                        Key            = "test",
                        ParentVersions = new ValueVersion[0],
                        Bytes          = new byte[] { 1 }
                    });
                    Value[] values = actions.Get(new GetRequest {
                        Key = "test"
                    });
                    Assert.Equal(1, values.Length);
                    Assert.Equal(put1.Version.Number, put2.Version.Number);
                    Assert.Equal(put1.Version.InstanceId, put2.Version.InstanceId);
                });
            }
        }
Example #3
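        // Revisioning put trigger: skips documents already marked Historical or excluded by entity name,
        // stores a copy of the incoming document under "<key>/revisions/", trims old revisions, and stamps
        // the current document with its new revision number.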
        public override void OnPut(string key, JObject document, JObject metadata, TransactionInformation transactionInformation)
        {
            if (metadata.Value<string>(RavenDocumentRevisionStatus) == "Historical")
            {
                return;
            }

            if (excludeByEntityName.Contains(metadata.Value<string>("Raven-Entity-Name")))
            {
                return;
            }

            int revision = 0;

            if (metadata[RavenDocumentRevision] != null)
            {
                revision = metadata.Value<int>(RavenDocumentRevision);
            }

            var copyMetadata = new JObject(metadata);

            copyMetadata[RavenDocumentRevisionStatus] = JToken.FromObject("Historical");
            copyMetadata[RavenDocumentRevision]       = JToken.FromObject(revision + 1);
            PutResult newDoc = Database.Put(key + "/revisions/", null, document, copyMetadata,
                                            transactionInformation);

            revision = int.Parse(newDoc.Key.Split('/').Last());

            RemoveOldRevisions(key, revision, transactionInformation);

            metadata[RavenDocumentRevisionStatus] = JToken.FromObject("Current");
            metadata[RavenDocumentRevision]       = JToken.FromObject(revision);
        }
Example #4
        public void Can_use_optimistic_concurrency()
        {
            using (var table = new PersistentHashTable(testDatabase))
            {
                table.Initialize();

                table.Batch(actions =>
                {
                    actions.Put(new PutRequest
                    {
                        Key            = "test",
                        ParentVersions = new ValueVersion[0],
                        Bytes          = new byte[] { 1 }
                    });
                    PutResult put = actions.Put(new PutRequest
                    {
                        Key                   = "test",
                        ParentVersions        = new ValueVersion[0],
                        Bytes                 = new byte[] { 2 },
                        OptimisticConcurrency = true
                    });
                    Assert.True(put.ConflictExists);

                    actions.Commit();

                    Assert.Equal(1, actions.Get(new GetRequest
                    {
                        Key = "test"
                    }).Length);
                });
            }
        }
Example #5
        public void Can_get_item_in_specific_version()
        {
            using (var table = new PersistentHashTable(testDatabase))
            {
                table.Initialize();

                table.Batch(actions =>
                {
                    PutResult version1 = actions.Put(new PutRequest
                    {
                        Key            = "test",
                        ParentVersions = new ValueVersion[0],
                        Bytes          = new byte[] { 1 }
                    });
                    actions.Put(new PutRequest
                    {
                        Key            = "test",
                        ParentVersions = new ValueVersion[0],
                        Bytes          = new byte[] { 2 }
                    });
                    Value[] value = actions.Get(new GetRequest
                    {
                        Key = "test",
                        SpecifiedVersion = version1.Version
                    });
                    Assert.Equal(new byte[] { 1 }, value[0].Data);
                });
            }
        }
Example #6
        public void Can_set_the_replication_version_and_get_it_back()
        {
            using (var table = new PersistentHashTable(testDatabase))
            {
                table.Initialize();
                Guid guid = Guid.NewGuid();
                table.Batch(actions =>
                {
                    PutResult result = actions.Put(new PutRequest
                    {
                        Key                = "test",
                        ParentVersions     = new ValueVersion[0],
                        Bytes              = new byte[] { 3 },
                        ReplicationVersion = new ValueVersion
                        {
                            InstanceId = guid,
                            Number     = 53
                        }
                    });

                    Assert.Equal(53, result.Version.Number);
                    Assert.Equal(guid, result.Version.InstanceId);

                    actions.Commit();
                });
            }
        }
Example #7
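        // Fills the store with pinned content until a further put fails (triggering eviction),
        // then verifies the same put succeeds once the pin context is released.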
        public Task PutContentSucceedsAfterFullnessReleased()
        {
            var contentSize = ContentSizeToStartHardPurging(3);

            return(TestStore(Context, Clock, async store =>
            {
                var triggeredEviction = false;
                store.OnLruEnumerationWithTime = hashes =>
                {
                    triggeredEviction = true;
                    return Task.FromResult(hashes);
                };

                using (var pinContext = store.CreatePinContext())
                {
                    await PutRandomAndPinAsync(store, contentSize, pinContext);
                    await PutRandomAndPinAsync(store, contentSize, pinContext);

                    PutResult putResult = await store.PutRandomAsync(Context, contentSize);
                    putResult.ShouldBeError();
                    triggeredEviction.Should().BeTrue();
                    triggeredEviction = false;

                    await pinContext.DisposeAsync();
                }

                await store.PutRandomAsync(Context, contentSize).ShouldBeSuccess();
                triggeredEviction.Should().BeTrue();
            }));
        }
Example #8
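        // Places the requested content into a temporary hard-linked file, then puts that file into the
        // target content session; a failure in either step is returned as the PlaceFileResult.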
        public async Task<PlaceFileResult> CreateTempAndPutAsync(
            OperationContext context,
            ContentHash contentHash,
            IContentSession contentSession)
        {
            using (var disposableFile = new DisposableFile(context, _fileSystem, AbsolutePath.CreateRandomFileName(_rootPath / "temp")))
            {
                PlaceFileResult placeTempFileResult = await PlaceFileAsync(context, contentHash, disposableFile.Path, FileAccessMode.ReadOnly, FileReplacementMode.FailIfExists, FileRealizationMode.HardLink, context.Token);

                if (!placeTempFileResult.Succeeded)
                {
                    return(placeTempFileResult);
                }
                PutResult putFileResult = await contentSession.PutFileAsync(context, contentHash, disposableFile.Path, FileRealizationMode.Any, context.Token);

                if (!putFileResult)
                {
                    return(new PlaceFileResult(putFileResult));
                }
                else
                {
                    return(new PlaceFileResult(PlaceFileResult.ResultCode.PlacedWithCopy, putFileResult.ContentSize));
                }
            }
        }
Example #9
        public async Task CopyExistingFile()
        {
            await RunTestCase(nameof(CopyExistingFile), async (rootPath, session, client) =>
            {
                // Write a random file
                var sourcePath = rootPath / ThreadSafeRandom.Generator.Next().ToString();
                var content    = ThreadSafeRandom.GetBytes(FileSize);
                FileSystem.WriteAllBytes(sourcePath, content);

                // Put the random file
                PutResult putResult = await session.PutFileAsync(_context, HashType.Vso0, sourcePath, FileRealizationMode.Any, CancellationToken.None);
                putResult.ShouldBeSuccess();

                // Copy the file out via GRPC
                var destinationPath = rootPath / ThreadSafeRandom.Generator.Next().ToString();
                (await client.CopyFileAsync(_context, putResult.ContentHash, destinationPath, CancellationToken.None)).ShouldBeSuccess();

                var copied = FileSystem.ReadAllBytes(destinationPath);

                // Compare original and copied files
                var originalHash = content.CalculateHash(DefaultHashType);
                var copiedHash   = copied.CalculateHash(DefaultHashType);
                Assert.Equal(originalHash, copiedHash);
            });
        }
Example #10
        public Task CopyFileRejectedIfTooMany(bool failFastIfServerBusy)
        {
            var failFastBandwidthConfiguration = new BandwidthConfiguration()
            {
                Interval               = TimeSpan.FromSeconds(10),
                RequiredBytes          = 10_000_000,
                FailFastIfServerIsBusy = failFastIfServerBusy,
            };

            int numberOfFiles = 100;

            _copyToLimit = 1;

            return(RunTestCase(async (rootPath, session, client) =>
            {
                // Add random files to the cache.
                var tasks = Enumerable.Range(1, numberOfFiles).Select(_ => putRandomFile()).ToList();
                var hashes = await Task.WhenAll(tasks);

                var copyTasks = hashes.Select(
                    hash =>
                {
                    var destinationPath = rootPath / ThreadSafeRandom.Generator.Next().ToString();
                    return client.CopyFileAsync(
                        new OperationContext(_context),
                        hash,
                        destinationPath,
                        new CopyOptions(failFastBandwidthConfiguration));
                });

                var results = await Task.WhenAll(copyTasks);

                if (failFastIfServerBusy)
                {
                    // We're doing 100 simultaneous copies; at least some of them should fail because we're not willing to wait for the response.
                    var error = results.FirstOrDefault(r => !r.Succeeded);
                    error.Should().NotBeNull("At least one copy operation should fail.");

                    error!.ErrorMessage.Should().Contain("Copy limit of");
                }
                else
                {
                    // All operations should succeed!
                    results.All(r => r.ShouldBeSuccess()).Should().BeTrue();
                }

                async Task<ContentHash> putRandomFile()
                {
                    var sourcePath = rootPath / ThreadSafeRandom.Generator.Next().ToString();
                    var content = ThreadSafeRandom.GetBytes(FileSize);
                    FileSystem.WriteAllBytes(sourcePath, content);

                    // Put the random file
                    PutResult putResult = await session.PutFileAsync(_context, HashType.Vso0, sourcePath, FileRealizationMode.Any, CancellationToken.None);
                    putResult.ShouldBeSuccess();
                    return putResult.ContentHash;
                }
            }));
        }
Example #11
        public async Task<ApiResponse<PutResult<Guid>>> Put([FromBody] StateMachineInstancePutDto dto)
        {
            var putResult = await _stateMachineInstanceService.Put(dto);

            var result = new PutResult<Guid>(putResult.Instance?.Id ?? Guid.Empty);

            return(ApiResponse.Custom(Response, putResult.Code, putResult.Message, result));
        }
Example #12
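 // Translates a storage-level PutResult into a PutResponseMessage, carrying over the version and the conflict flag.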
 public static PutResponseMessage GetPutResponse(this PutResult x)
 {
     return(new PutResponseMessage.Builder
     {
         Version = GetVersion(x.Version),
         ConflictExists = x.ConflictExists
     }.Build());
 }
Example #13
        public async Task<ApiResponse<PutResult<Guid>>> Put([FromBody] BlobInputDto dto)
        {
            var key = await _blobService.Put(dto);

            var result = new PutResult<Guid>(key);

            return(ApiResponse.OK(result));
        }
Example #14
        public virtual void PutFileStop(Context context, PutResult result, bool trusted, AbsolutePath path, FileRealizationMode mode)
        {
            if (context.IsEnabled)
            {
                TracerOperationFinished(context, result, $"{Name}.{PutFileCallName}({path},{mode},{result.ContentHash.HashType}) stop {result.DurationMs}ms result=[{result}] trusted={trusted}");
            }

            _putFileCallCounter.Completed(result.Duration.Ticks);
        }
Example #15
        public virtual void PutStreamStop(Context context, PutResult result)
        {
            if (context.IsEnabled)
            {
                TracerOperationFinished(context, result, $"{Name}.{PutStreamCallName} stop {result.DurationMs}ms result=[{result}]");
            }

            _putStreamCallCounter.Completed(result.Duration.Ticks);
        }
Example #16
        public async Task PlaceFileRequiringNewReplicaCloseToHardLimitDoesNotHang()
        {
            var context = new Context(Logger);

            using (DisposableDirectory testDirectory = new DisposableDirectory(FileSystem))
            {
#pragma warning disable AsyncFixer04 // A disposable object used in a fire & forget async call
                Task testTask = TestStore(context, Clock, testDirectory, async store =>
#pragma warning restore AsyncFixer04 // A disposable object used in a fire & forget async call
                {
                    // Make a file which will overflow the cache size with just 2 copies.
                    PutResult putResult = await store.PutRandomAsync(context, ContentSizeToStartHardPurging(2));
                    ResultTestExtensions.ShouldBeSuccess((BoolResult)putResult);
                    ContentHash hash = putResult.ContentHash;

                    // Hardlink the file out 1024 times. Since the limit is 1024 total, and we already have 1 in the CAS,
                    // this will overflow the links and cause the CAS to create a new replica for it. This will cause
                    // the purger to consider that hash for eviction *while making room for that hash*, which is the
                    // trigger for the previous deadlock that this test will now guard against.
                    for (int i = 0; i < 1024; i++)
                    {
                        PlaceFileResult placeResult = await store.PlaceFileAsync(
                            context,
                            hash,
                            testDirectory.Path / $"hardlink{i}.txt",
                            FileAccessMode.ReadOnly,
                            FileReplacementMode.FailIfExists,
                            FileRealizationMode.HardLink,
                            null);

                        // The checks below are just to make sure that the calls completed as expected.
                        // The most important part is that they complete *at all*, which is enforced by
                        // racing against the Task.Delay in the outer scope.
                        if (i < 1023 || SucceedsEvenIfFull)
                        {
                            // The first 1023 links should succeed (bringing it up to the limit of 1024)
                            // And *all* of the calls should succeed if the cache takes new content even when overflowed.
                            Assert.True(placeResult.Succeeded);
                        }
                        else
                        {
                            // If the implementation rejects overflowing content, then the last call should fail.
                            Assert.False(placeResult.Succeeded);
                            Assert.Contains("Failed to reserve space", placeResult.ErrorMessage);
                        }
                    }
                });

                // Race between the test and a 2-minute timer. This can be increased if the test ends up flaky.
                Task firstCompletedTask = await Task.WhenAny(testTask, Task.Delay(TimeSpan.FromMinutes(2)));

                // The test should finish first, well before the two-minute timeout elapses, but it won't if it deadlocks.
                Assert.True(firstCompletedTask == testTask);
                await firstCompletedTask;
            }
        }
Example #17
        protected async Task PutRandomAndPinAsync(FileSystemContentStoreInternal store, int contentSize, PinContext pinContext)
        {
            PutResult putResult = await store.PutRandomAsync(Context, contentSize);

            putResult.ShouldBeSuccess();

            PinResult pinResult = await store.PinAsync(Context, putResult.ContentHash, pinContext);

            pinResult.ShouldBeSuccess();
        }
Example #18
        public override void PutStreamStop(Context context, PutResult result, Severity successSeverity)
        {
            if (_eventSource.IsEnabled())
            {
                _eventSource.PutStreamStop(
                    context.TraceId, result.Succeeded, result.ErrorMessage, result.ContentHash.ToString());
            }

            base.PutStreamStop(context, result, DiagnosticLevelSeverity(result.Duration));
        }
Example #19
        public override void PutFileStop(Context context, PutResult result, bool trusted, AbsolutePath path, FileRealizationMode mode, Severity successSeverity)
        {
            if (_eventSource.IsEnabled())
            {
                _eventSource.PutFileStop(
                    context.TraceId, result.Succeeded, result.ErrorMessage, result.ContentHash.ToString());
            }

            base.PutFileStop(context, result, trusted, path, mode, successSeverity: DiagnosticLevelSeverity(result.Duration));
        }
Example #20
        public override void PutFileStop(Context context, PutResult result, bool trusted, AbsolutePath path, FileRealizationMode mode)
        {
            if (_eventSource.IsEnabled())
            {
                _eventSource.PutFileStop(
                    context.Id.ToString(), result.Succeeded, result.ErrorMessage, result.ContentHash.ToString());
            }

            base.PutFileStop(context, result, trusted, path, mode);
        }
Example #21
        public override void PutStreamStop(Context context, PutResult result)
        {
            if (_eventSource.IsEnabled())
            {
                _eventSource.PutStreamStop(
                    context.Id.ToString(), result.Succeeded, result.ErrorMessage, result.ContentHash.ToString());
            }

            base.PutStreamStop(context, result);
        }
Example #22
        private async Task<PutResult> PutStreamInternalAsync(Context context, Stream stream, ContentHash contentHash, Func<int, AbsolutePath, Task<PutResult>> putFileFunc)
        {
            ObjectResult<SessionData> result = await _sessionState.GetDataAsync();

            if (!result.Succeeded)
            {
                return(new PutResult(result, contentHash));
            }

            int sessionId = result.Data.SessionId;
            var tempFile  = result.Data.TemporaryDirectory.CreateRandomFileName();

            try
            {
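                // Stage the incoming stream in a temp file, then hand the file to the supplied putFileFunc delegate.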
                if (stream.CanSeek)
                {
                    stream.Position = 0;
                }

                using (var fileStream = await _fileSystem.OpenAsync(tempFile, FileAccess.Write, FileMode.Create, FileShare.Delete))
                {
                    if (fileStream == null)
                    {
                        throw new ClientCanRetryException(context, $"Could not create temp file {tempFile}. The service may have restarted.");
                    }

                    await stream.CopyToAsync(fileStream);
                }

                PutResult putResult = await putFileFunc(sessionId, tempFile);

                if (putResult.Succeeded)
                {
                    return(new PutResult(putResult.ContentHash, putResult.ContentSize));
                }
                else if (!_fileSystem.FileExists(tempFile))
                {
                    throw new ClientCanRetryException(context, $"Temp file {tempFile} not found. The service may have restarted.");
                }
                else
                {
                    return(new PutResult(putResult, putResult.ContentHash));
                }
            }
            catch (Exception ex) when(ex is DirectoryNotFoundException || ex is UnauthorizedAccessException)
            {
                throw new ClientCanRetryException(context, "Exception thrown during PutStreamInternal. The service may have shut down", ex);
            }
            catch (Exception ex) when(!(ex is ClientCanRetryException))
            {
                // The caller's retry policy needs to see ClientCanRetryExceptions in order to properly retry
                return(new PutResult(ex, contentHash));
            }
        }
Example #23
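        // Computes the put value as the sum of the European put component and the put integral
        // function component, both evaluated for the supplied market parameters.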
        public async Task<PutResult> CalculateAsync(double K, double S, double r, double t, double sigma, int n, double T, BtResult Btksi)
        {
            PutResult put = new PutResult();

            put.EuropeanPut         = _europeanPut.Calculate(K, S, r, t, sigma);
            put.PutIntegralFunction = await _putIntegralFunction.CalculateAsync(n, T, r, sigma, t, S, K, Btksi);

            put.Result.Value = put.EuropeanPut.Result.Value + put.PutIntegralFunction.Result.Value;

            return(put);
        }
Example #24
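        // Parameterized test: stubs the repository to return the given PutResult and asserts that the
        // controller maps it to the expected HTTP status code.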
        public void Put_returns_http_status_corresponding_to_PutResult(PutResult repoReturnResult, int expectedStatusCode)
        {
            var sut = CreateSut();

            _empRepoMock
            .Setup(r => r.Put(It.IsAny<Emp>()))
            .Returns(repoReturnResult);

            sut.Put(Guid.NewGuid(), new EmpModel());

            sut.HttpContext.Response.StatusCode.Should().Be(expectedStatusCode);
        }
Example #25
        public void Can_query_for_item_history()
        {
            using (var table = new PersistentHashTable(testDatabase))
            {
                table.Initialize();

                table.Batch(actions =>
                {
                    PutResult version1 = actions.Put(new PutRequest
                    {
                        Key            = "abc1",
                        ParentVersions = new ValueVersion[0],
                        Bytes          = new byte[] { 6 }
                    });

                    actions.Put(new PutRequest
                    {
                        Key            = "abc1",
                        ParentVersions = new[] { version1.Version },
                        Bytes          = new byte[] { 1 }
                    });
                    actions.Put(new PutRequest
                    {
                        Key            = "abc1",
                        ParentVersions = new[] { version1.Version },
                        Bytes          = new byte[] { 2 }
                    });
                    actions.Put(new PutRequest
                    {
                        Key            = "abc1",
                        ParentVersions = new[]
                        {
                            new ValueVersion
                            {
                                InstanceId = version1.Version.InstanceId,
                                Number     = 3
                            },
                        },
                        Bytes = new byte[] { 3 }
                    });

                    Value[] values = actions.Get(new GetRequest {
                        Key = "abc1"
                    });
                    Assert.Equal(3, values.Length);

                    Assert.Equal(new[] { 1 }, values[0].ParentVersions.Select(x => x.Number).ToArray());
                    Assert.Equal(new[] { 1 }, values[1].ParentVersions.Select(x => x.Number).ToArray());
                    Assert.Equal(new[] { 3 }, values[2].ParentVersions.Select(x => x.Number).ToArray());
                });
            }
        }
Example #26
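            // Index trigger: when an Aggregates/ShoppingCart entry is indexed, persists a snapshot of the
            // aggregate document and records the snapshot key in a non-analyzed "Snapshot" field.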
            public override void OnIndexEntryCreated(string entryKey, Document document)
            {
                if (indexName != "Aggregates/ShoppingCart")
                {
                    return;
                }

                RavenJObject shoppingCart   = RavenJObject.Parse(document.GetField("Aggregate").StringValue);
                string       shoppingCartId = document.GetField("Id").StringValue;

                PutResult result = database.Documents.Put("shoppingcarts/" + shoppingCartId + "/snapshots/", null, shoppingCart, new RavenJObject(), null);

                document.Add(new Field("Snapshot", result.Key, Field.Store.YES, Field.Index.NOT_ANALYZED));
            }
Example #27
        public Task AddImmediatelyDelete()
        {
            return(TestStore(Context, Clock, async (store) =>
            {
                int contentSize = 10;
                PutResult putResult = await store.PutRandomAsync(Context, contentSize);
                putResult.ShouldBeSuccess();

                DeleteResult deleteResult = await store.DeleteAsync(Context, putResult.ContentHash);
                deleteResult.ShouldBeSuccess();
                deleteResult.EvictedSize.Should().Be(contentSize);
                deleteResult.PinnedSize.Should().Be(0);
            }));
        }
Example #28
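        // Proactive replication: pushes the newly added content to every known machine location for its hash,
        // firing each copy off without awaiting completion.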
        public void PushContentToRemoteLocations(
            Context context,
            PutResult putResult,
            CancellationToken cts)
        {
            var contentHashWithSize = new ContentHashWithSize(putResult.ContentHash, putResult.ContentSize);
            var operationContext    = new OperationContext(context, cts);

            var machineLocations = GetMachineLocations(putResult.ContentHash);

            foreach (MachineLocation machine in machineLocations)
            {
                PushFileToRemoteLocationAsync(operationContext, contentHashWithSize, machine).FireAndForget(operationContext);
            }
        }
Example #29
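 // Test helper: configures the storage mock to capture the stream and content type of each PutAsync call
 // and to return the supplied PutResult.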
 protected void SetupPutResult(PutResult result)
 {
     _storage
     .Setup(
         s => s.PutAsync(
             It.IsAny<string>(),
             It.IsAny<Stream>(),
             It.IsAny<string>(),
             It.IsAny<CancellationToken>()))
     .Callback((string path, Stream content, string contentType, CancellationToken cancellationToken) =>
     {
         _puts[path] = (content, contentType);
     })
     .ReturnsAsync(result);
 }
Example #30
        public void After_resolving_conflict_will_remove_old_version_of_data()
        {
            using (var table = new PersistentHashTable(testDatabase))
            {
                table.Initialize();

                table.Batch(actions =>
                {
                    PutResult version1 = actions.Put(new PutRequest
                    {
                        Key            = "test",
                        ParentVersions = new ValueVersion[0],
                        Bytes          = new byte[] { 1 }
                    });
                    PutResult version2 = actions.Put(new PutRequest
                    {
                        Key            = "test",
                        ParentVersions = new ValueVersion[0],
                        Bytes          = new byte[] { 2 }
                    });
                    Value[] value = actions.Get(new GetRequest
                    {
                        Key = "test",
                        SpecifiedVersion = version1.Version
                    });
                    Assert.Equal(new byte[] { 1 }, value[0].Data);

                    actions.Put(new PutRequest
                    {
                        Key            = "test",
                        ParentVersions = new[]
                        {
                            version1.Version,
                            version2.Version
                        },
                        Bytes = new byte[] { 3 }
                    });

                    actions.Commit();

                    Assert.Empty(actions.Get(new GetRequest
                    {
                        Key = "test",
                        SpecifiedVersion = version1.Version
                    }));
                });
            }
        }
Example #31
 public PutResult[] Put(params PutRequest[] valuesToAdd)
 {
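     // Group requests by destination node URL so each node receives a single batched Put,
     // then map the returned versions back to the caller's original request order.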
     var groupedByUri = from x in valuesToAdd
                        group x by GetUrl(x.Key);
     var versions = new PutResult[valuesToAdd.Length];
     foreach (var values in groupedByUri)
     {
         var array = values.ToArray();
         var versionForCurrentBatch = new PutResult[0];
         values.Key.ExecuteSync(uri =>
         {
             ServiceUtil.Execute<IDistributedHashTable>(uri, table =>
             {
                 versionForCurrentBatch = table.Put(values.Key, array);
             });
         });
         for (int i = 0; i < array.Length; i++)
         {
             versions[Array.IndexOf(valuesToAdd, array[i])] = versionForCurrentBatch[i];
         }
     }
     return versions;
 }