public Task GetSelectorsGivesSelectorsInReverseLruOrderAfterAdd()
{
    var context = new Context(Logger);
    var weakFingerprint = Fingerprint.Random();
    var selector1 = Selector.Random();
    var selector2 = Selector.Random();
    var strongFingerprint1 = new StrongFingerprint(weakFingerprint, selector1);
    var strongFingerprint2 = new StrongFingerprint(weakFingerprint, selector2);
    var contentHashListWithDeterminism1 = new ContentHashListWithDeterminism(ContentHashList.Random(), CacheDeterminism.None);
    var contentHashListWithDeterminism2 = new ContentHashListWithDeterminism(ContentHashList.Random(), CacheDeterminism.None);

    return RunTestAsync(context, async session =>
    {
        await session.AddOrGetContentHashListAsync(context, strongFingerprint1, contentHashListWithDeterminism1, Token).ShouldBeSuccess();
        _clock.Increment();
        await session.AddOrGetContentHashListAsync(context, strongFingerprint2, contentHashListWithDeterminism2, Token).ShouldBeSuccess();
        _clock.Increment();

        List<GetSelectorResult> getSelectorResults = await session.GetSelectors(context, weakFingerprint, Token).ToList(CancellationToken.None);
        Assert.Equal(2, getSelectorResults.Count);

        // The most recently added selector should come first.
        GetSelectorResult r1 = getSelectorResults[0];
        Assert.True(r1.Succeeded);
        Assert.True(r1.Selector == selector2);

        GetSelectorResult r2 = getSelectorResults[1];
        Assert.True(r2.Succeeded);
        Assert.True(r2.Selector == selector1);
    });
}
public void OperationCountLimitTest()
{
    var clock = new MemoryClock();
    clock.UtcNow = DateTime.Today;

    var operationLimit = 2;
    var span = TimeSpan.FromSeconds(1);
    var halfSpan = new TimeSpan(span.Ticks / 2);
    var limit = new OperationThrottle(span, operationLimit, clock);

    for (var iteration = 0; iteration < 3; iteration++)
    {
        // Use all available operations.
        for (var i = 0; i < operationLimit; i++)
        {
            limit.CheckAndRegisterOperation().ShouldBeSuccess();
        }

        // Doing more operations should fail.
        limit.CheckAndRegisterOperation().ShouldBeError();

        // ... even if time passes, as long as we don't cross the span limit.
        clock.Increment(halfSpan);
        limit.CheckAndRegisterOperation().ShouldBeError();

        // Once the span has been completed, operations should succeed again.
        clock.Increment(halfSpan);
    }
}
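// Editor's note: the tests in this collection drive time with a MemoryClock test double rather
// than real waits. A minimal sketch of such a clock, assuming only the members used in these
// tests (a settable UtcNow, Increment, AddSeconds); this is illustrative, not the actual
// MemoryClock implementation:
internal sealed class ManualClockSketch
{
    // Virtual "now"; tests set and advance it deterministically.
    public DateTime UtcNow { get; set; } = DateTime.UtcNow;

    // The parameterless Increment advances by a fixed quantum so successive
    // operations get distinct, strictly increasing timestamps.
    public void Increment() => UtcNow += TimeSpan.FromSeconds(1);

    public void Increment(TimeSpan span) => UtcNow += span;

    public void AddSeconds(double seconds) => UtcNow += TimeSpan.FromSeconds(seconds);
}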
public async Task EvictionAnnouncesHash()
{
    bool batchProcessWasCalled = false;
    var nagleQueue = NagleQueue<ContentHash>.Create(
        hashes =>
        {
            batchProcessWasCalled = true;
            return Task.FromResult(42);
        },
        maxDegreeOfParallelism: 1,
        interval: TimeSpan.FromMinutes(1),
        batchSize: 1);

    await TestStore(
        _context,
        _clock,
        async store =>
        {
            var cas = store as IContentStoreInternal;
            var blobSize = BlobSizeToStartSoftPurging(2);

            using (var stream1 = new MemoryStream(ThreadSafeRandom.GetBytes(blobSize)))
            using (var stream2 = new MemoryStream(ThreadSafeRandom.GetBytes(blobSize)))
            {
                await cas.PutStreamAsync(_context, stream1, ContentHashType).ShouldBeSuccess();
                _clock.Increment();
                await cas.PutStreamAsync(_context, stream2, ContentHashType).ShouldBeSuccess();
                _clock.Increment();
                await store.SyncAsync(_context);
            }
        },
        nagleQueue);

    batchProcessWasCalled.Should().BeTrue();
}
public async Task EnsureContentIsNotPinned(Context context, MemoryClock clock, ContentHash contentHash)
{
    Assert.True(await ContainsAsync(context, contentHash, null));
    clock.Increment();

    await ClearStoreOfUnpinnedContent(context, clock);

    Assert.False(await ContainsAsync(context, contentHash, null));
    clock.Increment();
}
public void HeartbeatUpdatesLastHeartbeatTimeAndState()
{
    var clusterState = new ClusterStateMachine();
    MachineId machineId;
    (clusterState, machineId) = clusterState.RegisterMachine(new MachineLocation("node1"), _clock.UtcNow);

    _clock.Increment(TimeSpan.FromMinutes(1));
    clusterState = clusterState.Heartbeat(machineId, _clock.UtcNow, MachineState.Open).ThrowIfFailure().Next;

    var r = clusterState.GetStatus(machineId).ShouldBeSuccess().Value;
    r.LastHeartbeatTimeUtc.Should().Be(_clock.UtcNow);
    r.State.Should().Be(MachineState.Open);
}
public Task CheckpointMaxAgeIsRespected()
{
    var clock = new MemoryClock();
    return RunTest(async (context, service, iteration) =>
    {
        if (iteration == 0)
        {
            // First heartbeat lets the service know it's master. This also restores the latest
            // checkpoint and clears old ones if needed.
            await service.OnRoleUpdatedAsync(context, Role.Master);

            // Create a checkpoint and make sure it shows up in the registry.
            await service.CreateCheckpointAsync(context).ShouldBeSuccess();

            var r = await service.CheckpointManager.CheckpointRegistry.GetCheckpointStateAsync(context);
            r.Succeeded.Should().BeTrue();
            r.Value!.CheckpointAvailable.Should().BeTrue();
        }
        else if (iteration == 1)
        {
            clock.Increment(TimeSpan.FromHours(0.5));

            // The checkpoint is half an hour old, still within CheckpointMaxAge, so it must
            // survive the master transition.
            await service.OnRoleUpdatedAsync(context, Role.Master);

            var r = await service.CheckpointManager.CheckpointRegistry.GetCheckpointStateAsync(context);
            r.Succeeded.Should().BeTrue();
            r.Value!.CheckpointAvailable.Should().BeTrue();
        }
        else
        {
            clock.Increment(TimeSpan.FromHours(1));

            // The checkpoint is now 1.5 hours old, older than CheckpointMaxAge (1 hour), so
            // the master transition should clear it.
            await service.OnRoleUpdatedAsync(context, Role.Master);

            var r = await service.CheckpointManager.CheckpointRegistry.GetCheckpointStateAsync(context);
            r.Succeeded.Should().BeTrue();
            r.Value!.CheckpointAvailable.Should().BeFalse();
        }
    },
    clock: clock,
    iterations: 3,
    modifyConfig: configuration =>
    {
        configuration.CheckpointMaxAge = TimeSpan.FromHours(1);
    });
}
private Task<byte[]> CorruptStoreWithReplicasAsync(
    Context context,
    MemoryClock clock,
    DisposableDirectory tempDirectory,
    Func<ContentHash, Task> corruptFunc)
{
    return CorruptStoreAsync(context, tempDirectory, async contentHash =>
    {
        // ReSharper disable once UnusedVariable
        foreach (var x in Enumerable.Range(0, 1500))
        {
            AbsolutePath tempPath = tempDirectory.CreateRandomFileName();
            clock.Increment();
            var result = await PlaceFileAsync(
                context,
                contentHash,
                tempPath,
                FileAccessMode.ReadOnly,
                FileReplacementMode.FailIfExists,
                FileRealizationMode.HardLink,
                null);
            result.Code.Should().Be(PlaceFileResult.ResultCode.PlacedWithHardLink);
        }

        await corruptFunc(contentHash);
    });
}
public Task ExpiredInstancesGetGarbageCollected()
{
    var clock = new MemoryClock();
    var configuration = new ResourcePoolConfiguration()
    {
        MaximumAge = TimeSpan.FromSeconds(1),
    };

    return RunTest<Key, Resource>(async (context, pool) =>
    {
        var key = new Key(0);
        await pool.UseAsync(context, key, wrapper =>
        {
            return BoolResult.SuccessTask;
        }).ShouldBeSuccess();

        // Advance well past MaximumAge so the instance is considered expired.
        clock.Increment(TimeSpan.FromMinutes(1));

        await pool.GarbageCollectAsync(context).IgnoreFailure();

        pool.Counter[ResourcePoolV2Counters.CreatedResources].Value.Should().Be(1);
        pool.Counter[ResourcePoolV2Counters.ReleasedResources].Value.Should().Be(1);
        pool.Counter[ResourcePoolV2Counters.ShutdownSuccesses].Value.Should().Be(1);
        pool.Counter[ResourcePoolV2Counters.GarbageCollectionSuccesses].Value.Should().Be(1);
    }, configuration, clock);
}
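// Editor's note: a minimal sketch of the expiry check the pool's garbage collection is
// exercising above, assuming MaximumAge means "created more than MaximumAge ago according to
// the injected clock". The name is illustrative, not part of the ResourcePool API:
internal static class ResourceAgeSketch
{
    public static bool IsExpired(DateTime createdAtUtc, DateTime nowUtc, TimeSpan maximumAge)
        => nowUtc - createdAtUtc > maximumAge;
}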
private async Task ClearStoreOfUnpinnedContent(Context context, MemoryClock clock)
{
    const int contentsToAdd = 4;
    for (int i = 0; i < contentsToAdd; i++)
    {
        var data = ThreadSafeRandom.GetBytes((int)(Configuration.MaxSizeQuota.Hard / (contentsToAdd - 1)));
        using (var dataStream = new MemoryStream(data))
        {
            var r = await PutStreamAsync(context, dataStream, ContentHashType, null);
            var hashFromPut = r.ContentHash;
            clock.Increment();
            Assert.True(await ContainsAsync(context, hashFromPut, null));
            clock.Increment();
        }
    }
}
public Task ContainsPins()
{
    var context = new Context(Logger);
    return TestStore(context, _clock, async store =>
    {
        var r = await store.PutRandomAsync(context, MaxSizeHard / 3);
        _clock.Increment();

        using (var pinContext = store.CreatePinContext())
        {
            Assert.True(await store.ContainsAsync(context, r.ContentHash, new PinRequest(pinContext)));
            _clock.Increment();
            await store.EnsureContentIsPinned(context, _clock, r.ContentHash);
            Assert.True(pinContext.Contains(r.ContentHash));
        }

        await store.EnsureContentIsNotPinned(context, _clock, r.ContentHash);
    });
}
public async Task IcmClientTestAsync()
{
    Debugger.Launch();

    var appKey = GetApplicationKey().ShouldBeSuccess();
    var config = new App.Monitor.Configuration
    {
        AzureAppKey = appKey.Value!,
    };

    var clock = new MemoryClock();
    clock.Increment();

    var keyVault = new KeyVaultClient(
        config.KeyVaultUrl,
        config.AzureTenantId,
        config.AzureAppId,
        config.AzureAppKey,
        clock,
        cacheTimeToLive: TimeSpan.FromSeconds(1));

    keyVault.IcmCallsCounter.Value.Should().Be(0);

    // Simulate that the certificate has been acquired before.
    _ = await keyVault.GetCertificateAsync(config.IcmCertificateName);
    keyVault.IcmCallsCounter.Value.Should().Be(1);

    var icmClient = new IcmClient(keyVault, config.IcmUrl, config.IcmConnectorId, config.IcmCertificateName, clock);

    var incident = new IcmIncident(
        stamp: "Test",
        environment: "PROD",
        machines: new[] { "MachineA", "MachineB" },
        correlationIds: new[] { "GuidA", "GuidB" },
        severity: 4,
        description: "This incident was created for testing the cache monitor",
        title: "Cache Monitor Test Incident",
        incidentTime: DateTime.Now,
        cacheTimeToLive: null);

    await icmClient.EmitIncidentAsync(incident);

    // Should have used the cached cert.
    keyVault.IcmCallsCounter.Value.Should().Be(1);

    // Simulate that the certificate will be acquired in the future.
    clock.AddSeconds(2);
    _ = await keyVault.GetCertificateAsync(config.IcmCertificateName);
    keyVault.IcmCallsCounter.Value.Should().Be(2);
}
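// Editor's note: the certificate caching exercised above is ordinary time-to-live caching
// driven by the injected clock: a fetch within the TTL returns the cached value, and a fetch
// after expiry hits the underlying service again (bumping the call counter). A minimal sketch,
// with illustrative names rather than the KeyVaultClient API:
internal sealed class TtlCachedValue<T> where T : class
{
    private readonly Func<DateTime> _utcNow;
    private readonly TimeSpan _timeToLive;
    private T? _value;
    private DateTime _expiryUtc;

    public TtlCachedValue(Func<DateTime> utcNow, TimeSpan timeToLive)
    {
        _utcNow = utcNow;
        _timeToLive = timeToLive;
    }

    public async Task<T> GetOrFetchAsync(Func<Task<T>> fetch)
    {
        var now = _utcNow();
        if (_value == null || now >= _expiryUtc)
        {
            // Cache miss or expired entry: fetch from the underlying service.
            _value = await fetch();
            _expiryUtc = now + _timeToLive;
        }

        return _value;
    }
}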
public async Task EvictionAnnouncesHash()
{
    await TestStore(
        _context,
        _clock,
        async store =>
        {
            var cas = store as IContentStoreInternal;
            var blobSize = BlobSizeToStartSoftPurging(2);

            using (var stream1 = new MemoryStream(ThreadSafeRandom.GetBytes(blobSize)))
            using (var stream2 = new MemoryStream(ThreadSafeRandom.GetBytes(blobSize)))
            {
                await cas.PutStreamAsync(_context, stream1, ContentHashType).ShouldBeSuccess();
                _clock.Increment();
                await cas.PutStreamAsync(_context, stream2, ContentHashType).ShouldBeSuccess();
                _clock.Increment();
                await store.SyncAsync(_context);
            }
        });
}
public Task PutFileWithLongPath()
{
    var context = new Context(Logger);
    if (!AbsolutePath.LongPathsSupported)
    {
        context.Debug(
            $"The test '{nameof(PutFileWithLongPath)}' is skipped because long paths are not supported by the current version of the .NET Framework.",
            component: nameof(FileSystemContentStoreInternalPutFileTests));
        return Task.FromResult(1);
    }

    return TestStore(context, Clock, async store =>
    {
        byte[] bytes = ThreadSafeRandom.GetBytes(ValueSize);
        ContentHash contentHash = bytes.CalculateHash(ContentHashType);

        // Verify content doesn't exist yet in the store.
        Assert.False(await store.ContainsAsync(context, contentHash, null));

        using (var tempDirectory = new DisposableDirectory(FileSystem))
        {
            string longPathPart = new string('a', 300);
            AbsolutePath pathToContent = tempDirectory.Path / $"tempContent{longPathPart}.txt";
            FileSystem.WriteAllBytes(pathToContent, bytes);

            ContentHash hashFromPut;
            using (var pinContext = store.CreatePinContext())
            {
                // Put the content into the store w/ hard link.
                var r = await store.PutFileAsync(
                    context,
                    pathToContent,
                    FileRealizationMode.Any,
                    ContentHashType,
                    new PinRequest(pinContext));
                hashFromPut = r.ContentHash;
                Clock.Increment();
                await store.EnsureContentIsPinned(context, Clock, hashFromPut);
                Assert.True(pinContext.Contains(hashFromPut));
            }

            await store.EnsureContentIsNotPinned(context, Clock, hashFromPut);
        }
    });
}
public Task OpenStreamPinContextPins()
{
    var context = new Context(Logger);
    return TestStore(context, _clock, async store =>
    {
        var r = await store.PutRandomAsync(context, MaxSizeHard / 3);
        _clock.Increment();

        using (var pinContext = store.CreatePinContext())
        {
            var result = await store.OpenStreamAsync(context, r.ContentHash, new PinRequest(pinContext));
            using (var streamFromGet = result.Stream)
            {
                Assert.NotNull(streamFromGet);
                _clock.Increment();
            }

            await store.EnsureContentIsPinned(context, _clock, r.ContentHash);
            Assert.True(pinContext.Contains(r.ContentHash));
        }

        await store.EnsureContentIsNotPinned(context, _clock, r.ContentHash);
    });
}
public Task ExpiredInstancesGetReleasedOnReuse(bool invalidate)
{
    var clock = new MemoryClock();
    var configuration = new ResourcePoolConfiguration()
    {
        MaximumAge = TimeSpan.FromSeconds(1),
    };

    return RunTest<Key, Resource>(async (context, pool) =>
    {
        var key = new Key(0);

        Resource? lastResource = null;
        await pool.UseAsync(context, key, wrapper =>
        {
            if (invalidate)
            {
                wrapper.Invalidate(context);
            }

            lastResource = wrapper.Value;
            return BoolResult.SuccessTask;
        }).ShouldBeSuccess();

        if (!invalidate)
        {
            // Not invalidated explicitly, so age the instance out past MaximumAge instead.
            clock.Increment(TimeSpan.FromMinutes(1));
        }

        await pool.UseAsync(context, key, wrapper =>
        {
            lastResource.Should().NotBe(wrapper.Value);
            pool.Counter[ResourcePoolCounters.CreatedResources].Value.Should().Be(2);
            pool.Counter[ResourcePoolCounters.ReleasedResources].Value.Should().Be(1);
            return BoolResult.SuccessTask;
        }).ShouldBeSuccess();
    }, configuration, clock);
}
public Task GetContentSizePins()
{
    var context = new Context(Logger);
    return TestStore(context, _clock, async store =>
    {
        var putContentSizeInBytes = (long)MaxSizeHard / 3;
        var r1 = await store.PutRandomAsync(context, (int)putContentSizeInBytes);
        _clock.Increment();

        using (var pinContext = store.CreatePinContext())
        {
            var r2 = await store.GetContentSizeAndCheckPinnedAsync(context, r1.ContentHash, new PinRequest(pinContext));
            r2.Exists.Should().BeTrue();
            r2.Size.Should().Be(putContentSizeInBytes);
            r2.WasPinned.Should().BeFalse();
            await store.EnsureContentIsPinned(context, _clock, r1.ContentHash);
            pinContext.Contains(r1.ContentHash).Should().BeTrue();
        }

        await store.EnsureContentIsNotPinned(context, _clock, r1.ContentHash);
    });
}
public Task EvictionInLruOrder()
{
    var context = new Context(Logger);
    return RunTestAsync(context, async session =>
    {
        // Write more than MaxRowCount items so the first ones should fall out.
        var strongFingerprints = Enumerable.Range(0, (int)MaxRowCount + 3).Select(i => StrongFingerprint.Random()).ToList();
        foreach (var strongFingerprint in strongFingerprints)
        {
            await session.AddOrGetContentHashListAsync(
                context,
                strongFingerprint,
                new ContentHashListWithDeterminism(ContentHashList.Random(), CacheDeterminism.None),
                Token).ShouldBeSuccess();
            _clock.Increment();
        }

        // Make sure store purging completes.
        await ((ReadOnlySQLiteMemoizationSession)session).PurgeAsync(context);

        // Check that the first items written have fallen out.
        for (var i = 0; i < 3; i++)
        {
            GetContentHashListResult r = await session.GetContentHashListAsync(context, strongFingerprints[i], Token);
            r.Succeeded.Should().BeTrue();
            r.ContentHashListWithDeterminism.ContentHashList.Should().BeNull();
        }

        // Check that the rest are still present.
        for (var i = 3; i < strongFingerprints.Count; i++)
        {
            GetContentHashListResult r = await session.GetContentHashListAsync(context, strongFingerprints[i], Token);
            r.Succeeded.Should().BeTrue();
            r.ContentHashListWithDeterminism.ContentHashList.Should().NotBeNull();
        }
    });
}
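// Editor's note: a minimal sketch of the LRU eviction the test above relies on, assuming each
// entry carries a last-access timestamp maintained via the clock. Names are illustrative, not
// the SQLite memoization store's API:
internal static class LruEvictionSketch
{
    public static List<TKey> SelectEvictionCandidates<TKey>(
        IReadOnlyList<(TKey Key, DateTime LastAccessTimeUtc)> entries,
        long maxRowCount)
    {
        // Evict the least recently accessed entries until at most maxRowCount remain.
        int excess = (int)Math.Max(0, entries.Count - maxRowCount);
        return entries
            .OrderBy(e => e.LastAccessTimeUtc)
            .Take(excess)
            .Select(e => e.Key)
            .ToList();
    }
}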
public async Task ReconstructionMaintainsLastAccessTimes()
{
    var context = new Context(Logger);
    using (var testDirectory = new DisposableDirectory(FileSystem))
    {
        MemoryClock.Increment();

        // Populate with content.
        IReadOnlyList<KeyValuePair<ContentHash, ContentFileInfo>> priorContent = null;
        var filePath = await TestContentDirectory(
            context,
            testDirectory,
            async contentDirectory =>
            {
                priorContent = await PopulateRandomInfo(contentDirectory);
            },
            shutdown: true);

        MemoryClock.Increment();

        // Populate with content that will need to be recovered because shutdown is skipped.
        IReadOnlyList<KeyValuePair<ContentHash, ContentFileInfo>> reconstructedContent = null;
        await TestContentDirectory(
            context,
            testDirectory,
            async contentDirectory =>
            {
                reconstructedContent = await PopulateRandomInfo(contentDirectory);
                Assert.False(FileSystem.FileExists(filePath));
                Assert.True(FileSystem.FileExists(filePath.Parent / MemoryContentDirectory.BinaryBackupFileName));
            },
            shutdown: false);

        var priorContentMap = priorContent.ToDictionary(kvp => kvp.Key, kvp => kvp.Value);

        MemoryClock.Increment();

        var allContent = priorContent.Concat(reconstructedContent).ToList();
        Host.Content = new ContentDirectorySnapshot<ContentFileInfo>();
        Host.Content.Add(allContent.Select(kv => new PayloadFromDisk<ContentFileInfo>(kv.Key, kv.Value)));

        // Update last-access time to simulate what would happen after reconstruction.
        allContent.ForEach(hashInfoPair => hashInfoPair.Value.UpdateLastAccessed(MemoryClock));

        // Check the reconstructed content directory.
        await TestContentDirectory(
            context,
            testDirectory,
            async contentDirectory =>
            {
                // Verify that the full set of content is present in the initialized content directory.
                foreach (var content in allContent)
                {
                    contentDirectory.TryGetFileInfo(content.Key, out var contentInfo).Should().BeTrue();
                }

                foreach (var hash in await contentDirectory.GetLruOrderedCacheContentAsync())
                {
                    if (!priorContentMap.ContainsKey(hash))
                    {
                        // Prior content should come before the reconstructed content which was preserved
                        // in the backup file, because we know the prior content was added before the
                        // newly discovered content.
                        Assert.Empty(priorContentMap);
                    }
                    else
                    {
                        priorContentMap.Remove(hash);
                    }
                }

                Assert.False(FileSystem.FileExists(filePath));
                Assert.True(FileSystem.FileExists(filePath.Parent / MemoryContentDirectory.BinaryBackupFileName));
            },
            shutdown: true);

        Assert.True(FileSystem.FileExists(filePath));
    }
}
[Trait("Category", "QTestSkip")] // Skipped public Task TestQuotaCalibration() { var context = new Context(Logger); return(TestStore(context, _clock, async store => { int totalSize = 0; // Add big content should trigger calibration. using (var pinContext = store.CreatePinContext()) { int size = 60; using (var dataStream1 = RandomStream(size)) using (var dataStream2 = RandomStream(size)) { await store.PutStreamAsync(context, dataStream1, ContentHashType, new PinRequest(pinContext)).ShouldBeSuccess(); _clock.Increment(); await store.PutStreamAsync(context, dataStream2, ContentHashType, new PinRequest(pinContext)).ShouldBeSuccess(); _clock.Increment(); } totalSize += 2 * size; } MaxSizeQuota currentQuota = null; // Initial max = 100, total size = 120. Calibrate up. await VerifyQuota(context, store, quota => { currentQuota = quota; Assert.True(quota.Hard > totalSize); Assert.True(quota.Soft > totalSize); }); // Add small content does not change quota. using (var pinContext = store.CreatePinContext()) { int size = 1; using (var dataStream = RandomStream(size)) { await store.PutStreamAsync(context, dataStream, ContentHashType, new PinRequest(pinContext)).ShouldBeSuccess(); _clock.Increment(); } totalSize += size; } await VerifyQuota(context, store, quota => { Assert.Equal(currentQuota.Hard, quota.Hard); Assert.Equal(currentQuota.Soft, quota.Soft); }); // Add small content, but window is small. Calibrate down such that in the next reservation purging can run. using (var pinContext = store.CreatePinContext()) { int size = 1; using (var dataStream = RandomStream(size)) { await store.PutStreamAsync(context, dataStream, ContentHashType, new PinRequest(pinContext)).ShouldBeSuccess(); _clock.Increment(); } totalSize += size; } await VerifyQuota(context, store, quota => { Assert.True(currentQuota.Hard > quota.Hard); Assert.True(currentQuota.Soft > quota.Soft); Assert.True(totalSize > quota.Soft && totalSize < quota.Hard); }); })); }