/// <summary>
/// Deletes every cached blob whose Key1 matches <paramref name="includingKey1Hash"/>,
/// except entries whose Key2 matches <paramref name="excludingKey2Hash"/> (when non-null).
/// Each write log containing a match is rewritten without the deleted entries and the
/// old log file is removed; logs with no matching entries are left untouched.
/// </summary>
/// <param name="includingKey1Hash">Key1 hash selecting the entries to evict.</param>
/// <param name="excludingKey2Hash">Optional Key2 hash; entries matching it are kept. May be null.</param>
/// <param name="cancellationToken">Token used to cancel the storage operations.</param>
internal async Task EvictByKey1HashExcludingKey2Hash(byte[] includingKey1Hash, byte[] excludingKey2Hash, CancellationToken cancellationToken)
{
    // Read in a full log's worth of entries.
    var enumerable = await store.List(shardId, WriteLogsDir(), cancellationToken);
    foreach (var log in enumerable)
    {
        using (var stream = await store.ReadStream(shardId, log.KeyName, cancellationToken))
        {
            if (stream == null)
            {
                continue;
            }

            var entries = await WriteEntry.ReadFrom(stream, cancellationToken);

            // Partition in a single pass instead of two complementary Where() scans.
            var matches = new List<WriteEntry>();
            var remaining = new List<WriteEntry>();
            foreach (var entry in entries)
            {
                if (entry.Key1.SequenceEqual(includingKey1Hash)
                    && (excludingKey2Hash == null || !entry.Key2.SequenceEqual(excludingKey2Hash)))
                {
                    matches.Add(entry);
                }
                else
                {
                    remaining.Add(entry);
                }
            }

            // Fix: the original rewrote (and renamed) every log even when nothing in it
            // matched, causing needless write/delete churn. Untouched logs are skipped.
            if (matches.Count == 0)
            {
                continue;
            }

            // Delete the matching blobs.
            foreach (var entry in matches)
            {
                var blobKey = hasher.GetBlobKey(entry.Key1, entry.Key2, entry.Key3);
                await store.Delete(shardId, blobKey, cancellationToken);
            }

            // Write the surviving entries to a fresh log file.
            if (remaining.Count > 0)
            {
                var remainingLogBytes = new List<byte>(WriteEntry.RowBytes() * remaining.Count);
                foreach (var entry in remaining)
                {
                    entry.SerializeTo(remainingLogBytes);
                }
                await store.WriteBytes(shardId, GetNewLogName(), remainingLogBytes.ToArray(), cancellationToken);
            }

            // The old log is superseded (or fully emptied) — remove it.
            await store.Delete(shardId, log.KeyName, cancellationToken);
        }
    }
}
/// <summary>
/// Evicts cached entries until there is room for <paramref name="length"/> additional
/// bytes plus the configured free-space goal. Each pass samples random write logs up to
/// one full log's worth of entries, deletes the least valuable ~10% (lowest usage
/// frequency first), writes the survivors back via <c>WriteMultipleLogs</c>, and retires
/// the consumed log files. Repeats until enough bytes are reclaimed.
/// </summary>
/// <param name="length">Number of bytes about to be written.</param>
/// <param name="cancellationToken">Token used to cancel the storage operations.</param>
internal async Task EvictSpaceFor(int length, CancellationToken cancellationToken)
{
    using (var lockInstance = await evictionLock.LockAsync())
    {
        var currentSize = await sizeTracker.GetCachedSize();
        if (currentSize + (ulong)length < options.MaxCachedBytes)
        {
            return; // There's enough space already.
        }

        // NOTE(review): if FreeSpacePercentGoal is an integer type, FreeSpacePercentGoal / 100
        // truncates to 0 for values below 100, nullifying the free-space goal — confirm the
        // property's type and switch to a floating-point divide if needed.
        var bytesToDelete = (currentSize + (ulong)length)
            - (ulong)(options.MaxCachedBytes - (options.FreeSpacePercentGoal / 100 * options.MaxCachedBytes));
        ulong bytesDeleted = 0;

        while (bytesDeleted < bytesToDelete)
        {
            var candidates = (await store.List(shardId, WriteLogsDir(), cancellationToken)).ToList();
            if (candidates.Count < 1)
            {
                return; // Nothing left to evict.
            }

            var rng = new Random();
            var logs = new List<IBlobInfo>();
            var writeEntries = new List<WriteEntry>((int)(options.MaxWriteLogSize / (ulong)WriteEntry.RowBytes()));
            ulong logsSizeSum = 0;

            // Gather a random sample of logs totalling at most one full log's worth of bytes.
            while (candidates.Count > 0)
            {
                // Fix: Random.Next's upper bound is exclusive, so the original
                // rng.Next(0, list.Count - 1) could never select the last log.
                var index = rng.Next(candidates.Count);
                var next = candidates[index];

                // Fix: remove the chosen log from the candidate pool; the original could
                // pick the same log twice, duplicating its entries and double-deleting it.
                candidates.RemoveAt(index);

                // Stop once the sample is full. An oversized log is accepted only when it
                // is the first pick, so a single huge log cannot stall eviction forever
                // (WriteMultipleLogs is expected to split the survivors across several
                // logs — confirm).
                if (logsSizeSum > 0 && logsSizeSum + next.SizeInBytes >= options.MaxWriteLogSize)
                {
                    break;
                }

                using (var stream = await store.ReadStream(shardId, next.KeyName, cancellationToken))
                {
                    if (stream != null)
                    {
                        writeEntries.AddRange(await WriteEntry.ReadFrom(stream, cancellationToken));
                        logs.Add(next);
                        logsSizeSum += next.SizeInBytes;
                    }
                }
            }

            // Fix: bail out instead of spinning forever when no entries could be read
            // (zero progress would never satisfy the outer loop's condition).
            if (writeEntries.Count == 0)
            {
                return;
            }

            // Attach usage frequency so rarely-read entries sort first.
            for (var i = 0; i < writeEntries.Count; i++)
            {
                var entry = writeEntries[i];
                entry.FrequencyExtra = usage.GetFrequency(entry.ReadId);
                writeEntries[i] = entry;
            }

            // Least frequently used first; ties broken by the ByteCount / CreationCost
            // ratio (ascending), matching the original ordering.
            writeEntries.Sort((a, b) =>
            {
                var result = a.FrequencyExtra.CompareTo(b.FrequencyExtra);
                if (result == 0)
                {
                    result = ((double)a.ByteCount / Math.Max(1, (double)a.CreationCost))
                        .CompareTo((double)b.ByteCount / Math.Max(1, (double)b.CreationCost));
                }
                return result;
            });

            // Delete the worst 10%. Fix: always delete at least one entry so every
            // iteration makes progress — Count / 10 is 0 for samples smaller than 10,
            // which previously spun the outer loop forever.
            var entriesToDeleteCount = Math.Max(1, writeEntries.Count / 10);
            ulong bytesDeletedThisLoop = 0;
            foreach (var entry in writeEntries.Take(entriesToDeleteCount))
            {
                var blobKey = hasher.GetBlobKey(entry.Key1, entry.Key2, entry.Key3);
                await store.Delete(shardId, blobKey, cancellationToken);
                bytesDeletedThisLoop += entry.ByteCount;
            }

            // Write the survivors back to disk, then retire the logs we consumed.
            await WriteMultipleLogs(writeEntries.Skip(entriesToDeleteCount).ToList(), cancellationToken);
            foreach (var logToDelete in logs)
            {
                await store.Delete(shardId, logToDelete.KeyName, cancellationToken);
            }

            bytesDeleted += bytesDeletedThisLoop;
            await sizeTracker.OffsetBy(-(long)bytesDeletedThisLoop);
        } // Repeat until we have enough space.
    }
}