internal void EnqueuePutBytes(HashedCacheKey hash, byte[] data, uint cost)
{
    // TODO: keep a temporary in-memory cache while the files are being written so that
    // repeated requests for the same file aren't a miss
    putByteTasks.Enqueue(Task.Run(async () =>
    {
        try
        {
            await evicter.EvictSpaceFor(data.Length, shutdownTokenSource.Token);
            await evicter.WriteLogEventually(hash, data, cost);
            await evicter.RecordBytesUsed(data.Length);
            // Don't cancel while writing bytes
            await store.WriteBytes(shardId, hash.BlobKey, data, CancellationToken.None);
        }
        catch (OperationCanceledException)
        {
            throw;
        }
        catch (Exception e)
        {
            exceptionLog.Enqueue(e);
        }
    }, shutdownTokenSource.Token));

    // Try to clean up finished tasks when possible. A long-running task at the head of
    // the queue may block cleanup of the others.
    // Note: IsCompleted already covers RanToCompletion, Faulted, and Canceled.
    if (putByteTasks.TryPeek(out Task t) && t.IsCompleted)
    {
        if (putByteTasks.TryDequeue(out Task task) && !task.IsCompleted)
        {
            // Another caller dequeued the completed task first; put this live one back.
            putByteTasks.Enqueue(task);
        }
    }
}
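
// A minimal sketch of the TODO above, assuming a ConcurrentDictionary
// (System.Collections.Concurrent) is acceptable here: blobs sit in pendingWrites from
// enqueue until the background write finishes, so a concurrent read of the same key can
// be served from memory instead of missing. The names pendingWrites and
// TryGetPendingBytes are hypothetical, not part of the existing type.
//
//     private readonly ConcurrentDictionary<HashedCacheKey, byte[]> pendingWrites =
//         new ConcurrentDictionary<HashedCacheKey, byte[]>();
//
//     internal bool TryGetPendingBytes(HashedCacheKey hash, out byte[] data) =>
//         pendingWrites.TryGetValue(hash, out data);
//
// EnqueuePutBytes would set pendingWrites[hash] = data before enqueuing the task, and
// remove the entry with pendingWrites.TryRemove(hash, out _) in a finally block around
// store.WriteBytes, so failed writes don't leave stale entries behind.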
internal async Task OffsetBy(long byteCount)
{
    using (var mutex = await sizeLock.LockAsync())
    {
        var currentCount = await ReadSize();
        // Clamp at zero so a large negative offset can't underflow the unsigned count.
        var newCount = (ulong)Math.Max(0, (long)currentCount + byteCount);

        // Refresh the in-memory cache before persisting the new size.
        lastCount = newCount;
        lastTimeRead = clock.GetUtcNow();
        await store.WriteBytes(shardId, KeyName, BitConverter.GetBytes(newCount), CancellationToken.None);
    }
}
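
// For context, a minimal sketch of the ReadSize counterpart this method assumes: a
// cached read that only hits the store once lastTimeRead goes stale. The cacheLifetime
// field, the staleness rule, and store.ReadBytes (assumed to mirror store.WriteBytes)
// are all assumptions, not the actual implementation; the method is named
// ReadSizeSketch to make that explicit.
private async Task<ulong> ReadSizeSketch()
{
    // Serve from the in-memory copy while it is still fresh.
    if (clock.GetUtcNow() - lastTimeRead < cacheLifetime)
    {
        return lastCount;
    }

    var bytes = await store.ReadBytes(shardId, KeyName, CancellationToken.None);
    lastCount = BitConverter.ToUInt64(bytes, 0);
    lastTimeRead = clock.GetUtcNow();
    return lastCount;
}

// A caller such as the evicter would then adjust the tracked size after deleting blobs,
// e.g. await OffsetBy(-(long)bytesEvicted).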
async Task WriteMultipleLogs(List<WriteEntry> entries, CancellationToken cancellationToken)
{
    if (entries.Count == 0)
    {
        return;
    }

    // Each log file holds as many fixed-size rows as fit under MaxWriteLogSize. Write at
    // least one entry per log so an undersized MaxWriteLogSize can't loop forever.
    var entriesPerLog = Math.Max(1, (int)options.MaxWriteLogSize / WriteEntry.RowBytes());
    var enumerable = entries.AsEnumerable();
    while (enumerable.Any())
    {
        var batch = enumerable.Take(entriesPerLog).ToList();
        enumerable = enumerable.Skip(entriesPerLog);

        // Pre-size the buffer in bytes, not entries.
        var bytes = new List<byte>(batch.Count * WriteEntry.RowBytes());
        foreach (var entry in batch)
        {
            entry.SerializeTo(bytes);
        }

        await store.WriteBytes(shardId, GetNewLogName(), bytes.ToArray(), cancellationToken);
    }
}
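
// The batching math above relies on every entry serializing to exactly
// WriteEntry.RowBytes() bytes. A minimal sketch of a type satisfying that contract; the
// actual field layout (hash length, cost, timestamp) is an assumption, hence the
// WriteEntrySketch name.
struct WriteEntrySketch
{
    public const int HashBytes = 32;   // assumed fixed-length key hash
    public byte[] Hash;                // exactly HashBytes long
    public uint Cost;
    public long TimestampTicks;

    public static int RowBytes() => HashBytes + sizeof(uint) + sizeof(long);

    public void SerializeTo(List<byte> bytes)
    {
        // Fixed-width fields keep every row the same size, so a log's entry count is
        // simply its byte length divided by RowBytes().
        bytes.AddRange(Hash);
        bytes.AddRange(BitConverter.GetBytes(Cost));
        bytes.AddRange(BitConverter.GetBytes(TimestampTicks));
    }
}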