/// <summary>
/// Opens a read stream for the blob behind <paramref name="key"/>, recording
/// the access with the owning shard's usage tracker first.
/// </summary>
/// <param name="key">Cache key identifying the blob to read.</param>
/// <param name="cancellationToken">Token to cancel the underlying store read.</param>
/// <returns>The task returned by the store's ReadStream call.</returns>
public Task<Stream> GetStream(CacheKey key, CancellationToken cancellationToken)
{
    var keyHash = hasher.Hash(key);
    // Record the read so frequency-based eviction sees this access.
    var shard = shards[keyHash.Shard];
    shard.PingUsed(keyHash.ReadId);
    return store.ReadStream(keyHash.Shard, keyHash.BlobKey, cancellationToken);
}
/// <summary>
/// Reads the persisted 8-byte size value stored under <c>KeyName</c> for this
/// shard. Returns 0 when the blob is missing or truncated.
/// </summary>
/// <returns>The decoded size, or 0 when no (complete) value is stored.</returns>
async Task<ulong> ReadSize()
{
    using (var readStream = await store.ReadStream(shardId, KeyName, CancellationToken.None))
    {
        if (readStream == null)
        {
            return 0;
        }

        var buffer = new byte[8];
        var offset = 0;
        // Stream.ReadAsync may return fewer bytes than requested; loop until
        // the full 8 bytes are read. (The original ignored the return value,
        // which could decode a partially filled buffer on a short read.)
        while (offset < buffer.Length)
        {
            var bytesRead = await readStream.ReadAsync(buffer, offset, buffer.Length - offset);
            if (bytesRead == 0)
            {
                // Truncated blob: treat as "no recorded size" rather than
                // decoding garbage from the unfilled tail of the buffer.
                return 0;
            }
            offset += bytesRead;
        }

        return BitConverter.ToUInt64(buffer, 0);
    }
}
/// <summary>
/// Background loop that loads previously persisted read-frequency data for
/// this shard, then periodically flushes the usage tracker back to the store
/// once enough new reads have accumulated. Exits via OperationCanceledException
/// when <paramref name="cancellationToken"/> is signaled; any other exception
/// is recorded in <c>exceptionLog</c> and ends the loop.
/// </summary>
/// <param name="cancellationToken">Stops the loop when canceled.</param>
async Task FrequencyTrackingPersistenceRuntime(CancellationToken cancellationToken)
{
    try
    {
        // Merge any read-frequency data persisted by a previous run.
        var stream = await store.ReadStream(shardId, "reads", cancellationToken);
        if (stream != null)
        {
            await usage.MergeLoad(stream, cancellationToken);
        }

        var lastFlushedPingCount = usage.PingCount();
        while (!cancellationToken.IsCancellationRequested)
        {
            await Task.Delay(options.ReadInfoFlushIntervalMs, cancellationToken);
            cancellationToken.ThrowIfCancellationRequested();

            // Sample the counter *after* the delay so pings that arrived while
            // sleeping count toward this cycle's flush decision. (The original
            // sampled before the delay, hiding an interval's worth of reads.)
            var pingCount = usage.PingCount();

            // Only flush if we've gotten more than 100 reads since the last
            // flush. The baseline advances only on flush; the original advanced
            // it every iteration, so steady traffic below 100 reads/interval
            // would accumulate unflushed data indefinitely.
            if (pingCount > lastFlushedPingCount + 100)
            {
                // We don't want this write to be cancelable
                await store.WriteBytes(shardId, "reads", usage.Serialize(), CancellationToken.None);
                lastFlushedPingCount = pingCount;
            }
        }
    }
    catch (Exception e) when (!(e is OperationCanceledException))
    {
        // Cancellation propagates (filter excludes it); everything else is
        // logged so a failing background loop can't take down the host.
        exceptionLog.Enqueue(e);
    }
}
/// <summary>
/// Persists <paramref name="entries"/> as a new write log, opportunistically
/// merging the smallest existing logs into it (and deleting them afterwards)
/// so the shard's log count stays bounded. Falls back to an unmerged flush
/// when the new batch alone nearly fills a log.
/// </summary>
/// <param name="entries">New write entries to persist.</param>
/// <param name="cancellationToken">Cancels list/read/write/delete store calls.</param>
async Task WriteLogsMerged(List<WriteEntry> entries, CancellationToken cancellationToken)
{
    var rowBytes = WriteEntry.RowBytes();

    // Write logs unmerged unless we have too few entries (batch already fills
    // ~90% of a log on its own).
    var entryThreshold = (int)(.9 * (double)options.MaxWriteLogSize / (double)rowBytes);
    if (entries.Count > entryThreshold)
    {
        await FlushLogUnmerged();
        return;
    }

    // List existing logs, smallest first, so we can merge as many as possible.
    var logList = (await store.List(shardId, WriteLogsDir(), cancellationToken)).ToList();
    logList.Sort((a, b) => a.SizeInBytes.CompareTo(b.SizeInBytes));

    var logs = new List<IBlobInfo>();
    ulong totalBytes = (ulong)(entries.Count * rowBytes);

    // Greedily take logs until the merged result would exceed the size cap.
    foreach (var log in logList)
    {
        if (totalBytes + log.SizeInBytes > options.MaxWriteLogSize)
        {
            break;
        }
        logs.Add(log);
        totalBytes += log.SizeInBytes;
    }

    // Buffer new and old entries in memory; bounded by MaxWriteLogSize above.
    using (var replacementBytes = new MemoryStream((int)totalBytes))
    {
        // Serialize the new entries first.
        var entryBuffer = new List<byte>(rowBytes);
        foreach (var entry in entries)
        {
            entryBuffer.Clear();
            entry.SerializeTo(entryBuffer);
            replacementBytes.Write(entryBuffer.ToArray(), 0, entryBuffer.Count);
        }

        // Append the contents of the old logs being merged in.
        foreach (var mergeLog in logs)
        {
            using (var stream = await store.ReadStream(shardId, mergeLog.KeyName, cancellationToken))
            {
                if (stream != null)
                {
                    await stream.CopyToAsync(replacementBytes, 81920, cancellationToken);
                }
            }
        }

        // Write the merged log. (The original called .ToArray() twice here,
        // copying the entire merged payload a second time for no reason.)
        await store.WriteBytes(shardId, GetNewLogName(), replacementBytes.ToArray(), cancellationToken);
    }

    // Delete the merged-away logs only after the replacement is safely written.
    foreach (var mergeLog in logs)
    {
        await store.Delete(shardId, mergeLog.KeyName, cancellationToken);
    }
}