async Task WriteLogsMerged(List<WriteEntry> entries, CancellationToken cancellationToken) {
    // If the new entries alone would nearly fill a whole log (~90% of the max
    // size), there is nothing worth merging; write them out unmerged instead.
    var entryThreshold = (int)(0.9 * options.MaxWriteLogSize / WriteEntry.RowBytes());
    if (entries.Count > entryThreshold) {
        await FlushLogUnmerged();
        return;
    }

    // List the existing logs and sort them smallest first so we can merge as
    // many small logs as possible into a single write.
    var logList = (await store.List(shardId, WriteLogsDir(), cancellationToken)).ToList();
    logList.Sort((a, b) => a.SizeInBytes.CompareTo(b.SizeInBytes));

    var logs = new List<IBlobInfo>();
    ulong totalBytes = (ulong)(entries.Count * WriteEntry.RowBytes());

    // Collect logs into `logs` until adding another would exceed the max log size.
    foreach (var log in logList) {
        if (totalBytes + log.SizeInBytes > options.MaxWriteLogSize) {
            break;
        }
        logs.Add(log);
        totalBytes += log.SizeInBytes;
    }

    // Buffer the new and old entries in a memory stream sized for the whole merge.
    var replacementBytes = new MemoryStream((int)totalBytes);

    // Serialize the new entries first.
    var entryBuffer = new List<byte>(WriteEntry.RowBytes());
    foreach (var entry in entries) {
        entryBuffer.Clear();
        entry.SerializeTo(entryBuffer);
        replacementBytes.Write(entryBuffer.ToArray(), 0, entryBuffer.Count);
    }

    // Append the bytes of the old logs being merged in.
    foreach (var mergeLog in logs) {
        using (var stream = await store.ReadStream(shardId, mergeLog.KeyName, cancellationToken)) {
            if (stream != null) {
                await stream.CopyToAsync(replacementBytes, 81920, cancellationToken);
            }
        }
    }

    // Write the merged log under a new name.
    var fileBytes = replacementBytes.ToArray();
    await store.WriteBytes(shardId, GetNewLogName(), fileBytes, cancellationToken);

    // Delete the old logs that were merged into the new one.
    foreach (var mergeLog in logs) {
        await store.Delete(shardId, mergeLog.KeyName, cancellationToken);
    }
}
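
// The threshold math above is easiest to see with concrete numbers. The sketch
// below is not part of the original routine; the 4 MiB max log size and 64-byte
// row size are assumed values chosen purely for illustration.
static int ExampleMergeThreshold() {
    const ulong assumedMaxWriteLogSize = 4 * 1024 * 1024; // stand-in for options.MaxWriteLogSize
    const int assumedRowBytes = 64;                        // stand-in for WriteEntry.RowBytes()

    // Merge only when the new entries fill less than ~90% of one log:
    // 0.9 * 4,194,304 / 64 = 58,982 entries with these assumed numbers.
    return (int)(0.9 * assumedMaxWriteLogSize / assumedRowBytes);
}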