public async Task<Etag> AppendAsync(string topic, IEnumerable<RavenJObject> items)
		{
			if (items == null)
				return null;

			var batch = new WriteBatch();

			Etag etag = null;
			foreach (var item in items)
			{
				RavenJToken metadata;
				if (item.TryGetValue("@metadata", out metadata) == false)
					item["@metadata"] = metadata = new RavenJObject();
				// Tag each event with its topic so aggregations can filter by entity name.
				((RavenJObject)metadata)["Raven-Entity-Name"] = topic;
				etag = GetNextEtag();
				Slice key = "events/" + etag;
				batch.Put(key, RavenJTokenToStream(item));
			}

			await _storage.Writer.WriteAsync(batch);
			_appendEvent.PulseAll();
			return etag;
		}
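		// RavenJTokenToStream and GetNextEtag are referenced above but defined
		// elsewhere in the engine. A minimal sketch of the serialization helper,
		// assuming it just writes the token as JSON into a rewound MemoryStream
		// (an illustration, not necessarily the actual implementation):
		internal static Stream RavenJTokenToStream(RavenJToken token)
		{
			var stream = new MemoryStream();
			var streamWriter = new StreamWriter(stream);
			var jsonWriter = new JsonTextWriter(streamWriter);
			token.WriteTo(jsonWriter);
			jsonWriter.Flush();
			stream.Position = 0; // rewind so WriteBatch.Put reads from the start
			return stream;
		}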
		private async Task ReadAndUpdateSystemConfig()
		{
			Slice configKey = "system/config";
			SystemConfig systemConfig;
			using (var stream = _storage.Reader.Read(configKey))
			{
				systemConfig = stream != null
								   ? new JsonSerializer().Deserialize<SystemConfig>(new JsonTextReader(new StreamReader(stream)))
								   : new SystemConfig();
				// Bump the base on every startup so etags issued after a restart always
				// sort after any etag issued before it, even if the last ones were never persisted.
				systemConfig.EtagBase++;
				_sequentialUuidGenerator.EtagBase = systemConfig.EtagBase;
			}

			var writeBatch = new WriteBatch();
			writeBatch.Put(configKey, SmallObjectToMemoryStream(systemConfig));
			await _storage.Writer.WriteAsync(writeBatch);
		}
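		// SmallObjectToMemoryStream is likewise defined elsewhere. A minimal sketch,
		// assuming it serializes small config objects as JSON (illustrative only):
		private static Stream SmallObjectToMemoryStream(object obj)
		{
			var stream = new MemoryStream();
			var streamWriter = new StreamWriter(stream);
			new JsonSerializer().Serialize(streamWriter, obj);
			streamWriter.Flush();
			stream.Position = 0; // rewind before handing the stream to WriteBatch.Put
			return stream;
		}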
		public async Task CreateAggregationAsync(IndexDefinition indexDefinition)
		{
			var dynamicViewCompiler = new DynamicViewCompiler(indexDefinition.Name, indexDefinition, Path.Combine(_path, "Generators"));
			var generator = dynamicViewCompiler.GenerateInstance();

			// Persist the definition first so the aggregation survives restarts,
			// then register the live aggregator and start it in the background.
			var writeBatch = new WriteBatch();
			var memoryStream = SmallObjectToMemoryStream(indexDefinition);
			writeBatch.Put("aggregators/" + indexDefinition.Name, memoryStream);
			await _storage.Writer.WriteAsync(writeBatch);

			var aggregator = new Aggregator(this, indexDefinition.Name, generator);
			_aggregations.AddOrUpdate(indexDefinition.Name, aggregator, (s, viewGenerator) => aggregator);
			Background.Work(aggregator.StartAggregation);
		}
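		// Usage sketch for the two public entry points above. The engine instance,
		// topic, and map/reduce strings are illustrative assumptions, not taken
		// from this section:
		//
		//   await engine.CreateAggregationAsync(new IndexDefinition
		//   {
		//       Name = "EventCounts",
		//       Map = "from e in docs select new { e.Topic, Count = 1 }",
		//       Reduce = "from r in results group r by r.Topic into g " +
		//                "select new { Topic = g.Key, Count = g.Sum(x => x.Count) }"
		//   });
		//   await engine.AppendAsync("orders", new[] { new RavenJObject { { "Topic", "orders" } } });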
		internal static IEnumerable<LogReadResult> ReadFromLog(Stream logFile, BufferPool bufferPool)
		{
			var logReader = new LogReader(logFile, true, 0, bufferPool);
			Stream logRecordStream;

			while (logReader.TryReadRecord(out logRecordStream))
			{
				var batch = new WriteBatch();
				ulong seq;
				using (logRecordStream)
				{
					// Record header: 8-byte write sequence followed by a 4-byte operation count.
					var buffer = new byte[8];
					logRecordStream.ReadExactly(buffer, 8);
					seq = BitConverter.ToUInt64(buffer, 0);
					logRecordStream.ReadExactly(buffer, 4);
					var opCount = BitConverter.ToInt32(buffer, 0);

					for (var i = 0; i < opCount; i++)
					{
						logRecordStream.ReadExactly(buffer, 1);
						var op = (Operations)buffer[0];
						var keyCount = logRecordStream.Read7BitEncodedInt();
						var array = new byte[keyCount];
						logRecordStream.ReadExactly(array, keyCount);

						var key = new Slice(array);

						switch (op)
						{
							case Operations.Delete:
								batch.Delete(key);
								break;
							case Operations.Put:
								logRecordStream.ReadExactly(buffer, 4);
								// The size is written as 4 bytes (see WriteToLogAsync), so it must be
								// read back as an Int32; ToInt64 would pick up 4 stale bytes from the
								// 8-byte buffer.
								var size = BitConverter.ToInt32(buffer, 0);
								var value = new MemoryStream();
								logRecordStream.CopyTo(value, size, LogWriter.BlockSize);
								value.Position = 0; // rewind so the batch reads the value from the start
								batch.Put(key, value);
								break;
							default:
								throw new ArgumentException("Invalid operation type: " + op);
						}
					}
				}

				yield return new LogReadResult
					{
						WriteSequence = seq,
						WriteBatch = batch
					};
			}
		}
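		// On-disk record layout consumed above (and produced by WriteToLogAsync below):
		//   8 bytes            write sequence number
		//   4 bytes            operation count
		//   per operation:
		//     1 byte             op type (Put / Delete)
		//     7-bit encoded int  key length, followed by the key bytes
		//     Put only:          4-byte value size, followed by the value bytes
		//
		// Recovery usage sketch; memTable.Apply is an assumption about the
		// surrounding storage code, shown only to illustrate the intended flow:
		//
		//   using (var logFile = File.OpenRead(logPath))
		//   {
		//       foreach (var readResult in ReadFromLog(logFile, bufferPool))
		//           memTable.Apply(readResult.WriteBatch, readResult.WriteSequence);
		//   }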
		internal static Task WriteToLogAsync(WriteBatch[] writes, ulong seq, StorageState state, WriteOptions options)
		{
			return Task.Factory.StartNew(
				() =>
				{
					try
					{
						var opCount = writes.Sum(x => x._operations.Count);

						if (log.IsDebugEnabled) log.Debug("Writing {0} operations in seq {1}", opCount, seq);

						state.LogWriter.RecordStarted();

						// Record header: 8-byte sequence number then 4-byte op count,
						// mirroring the reads in ReadFromLog.
						var buffer = new byte[12];
						Bit.Set(buffer, 0, seq);
						Bit.Set(buffer, 8, opCount);
						state.LogWriter.Write(buffer, 0, 12);

						foreach (var operation in writes.SelectMany(writeBatch => writeBatch._operations))
						{
							buffer[0] = (byte)operation.Op;
							state.LogWriter.Write(buffer, 0, 1);
							state.LogWriter.Write7BitEncodedInt(operation.Key.Count);
							state.LogWriter.Write(operation.Key.Array, operation.Key.Offset, operation.Key.Count);
							if (operation.Op != Operations.Put) continue;

							Bit.Set(buffer, 0, operation.Handle.Size);
							state.LogWriter.Write(buffer, 0, 4);
							using (var stream = state.MemTable.Read(operation.Handle))
							{
								state.LogWriter.CopyFrom(stream);
							}
						}

						state.LogWriter.RecordCompleted(options.FlushToDisk);

						if (log.IsDebugEnabled) log.Debug("Wrote {0} operations in seq {1} to log.", opCount, seq);
					}
					catch (Exception e)
					{
						state.LogWriter.ResetToLastCompletedRecord();

						throw new LogWriterException(e);
					}
				});
		}
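		// Bit.Set is referenced above but not shown. A minimal sketch, assuming
		// little-endian encoding so the bytes round-trip through the BitConverter
		// reads in ReadFromLog on a little-endian platform:
		internal static class Bit
		{
			public static void Set(byte[] buffer, int offset, ulong value)
			{
				for (var i = 0; i < 8; i++)
					buffer[offset + i] = (byte)(value >> (i * 8));
			}

			public static void Set(byte[] buffer, int offset, int value)
			{
				for (var i = 0; i < 4; i++)
					buffer[offset + i] = (byte)(value >> (i * 8));
			}
		}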
		private async Task AggregateAsync()
		{
			var lastAggregatedEtag = (Etag) _lastAggregatedEtag;
			while (_aggregationEngine.CancellationToken.IsCancellationRequested == false)
			{
				// Pull the next batch of up to 1024 events past the last aggregated etag.
				var eventDatas = _aggregationEngine.Events(lastAggregatedEtag).Take(1024).ToArray();
				if (eventDatas.Length == 0)
				{
					await _aggregationEngine.WaitForAppendAsync(_appendEventState);
					continue;
				}
				try
				{
					var items = eventDatas.Select(x => new DynamicJsonObject(x.Data)).ToArray();
					var writeBatch = new WriteBatch();
					var groupedByReduceKey = ExecuteMaps(items);
					ExecuteReduce(groupedByReduceKey, writeBatch);
					lastAggregatedEtag = eventDatas.Last().Etag;
					var status = new RavenJObject {{"@etag", lastAggregatedEtag.ToString()}};
					writeBatch.Put(_aggStat, AggregationEngine.RavenJTokenToStream(status));
					await _aggregationEngine.Storage.Writer.WriteAsync(writeBatch);
				}
				catch (Exception e)
				{
					Log.ErrorException("Could not process aggregation", e);
					throw;
				}
				finally
				{
					// Publish progress and wake any waiters, whether or not this batch succeeded.
					Thread.VolatileWrite(ref _lastAggregatedEtag, lastAggregatedEtag);

					_aggregationCompleted.PulseAll();
				}
			}
		}
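		// _appendEvent / WaitForAppendAsync implement an async pulse: producers call
		// PulseAll after committing, consumers await until woken. A minimal sketch of
		// such an event (an assumption about the real implementation, which also
		// threads a state token through WaitForAppendAsync):
		public class AsyncEvent
		{
			private readonly object _locker = new object();
			private TaskCompletionSource<object> _tcs = new TaskCompletionSource<object>();

			public Task WaitAsync()
			{
				lock (_locker)
					return _tcs.Task;
			}

			public void PulseAll()
			{
				TaskCompletionSource<object> tcs;
				lock (_locker)
				{
					tcs = _tcs;
					_tcs = new TaskCompletionSource<object>();
				}
				tcs.TrySetResult(null); // wake every waiter registered before this pulse
			}
		}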
		private void ExecuteReduce(IEnumerable<IGrouping<dynamic, object>> groupedByReduceKey, WriteBatch writeBatch)
		{
			foreach (var grouping in groupedByReduceKey)
			{
				string reduceKey = grouping.Key;
				Slice key = "results/" + _name + "/" + reduceKey;
				var groupedResults = GetItemsToReduce(reduceKey, key, grouping);

				var robustEnumerator = new RobustEnumerator2(_aggregationEngine.CancellationToken, 50)
					{
						OnError = (exception, o) =>
						          Log.WarnException("Could not process reduce for aggregation " + _name + Environment.NewLine + o,
						                            exception)
					};

				var reduceResults =
					robustEnumerator.RobustEnumeration(groupedResults.GetEnumerator(), _generator.ReduceDefinition).ToArray();

				RavenJToken finalResult;
				switch (reduceResults.Length)
				{
					case 0:
						Log.Warn("FLYING PIGS!!! Could not find any results for a reduce on key {0} for aggregator {1}. Should not happen", reduceKey, _name);
						finalResult = new RavenJObject {{"Error", "Invalid reduce result was generated"}};
						break;
					case 1:
						finalResult = RavenJObject.FromObject(reduceResults[0]);
						break;
					default:
						finalResult = new RavenJArray(reduceResults.Select(RavenJObject.FromObject));
						break;
				}
				finalResult.EnsureCannotBeChangeAndEnableSnapshotting();
				_cache.Set(reduceKey, finalResult);
				writeBatch.Put(key, AggregationEngine.RavenJTokenToStream(finalResult));
			}
		}
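		// GetItemsToReduce is not shown in this section. Presumably it prepends the
		// previously reduced result for this key (from _cache, falling back to a
		// storage read of the "results/{name}/{reduceKey}" slice) to the newly mapped
		// items, so each batch re-reduces old state together with new data. A
		// hypothetical sketch, with invented helper names:
		//
		//   private IEnumerable<object> GetItemsToReduce(string reduceKey, Slice key, IEnumerable<object> newItems)
		//   {
		//       var previous = _cache.Get(reduceKey) ?? ReadReduceResultFromStorage(key);
		//       return previous == null ? newItems : new[] { previous }.Concat(newItems);
		//   }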