private async Task ReadAndUpdateSystemConfig()
		{
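			// Bump the persisted etag base on startup so sequential etags generated
			// after a restart can never collide with etags handed out before it.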
			Slice configKey = "system/config";
			SystemConfig systemConfig;
			using (var stream = _storage.Reader.Read(configKey))
			{
				systemConfig = stream != null
								   ? new JsonSerializer().Deserialize<SystemConfig>(new JsonTextReader(new StreamReader(stream)))
								   : new SystemConfig();
				systemConfig.EtagBase++;
				_sequentialUuidGenerator.EtagBase = systemConfig.EtagBase;
			}

			var writeBatch = new WriteBatch();
			writeBatch.Put(configKey, SmallObjectToMemoryStream(systemConfig));
			await _storage.Writer.WriteAsync(writeBatch);
		}
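
		// Appends the items to the topic's event stream. Each item is stamped with
		// the next sequential etag; the etag of the last appended item is returned.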
		public async Task<Etag> AppendAsync(string topic, IEnumerable<RavenJObject> items)
		{
			if (items == null)
				return null;

			var batch = new WriteBatch();

			Etag etag = null;
			foreach (var item in items)
			{
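				// Make sure the item carries a metadata object and tag it with the topic.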
				RavenJToken metadata;
				if (item.TryGetValue("@metadata", out metadata) == false)
					item["@metadata"] = metadata = new RavenJObject();
				((RavenJObject)metadata)["Raven-Entity-Name"] = topic;
				etag = GetNextEtag();
				Slice key = "events/" + etag;
				batch.Put(key, RavenJTokenToStream(item));
			}

			await _storage.Writer.WriteAsync(batch);
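			// Wake any aggregators blocked in WaitForAppendAsync waiting for new events.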
			_appendEvent.PulseAll();
			return etag;
		}
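
		// Compiles the index definition into a live map/reduce generator, persists the
		// definition, registers the aggregator and starts it on a background thread.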
		public async Task CreateAggregationAsync(IndexDefinition indexDefinition)
		{
			var dynamicViewCompiler = new DynamicViewCompiler(indexDefinition.Name, indexDefinition, Path.Combine(_path, "Generators"));
			var generator = dynamicViewCompiler.GenerateInstance();

			var writeBatch = new WriteBatch();
			var memoryStream = SmallObjectToMemoryStream(indexDefinition);
			writeBatch.Put("aggregators/" + indexDefinition.Name, memoryStream);
			await _storage.Writer.WriteAsync(writeBatch);

			var aggregator = new Aggregator(this, indexDefinition.Name, generator);
			_aggregations.AddOrUpdate(indexDefinition.Name, aggregator, (s, viewGenerator) => aggregator);
			Background.Work(aggregator.StartAggregation);
		}
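
		// Replays write batches from a transaction log. Each record is an 8-byte write
		// sequence and a 4-byte operation count, followed by one entry per operation:
		// a 1-byte opcode, a 7-bit-encoded key length, the key bytes and, for puts,
		// a 4-byte value length and the value bytes.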
		internal static IEnumerable<LogReadResult> ReadFromLog(Stream logFile, BufferPool bufferPool)
		{
			var logReader = new LogReader(logFile, true, 0, bufferPool);
			Stream logRecordStream;

			while (logReader.TryReadRecord(out logRecordStream))
			{
				var batch = new WriteBatch();
				ulong seq;
				using (logRecordStream)
				{
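					// Record header: 8-byte write sequence, then a 4-byte operation count.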
					var buffer = new byte[8];
					logRecordStream.ReadExactly(buffer, 8);
					seq = BitConverter.ToUInt64(buffer, 0);
					logRecordStream.ReadExactly(buffer, 4);
					var opCount = BitConverter.ToInt32(buffer, 0);

					for (var i = 0; i < opCount; i++)
					{
						logRecordStream.ReadExactly(buffer, 1);
						var op = (Operations)buffer[0];
						var keyCount = logRecordStream.Read7BitEncodedInt();
						var array = new byte[keyCount];
						logRecordStream.ReadExactly(array, keyCount);

						var key = new Slice(array);

						switch (op)
						{
							case Operations.Delete:
								batch.Delete(key);
								break;
							case Operations.Put:
								logRecordStream.ReadExactly(buffer, 4);
								// Only 4 bytes were read, so decode the length as an Int32; using
								// ToInt64 here would pick up stale bytes from the earlier sequence read.
								var size = BitConverter.ToInt32(buffer, 0);
								var value = new MemoryStream();
								logRecordStream.CopyTo(value, size, LogWriter.BlockSize);
								value.Position = 0; // rewind so the value can be read back from the start
								batch.Put(key, value);
								break;
							default:
								throw new ArgumentException("Invalid operation type: " + op);
						}
					}
				}

				yield return new LogReadResult
					{
						WriteSequence = seq,
						WriteBatch = batch
					};
			}
		}
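
		// Background loop: drains events in batches of up to 1,024, runs map/reduce
		// over them and persists the results together with the last aggregated etag.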
		private async Task AggregateAsync()
		{
			var lastAggregatedEtag = (Etag) _lastAggregatedEtag;
			while (_aggregationEngine.CancellationToken.IsCancellationRequested == false)
			{
				var eventDatas = _aggregationEngine.Events(lastAggregatedEtag).Take(1024)
					.ToArray();
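				// Nothing new past the last aggregated etag: block until the next append.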
				if (eventDatas.Length == 0)
				{
					await _aggregationEngine.WaitForAppendAsync(_appendEventState);
					continue;
				}
				try
				{
					var items = eventDatas.Select(x => new DynamicJsonObject(x.Data)).ToArray();
					var writeBatch = new WriteBatch();
					var groupedByReduceKey = ExecuteMaps(items);
					ExecuteReduce(groupedByReduceKey, writeBatch);
					lastAggregatedEtag = eventDatas.Last().Etag;
					var status = new RavenJObject {{"@etag", lastAggregatedEtag.ToString()}};
					writeBatch.Put(_aggStat, AggregationEngine.RavenJTokenToStream(status));
					await _aggregationEngine.Storage.Writer.WriteAsync(writeBatch);
				}
				catch (Exception e)
				{
					Log.ErrorException("Could not process aggregation", e);
					throw;
				}
				finally
				{
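					// Publish progress and wake any waiters even when the batch failed
					// (the write is volatile so readers on other threads see the new etag).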
					Thread.VolatileWrite(ref _lastAggregatedEtag, lastAggregatedEtag);

					_aggregationCompleted.PulseAll();
				}
			}
		}
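
		// Reduces each group of mapped results: the new items are combined with any
		// previously stored results for the same reduce key, and the final result is
		// cached and queued for persistence in the shared write batch.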
		private void ExecuteReduce(IEnumerable<IGrouping<dynamic, object>> groupedByReduceKey, WriteBatch writeBatch)
		{
			foreach (var grouping in groupedByReduceKey)
			{
				string reduceKey = grouping.Key;
				Slice key = "results/" + _name + "/" + reduceKey;
				var groupedResults = GetItemsToReduce(reduceKey, key, grouping);

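				// Enumerate with error isolation: a failing entry is logged and skipped
				// rather than aborting the entire aggregation batch.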
				var robustEnumerator = new RobustEnumerator2(_aggregationEngine.CancellationToken, 50)
					{
						OnError = (exception, o) =>
						          Log.WarnException("Could not process reduce for aggregation " + _name + Environment.NewLine + o,
						                            exception)
					};

				var reduceResults =
					robustEnumerator.RobustEnumeration(groupedResults.GetEnumerator(), _generator.ReduceDefinition).ToArray();

				RavenJToken finalResult;
				switch (reduceResults.Length)
				{
					case 0:
						Log.Warn("FLYING PIGS!!! Could not find any results for a reduce on key {0} for aggregator {1}. Should not happen", reduceKey, _name);
						finalResult = new RavenJObject {{"Error", "Invalid reduce result was generated"}};
						break;
					case 1:
						finalResult = RavenJObject.FromObject(reduceResults[0]);
						break;
					default:
						finalResult = new RavenJArray(reduceResults.Select(RavenJObject.FromObject));
						break;
				}
				finalResult.EnsureCannotBeChangeAndEnableSnapshotting();
				_cache.Set(reduceKey, finalResult);
				writeBatch.Put(key, AggregationEngine.RavenJTokenToStream(finalResult));
			}
		}