/// <summary> Retrieves a list of all events written to a stream so far. </summary>
/// <remarks>
/// The provided stream is not consumed: reading happens through a fresh
/// <see cref="EventStream{TEvent}"/> over the same storage, starting at position 0.
/// </remarks>
public static async Task<IReadOnlyList<KeyValuePair<uint, TEvent>>> GetEvents<TEvent>(
    EventStream<TEvent> stream,
    CancellationToken cancel = default(CancellationToken))
    where TEvent : class
{
    // Re-open the stream over the same storage so enumeration starts from
    // the very first event, regardless of the caller's current position.
    var reader = new EventStream<TEvent>(stream.Storage);
    var events = new List<KeyValuePair<uint, TEvent>>();

    while (true)
    {
        // Start fetching the next batch in the background, then drain
        // whatever events are already available locally.
        var fetching = reader.BackgroundFetchAsync(cancel);

        for (var e = reader.TryGetNext(); e != null; e = reader.TryGetNext())
            events.Add(new KeyValuePair<uint, TEvent>(reader.Sequence, e));

        // The returned callback commits the fetched data and reports
        // whether more events may be available.
        var commit = await fetching;
        if (!commit()) break;
    }

    return events;
}
/// <summary>
/// Events discarded with <see cref="EventStream{T}.DiscardUpTo"/> must not be
/// returned by a fresh stream over the same storage.
/// </summary>
public async Task discard()
{
    var driver = new MemoryStorageDriver();
    var stream = new EventStream<IStreamEvent>(driver);

    // Write events carrying integers 0..19. Given the expectations below,
    // the event carrying integer i ends up at sequence i + 1.
    await stream.WriteAsync(Enumerable.Range(0, 20)
        .Select(i => (IStreamEvent) new IntegerEvent(i)).ToArray());

    // Re-open the stream and discard everything before sequence 11,
    // i.e. the events carrying integers 0..9.
    stream = new EventStream<IStreamEvent>(driver);
    await stream.DiscardUpTo(11);

    Func<bool> shouldContinue;
    var next = 10;
    do
    {
        // Standard fetch/drain pattern: fetch in the background while
        // consuming the locally cached events.
        var task = stream.BackgroundFetchAsync();

        IStreamEvent e;
        while ((e = stream.TryGetNext()) != null)
        {
            // Remaining events must come back in order, starting at 10.
            Assert.Equal(next, ((IntegerEvent) e).Integer);
            ++next;
        }

        shouldContinue = await task;
    } while (shouldContinue());

    // Fixed argument order: xUnit expects Assert.Equal(expected, actual),
    // so a failure reports "expected 20" rather than "expected <next>".
    Assert.Equal(20, next);
}
/// <summary>
/// Catch up with the stream (updating the state) until there are no new
/// events available.
/// </summary>
/// <remarks>
/// Periodically performs a projection save/load cycle (every
/// <c>EventsBetweenCacheSaves</c> processed events) to keep memory usage
/// bounded during a long initial catch-up.
/// </remarks>
/// <param name="cancel"> Cancellation token, flowed to fetches and save/load. </param>
public async Task CatchUpAsync(CancellationToken cancel = default)
{
    Func<bool> finishFetch;

    // Local variable, to avoid reaching the limit when not doing the
    // initial catch-up.
    var eventsSinceLastCacheLoad = 0u;

    do
    {
        var fetchTask = Stream.BackgroundFetchAsync(cancel);

        // We have started fetching the next batch of events in
        // the background, so we might as well start processing
        // those we already have. This pattern is optimized for
        // when fetching events takes longer than processing them,
        // and remains safe (i.e. no runaway memory usage) when
        // the reverse is true.
        // NOTE(review): CatchUpLocal presumably returns the number of
        // events it applied — confirm against its definition.
        eventsSinceLastCacheLoad += CatchUpLocal();

        // Maybe we have reached the event count limit before our
        // save/load cycle ?
        if (eventsSinceLastCacheLoad >= EventsBetweenCacheSaves)
        {
            eventsSinceLastCacheLoad = 0;
            var sw = Stopwatch.StartNew();
            if (await _projection.TrySaveAsync(cancel))
            {
                // Reset first, to release any used memory.
                _projection.Reset();

                await _projection.TryLoadAsync(cancel);

                // Sanity check: the reloaded projection must resume at the
                // exact sequence it was saved at, otherwise events would be
                // skipped or applied twice.
                if (_projection.Sequence != Stream.Sequence)
                {
                    throw new InvalidOperationException(
                        "Projection Save/Load cycle failed to restore sequence.");
                }

                _log.Info($"[ES read] cache save/load cycle in {sw.Elapsed} at seq {_projection.Sequence}.");
            }
        }

        // Commit the background fetch; the callback reports whether more
        // events may be available.
        finishFetch = await fetchTask;

    } while (finishFetch());

    // We reach this point if 1° all events cached in the stream have
    // been processed and 2° the fetch operation returned no new events
    NotifyRefresh();
}
/// <summary>
/// Catch up with the stream (updating the state) until there are no new
/// events available.
/// </summary>
/// <param name="cancel"> Cancellation token, flowed to the background fetches. </param>
public async Task CatchUpAsync(CancellationToken cancel = default)
{
    // `default` literal instead of `default(CancellationToken)`, for
    // consistency with the other CatchUpAsync overload in this project.
    Func<bool> finishFetch;
    do
    {
        var fetchTask = Stream.BackgroundFetchAsync(cancel);

        // We have started fetching the next batch of events in
        // the background, so we might as well start processing
        // those we already have. This pattern is optimized for
        // when fetching events takes longer than processing them,
        // and remains safe (i.e. no runaway memory usage) when
        // the reverse is true.
        CatchUpLocal();

        // Commit the background fetch; the callback reports whether more
        // events may be available.
        finishFetch = await fetchTask;

    } while (finishFetch());

    // We reach this point if 1° all events cached in the stream have
    // been processed and 2° the fetch operation returned no new events
    SyncStep++;
}
/// <summary> Fetches a stream's events and places it in <see cref="Fetched"/>. </summary>
/// <remarks>
/// Console command: `fetch name stream limit?`. Downloads all events (or all
/// events from `limit` onwards) into an in-memory list keyed by `name`.
/// </remarks>
private static void CmdFetch(string[] args)
{
    if (args.Length < 2)
    {
        Console.WriteLine("Usage: fetch <name> <stream> <limit>?");
        Console.WriteLine("Downloads events from remote stream, stores in-memory.");
        Console.WriteLine(" limit: if provided, only fetch events after this seq (included).");
        return;
    }

    // Refuse to overwrite an already-fetched stream of the same name.
    var name = args[0];
    if (Fetched.ContainsKey(name))
    {
        Console.WriteLine("Fetched stream `{0}` already exists.", name);
        Console.WriteLine("Use `drop {0}` to drop it.", name);
        return;
    }

    // Optional third argument: first sequence number to fetch.
    var limit = 0u;
    if (args.Length == 3)
    {
        if (!uint.TryParse(args[2], out limit))
        {
            Console.WriteLine("Could not parse limit: {0}", args[2]);
            return;
        }
    }

    var sw = Stopwatch.StartNew();

    // Connect read-only: this command never writes to the remote stream.
    var connection = new StorageConfiguration(Parse(args[1])) { ReadOnly = true };
    connection.Trace = true;

    var driver = connection.Connect();
    using (Release(driver))
    {
        var stream = new EventStream<JObject>(driver);
        var list = new List<EventData>();
        var start = 0L;

        Status("Connecting...");

        // Blocking on the async body is acceptable here: this is a
        // synchronous console command with no synchronization context.
        Task.Run((Func<Task>)(async () =>
        {
            // Report total size and last sequence before fetching, so the
            // progress display below has totals to compare against.
            Status("Current size:");
            var maxPos = await driver.GetPositionAsync();
            Console.WriteLine("Current size: {0:F2} MB", maxPos / (1024.0 * 1024.0));

            Status("Current seq:");
            var maxSeq = await driver.GetLastKeyAsync();
            Console.WriteLine("Current seq: {0}", maxSeq);

            // Unwrap the driver decorators (stats and/or read-only wrappers,
            // in either order) to reach a possible Azure driver underneath.
            var asStatsDriver = driver as StatsDriverWrapper;
            var asReadOnlyDriver = (asStatsDriver?.Inner ?? driver) as ReadOnlyDriverWrapper;
            var asAzure = (asReadOnlyDriver?.Wrapped ?? asStatsDriver?.Inner ?? driver) as AzureStorageDriver;

            if (asAzure != null)
            {
                // Azure streams span multiple blobs: print each blob's size
                // and the first sequence number it contains.
                for (var i = 0; i < asAzure.Blobs.Count; ++i)
                {
                    Console.WriteLine("Blob {0}: {1:F2} MB from seq {2}",
                        asAzure.Blobs[i].Name,
                        asAzure.Blobs[i].Properties.Length / (1024.0 * 1024.0),
                        i < asAzure.FirstKey.Count ? asAzure.FirstKey[i] : maxSeq);
                }
            }

            if (limit > 0)
            {
                // Skip events before `limit`; `start` becomes the byte
                // position baseline for the progress/throughput display.
                Status($"Moving to seq {limit} ..");
                await stream.DiscardUpTo(limit);
                start = stream.Position;
            }

            // Fetch/drain loop: fetch the next batch in the background
            // while appending the locally cached events to the list.
            Func<bool> more;
            do
            {
                var fetch = stream.BackgroundFetchAsync();
                JObject obj;
                while ((obj = stream.TryGetNext()) != null)
                {
                    list.Add(new EventData(stream.Sequence, obj));
                }

                Status("Fetching: {0}/{1} ({2:F2}/{3:F2} MB)",
                    stream.Sequence, maxSeq,
                    (stream.Position - start) / (1024.0 * 1024.0),
                    (maxPos - start) / (1024.0 * 1024.0));

                more = await fetch;
            } while (more());
        })).Wait();

        Console.WriteLine("Fetched `{0}` ({1} events, {2:F2} MB) in {3:F2}s.",
            name, list.Count,
            (stream.Position - start) / (1024.0 * 1024.0),
            sw.ElapsedMilliseconds / 1000.0);

        // Register the result, show a preview, and make it the current
        // working list if none is selected yet.
        Fetched.Add(name, list);
        Peek(list);
        if (Current == null)
        {
            Current = list;
        }
    }
}
/// <summary> Copies from source to destination, returns last position in source. </summary>
/// <param name="srcname"> Storage configuration string of the source stream. </param>
/// <param name="dstname"> Storage configuration string of the destination stream. </param>
/// <param name="maxseq"> Last sequence number to copy (inclusive). </param>
/// <returns> The byte position reached in the source stream. </returns>
private static long BackupCopy(string srcname, string dstname, uint maxseq)
{
    var srcDriver = new StorageConfiguration(Parse(srcname)) { ReadOnly = true }.Connect();
    using (Release(srcDriver))
    {
        var dstDriver = new StorageConfiguration(Parse(dstname)).Connect();
        using (Release(dstDriver))
        {
            var src = new EventStream<JObject>(srcDriver);
            var dst = new MigrationStream<JObject>(dstDriver);
            var sw = Stopwatch.StartNew();
            var events = 0;
            var initial = 0L;

            Status("Connecting...");

            // Blocking on the async body is acceptable here: this is a
            // synchronous console command with no synchronization context.
            Task.Run((Func<Task>)(async () =>
            {
                Status("Current source size: ...");
                var maxPos = await srcDriver.GetPositionAsync();
                Console.WriteLine("Current source size: {0:F2} MB", maxPos / (1024.0 * 1024.0));

                Status("Current source seq: ...");
                var maxSeq = await srcDriver.GetLastKeyAsync();
                Console.WriteLine("Current source seq: {0}", maxSeq);

                Status("Current destination seq: ...");
                var bakSeq = await dstDriver.GetLastKeyAsync();
                Console.WriteLine("Current destination seq: {0}", bakSeq);

                // Incremental backup: nothing to do if the destination
                // already contains everything the source has.
                if (bakSeq >= maxSeq)
                {
                    Console.WriteLine("Destination already up-to-date.");
                    return;
                }

                // Safe cast instead of a hard cast: consistent with CmdFetch,
                // and avoids an InvalidCastException if the driver is not
                // wrapped in a ReadOnlyDriverWrapper.
                var asAzure = (srcDriver as ReadOnlyDriverWrapper)?.Wrapped as AzureStorageDriver;
                if (asAzure != null)
                {
                    // Azure streams span multiple blobs: print each blob's
                    // size and the first sequence number it contains.
                    for (var i = 0; i < asAzure.Blobs.Count; ++i)
                    {
                        Console.WriteLine("Blob {0}: {1:F2} MB from seq {2}",
                            asAzure.Blobs[i].Name,
                            asAzure.Blobs[i].Properties.Length / (1024.0 * 1024.0),
                            i < asAzure.FirstKey.Count ? asAzure.FirstKey[i] : maxSeq);
                    }
                }

                // Resume after the last event already present in the
                // destination.
                if (bakSeq > 0)
                {
                    Status("Skipping to seq {0}...", bakSeq);
                    await src.DiscardUpTo(bakSeq + 1);
                    Console.WriteLine("Skipping to seq {0} done !", bakSeq);
                }

                initial = src.Position;

                // Fetch/drain loop: fetch the next batch in the background
                // while writing the locally cached events to the destination.
                Func<bool> more;
                do
                {
                    var fetch = src.BackgroundFetchAsync();

                    var list = new List<KeyValuePair<uint, JObject>>();
                    JObject obj;
                    while ((obj = src.TryGetNext()) != null)
                    {
                        list.Add(new KeyValuePair<uint, JObject>(src.Sequence, obj));
                    }

                    // Drop events beyond the requested maximum sequence.
                    while (list.Count > 0 && list[list.Count - 1].Key > maxseq)
                    {
                        list.RemoveAt(list.Count - 1);
                    }

                    // Count AFTER trimming, so the final report reflects the
                    // number of events actually written to the destination
                    // (the original counted before trimming, inflating it).
                    events += list.Count;

                    await dst.WriteAsync(list);

                    Status("{0}/{1} ({2:F2}/{3:F2} MB)",
                        src.Sequence, maxSeq,
                        src.Position / (1024.0 * 1024.0),
                        maxPos / (1024.0 * 1024.0));

                    more = await fetch;
                } while (more());
            })).Wait();

            Console.WriteLine("{0} events ({1:F2} MB) in {2:F2}s.",
                events,
                (src.Position - initial) / (1024.0 * 1024.0),
                sw.ElapsedMilliseconds / 1000.0);

            return src.Position;
        }
    }
}