/// <summary>
/// Garbage-collects the content of items covered by this delete set.
/// Only items that are deleted, not marked <c>Keep</c>, and accepted by
/// <paramref name="gcFilter"/> are GC'd.
/// </summary>
/// <param name="store">Struct store holding the per-client struct lists.</param>
/// <param name="gcFilter">Predicate deciding whether a deleted item may be GC'd.</param>
public void TryGcDeleteSet(StructStore store, Predicate<Item> gcFilter)
{
    foreach (var entry in Clients)
    {
        var structs = store.Clients[entry.Key];
        var ranges = entry.Value;

        // Walk the delete ranges right-to-left.
        for (int ri = ranges.Count - 1; ri >= 0; ri--)
        {
            var range = ranges[ri];
            var rangeEnd = range.Clock + range.Length;

            int pos = StructStore.FindIndexSS(structs, range.Clock);
            while (pos < structs.Count)
            {
                var s = structs[pos];
                if (s.Id.Clock >= rangeEnd)
                {
                    // Struct starts past the delete range; done with this range.
                    break;
                }

                if (s is Item item && item.Deleted && !item.Keep && gcFilter(item))
                {
                    item.Gc(store, parentGCd: false);
                }

                pos++;
            }
        }
    }
}
/// <summary>
/// Restores a new document reflecting the state captured by this snapshot,
/// by re-encoding the structs that existed at snapshot time and applying them
/// to a fresh <see cref="YDoc"/>.
/// </summary>
/// <param name="originDoc">
/// Source document. Must not have GC enabled: a GC-ed document may have dropped
/// the content of deleted items, so the snapshot state can no longer be
/// reconstructed faithfully.
/// </param>
/// <param name="opts">
/// Options for the new document; when <c>null</c>, the origin's options are
/// cloned with a new GUID.
/// </param>
/// <returns>A new <see cref="YDoc"/> containing the snapshot state.</returns>
/// <exception cref="InvalidOperationException">
/// Thrown when <paramref name="originDoc"/> has garbage collection enabled.
/// </exception>
public YDoc RestoreDocument(YDoc originDoc, YDocOptions opts = null)
{
    if (originDoc.Gc)
    {
        // Throw a specific exception type instead of bare Exception; still
        // caught by existing catch (Exception) callers.
        throw new InvalidOperationException("originDoc must not be garbage collected");
    }

    using var encoder = new UpdateEncoderV2();

    originDoc.Transact(tr =>
    {
        // Only clients with at least one struct at snapshot time are encoded.
        int size = StateVector.Count(kvp => kvp.Value /* clock */ > 0);
        encoder.RestWriter.WriteVarUint((uint)size);

        // Splitting the structs before writing them to the encoder.
        foreach (var kvp in StateVector)
        {
            int client = kvp.Key;
            int clock = kvp.Value;
            if (clock == 0)
            {
                continue;
            }

            // Split at the snapshot boundary so only whole structs are emitted.
            if (clock < originDoc.Store.GetState(client))
            {
                tr.Doc.Store.GetItemCleanStart(tr, new ID(client, clock));
            }

            var structs = originDoc.Store.Clients[client];
            var lastStructIndex = StructStore.FindIndexSS(structs, clock - 1);

            // Write # encoded structs.
            encoder.RestWriter.WriteVarUint((uint)(lastStructIndex + 1));
            encoder.WriteClient(client);

            // First clock written is 0.
            encoder.RestWriter.WriteVarUint(0);

            for (int i = 0; i <= lastStructIndex; i++)
            {
                structs[i].Write(encoder, 0);
            }
        }

        DeleteSet.Write(encoder);
    });

    var newDoc = new YDoc(opts ?? originDoc.CloneOptionsWithNewGuid());
    newDoc.ApplyUpdateV2(encoder.ToArray(), transactionOrigin: "snapshot");
    return newDoc;
}
/// <summary>Encodes all structs of <paramref name="client"/> starting at <paramref name="clock"/>.</summary>
/// <param name="encoder">Destination update encoder.</param>
/// <param name="structs">All structs by 'client'.</param>
/// <param name="client">Client whose structs are written.</param>
/// <param name="clock">Write structs starting with 'ID(client,clock)'.</param>
public static void WriteStructs(IUpdateEncoder encoder, IList<AbstractStruct> structs, int client, int clock)
{
    // Locate the first struct containing 'clock'.
    int start = StructStore.FindIndexSS(structs, clock);
    int count = structs.Count - start;

    // Header: struct count, client id, first clock.
    encoder.RestWriter.WriteVarUint((uint)count);
    encoder.WriteClient(client);
    encoder.RestWriter.WriteVarUint((uint)clock);

    // The first struct may start before 'clock'; write it with an offset.
    var first = structs[start];
    first.Write(encoder, clock - first.Id.Clock);

    // Remaining structs are written whole.
    for (int i = start + 1; i < structs.Count; i++)
    {
        structs[i].Write(encoder, 0);
    }
}
/// <summary>
/// Attempts to merge adjacent deleted / gc'd structs covered by this delete set.
/// Merging proceeds from right to left so earlier merges cannot hide later targets.
/// </summary>
/// <param name="store">Struct store holding the per-client struct lists.</param>
public void TryMergeDeleteSet(StructStore store)
{
    foreach (var entry in Clients)
    {
        var structs = store.Clients[entry.Key];
        var ranges = entry.Value;

        for (int ri = ranges.Count - 1; ri >= 0; ri--)
        {
            var range = ranges[ri];

            // Start with merging the item next to the last deleted item.
            int lastClock = range.Clock + range.Length - 1;
            int upper = Math.Min(structs.Count - 1, 1 + StructStore.FindIndexSS(structs, lastClock));

            for (int si = upper; si > 0 && structs[si].Id.Clock >= range.Clock; si--)
            {
                TryToMergeWithLeft(structs, si);
            }
        }
    }
}
/// <summary>
/// Reads a delete set from <paramref name="decoder"/> and applies it to this store:
/// structs overlapping a delete range are split at the range boundaries and deleted.
/// Range portions beyond the locally known state cannot be applied yet; they are
/// collected into a fresh DeleteSet and queued in <c>_pendingDeleteReaders</c>.
/// </summary>
/// <param name="decoder">Decoder positioned at an encoded delete set.</param>
/// <param name="transaction">Transaction in which splits and deletes are performed.</param>
public void ReadAndApplyDeleteSet(IDSDecoder decoder, Transaction transaction)
{
    // Accumulates delete ranges that cannot be applied yet.
    var unappliedDs = new DeleteSet();
    var numClients = decoder.Reader.ReadVarUint();

    for (int i = 0; i < numClients; i++)
    {
        decoder.ResetDsCurVal();

        var client = (int)decoder.Reader.ReadVarUint();
        var numberOfDeletes = decoder.Reader.ReadVarUint();

        if (!Clients.TryGetValue(client, out var structs))
        {
            // Unknown client: use an empty list so every range for this client
            // falls into 'unappliedDs' below.
            structs = new List<AbstractStruct>();
            // NOTE: Clients map is not updated.
        }

        var state = GetState(client);

        for (int deleteIndex = 0; deleteIndex < numberOfDeletes; deleteIndex++)
        {
            var clock = decoder.ReadDsClock();
            var clockEnd = clock + decoder.ReadDsLength();

            if (clock < state)
            {
                // The tail of the range past our local state must wait.
                if (state < clockEnd)
                {
                    unappliedDs.Add(client, state, clockEnd - state);
                }

                var index = StructStore.FindIndexSS(structs, clock);

                // We can ignore the case of GC and Delete structs, because we are going to skip them.
                var str = structs[index];

                // Split the first item if necessary.
                if (!str.Deleted && str.Id.Clock < clock)
                {
                    var splitItem = (str as Item).SplitItem(transaction, clock - str.Id.Clock);
                    structs.Insert(index + 1, splitItem);

                    // Increase, we now want to use the next struct.
                    index++;
                }

                // Delete every struct that starts inside [clock, clockEnd).
                while (index < structs.Count)
                {
                    str = structs[index++];

                    if (str.Id.Clock < clockEnd)
                    {
                        if (!str.Deleted)
                        {
                            // Split the last overlapping struct at the range end
                            // so only the covered part is deleted.
                            if (clockEnd < str.Id.Clock + str.Length)
                            {
                                var splitItem = (str as Item).SplitItem(transaction, clockEnd - str.Id.Clock);
                                structs.Insert(index, splitItem);
                            }

                            str.Delete(transaction);
                        }
                    }
                    else
                    {
                        break;
                    }
                }
            }
            else
            {
                // Entire range is beyond our local state.
                unappliedDs.Add(client, clock, clockEnd - clock);
            }
        }
    }

    if (unappliedDs.Clients.Count > 0)
    {
        // @TODO: No need for encoding+decoding ds anymore.
        using var unappliedDsEncoder = new DSEncoderV2();
        unappliedDs.Write(unappliedDsEncoder);
        _pendingDeleteReaders.Add(new DSDecoderV2(new MemoryStream(unappliedDsEncoder.ToArray())));
    }
}
/// <summary>
/// Finalizes the transaction at index <paramref name="i"/> in
/// <paramref name="transactionCleanups"/>: sorts/merges its delete set, fires
/// observer callbacks, garbage-collects or merges deleted structs, dispatches
/// subdocument changes, and then recurses into the next pending cleanup.
/// </summary>
/// <param name="transactionCleanups">All transactions queued for cleanup.</param>
/// <param name="i">Index of the transaction to clean up in this call.</param>
internal static void CleanupTransactions(IList<Transaction> transactionCleanups, int i)
{
    if (i < transactionCleanups.Count)
    {
        var transaction = transactionCleanups[i];
        var doc = transaction.Doc;
        var store = doc.Store;
        var ds = transaction.DeleteSet;
        var mergeStructs = transaction._mergeStructs;
        var actions = new List<Action>();

        try
        {
            ds.SortAndMergeDeleteSet();
            transaction.AfterState = store.GetStateVector();
            // Clear the active transaction before observers run.
            doc._transaction = null;

            actions.Add(() =>
            {
                doc.InvokeOnBeforeObserverCalls(transaction);
            });

            actions.Add(() =>
            {
                // Notify observers of each changed type that still exists.
                foreach (var kvp in transaction.Changed)
                {
                    var itemType = kvp.Key;
                    var subs = kvp.Value;

                    if (itemType._item == null || !itemType._item.Deleted)
                    {
                        itemType.CallObserver(transaction, subs);
                    }
                }
            });

            actions.Add(() =>
            {
                // Deep observe events.
                foreach (var kvp in transaction.ChangedParentTypes)
                {
                    var type = kvp.Key;
                    var events = kvp.Value;

                    // We need to think about the possibility that the user transforms the YDoc in the event.
                    if (type._item == null || !type._item.Deleted)
                    {
                        foreach (var evt in events)
                        {
                            if (evt.Target._item == null || !evt.Target._item.Deleted)
                            {
                                evt.CurrentTarget = type;
                            }
                        }

                        // Sort events by path length so that top-level events are fired first.
                        var sortedEvents = events.ToList();
                        sortedEvents.Sort((a, b) => a.Path.Count - b.Path.Count);
                        Debug.Assert(sortedEvents.Count > 0);

                        // NOTE(review): this appends to 'actions' while CallAll is
                        // consuming it — assumes CallAll tolerates growth during
                        // iteration; confirm against CallAll's implementation.
                        actions.Add(() =>
                        {
                            type.CallDeepEventHandlerListeners(sortedEvents, transaction);
                        });
                    }
                }
            });

            actions.Add(() =>
            {
                doc.InvokeOnAfterTransaction(transaction);
            });

            CallAll(actions);
        }
        finally
        {
            // Replace deleted items with ItemDeleted / GC.
            // This is where content is actually removed from the Yjs Doc.
            if (doc.Gc)
            {
                ds.TryGcDeleteSet(store, doc.GcFilter);
            }

            ds.TryMergeDeleteSet(store);

            // On all affected store.clients props, try to merge.
            foreach (var kvp in transaction.AfterState)
            {
                var client = kvp.Key;
                var clock = kvp.Value;

                if (!transaction.BeforeState.TryGetValue(client, out int beforeClock))
                {
                    beforeClock = 0;
                }

                if (beforeClock != clock)
                {
                    var structs = store.Clients[client];

                    // Merge right-to-left starting at the first changed position
                    // (never below index 1, since merging targets the left neighbor).
                    var firstChangePos = Math.Max(StructStore.FindIndexSS(structs, beforeClock), 1);

                    for (int j = structs.Count - 1; j >= firstChangePos; j--)
                    {
                        DeleteSet.TryToMergeWithLeft(structs, j);
                    }
                }
            }

            // Try to merge mergeStructs.
            // TODO: It makes more sense to transform mergeStructs to a DS, sort it, and merge from right to left
            // but at the moment DS does not handle duplicates.
            for (int j = 0; j < mergeStructs.Count; j++)
            {
                var client = mergeStructs[j].Id.Client;
                var clock = mergeStructs[j].Id.Clock;
                var structs = store.Clients[client];
                var replacedStructPos = StructStore.FindIndexSS(structs, clock);

                // Try merging the right neighbor into the replaced struct, then
                // the replaced struct into its left neighbor.
                if (replacedStructPos + 1 < structs.Count)
                {
                    DeleteSet.TryToMergeWithLeft(structs, replacedStructPos + 1);
                }

                if (replacedStructPos > 0)
                {
                    DeleteSet.TryToMergeWithLeft(structs, replacedStructPos);
                }
            }

            if (!transaction.Local)
            {
                // -1 marks "client had no state" so a genuinely unchanged clock
                // never triggers a client-id regeneration.
                if (!transaction.AfterState.TryGetValue(doc.ClientId, out int afterClock))
                {
                    afterClock = -1;
                }

                if (!transaction.BeforeState.TryGetValue(doc.ClientId, out int beforeClock))
                {
                    beforeClock = -1;
                }

                if (afterClock != beforeClock)
                {
                    doc.ClientId = YDoc.GenerateNewClientId();
                    // Debug.WriteLine($"{nameof(Transaction)}: Changed the client-id because another client seems to be using it.");
                }
            }

            // @todo: Merge all the transactions into one and provide send the data as a single update message.
            doc.InvokeOnAfterTransactionCleanup(transaction);
            doc.InvokeUpdateV2(transaction);

            // Propagate subdocument membership changes to the parent doc.
            foreach (var subDoc in transaction.SubdocsAdded)
            {
                doc.Subdocs.Add(subDoc);
            }

            foreach (var subDoc in transaction.SubdocsRemoved)
            {
                doc.Subdocs.Remove(subDoc);
            }

            doc.InvokeSubdocsChanged(transaction.SubdocsLoaded, transaction.SubdocsAdded, transaction.SubdocsRemoved);

            foreach (var subDoc in transaction.SubdocsRemoved)
            {
                subDoc.Destroy();
            }

            if (transactionCleanups.Count <= i + 1)
            {
                // Last queued cleanup: reset the queue and notify listeners.
                doc._transactionCleanups.Clear();
                doc.InvokeAfterAllTransactions(transactionCleanups);
            }
            else
            {
                // More transactions pending: clean up the next one.
                CleanupTransactions(transactionCleanups, i + 1);
            }
        }
    }
}