/// <summary>
/// Drains the queue of pending document changes and raises the Changed event
/// for each batch. Returns true if at least one batch of notifications was posted.
/// </summary>
/// <returns><c>true</c> if any notifications were posted, otherwise <c>false</c>.</returns>
internal bool PostChangeNotifications()
{
    bool posted = false;
    // This is a 'while' instead of an 'if' because when we finish posting notifications, there
    // might be new ones that have arrived as a result of notification handlers making document
    // changes of their own (the replicator manager will do this.) So we need to check again.
    // NOTE: guard against a null Storage (consistent with the other overload of this
    // method in this file, which checks 'Storage == null ||' before 'InTransaction').
    while ((Storage == null || !Storage.InTransaction) && _isOpen && !_isPostingChangeNotifications
           && _changesToNotify != null && _changesToNotify.Count > 0) {
        try {
            _isPostingChangeNotifications = true;

            // Snapshot and clear the pending list so handlers that enqueue new
            // changes don't interfere with this iteration.
            IList<DocumentChange> outgoingChanges = new List<DocumentChange>();
            foreach (var change in _changesToNotify) {
                outgoingChanges.Add(change);
            }

            _changesToNotify.Clear();

            // TODO: change this to match iOS and call cachedDocumentWithID
            var isExternal = false;
            foreach (var change in outgoingChanges) {
                var document = GetDocument(change.DocumentId);
                if (document == null) {
                    // GetDocument can return null (the newer overload in this file
                    // guards for this); without the check this line NREs.
                    continue;
                }

                document.RevisionAdded(change, true);
                if (change.SourceUrl != null) {
                    isExternal = true;
                }
            }

            var args = new DatabaseChangeEventArgs {
                Changes = outgoingChanges,
                IsExternal = isExternal,
                Source = this
            };

            // Copy the delegate reference so a concurrent unsubscribe between the
            // null check and the invocation cannot cause an NRE.
            var changeEvent = _changed;
            if (changeEvent != null) {
                changeEvent(this, args);
            }

            posted = true;
        } catch (Exception e) {
            Log.E(TAG, "Got exception posting change notifications", e);
        } finally {
            _isPostingChangeNotifications = false;
        }
    }

    return posted;
}
/// <summary>
/// Database change handler: schedules a deferred re-run of the query.
/// Purely external (replicated) changes wait twice the configured interval;
/// any local change drops the wait back to the configured interval.
/// </summary>
/// <param name="sender">The event source (unused).</param>
/// <param name="e">The change details; only SourceUrl of each change is inspected.</param>
private void OnDatabaseChanged (object sender, DatabaseChangeEventArgs e)
{
    // Only one update may be pending at a time.
    if (_willUpdate) {
        return;
    }

    // A change with no source URL originated locally.
    var hasLocalChange = false;
    foreach (var change in e.Changes) {
        if (change.SourceUrl == null) {
            hasLocalChange = true;
            break;
        }
    }

    var updateInterval = hasLocalChange ? _updateInterval : _updateInterval * 2;

    _willUpdate = true;
    Log.D(TAG, "Will update after {0} sec...", updateInterval);
    Task.Delay(TimeSpan.FromSeconds(updateInterval)).ContinueWith(t =>
    {
        // _willUpdate may have been cleared (e.g. query stopped) while waiting.
        if (_willUpdate) {
            Update();
        }
    });
}
/// <summary>
/// Database change handler for the pusher: queues each newly added revision
/// for replication unless it came from the remote we are syncing to or is
/// rejected by the push filter.
/// </summary>
/// <param name="sender">The event source (unused).</param>
/// <param name="args">The batch of document changes to consider.</param>
private void OnChanged(Object sender, DatabaseChangeEventArgs args)
{
    var changes = args.Changes;
    foreach (DocumentChange change in changes) {
        // Skip revisions that originally came from the database I'm syncing to:
        var source = change.SourceUrl;
        if (source != null && source.Equals(RemoteUrl)) {
            // Was 'return', which silently dropped every remaining change in the
            // batch instead of skipping just this one.
            continue;
        }

        var rev = change.AddedRevision;
        if (LocalDatabase.RunFilter(_filter, FilterParams, rev)) {
            AddToInbox(rev);
        }
    }
}
/// <summary>
/// Drains the queue of pending document changes and raises the Changed event
/// for each batch (legacy variant; the bool-returning overload supersedes it).
/// </summary>
internal void PostChangeNotifications()
{
    // This is a 'while' instead of an 'if' because when we finish posting notifications, there
    // might be new ones that have arrived as a result of notification handlers making document
    // changes of their own (the replicator manager will do this.) So we need to check again.
    while (transactionLevel == 0 && open && !PostingChangeNotifications && ChangesToNotify.Count > 0) {
        try {
            // Re-entrancy guard: handlers may trigger further notifications.
            PostingChangeNotifications = true;
            // Snapshot the pending changes so the queue can be cleared before
            // handlers run (they may enqueue new changes).
            IList<DocumentChange> outgoingChanges = new AList<DocumentChange>();
            foreach (var change in ChangesToNotify) {
                outgoingChanges.Add(change);
            }
            ChangesToNotify.Clear();
            // A change is "external" if any revision in the batch arrived from a
            // remote source (SourceUrl set by the replicator).
            Boolean isExternal = false;
            foreach (var change in outgoingChanges) {
                Document document = GetDocument(change.DocumentId);
                // NOTE(review): GetDocument may return null here (the newer overload
                // in this file guards for that) — confirm whether a null check is
                // needed before RevisionAdded.
                document.RevisionAdded(change);
                if (change.SourceUrl != null) {
                    isExternal = true;
                }
            }
            var args = new DatabaseChangeEventArgs {
                Changes = outgoingChanges,
                IsExternal = isExternal,
                Source = this
            } ;
            // Copy the delegate reference to avoid a race with unsubscription.
            var changeEvent = Changed;
            if (changeEvent != null)
                changeEvent(this, args);
        } catch (Exception e) {
            Log.E(Database.Tag, this + " got exception posting change notifications", e);
        } finally {
            PostingChangeNotifications = false;
        }
    }
}
/// <summary>
/// Database change handler: schedules a deferred re-run of the query, coalescing
/// bursts of changes. Purely external (replicated) changes wait twice the
/// configured interval; any local change uses the configured interval. The delay
/// is measured from the last update time and clamped to [0, _updateInterval].
/// </summary>
/// <param name="sender">The event source (unused).</param>
/// <param name="e">The change details; only SourceUrl of each change is inspected.</param>
private void OnDatabaseChanged (object sender, DatabaseChangeEventArgs e)
{
    // Only one update may be pending at a time.
    if (_willUpdate) {
        return;
    }

    // A change with no source URL originated locally.
    var hasLocalChange = false;
    foreach (var change in e.Changes) {
        if (change.SourceUrl == null) {
            hasLocalChange = true;
            break;
        }
    }

    var updateInterval = hasLocalChange ? _updateInterval : _updateInterval * 2;

    _willUpdate = true;

    // Schedule relative to the last completed update so rapid bursts coalesce
    // into one refresh; clamp so the delay is never negative nor longer than
    // the base interval.
    var nextUpdate = _lastUpdatedAt + TimeSpan.FromSeconds(updateInterval);
    var updateDelay = (nextUpdate - DateTime.Now).TotalSeconds;
    updateDelay = Math.Max(0, Math.Min(_updateInterval, updateDelay));

    Log.To.Query.I(TAG, "{0} will update after {1} sec...", this, updateDelay);
    Task.Delay(TimeSpan.FromSeconds(updateDelay)).ContinueWith(t =>
    {
        // _willUpdate may have been cleared (e.g. query stopped) while waiting.
        if (_willUpdate) {
            Update();
        }
    });
}
/// <summary>
/// Database change handler: immediately re-runs the query whenever the
/// underlying database reports a change.
/// </summary>
/// <param name="sender">The event source (unused).</param>
/// <param name="e">The change details (unused; any change triggers an update).</param>
private void OnDatabaseChanged (object sender, DatabaseChangeEventArgs e)
{
    Log.D(Tag, "OnDatabaseChanged() called");
    Update();
}
/// <summary>
/// Database change handler for the pusher: queues each newly added revision
/// for replication unless it came from the remote we are syncing to or is
/// rejected by the push filter.
/// </summary>
/// <param name="sender">The event source (unused).</param>
/// <param name="args">The batch of document changes to consider.</param>
internal void OnChanged(Object sender, DatabaseChangeEventArgs args)
{
    var changes = args.Changes;
    foreach (DocumentChange change in changes) {
        // Skip revisions that originally came from the database I'm syncing to:
        var source = change.SourceUrl;
        if (source != null && source.Equals(RemoteUrl)) {
            // Was 'return', which silently dropped every remaining change in the
            // batch instead of skipping just this one.
            continue;
        }

        var rev = change.AddedRevision;
        IDictionary<String, Object> paramsFixMe = null; // TODO: these should not be null
        if (LocalDatabase.RunFilter(filter, paramsFixMe, rev)) {
            AddToInbox(rev);
        }
    }
}
/// <summary>
/// Database change handler for the pusher: queues each newly added revision
/// for replication, skipping expirations, revisions that came from the remote
/// we are syncing to, and revisions rejected by the push filter.
/// </summary>
/// <param name="sender">The event source (unused).</param>
/// <param name="args">The batch of document changes to consider.</param>
private void OnChanged(Object sender, DatabaseChangeEventArgs args)
{
    var changes = args.Changes;
    foreach (DocumentChange change in changes) {
        // Skip expired documents
        if (change.IsExpiration) {
            // Was 'return', which silently dropped every remaining change in the
            // batch instead of skipping just this one.
            continue;
        }

        // Skip revisions that originally came from the database I'm syncing to:
        var source = change.SourceUrl;
        if (source != null && source.Equals(RemoteUrl)) {
            // Same fix as above: skip this change only, not the whole batch.
            continue;
        }

        var rev = change.AddedRevision;
        if (LocalDatabase.RunFilter(_filter, FilterParams, rev)) {
            Log.To.Sync.V(TAG, "{0} queuing {1} {2}", this, LocalDatabase.GetSequence(rev), rev);
            AddToInbox(rev);
        }
    }
}
/// <summary>
/// Timer callback: purges expired documents from storage, raises a Changed
/// event (with IsExpiration set) for each purged document, and reschedules
/// the next purge run.
/// </summary>
/// <param name="state">Timer state (unused).</param>
private void PurgeExpired(object state)
{
    // Guard clause instead of nesting the whole body in an 'if'.
    if (_purgeActive) {
        Log.To.Database.I(TAG, "Purge already running, will try again later...");
        return;
    }

    _purgeActive = true;
    try {
        Log.To.Database.V(TAG, "{0} running purge job NOW...", this);
        if (Storage == null || !Storage.IsOpen) {
            Log.To.Database.W(TAG, "{0} storage is null or closed, cannot run purge job, returning early...", this);
            // Early return skips rescheduling, matching the original behavior;
            // the finally below still clears _purgeActive (the original left it
            // set forever here, permanently disabling purging).
            return;
        }

        var results = Storage?.PurgeExpired() ?? new List<string>();
        if (results.Count > 0) {
            Log.To.Database.I(TAG, "{0} purged {1} expired documents", this, results.Count);

            // Copy the delegate reference to avoid a race with unsubscription.
            var changedEvt = _changed;
            if (changedEvt != null) {
                var changes = new List<DocumentChange>();
                foreach (var result in results) {
                    // Synthesize a change whose revision has only the document ID;
                    // IsExpiration lets handlers distinguish purges from edits.
                    var change = new DocumentChange(new RevisionInternal(result, null, true), null, false, null);
                    change.IsExpiration = true;
                    changes.Add(change);
                }

                var args = new DatabaseChangeEventArgs();
                args.Source = this;
                args.Changes = changes;
                changedEvt(this, args);
            }
        }

        SchedulePurgeExpired(TimeSpan.FromSeconds(1));
    } finally {
        // Always release the flag, even on early return or exception, so the
        // next timer tick can run the purge again.
        _purgeActive = false;
    }
}
/// <summary>
/// Drains the queue of pending document changes and schedules the Changed event
/// for each batch on the manager's captured context. Returns true if at least
/// one batch was posted.
/// </summary>
/// <returns><c>true</c> if any notifications were posted, otherwise <c>false</c>.</returns>
internal bool PostChangeNotifications()
{
    bool posted = false;
    // This is a 'while' instead of an 'if' because when we finish posting notifications, there
    // might be new ones that have arrived as a result of notification handlers making document
    // changes of their own (the replicator manager will do this.) So we need to check again.
    while((Storage == null || !Storage.InTransaction) && IsOpen && !_isPostingChangeNotifications
          && _changesToNotify != null && _changesToNotify.Count > 0) {
        try {
            // Re-entrancy guard: handlers may trigger further notifications.
            _isPostingChangeNotifications = true;
            // Snapshot the pending changes so the queue can be cleared before
            // handlers run (they may enqueue new changes).
            IList<DocumentChange> outgoingChanges = new List<DocumentChange>();
            foreach(var change in _changesToNotify) {
                outgoingChanges.Add(change);
            }
            _changesToNotify.Clear();

            // TODO: change this to match iOS and call cachedDocumentWithID
            var isExternal = false;
            foreach(var change in outgoingChanges) {
                var document = GetDocument(change.DocumentId);
                // The document may no longer be retrievable; skip it rather than NRE.
                if(document == null) {
                    continue;
                }

                document.RevisionAdded(change, true);
                // A change is "external" if it arrived from a remote source
                // (SourceUrl set by the replicator).
                if(change.SourceUrl != null) {
                    isExternal = true;
                }
            }

            var args = new DatabaseChangeEventArgs {
                Changes = outgoingChanges,
                IsExternal = isExternal,
                Source = this
            };

            Log.To.Database.I(TAG, "{0} posting change notifications: seq {1}", this,
                new LogJsonString(from change in outgoingChanges select change.AddedRevision.Sequence));
            Log.To.TaskScheduling.V(TAG, "Scheduling Change callback...");
            // Fire handlers asynchronously on the captured context so callers of
            // this method are not blocked by (or exposed to exceptions from) them.
            Manager.CapturedContext.StartNew(() => {
                // Copy the delegate reference to avoid a race with unsubscription.
                var changeEvent = _changed;
                if(changeEvent != null) {
                    Log.To.TaskScheduling.V(TAG, "Firing Change callback...");
                    changeEvent(this, args);
                } else {
                    Log.To.TaskScheduling.V(TAG, "Change callback is null, not firing...");
                }
            });
            posted = true;
        } catch(Exception e) {
            Log.To.Database.E(TAG, "Got exception posting change notifications", e);
        } finally {
            _isPostingChangeNotifications = false;
        }
    }

    return posted;
}