public void TestBatcherCancel()
        {
            var mre       = new ManualResetEventSlim();
            var scheduler = new SingleTaskThreadpoolScheduler();
            var batcher   = new Batcher<int>(new TaskFactory(scheduler), 5, 500, (inbox) =>
            {
                mre.Set();
            });

            batcher.QueueObject(0);
            batcher.Clear();
            Assert.False(mre.Wait(TimeSpan.FromSeconds(1)), "Batcher ran after being cancelled");
        }
        public void TestBatcherAddAfterCancel()
        {
            var evt       = new CountdownEvent(2);
            var scheduler = new SingleTaskThreadpoolScheduler();
            var batcher   = new Batcher<int>(new TaskFactory(scheduler), 5, 500, (inbox) =>
            {
                evt.Signal();
            });

            batcher.QueueObject(0);
            batcher.Clear();
            batcher.QueueObject(0);
            Assert.False(evt.Wait(TimeSpan.FromSeconds(1)), "Batcher ran too many times");
            Assert.True(evt.CurrentCount == 1, "Batcher never ran");
        }
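Both tests above hinge on Clear() discarding queued items and cancelling the pending flush so the processor never fires for them, while items queued afterwards are still processed. The following is a minimal sketch of those semantics using a hypothetical stand-in class (MiniBatcher), not the real Couchbase Lite Batcher; the CancellationTokenSource-based scheduling is an assumption made purely for illustration.

        // Minimal sketch only: a hypothetical MiniBatcher<T> that mimics the
        // QueueObject/Clear/Flush semantics exercised by the tests above.
        // It is NOT the real Couchbase Lite Batcher implementation.
        using System;
        using System.Collections.Generic;
        using System.Threading;
        using System.Threading.Tasks;

        public sealed class MiniBatcher<T>
        {
            private readonly List<T> _inbox = new List<T>();
            private readonly TimeSpan _delay;
            private readonly Action<IList<T>> _processor;
            private CancellationTokenSource _cts; // cancels the pending flush

            public MiniBatcher(TimeSpan delay, Action<IList<T>> processor)
            {
                _delay = delay;
                _processor = processor;
            }

            public void QueueObject(T item)
            {
                lock (_inbox) {
                    _inbox.Add(item);
                    if (_cts == null) {
                        // Schedule a delayed flush; Clear() can still cancel it.
                        _cts = new CancellationTokenSource();
                        var token = _cts.Token;
                        Task.Delay(_delay, token).ContinueWith(_ => Flush(), token,
                            TaskContinuationOptions.OnlyOnRanToCompletion, TaskScheduler.Default);
                    }
                }
            }

            public void Clear()
            {
                lock (_inbox) {
                    _cts?.Cancel();  // the pending flush never runs
                    _cts = null;
                    _inbox.Clear();  // queued items are discarded
                }
            }

            public void Flush()
            {
                List<T> batch;
                lock (_inbox) {
                    batch = new List<T>(_inbox);
                    _inbox.Clear();
                    _cts = null;
                }
                if (batch.Count > 0) {
                    _processor(batch);
                }
            }
        }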
Example #3
        /// <summary>
        /// Stops the <see cref="Couchbase.Lite.Replication"/>.
        /// </summary>
        public virtual void Stop()
        {
            if (!IsRunning)
            {
                return;
            }

            Log.V(Database.Tag, ToString() + " STOPPING...");
            Batcher.Clear();
            // no sense processing any pending changes
            continuous = false;
            StopRemoteRequests();
            CancelPendingRetryIfReady();
            LocalDatabase.ForgetReplication(this);

            if (IsRunning && asyncTaskCount == 0)
            {
                Stopped();
            }
        }
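For context, here is a hedged usage sketch showing how Stop() is typically reached from application code. It follows the Couchbase Lite 1.x .NET API surface (Manager.SharedInstance, CreatePushReplication, Continuous, Start, Stop); treat those member names as assumptions rather than a verified reference.

        // Hedged usage sketch: starting and later stopping a continuous push
        // replication (Couchbase Lite 1.x-style API; names are assumptions).
        using System;
        using Couchbase.Lite;

        static class ReplicationStopExample
        {
            public static void Run()
            {
                var manager = Manager.SharedInstance;
                var db = manager.GetDatabase("example-db");

                // Hypothetical remote endpoint.
                var push = db.CreatePushReplication(new Uri("http://example.com:4984/example-db"));
                push.Continuous = true;
                push.Start();

                // ... later, during shutdown:
                // Stop() clears the pending batch, halts remote requests and
                // retries, and fires Stopped() once async work has drained.
                push.Stop();
            }
        }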
Example #4
        public void TestBatcherCancel()
        {
            var mre       = new ManualResetEventSlim();
            var scheduler = new SingleTaskThreadpoolScheduler();

            var batcher = new Batcher<int>(new BatcherOptions<int> {
                WorkExecutor = new TaskFactory(scheduler),
                Capacity     = 5,
                Delay        = TimeSpan.FromMilliseconds(500),
                Processor    = (inbox) => mre.Set()
            });

            batcher.QueueObject(0);
            mre.Wait(1000).Should().BeTrue("because otherwise the batcher didn't initially run");
            mre.Reset();

            batcher.QueueObject(0);
            batcher.Clear();
            mre.Wait(TimeSpan.FromSeconds(1)).Should().BeFalse("because otherwise the batcher ran after being cancelled");
        }
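The cancel test above treats a timed-out Wait() as proof that the processor never ran after Clear(). For reference, the relevant ManualResetEventSlim behaviour from the standard System.Threading API:

        // Standard System.Threading.ManualResetEventSlim behaviour relied on above.
        using System;
        using System.Threading;

        static class ManualResetEventSlimDemo
        {
            public static void Run()
            {
                var mre = new ManualResetEventSlim();

                Console.WriteLine(mre.Wait(100)); // False: nothing has called Set()

                mre.Set();
                Console.WriteLine(mre.Wait(0));   // True: the event is signalled

                mre.Reset();                      // back to unsignalled
                Console.WriteLine(mre.Wait(100)); // False: times out again
            }
        }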
Example #5
        public void TestBatcherAddAfterCancel()
        {
            var evt       = new CountdownEvent(1);
            var scheduler = new SingleTaskThreadpoolScheduler();
            var batcher   = new Batcher<int>(new BatcherOptions<int> {
                WorkExecutor = new TaskFactory(scheduler),
                Capacity     = 5,
                Delay        = TimeSpan.FromMilliseconds(500),
                Processor    = (inbox) => evt.Signal()
            });

            batcher.QueueObject(0);
            evt.Wait(1000).Should().BeTrue("because otherwise the batcher didn't initially run");
            evt.Reset(2);

            batcher.QueueObject(0);
            batcher.Clear();
            batcher.QueueObject(0);
            evt.Wait(TimeSpan.FromSeconds(1.5)).Should().BeFalse("because otherwise the batcher ran too many times");
            evt.CurrentCount.Should().Be(1, "because otherwise the batcher never ran");
        }
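The assertions above depend on CountdownEvent semantics: Reset(2) re-arms the event with a count of two, each processor run calls Signal() once, so a single post-Clear run leaves CurrentCount at 1 and Wait() times out. A small standalone illustration of those standard System.Threading calls:

        // Standard System.Threading.CountdownEvent behaviour relied on above.
        using System;
        using System.Threading;

        static class CountdownEventDemo
        {
            public static void Run()
            {
                var evt = new CountdownEvent(1);

                evt.Signal();                        // count: 1 -> 0, event is set
                Console.WriteLine(evt.Wait(0));      // True: already signalled

                evt.Reset(2);                        // re-arm with a count of 2
                evt.Signal();                        // count: 2 -> 1, still unset

                Console.WriteLine(evt.Wait(100));    // False: times out at count 1
                Console.WriteLine(evt.CurrentCount); // 1
            }
        }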
Example #6
        public void TestBatcherAddAfterCancel()
        {
            var evt = new CountdownEvent(1);
            var scheduler = new SingleTaskThreadpoolScheduler();
            var batcher = new Batcher<int>(new TaskFactory(scheduler), 5, 500, (inbox) =>
            {
                evt.Signal();
            });

            batcher.QueueObject(0);
            Assert.IsTrue(evt.Wait(1000), "Batcher didn't initially run");
            evt.Reset(2);

            batcher.QueueObject(0);
            batcher.Clear();
            batcher.QueueObject(0);
            Assert.False(evt.Wait(TimeSpan.FromSeconds(1.5)), "Batcher ran too many times");
            Assert.True(evt.CurrentCount == 1, "Batcher never ran");
        }
Example #7
        public void TestBatcherCancel()
        {
            var mre = new ManualResetEventSlim();
            var scheduler = new SingleTaskThreadpoolScheduler();
            var batcher = new Batcher<int>(new TaskFactory(scheduler), 5, 500, (inbox) =>
            {
                mre.Set();
            });

            batcher.QueueObject(0);
            Assert.IsTrue(mre.Wait(1000), "Batcher didn't initially run");
            mre.Reset();

            batcher.QueueObject(0);
            batcher.Clear();
            Assert.False(mre.Wait(TimeSpan.FromSeconds(1)), "Batcher ran after being cancelled");
        }
Example #8
        /// <summary>
        /// Recalculates the debug visualization for the nodes and their connections.
        /// </summary>
        public void RecalculateDebug()
        {
            DisposeDebugNativeDatastructures();
            int numNodes = gridDepth * gridWidth;

            // prepare the job that calculates the vertices for the neighbor connection lines
            int arrayLength = numNodes * NodeNeighbors * 2;

            connectionsMeshVertices = new NativeArray<Vector3>(arrayLength, Allocator.Persistent);
            connectionsMeshIndices  = new NativeArray<int>(arrayLength, Allocator.Persistent);

            CalculateConnectionMeshJob calcConnectionsMeshJob = new CalculateConnectionMeshJob(NodeNeighbors, nodesTransforms, nodesNeighbors, connectionsMeshVertices, connectionsMeshIndices);
            JobHandle calcConnectionMeshHandle = calcConnectionsMeshJob.Schedule(numNodes, 8);

            // do other required stuff before calling complete so we have actual parallelism
            MeshRenderer mr = Utils.GetOrAddComponent<MeshRenderer>(transform, out bool createdRenderer);

            mr.shadowCastingMode    = ShadowCastingMode.Off;
            mr.sharedMaterial       = nodeConnectionsMaterial;
            mr.lightProbeUsage      = LightProbeUsage.Off;
            mr.reflectionProbeUsage = ReflectionProbeUsage.Off;
            mr.enabled = showNodesConnections;

            MeshFilter filter = Utils.GetOrAddComponent<MeshFilter>(transform, out bool createdFilter);

            filter.sharedMesh = connectionsMesh;

            // the nodes themselves
            nodeBatcher.Clear();

            if (showNodes)
            {
                for (int i = 0; i < numNodes; i++)
                {
                    NodeTransform nt       = nodesTransforms[i];
                    NodeType      nodeType = nodesTypes[i];

                    Color32 c;

                    if (nodeType == NodeType.Invalid)
                    {
                        c = invalidNodeColor;
                    }
                    else if (nodeType == NodeType.OccupiedByObstacle)
                    {
                        c = nonWalkableNodeColor;
                    }
                    else
                    {
                        c = walkableNodeColor;
                    }

                    Vector3   pos = nt.Pos + (nt.Up * NodeVisualNormalOffset);
                    Matrix4x4 trs = Matrix4x4.TRS(pos, nt.GetRotation(), Vector3.one);

                    // batch each node quad debug
                    nodeBatcher.AddItem(c, trs);
                }
            }

            calcConnectionMeshHandle.Complete();

            // set the mesh using the results of the job
            connectionsMesh.SetVertices(calcConnectionsMeshJob.vertices);
            connectionsMesh.SetIndices(calcConnectionsMeshJob.indices, MeshTopology.Lines, 0);
        }
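RecalculateDebug() schedules the connection-mesh job, does the renderer and batching work on the main thread, and only then calls Complete(), so the job runs in parallel with that work. Below is a minimal, self-contained sketch of the same schedule-work-complete pattern with Unity's job system; HypotheticalScaleJob and its fields are illustrative stand-ins, not the actual CalculateConnectionMeshJob.

        // Minimal sketch of the schedule -> do other main-thread work -> Complete()
        // pattern used above. The job below is hypothetical.
        using Unity.Collections;
        using Unity.Jobs;
        using UnityEngine;

        struct HypotheticalScaleJob : IJobParallelFor
        {
            [ReadOnly] public NativeArray<Vector3> input;
            public NativeArray<Vector3> output;
            public float scale;

            public void Execute(int index)
            {
                output[index] = input[index] * scale;
            }
        }

        public class ScheduleThenCompleteExample : MonoBehaviour
        {
            private void Update()
            {
                const int count = 1024;
                var input  = new NativeArray<Vector3>(count, Allocator.TempJob);
                var output = new NativeArray<Vector3>(count, Allocator.TempJob);

                var job = new HypotheticalScaleJob { input = input, output = output, scale = 2.0f };
                JobHandle handle = job.Schedule(count, 64); // batches of 64 indices per worker

                // Do unrelated main-thread work here so the job runs in parallel...

                handle.Complete(); // block only when the results are actually needed

                input.Dispose();
                output.Dispose();
            }
        }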
        internal override void BeginReplicating()
        {
            // If we're still waiting to create the remote db, do nothing now. (This method will be
            // re-invoked after that request finishes; see maybeCreateRemoteDB() above.)
            if (_creatingTarget)
            {
                Log.To.Sync.D(TAG, "creatingTarget == true, doing nothing");
                return;
            }

            _pendingSequences = new SortedDictionary<long, int>();
            if (!Int64.TryParse(LastSequence, out _maxPendingSequence))
            {
                Log.To.Sync.W(TAG, "{0} is not a valid last sequence, using 0", LastSequence);
                _maxPendingSequence = 0;
            }

            if (Filter != null)
            {
                _filter = LocalDatabase.GetFilter(Filter);
            }
            else
            {
                // If no filter function was provided, but DocIds were
                // specified, then only push the documents listed in the
                // DocIds property. It is assumed that if the user
                // specified both a filter name and doc IDs, their custom
                // filter function will handle that. This is consistent
                // with the iOS behavior.
                if (DocIds != null && DocIds.Any())
                {
                    _filter = (rev, filterParams) => DocIds.Contains(rev.Document.Id);
                }
            }

            if (Filter != null && _filter == null)
            {
                Log.To.Sync.W(TAG, "{0}: No ReplicationFilter registered for filter '{1}'; ignoring", this, Filter);
            }

            // Process existing changes since the last push:
            long lastSequenceLong = 0;

            if (LastSequence != null)
            {
                lastSequenceLong = long.Parse(LastSequence);
            }

            if (ReplicationOptions.PurgePushed)
            {
                _purgeQueue = new Batcher<RevisionInternal>(new BatcherOptions<RevisionInternal> {
                    WorkExecutor = WorkExecutor,
                    Capacity     = EphemeralPurgeBatchSize,
                    Delay        = EphemeralPurgeDelay,
                    Processor    = PurgeRevs,
                    TokenSource  = CancellationTokenSource
                });
            }

            // Now listen for future changes (in continuous mode):
            // Note:  The observer has to be added before the call to
            // ChangesSince, or else there is a race condition:
            // a document could be added between the call to
            // ChangesSince and adding the observer, which would result
            // in that document being skipped
            if (Continuous)
            {
                _observing             = true;
                LocalDatabase.Changed += OnChanged;
            }

            var options = ChangesOptions.Default;

            options.IncludeConflicts = true;
            var  changes  = LocalDatabase.ChangesSinceStreaming(lastSequenceLong, options, _filter, FilterParams);
            bool hasItems = changes.Any();

            foreach (var change in changes)
            {
                Batcher.QueueObject(change);
                if (Status == ReplicationStatus.Stopped)
                {
                    Batcher.Clear();
                    return;
                }
            }

            if (hasItems)
            {
                Batcher.Flush();
            }

            if (Continuous)
            {
                if (!hasItems)
                {
                    Log.To.Sync.V(TAG, "No changes to push, switching to idle...");
                    FireTrigger(ReplicationTrigger.WaitingForChanges);
                }
            }
            else
            {
                if (!hasItems)
                {
                    Log.To.Sync.V(TAG, "No changes to push, firing StopGraceful...");
                    FireTrigger(ReplicationTrigger.StopGraceful);
                }
            }
        }
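The DocIds branch above means that, when no named filter is registered, a caller can restrict a push to an explicit list of documents. A hedged usage sketch in the same Couchbase Lite 1.x-style API as before (the DocIds and CreatePushReplication member names are assumptions for illustration):

        // Hedged usage sketch: pushing only the documents named in DocIds.
        // Member names follow the 1.x-style API shown above and are assumptions.
        using System;
        using Couchbase.Lite;

        static class DocIdPushExample
        {
            public static void Run(Database db)
            {
                var push = db.CreatePushReplication(new Uri("http://example.com:4984/example-db"));

                // No Filter is set, so BeginReplicating() falls back to DocIds:
                // only these documents are pushed.
                push.DocIds = new[] { "doc-1", "doc-2" };
                push.Start();
            }
        }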