Example #1
        public async Task<StreamSequenceToken> DeliverBatch(GuidId subscriptionId, Immutable<IBatchContainer> batch, StreamSequenceToken prevToken)
        {
            foreach (var each in batch.Value.GetEvents<object>())
                await handler(each.Item1);

            return null;
        }
 public IQueueCacheCursor GetCacheCursor(Guid streamGuid, string streamNamespace, StreamSequenceToken token)
 {
     // NOTE: We assume the client ALWAYS wants to replay the whole dictionary; if it doesn't, it shouldn't be using this stream type in the first place.
     if (token != null && !(token is DictStreamToken))
     {
         // Null token can come from a stream subscriber that is just interested to start consuming from latest (the most recent event added to the cache).
         throw new ArgumentOutOfRangeException("token", "token must be of type DictStreamToken");
     }
     var dict = GetDict(streamNamespace, streamGuid);
     return new DictQueueCacheCursor(dict, streamNamespace, streamGuid);
 }
Example #3
 public override int CompareTo(StreamSequenceToken other)
 {
     if (other == null)
         return 1;
     
     var token = other as EventSequenceToken;
     if (token == null)
         throw new ArgumentOutOfRangeException("other");
     
     int difference = sequenceNumber.CompareTo(token.sequenceNumber);
     return difference != 0 ? difference : eventIndex.CompareTo(token.eventIndex);
 }
        public override int CompareTo(StreamSequenceToken other)
        {
            if (other == null)
                return 1;

            var token = other as TimeSequenceToken;
            if (token == null)
                throw new ArgumentOutOfRangeException(nameof(other));

            var difference = Timestamp.CompareTo(token.Timestamp);
            return difference != 0 ? difference : EventIndex.CompareTo(token.EventIndex);
        }
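Both CompareTo overrides above implement the same composite ordering: compare the primary component (sequence number or timestamp) first, and only fall back to the event index on a tie. A minimal, self-contained sketch of that pattern; the SampleSequenceToken type below is illustrative only, not an Orleans type.
 // Illustrative stand-alone token with the same two-level ordering as the examples above.
 public class SampleSequenceToken : IComparable<SampleSequenceToken>
 {
     public long SequenceNumber { get; }
     public int EventIndex { get; }

     public SampleSequenceToken(long sequenceNumber, int eventIndex = 0)
     {
         SequenceNumber = sequenceNumber;
         EventIndex = eventIndex;
     }

     public int CompareTo(SampleSequenceToken other)
     {
         if (other == null) return 1; // null sorts before every real token

         int difference = SequenceNumber.CompareTo(other.SequenceNumber);
         return difference != 0 ? difference : EventIndex.CompareTo(other.EventIndex);
     }
 }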
Example #5
 public StreamPosition(IStreamIdentity streamIdentity, StreamSequenceToken sequenceToken)
 {
     if (streamIdentity == null)
     {
         throw new ArgumentNullException("streamIdentity");
     }
     if (sequenceToken == null)
     {
         throw new ArgumentNullException("sequenceToken");
     }
     StreamIdentity = streamIdentity;
     SequenceToken = sequenceToken;
 }
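A StreamPosition just pairs the stream a token belongs to with the token itself, and both arguments are mandatory. A short construction sketch, reusing the StreamIdentity and EventSequenceToken types that appear elsewhere in this listing; the namespace string and token value are arbitrary placeholders.
 var identity = new StreamIdentity(Guid.NewGuid(), "sample-namespace"); // (stream guid, namespace)
 var position = new StreamPosition(identity, new EventSequenceToken(0)); // token of the first event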
Example #6
        public Task OnNextAsync(int item, StreamSequenceToken token = null)
        {
            logger.Info("OnNextAsync(item={0}, token={1})", item, token != null ? token.ToString() : "null");
            if (failPeriodTimer == null)
            {
                eventsConsumedCount++;
            }
            else if(failPeriodTimer.Elapsed >= failPeriod)
            {
                failPeriodTimer = null;
                eventsConsumedCount++;
            }
            else
            {
                eventsFailedCount++;
                throw new AggregateException("GO WAY!");
            }

            return TaskDone.Done;
        }
 private async Task<bool> ErrorProtocol(StreamConsumerData consumerData, Exception exceptionOccured, bool isDeliveryError, IBatchContainer batch, StreamSequenceToken token)
 {
     // notify consumer about the error or that the data is not available.
     await OrleansTaskExtentions.ExecuteAndIgnoreException(
         () => DeliverErrorToConsumer(
             consumerData, exceptionOccured, batch));
     // record that there was a delivery failure
     if (isDeliveryError)
     {
         await OrleansTaskExtentions.ExecuteAndIgnoreException(
             () => streamFailureHandler.OnDeliveryFailure(
                 consumerData.SubscriptionId, streamProviderName, consumerData.StreamId, token));
     }
     else
     {
         await OrleansTaskExtentions.ExecuteAndIgnoreException(
                () => streamFailureHandler.OnSubscriptionFailure(
                    consumerData.SubscriptionId, streamProviderName, consumerData.StreamId, token));
     }
     // if configured to fault on delivery failure and this is not an implicit subscription, fault and remove the subscription
     if (streamFailureHandler.ShouldFaultSubsriptionOnError && !SubscriptionMarker.IsImplicitSubscription(consumerData.SubscriptionId.Guid))
     {
         try
         {
             // notify consumer of faulted subscription, if we can.
             await OrleansTaskExtentions.ExecuteAndIgnoreException(
                 () => DeliverErrorToConsumer(
                     consumerData, new FaultedSubscriptionException(consumerData.SubscriptionId, consumerData.StreamId), batch));
             // mark subscription as faulted.
             await pubSub.FaultSubscription(consumerData.StreamId, consumerData.SubscriptionId);
         }
         finally
         {
             // remove subscription
             RemoveSubscriber_Impl(consumerData.SubscriptionId, consumerData.StreamId);
         }
         return true;
     }
     return false;
 }
        private async Task<bool> DoHandshakeWithConsumer(
            StreamConsumerData consumerData,
            StreamSequenceToken cacheToken)
        {
            StreamHandshakeToken requestedHandshakeToken = null;
            // if there is no cache, we can't get a cursor and there is no reason to ask the consumer for a token.
            if (queueCache != null)
            {
                Exception exceptionOccured = null;
                try
                {
                    requestedHandshakeToken = await AsyncExecutorWithRetries.ExecuteWithRetries(
                         i => consumerData.StreamConsumer.GetSequenceToken(consumerData.SubscriptionId),
                         AsyncExecutorWithRetries.INFINITE_RETRIES,
                         (exception, i) => true,
                         config.MaxEventDeliveryTime,
                         DefaultBackoffProvider);

                    if (requestedHandshakeToken != null)
                    {
                        consumerData.SafeDisposeCursor(logger);
                        consumerData.Cursor = queueCache.GetCacheCursor(consumerData.StreamId.Guid, consumerData.StreamId.Namespace, requestedHandshakeToken.Token);
                    }
                    else
                    {
                        if (consumerData.Cursor == null) // if the consumer did not ask for a specific token and we already have a cursor, just keep using it.
                            consumerData.Cursor = queueCache.GetCacheCursor(consumerData.StreamId.Guid, consumerData.StreamId.Namespace, cacheToken);
                    }
                }
                catch (Exception exception)
                {
                    exceptionOccured = exception;
                }
                if (exceptionOccured != null)
                {
                    bool faultedSubscription = await ErrorProtocol(consumerData, exceptionOccured, false, null, requestedHandshakeToken != null ? requestedHandshakeToken.Token : null);
                    if (faultedSubscription) return false;
                }
            }
            consumerData.LastToken = requestedHandshakeToken; // use whatever the consumer asked for as the LastToken for the next handshake (even if it asked for null).
            // if we don't yet have a cursor (because of handshake errors or a data-not-available exception), get a cursor at the event that triggered this consumer's subscription.
            if (consumerData.Cursor == null && queueCache != null)
            {
                try
                {
                    consumerData.Cursor = queueCache.GetCacheCursor(consumerData.StreamId.Guid, consumerData.StreamId.Namespace, cacheToken);
                }
                catch (Exception)
                {
                    consumerData.Cursor = queueCache.GetCacheCursor(consumerData.StreamId.Guid, consumerData.StreamId.Namespace, null); // just in case last GetCacheCursor failed.
                }
            }
            return true;
        }
Example #9
 internal void Set(LinkedListNode<SimpleQueueCacheItem> item)
 {
     Element = item;
     SequenceToken = item.Value.SequenceToken;
 }
Example #10
 public StreamConsumerData AddConsumer(GuidId subscriptionId, StreamId streamId, IStreamConsumerExtension streamConsumer, StreamSequenceToken token, IStreamFilterPredicateWrapper filter)
 {
     var consumerData = new StreamConsumerData(subscriptionId, streamId, streamConsumer, filter);
     queueData.Add(subscriptionId, consumerData);
     return consumerData;
 }
Example #11
            public int Compare(TestCachedMessage cachedMessage, StreamSequenceToken token)
            {
                var myToken = new EventSequenceToken(cachedMessage.SequenceNumber, cachedMessage.EventIndex);

                return(myToken.CompareTo(token));
            }
        //public virtual IQueueCacheCursor GetCacheCursor(Guid streamGuid, string streamNamespace, StreamSequenceToken token)
        public virtual IQueueCacheCursor GetCacheCursor(IStreamIdentity streamIdentity, StreamSequenceToken token)
        {
            if (token != null && !(token is EventSequenceToken))
            {
                // Null token can come from a stream subscriber that is just interested to
                // start consuming from latest (the most recent event added to the cache).
                throw new ArgumentOutOfRangeException(nameof(token), "token must be of type EventSequenceToken");
            }

            var cursor = new TimedQueueCacheCursor(this, streamIdentity.Guid, streamIdentity.Namespace, _logger);
            InitializeCursor(cursor, token);
            return cursor;
        }
Example #13
        public virtual void AddToCache(IList<IBatchContainer> msgs)
        {
            if (msgs == null) throw new ArgumentNullException("msgs");

            Log(logger, "AddToCache: added {0} items to cache.", msgs.Count);
            foreach (var message in msgs)
            {
                Add(message, message.SequenceToken);
                lastSequenceTokenAddedToCache = message.SequenceToken;
            }
        }
Example #14
        private async Task SendAndReceiveFromQueueAdapter(IQueueAdapterFactory adapterFactory, IProviderConfiguration config)
        {
            IQueueAdapter adapter = await adapterFactory.CreateAdapter();

            IQueueAdapterCache cache = adapterFactory.GetQueueAdapterCache();

            // Create receiver per queue
            IStreamQueueMapper mapper = adapterFactory.GetStreamQueueMapper();
            Dictionary <QueueId, IQueueAdapterReceiver> receivers = mapper.GetAllQueues().ToDictionary(queueId => queueId, adapter.CreateReceiver);
            Dictionary <QueueId, IQueueCache>           caches    = mapper.GetAllQueues().ToDictionary(queueId => queueId, cache.CreateQueueCache);

            await Task.WhenAll(receivers.Values.Select(receiver => receiver.Initialize(TimeSpan.FromSeconds(5))));

            // test using 2 streams
            Guid streamId1 = Guid.NewGuid();
            Guid streamId2 = Guid.NewGuid();

            int receivedBatches = 0;
            var streamsPerQueue = new ConcurrentDictionary <QueueId, HashSet <IStreamIdentity> >();

            // reader threads (at most 2 active queues because only two streams)
            var work = new List <Task>();

            foreach (KeyValuePair <QueueId, IQueueAdapterReceiver> receiverKvp in receivers)
            {
                QueueId queueId  = receiverKvp.Key;
                var     receiver = receiverKvp.Value;
                var     qCache   = caches[queueId];
                Task    task     = Task.Factory.StartNew(() =>
                {
                    while (receivedBatches < NumBatches)
                    {
                        var messages = receiver.GetQueueMessagesAsync(CloudQueueMessage.MaxNumberOfMessagesToPeek).Result.ToArray();
                        if (!messages.Any())
                        {
                            continue;
                        }
                        foreach (IBatchContainer message in messages)
                        {
                            streamsPerQueue.AddOrUpdate(queueId,
                                                        id => new HashSet <IStreamIdentity> {
                                new StreamIdentity(message.StreamGuid, message.StreamGuid.ToString())
                            },
                                                        (id, set) =>
                            {
                                set.Add(new StreamIdentity(message.StreamGuid, message.StreamGuid.ToString()));
                                return(set);
                            });
                            output.WriteLine("Queue {0} received message on stream {1}", queueId,
                                             message.StreamGuid);
                            Assert.Equal(NumMessagesPerBatch / 2, message.GetEvents <int>().Count());    // "Half the events were ints"
                            Assert.Equal(NumMessagesPerBatch / 2, message.GetEvents <string>().Count()); // "Half the events were strings"
                        }
                        Interlocked.Add(ref receivedBatches, messages.Length);
                        qCache.AddToCache(messages);
                    }
                });
                work.Add(task);
            }

            // send events
            List <object> events = CreateEvents(NumMessagesPerBatch);

            work.Add(Task.Factory.StartNew(() => Enumerable.Range(0, NumBatches)
                                           .Select(i => i % 2 == 0 ? streamId1 : streamId2)
                                           .ToList()
                                           .ForEach(streamId =>
                                                    adapter.QueueMessageBatchAsync(streamId, streamId.ToString(),
                                                                                   events.Take(NumMessagesPerBatch).ToArray(), null, RequestContext.Export(this.fixture.SerializationManager)).Wait())));
            await Task.WhenAll(work);

            // Make sure we got back everything we sent
            Assert.Equal(NumBatches, receivedBatches);

            // check to see if all the events are in the cache and we can enumerate through them
            StreamSequenceToken firstInCache = new EventSequenceTokenV2(0);

            foreach (KeyValuePair <QueueId, HashSet <IStreamIdentity> > kvp in streamsPerQueue)
            {
                var receiver = receivers[kvp.Key];
                var qCache   = caches[kvp.Key];

                foreach (IStreamIdentity streamGuid in kvp.Value)
                {
                    // read all messages in cache for stream
                    IQueueCacheCursor cursor         = qCache.GetCacheCursor(streamGuid, firstInCache);
                    int messageCount                 = 0;
                    StreamSequenceToken tenthInCache = null;
                    StreamSequenceToken lastToken    = firstInCache;
                    while (cursor.MoveNext())
                    {
                        Exception ex;
                        messageCount++;
                        IBatchContainer batch = cursor.GetCurrent(out ex);
                        output.WriteLine("Token: {0}", batch.SequenceToken);
                        Assert.True(batch.SequenceToken.CompareTo(lastToken) >= 0, $"order check for event {messageCount}");
                        lastToken = batch.SequenceToken;
                        if (messageCount == 10)
                        {
                            tenthInCache = batch.SequenceToken;
                        }
                    }
                    output.WriteLine("On Queue {0} we received a total of {1} message on stream {2}", kvp.Key, messageCount, streamGuid);
                    Assert.Equal(NumBatches / 2, messageCount);
                    Assert.NotNull(tenthInCache);

                    // read all messages from the 10th
                    cursor       = qCache.GetCacheCursor(streamGuid, tenthInCache);
                    messageCount = 0;
                    while (cursor.MoveNext())
                    {
                        messageCount++;
                    }
                    output.WriteLine("On Queue {0} we received a total of {1} message on stream {2}", kvp.Key, messageCount, streamGuid);
                    const int expected = NumBatches / 2 - 10 + 1; // everything from the 10th message onward (skip the first 9)
                    Assert.Equal(expected, messageCount);
                }
            }
        }
Example #15
 public Task OnNextAsync(T item, StreamSequenceToken token = null)
 {
     this.NumConsumed++;
     this.logger.Info($"Consumer {this.GetHashCode()} OnNextAsync() with NumConsumed {this.NumConsumed}");
     return(TaskDone.Done);
 }
Example #16
 public Task <StreamSubscriptionHandle <ToTP> > SubscribeAsync(IAsyncObserver <ToTP> observer, StreamSequenceToken token, StreamFilterPredicate filterFunc = null, object filterData = null)
 {
     throw new NotSupportedException();
 }
Example #17
 public Task OnNextBatchAsync(IEnumerable <ToTP> batch, StreamSequenceToken token = null)
 {
     return(Task.WhenAll(batch.Select(item => (_stream.OnNextAsync(item.AsReference <FromTP>(), token)))));
     //TODO: replace with the code below, as soon as stream.OnNextBatchAsync is supported.
     //return _stream.OnNextBatchAsync(batch.Select(x => x.AsReference<FromTP>), token); //not supported yet!
 }
Example #18
 public Task OnNextAsync(ToTP item, StreamSequenceToken token = null)
 {
     return(_stream.OnNextAsync(item.AsReference <FromTP>(), token));
 }
Example #19
 public Task OnNextAsync(StreamItem item, StreamSequenceToken token = null)
 {
     return(_consumer.OnNextAsync(item, token));
 }
        private void SetCursor(Cursor cursor, StreamSequenceToken sequenceToken)
        {
            // If nothing in cache, unset token, and wait for more data.
            if (messageBlocks.Count == 0)
            {
                cursor.State         = Cursor.States.Unset;
                cursor.SequenceToken = sequenceToken;
                return;
            }

            LinkedListNode <CachedMessageBlock <TQueueMessage, TCachedMessage> > newestBlock = messageBlocks.First;

            // if sequenceToken is null, iterate from newest message in cache
            if (sequenceToken == null)
            {
                cursor.State         = Cursor.States.Idle;
                cursor.CurrentBlock  = newestBlock;
                cursor.Index         = newestBlock.Value.NewestMessageIndex;
                cursor.SequenceToken = newestBlock.Value.NewestSequenceToken;
                return;
            }

            // If sequenceToken is too new to be in cache, unset token, and wait for more data.
            TCachedMessage newestMessage = newestBlock.Value.NewestMessage;

            if (cacheDataAdapter.CompareCachedMessageToSequenceToken(ref newestMessage, sequenceToken) < 0)
            {
                cursor.State         = Cursor.States.Unset;
                cursor.SequenceToken = sequenceToken;
                return;
            }

            // Check to see if sequenceToken is too old to be in cache
            TCachedMessage oldestMessage = messageBlocks.Last.Value.OldestMessage;

            if (cacheDataAdapter.CompareCachedMessageToSequenceToken(ref oldestMessage, sequenceToken) > 0)
            {
                // throw cache miss exception
                throw new QueueCacheMissException(sequenceToken, messageBlocks.Last.Value.OldestSequenceToken, messageBlocks.First.Value.NewestSequenceToken);
            }

            // Find block containing sequence number, starting from the newest and working back to oldest
            LinkedListNode <CachedMessageBlock <TQueueMessage, TCachedMessage> > node = messageBlocks.First;

            while (true)
            {
                TCachedMessage oldestMessageInBlock = node.Value.OldestMessage;
                if (cacheDataAdapter.CompareCachedMessageToSequenceToken(ref oldestMessageInBlock, sequenceToken) <= 0)
                {
                    break;
                }
                node = node.Next;
            }

            // return cursor from start.
            cursor.CurrentBlock = node;
            cursor.Index        = node.Value.GetIndexOfFirstMessageLessThanOrEqualTo(sequenceToken);
            // if cursor has been idle, move to next message after message specified by sequenceToken
            if (cursor.State == Cursor.States.Idle)
            {
                // if there are more messages in this block, move to next message
                if (!cursor.IsNewestInBlock)
                {
                    cursor.Index++;
                }
                // if this is the newest message in this block, move to oldest message in newer block
                else if (node.Previous != null)
                {
                    cursor.CurrentBlock = node.Previous;
                    cursor.Index        = cursor.CurrentBlock.Value.OldestMessageIndex;
                }
                else
                {
                    cursor.State = Cursor.States.Idle;
                    return;
                }
            }
            cursor.SequenceToken = cursor.CurrentBlock.Value.GetSequenceToken(cursor.Index);
            cursor.State         = Cursor.States.Set;
        }
 /// <summary>
 /// Sets sequence token by serializing it to property.
 /// </summary>
 /// <param name="token"></param>
 public virtual void SetSequenceToken(StreamSequenceToken token)
 {
     SequenceToken = token != null ? GetTokenBytes(token) : null;
 }
 private Task OnNextAsync(StreamImmutabilityTestObject myObject, StreamSequenceToken streamSequenceToken)
 {
     _myObject = myObject;
     return TaskDone.Done;
 }
Example #23
 public void Refresh(StreamSequenceToken token)
 {
 }
Example #24
        internal void ResetCursor(SimpleQueueCacheCursor cursor, StreamSequenceToken token)
        {
            Log(logger, "ResetCursor: {0} to token {1}", cursor, token);

            if (cursor.IsSet)
            {
                cursor.Element.Value.CacheBucket.UpdateNumCursors(-1);
            }
            cursor.Reset(token);
        }
 public Task OnDeliveryFailure(GuidId subscriptionId, string streamProviderName, IStreamIdentity streamIdentity,
                               StreamSequenceToken sequenceToken)
 {
     return(Task.CompletedTask);
 }
        private LinkedListNode<TimedQueueCacheItem> FindNodeBySequenceToken(StreamSequenceToken sequenceToken)
        {
            // First we find a bucket where the node is in
            var sequenceBucket =
                _cacheCursorHistogram.First(
                    bucket =>
                        !sequenceToken.Newer(bucket.NewestMember.Value.SequenceToken) &&
                        !sequenceToken.Older(bucket.OldestMember.Value.SequenceToken));

            // Now that we have the bucket, we iterate on the members there starting from the newest in the bucket
            LinkedListNode<TimedQueueCacheItem> node = sequenceBucket.NewestMember;
            while (node != null && node.Value.SequenceToken.Newer(sequenceToken))
            {
                // did we get to the end?
                // node is the last message in the cache
                if (node.Next == null)
                    break;

                // if sequenceId is between the two, take the lower
                if (node.Next.Value.SequenceToken.Older(sequenceToken))
                {
                    node = node.Next;
                    break;
                }

                node = node.Next;
            }

            return node;
        }
 public Task QueueMessageBatchAsync <T>(Guid streamGuid, string streamNamespace, IEnumerable <T> events, StreamSequenceToken token,
                                        Dictionary <string, object> requestContext)
 {
     return(TaskDone.Done);
 }
 private Task OnNext(int e, StreamSequenceToken token, int countCapture, Counter count)
 {
     logger.Info("Got next event {0} on handle {1}", e, countCapture);
     var contextValue = RequestContext.Get(SampleStreaming_ProducerGrain.RequestContextKey) as string;
     if (!String.Equals(contextValue, SampleStreaming_ProducerGrain.RequestContextValue))
     {
         throw new Exception(String.Format("Got the wrong RequestContext value {0}.", contextValue));
     }
     count.Increment();
     return TaskDone.Done;
 }
Example #29
 public IQueueCacheCursor GetCacheCursor(IStreamIdentity streamIdentity, StreamSequenceToken token)
 {
     return(new Cursor(cache, streamIdentity, token));
 }
Example #30
 internal void Reset(StreamSequenceToken token)
 {
     Element = null;
     SequenceToken = token;
 }
Example #31
 public Cursor(IEventHubQueueCache cache, IStreamIdentity streamIdentity, StreamSequenceToken token)
 {
     this.cache = cache;
     cursor     = cache.GetCursor(streamIdentity, token);
 }
        // Called by rendezvous when new remote subscriber subscribes to this stream.
        private async Task AddSubscriber_Impl(
            GuidId subscriptionId,
            StreamId streamId,
            IStreamConsumerExtension streamConsumer,
            StreamSequenceToken cacheToken,
            IStreamFilterPredicateWrapper filter)
        {
            if (IsShutdown) return;

            StreamConsumerCollection streamDataCollection;
            if (!pubSubCache.TryGetValue(streamId, out streamDataCollection))
            {
                streamDataCollection = new StreamConsumerCollection(DateTime.UtcNow);
                pubSubCache.Add(streamId, streamDataCollection);
            }

            StreamConsumerData data;
            if (!streamDataCollection.TryGetConsumer(subscriptionId, out data))
                data = streamDataCollection.AddConsumer(subscriptionId, streamId, streamConsumer, filter ?? DefaultStreamFilter);

            if (await DoHandshakeWithConsumer(data, cacheToken))
            {
                if (data.State == StreamConsumerDataState.Inactive)
                    RunConsumerCursor(data, data.Filter).Ignore(); // Start delivering events if not actively doing so
            }
        }
Example #33
 /// <summary>
 /// Compare a cached message with a sequence token to determine if the message is before or after the token
 /// </summary>
 /// <param name="cachedMessage"></param>
 /// <param name="comparer"></param>
 /// <param name="streamToken"></param>
 /// <returns></returns>
 public static int Compare <TCachedMessage>(this ICacheDataComparer <TCachedMessage> comparer, StreamSequenceToken streamToken, TCachedMessage cachedMessage)
 {
     return(0 - comparer.Compare(cachedMessage, streamToken));
 }
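The extension above exists so a comparer that only knows how to compare a cached message against a token can also be called with the operands reversed; swapping the operands of a three-way comparison is equivalent to negating its result. A tiny illustration with plain sequence numbers; both helper methods are illustrative only.
 // If CompareMessageToToken returns +1 when the message is newer than the token,
 // then comparing "token vs. message" must return -1, hence the "0 - ..." above.
 static int CompareMessageToToken(long messageSequence, long tokenSequence)
     => messageSequence.CompareTo(tokenSequence);

 static int CompareTokenToMessage(long tokenSequence, long messageSequence)
     => 0 - CompareMessageToToken(messageSequence, tokenSequence);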
        private async Task RegisterStream(StreamId streamId, StreamSequenceToken firstToken, DateTime now)
        {
            var streamData = new StreamConsumerCollection(now);
            pubSubCache.Add(streamId, streamData);
            // Create a fake cursor to point into a cache.
            // That way we will not purge the event from the cache, until we talk to pub sub.
            // This will help ensure the "causal consistency" between a pre-existing subscription (of a potentially already-subscribed consumer)
            // and later production.
            var pinCursor = queueCache.GetCacheCursor(streamId.Guid, streamId.Namespace, firstToken);

            try
            {
                await RegisterAsStreamProducer(streamId, firstToken);
            }finally
            {
                // Cleanup the fake pinning cursor.
                pinCursor.Dispose();
            }
        }
Example #35
        /// <summary>
        /// Writes a set of events to the queue as a single batch associated with the provided streamId.
        /// </summary>
        /// <typeparam name="T"></typeparam>
        /// <param name="streamGuid"></param>
        /// <param name="streamNamespace"></param>
        /// <param name="events"></param>
        /// <param name="token"></param>
        /// <param name="requestContext"></param>
        /// <returns></returns>
        public Task QueueMessageBatchAsync <T>(Guid streamGuid, string streamNamespace, IEnumerable <T> events, StreamSequenceToken token,
                                               Dictionary <string, object> requestContext)
        {
            if (token != null)
            {
                throw new NotImplementedException("EventHub stream provider currently does not support non-null StreamSequenceToken.");
            }
            EventData eventData = EventHubBatchContainer.ToEventData(this.SerializationManager, streamGuid, streamNamespace, events, requestContext);

#if NETSTANDARD
            return(client.SendAsync(eventData, streamGuid.ToString()));
#else
            return(client.SendAsync(eventData));
#endif
        }
        private async Task RegisterAsStreamProducer(StreamId streamId, StreamSequenceToken streamStartToken)
        {
            try
            {
                if (pubSub == null) throw new NullReferenceException("Found pubSub reference not set up correctly in RegisterAsStreamProducer");

                IStreamProducerExtension meAsStreamProducer = this.AsReference<IStreamProducerExtension>();
                ISet<PubSubSubscriptionState> streamData = await pubSub.RegisterProducer(streamId, streamProviderName, meAsStreamProducer);
                if (logger.IsVerbose) logger.Verbose((int)ErrorCode.PersistentStreamPullingAgent_16, "Got back {0} Subscribers for stream {1}.", streamData.Count, streamId);

                var addSubscriptionTasks = new List<Task>(streamData.Count);
                foreach (PubSubSubscriptionState item in streamData)
                {
                    addSubscriptionTasks.Add(AddSubscriber_Impl(item.SubscriptionId, item.Stream, item.Consumer, streamStartToken, item.Filter));
                }
                await Task.WhenAll(addSubscriptionTasks);
            }
            catch (Exception exc)
            {
                // RegisterAsStreamProducer is fired with .Ignore so we should log if anything goes wrong, because there is no one to catch the exception
                logger.Error((int)ErrorCode.PersistentStreamPullingAgent_17, "Ignored RegisterAsStreamProducer Error", exc);
                throw;
            }
        }
Example #37
 public Task OnNextAsync(int item, StreamSequenceToken token = null)
 {
     this.logger.LogInformation("OnNextAsync: item: {Item}, token = {Token}", item, token);
     return(Task.CompletedTask);
 }
 private static byte[] GetTokenBytes(StreamSequenceToken token)
 {
     var bodyStream = new BinaryTokenStreamWriter();
     SerializationManager.Serialize(token, bodyStream);
     return bodyStream.ToByteArray();
 }
Example #39
 internal void Reset(StreamSequenceToken token)
 {
     Element       = null;
     SequenceToken = token;
 }
Example #40
        public virtual IQueueCacheCursor GetCacheCursor(Guid streamGuid, string streamNamespace, StreamSequenceToken token)
        {
            if (token != null && !(token is EventSequenceToken))
            {
                // Null token can come from a stream subscriber that is just interested to start consuming from latest (the most recent event added to the cache).
                throw new ArgumentOutOfRangeException("token", "token must be of type EventSequenceToken");
            }

            var cursor = new SimpleQueueCacheCursor(this, streamGuid, streamNamespace, logger);
            InitializeCursor(cursor, token, true);
            return cursor;
        }
        private void Add(IBatchContainer batch, StreamSequenceToken sequenceToken)
        {
            if (batch == null) throw new ArgumentNullException(nameof(batch));

            var cacheBucket = GetOrCreateBucket();

            cacheBucket.UpdateNumItems(1);
            // Add message to linked list
            var item = new TimedQueueCacheItem
            {
                Batch = batch,
                SequenceToken = sequenceToken,
                CacheBucket = cacheBucket,
            };

            item.Timestamp = GetTimestampForItem(batch);

            var newNode = new LinkedListNode<TimedQueueCacheItem>(item);

            // If it's the first item in the bucket, also record it as the bucket's oldest member
            if (cacheBucket.NumCurrentItems == 1)
            {
                Log(_logger, "TimedQueueCache for QueueId:{0}, Add: The oldest timespan in the cache is {1}", Id.ToString(), item.Timestamp);
                cacheBucket.OldestMemberTimestamp = item.Timestamp;
                cacheBucket.OldestMember = newNode;
            }

            // Setting the newest member
            cacheBucket.NewestMemberTimestamp = item.Timestamp;
            cacheBucket.NewestMember = newNode;

            _cachedMessages.AddFirst(newNode);

            _counterMessagesInCache.Increment(Id.ToString(), 1);
        }
Example #42
        internal void InitializeCursor(SimpleQueueCacheCursor cursor, StreamSequenceToken sequenceToken, bool enforceSequenceToken)
        {
            Log(logger, "InitializeCursor: {0} to sequenceToken {1}", cursor, sequenceToken);
           
            if (cachedMessages.Count == 0) // nothing in cache
            {
                StreamSequenceToken tokenToReset = sequenceToken ?? (lastSequenceTokenAddedToCache != null ? ((EventSequenceToken)lastSequenceTokenAddedToCache).NextSequenceNumber() : null);
                ResetCursor(cursor, tokenToReset);
                return;
            }

            // if offset is not set, iterate from newest (first) message in cache, but not including the first message itself
            if (sequenceToken == null)
            {
                StreamSequenceToken tokenToReset = lastSequenceTokenAddedToCache != null ? ((EventSequenceToken)lastSequenceTokenAddedToCache).NextSequenceNumber() : null;
                ResetCursor(cursor, tokenToReset);
                return;
            }

            if (sequenceToken.Newer(cachedMessages.First.Value.SequenceToken)) // sequenceId is too new to be in cache
            {
                ResetCursor(cursor, sequenceToken);
                return;
            }

            LinkedListNode<SimpleQueueCacheItem> lastMessage = cachedMessages.Last;
            // Check to see if offset is too old to be in cache
            if (sequenceToken.Older(lastMessage.Value.SequenceToken))
            {
                if (enforceSequenceToken)
                {
                    // throw cache miss exception
                    throw new QueueCacheMissException(sequenceToken, cachedMessages.Last.Value.SequenceToken, cachedMessages.First.Value.SequenceToken);
                }
                sequenceToken = lastMessage.Value.SequenceToken;
            }

            // Now the requested sequenceToken is set and is also within the limits of the cache.

            // Find first message at or below offset
            // Events are ordered from newest to oldest, so iterate from start of list until we hit a node at a previous offset, or the end.
            LinkedListNode<SimpleQueueCacheItem> node = cachedMessages.First;
            while (node != null && node.Value.SequenceToken.Newer(sequenceToken))
            {
                // did we get to the end?
                if (node.Next == null) // node is the last message
                    break;
                
                // if sequenceId is between the two, take the higher
                if (node.Next.Value.SequenceToken.Older(sequenceToken))
                    break;
                
                node = node.Next;
            }

            // return cursor from start.
            SetCursor(cursor, node);
        }
Example #43
 /// <summary>
 /// cachePressureContribution should be a double between 0 and 1, indicating how much danger the item is in of being removed from the cache:
 ///   0 indicates no danger,
 ///   1 indicates removal is imminent.
 /// </summary>
 /// <param name="token"></param>
 /// <param name="cachePressureContribution"></param>
 /// <returns></returns>
 protected abstract bool TryCalculateCachePressureContribution(StreamSequenceToken token, out double cachePressureContribution);
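The contract above only fixes the meaning of the output; how the contribution is actually computed is left to the concrete cache. A hedged sketch of one plausible mapping, assuming the token and the cache bounds can be reduced to plain sequence numbers; this helper is illustrative and not the Orleans implementation.
 // Hypothetical helper: pressure is 0 at the newest cached item and approaches 1
 // as the token nears the oldest cached item (i.e. the item closest to eviction).
 static double CalculateCachePressureContribution(long tokenSequenceNumber, long oldestCachedSequenceNumber, long newestCachedSequenceNumber)
 {
     if (newestCachedSequenceNumber <= oldestCachedSequenceNumber)
         return 0; // empty or single-item cache: nothing meaningful to report

     double distanceFromNewest = newestCachedSequenceNumber - tokenSequenceNumber;
     double cacheSpan = newestCachedSequenceNumber - oldestCachedSequenceNumber;
     return Math.Min(1.0, Math.Max(0.0, distanceFromNewest / cacheSpan));
 }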
Example #44
        private void Add(IBatchContainer batch, StreamSequenceToken sequenceToken)
        {
            if (batch == null) throw new ArgumentNullException("batch");

            CacheBucket cacheBucket = null;
            if (cacheCursorHistogram.Count == 0)
            {
                cacheBucket = new CacheBucket();
                cacheCursorHistogram.Add(cacheBucket);
            }
            else
            {
                cacheBucket = cacheCursorHistogram[cacheCursorHistogram.Count - 1]; // last one
            }

            if (cacheBucket.NumCurrentItems == CACHE_HISTOGRAM_MAX_BUCKET_SIZE) // last bucket is full, open a new one
            {
                cacheBucket = new CacheBucket();
                cacheCursorHistogram.Add(cacheBucket);
            }

            // Add message to linked list
            var item = new SimpleQueueCacheItem
            {
                Batch = batch,
                SequenceToken = sequenceToken,
                CacheBucket = cacheBucket
            };

            cachedMessages.AddFirst(new LinkedListNode<SimpleQueueCacheItem>(item));
            cacheBucket.UpdateNumItems(1);

            if (Size > maxCacheSize)
            {
                //var last = cachedMessages.Last;
                cachedMessages.RemoveLast();
                var bucket = cacheCursorHistogram[0]; // same as:  var bucket = last.Value.CacheBucket;
                bucket.UpdateNumItems(-1);
                if (bucket.NumCurrentItems == 0)
                {
                    cacheCursorHistogram.RemoveAt(0);
                }
            }
        }
Example #45
 public override bool Equals(StreamSequenceToken other)
 {
     var token = other as EventSequenceToken;
     return token != null && (token.sequenceNumber == sequenceNumber &&
                              token.eventIndex == eventIndex);
 }
        internal void InitializeCursor(TimedQueueCacheCursor cursor, StreamSequenceToken sequenceToken)
        {
            Log(_logger, "TimedQueueCache for QueueId:{0}, InitializeCursor: {1} to sequenceToken {2}", Id.ToString(), cursor, sequenceToken);

            if (_cachedMessages.Count == 0) // nothing in cache
            {
                Log(_logger, "TimedQueueCache for QueueId:{0}, InitializeCursor: The TimedQueueCache is empty", Id.ToString());
                ResetCursor(cursor, sequenceToken);
                return;
            }

            // if offset is not set, iterate from newest (first) message in cache, but not 
            // including the first message itself
            if (sequenceToken == null)
            {
                LinkedListNode<TimedQueueCacheItem> firstMessage = _cachedMessages.First;
                ResetCursor(cursor, ((EventSequenceToken)firstMessage.Value.SequenceToken).NextSequenceNumber());
                return;
            }

            // Since we do not support finding a sequence of type x.y where y > 0, we round the token down
            var flooredToken = FloorSequenceToken(sequenceToken);

            if (flooredToken.Newer(FirstItem.SequenceToken)) // sequenceId is too new to be in cache
            {
                Log(_logger, "TimedQueueCache for QueueId:{0}, initializing with newer token", Id.ToString());
                ResetCursor(cursor, flooredToken);
                return;
            }

            // Check to see if offset is too old to be in cache
            if (flooredToken.Older(LastItem.SequenceToken))
            {
                // We don't throw cache misses here; we are more tolerant: start the cursor
                // from the last (oldest) message and log the incident
                _logger.Info("TimedQueueCache for QueueId:{0}, InitializeCursor: Sequence tried to subscribe with an older token: {0}, started instead from oldest token in cache which is: {1} and was inserted on {2}", Id.ToString(), sequenceToken, LastItem.SequenceToken, LastItem.Timestamp);
                SetCursor(cursor, _cachedMessages.Last);
                return;
            }

            // Now the requested sequenceToken is set and is also within the limits of the cache.
            var node = FindNodeBySequenceToken(flooredToken);

            // return cursor from start.
            SetCursor(cursor, node);
        }
Example #47
            public int CompareCachedMessageToSequenceToken(ref TestCachedMessage cachedMessage, StreamSequenceToken token)
            {
                var realToken = (EventSequenceToken)token;

                return(cachedMessage.SequenceNumber != realToken.SequenceNumber
                    ? (int)(cachedMessage.SequenceNumber - realToken.SequenceNumber)
                    : 0 - realToken.EventIndex);
            }
        internal void ResetCursor(TimedQueueCacheCursor cursor, StreamSequenceToken token)
        {
            Log(_logger, "TimedQueueCache for QueueId:{0}, ResetCursor: {0} to token {1}", Id.ToString(), cursor, token);

            if (cursor.IsSet)
            {
                cursor.NextElement.Value.CacheBucket.UpdateNumCursors(-1);
            }

            cursor.Reset(token);
        }
 public void Refresh(StreamSequenceToken token)
 {
     // do nothing
 }
        private StreamSequenceToken FloorSequenceToken(StreamSequenceToken token)
        {
            if (!(token is EventSequenceToken)) return token;
            EventSequenceToken tokenAsEventSequenceToken = (EventSequenceToken)token;
            if (tokenAsEventSequenceToken.EventIndex == 0) return token;

            EventSequenceToken flooredToken = new EventSequenceToken(tokenAsEventSequenceToken.SequenceNumber);
            return flooredToken;
        }
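Flooring keeps the batch-level SequenceNumber and drops the EventIndex, so a cursor derived from the floored token can only be positioned at a batch boundary. A short illustration using the two EventSequenceToken constructors that appear elsewhere in this listing; the numbers are arbitrary.
 var requested = new EventSequenceToken(5, 3);                      // event 3 inside batch 5
 var floored   = new EventSequenceToken(requested.SequenceNumber);  // batch 5, event index 0
 // FloorSequenceToken(requested) returns the equivalent of 'floored'.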
Example #51
 /// <summary>
 /// Get a cursor into the cache to read events from a stream.
 /// </summary>
 /// <param name="streamIdentity"></param>
 /// <param name="sequenceToken"></param>
 /// <returns></returns>
 public object GetCursor(IStreamIdentity streamIdentity, StreamSequenceToken sequenceToken)
 {
     return(cache.GetCursor(streamIdentity, sequenceToken));
 }