public Message Add(MessageIdData messageId, uint redeliveryCount, MessageMetadata metadata, ReadOnlySequence<byte> data)
{
    lock (_lock)
    {
        if (_trackBatches)
            _batches.AddLast(new Batch(messageId, metadata.NumMessagesInBatch));

        long index = 0;
        for (var i = 0; i < metadata.NumMessagesInBatch; ++i)
        {
            // Each batch entry is laid out as:
            // [4-byte size, read big-endian here][SingleMessageMetadata][payload]
            var singleMetadataSize = data.ReadUInt32(index, true);
            index += 4;
            var singleMetadata = Serializer.Deserialize<SingleMessageMetadata>(data.Slice(index, singleMetadataSize));
            index += singleMetadataSize;
            var singleMessageId = new MessageId(messageId.LedgerId, messageId.EntryId, messageId.Partition, i);
            var message = new Message(singleMessageId, redeliveryCount, metadata, singleMetadata, data.Slice(index, singleMetadata.PayloadSize));
            _messages.Enqueue(message);
            index += (uint)singleMetadata.PayloadSize;
        }

        return _messages.Dequeue();
    }
}
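// A minimal test-side sketch (not part of the library) of how a single batch entry
// with the layout Add() expects could be assembled. It assumes protobuf-net for the
// metadata encoding; the big-endian size prefix mirrors the ReadUInt32(index, true)
// call above.
using System;
using System.Buffers;
using System.Buffers.Binary;
using System.IO;

static ReadOnlySequence<byte> BuildBatchEntry(byte[] payload)
{
    var single = new SingleMessageMetadata { PayloadSize = payload.Length };
    using var stream = new MemoryStream();
    ProtoBuf.Serializer.Serialize(stream, single);
    var metaBytes = stream.ToArray();

    // [4-byte big-endian metadata size][SingleMessageMetadata][payload]
    var buffer = new byte[4 + metaBytes.Length + payload.Length];
    BinaryPrimitives.WriteUInt32BigEndian(buffer, (uint)metaBytes.Length);
    metaBytes.CopyTo(buffer, 4);
    payload.CopyTo(buffer, 4 + metaBytes.Length);
    return new ReadOnlySequence<byte>(buffer);
}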
public static void MapFrom(this MessageIdData destination, MessageId source)
{
    destination.LedgerId = source.LedgerId;
    destination.EntryId = source.EntryId;
    destination.Partition = source.Partition;
    destination.BatchIndex = source.BatchIndex;
}
public MessageIdData? Acknowledge(MessageIdData messageId)
{
    lock (_lock)
    {
        foreach (var batch in _batches)
        {
            // Match on ledger, entry, and partition to find the owning batch.
            if (messageId.LedgerId != batch.MessageId.LedgerId ||
                messageId.EntryId != batch.MessageId.EntryId ||
                messageId.Partition != batch.MessageId.Partition)
                continue;

            batch.Acknowledge(messageId.BatchIndex);
            if (batch.IsAcknowledged())
            {
                // Every entry in the batch is acknowledged; the whole batch
                // message id can now be acknowledged upstream.
                _batches.Remove(batch);
                return batch.MessageId;
            }
            break;
        }

        return null;
    }
}
public static IMessageId FromByteArrayWithTopic(byte[] data, TopicName topicName)
{
    if (data == null)
        throw new ArgumentNullException(nameof(data));

    var idData = new MessageIdData();
    // NOTE: as written, idData is never populated from 'data'; the protobuf
    // merge/deserialize step is missing in this port, so every field keeps
    // its default value.
    IMessageId messageId;
    if (idData.BatchIndex >= 0)
    {
        messageId = new BatchMessageId((long)idData.ledgerId, (long)idData.entryId, idData.Partition, idData.BatchIndex, idData.BatchSize, BatchMessageAcker.NewAcker(idData.BatchSize));
    }
    else
    {
        messageId = new MessageId((long)idData.ledgerId, (long)idData.entryId, idData.Partition);
    }

    if (idData.Partition > -1 && topicName != null)
    {
        var t = new TopicName();
        messageId = new TopicMessageId(t.GetPartition(idData.Partition).ToString(), topicName.ToString(), messageId);
    }

    return messageId;
}
public MessageReceived(MessageMetadata metadata, ReadOnlySequence<byte> payload, MessageIdData messageId, int redeliveryCount, bool checkSum, short magicNumber)
{
    MessageId = messageId;
    Payload = payload;
    RedeliveryCount = redeliveryCount;
    Metadata = metadata;
    CheckSum = checkSum;
    MagicNumber = magicNumber;
}
private async ValueTask Acknowledge(MessageIdData messageIdData, CommandAck.AckType ackType, CancellationToken cancellationToken)
{
    await _executor.Execute(() =>
    {
        _cachedCommandAck.Type = ackType;
        _cachedCommandAck.MessageIds.Clear();
        _cachedCommandAck.MessageIds.Add(messageIdData);
        return Stream.Send(_cachedCommandAck);
    }, cancellationToken);
}
private async ValueTask Acknowledge(MessageIdData messageIdData, CommandAck.AckType ackType, CancellationToken cancellationToken)
{
    ThrowIfDisposed();
    await _executor.Execute(() =>
    {
        _cachedCommandAck.Type = ackType;
        _cachedCommandAck.MessageIds.Clear();
        _cachedCommandAck.MessageIds.Add(messageIdData);
        return _channel.Send(_cachedCommandAck, cancellationToken);
    }, cancellationToken).ConfigureAwait(false);
}
public static ReadOnlySequence<byte> NewSeek(long consumerId, long requestId, long ledgerId, long entryId, long[] ackSet)
{
    var seek = new CommandSeek
    {
        ConsumerId = (ulong)consumerId,
        RequestId = (ulong)requestId
    };
    var messageId = new MessageIdData
    {
        ledgerId = (ulong)ledgerId,
        entryId = (ulong)entryId,
        AckSets = ackSet
    };
    seek.MessageId = messageId;
    return Serializer.Serialize(seek.ToBaseCommand());
}
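// Hypothetical call site for NewSeek (all values illustrative): build a seek frame
// targeting a concrete ledger/entry position with no batch ack set, ready to be
// written to the broker connection.
var seekFrame = NewSeek(consumerId: 1, requestId: 42, ledgerId: 1024, entryId: 7, ackSet: null);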
private async ValueTask Acknowledge(MessageIdData messageIdData, CommandAck.AckType ackType, CancellationToken cancellationToken)
{
    ThrowIfDisposed();
    var commandAck = _commandAckPool.Get();
    commandAck.Type = ackType;
    commandAck.MessageIds.Clear();
    commandAck.MessageIds.Add(messageIdData);
    try
    {
        await _executor.Execute(() => Acknowledge(commandAck, cancellationToken), cancellationToken).ConfigureAwait(false);
    }
    finally
    {
        // Always return the pooled command, even if sending faults.
        _commandAckPool.Return(commandAck);
    }
}
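// One way the pool behind _commandAckPool could be constructed (an assumption, not
// necessarily this codebase's choice), using Microsoft.Extensions.ObjectPool; the
// default policy requires CommandAck to have a parameterless constructor.
using Microsoft.Extensions.ObjectPool;

var provider = new DefaultObjectPoolProvider();
ObjectPool<CommandAck> commandAckPool = provider.Create(new DefaultPooledObjectPolicy<CommandAck>());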
// Serialization
public static IMessageId FromByteArray(byte[] data)
{
    if (data == null)
        throw new ArgumentNullException(nameof(data));

    var inputStream = new CodedInputStream(data);
    var idData = new MessageIdData();
    // NOTE: the merge step is commented out in this port, so idData is never
    // actually populated from 'data'.
    //idData.MergeFrom(inputStream);

    MessageId messageId;
    if (idData.BatchIndex >= 0)
    {
        if (idData.BatchSize > 0)
        {
            messageId = new BatchMessageId((long)idData.ledgerId, (long)idData.entryId, idData.Partition, idData.BatchIndex, idData.BatchSize, BatchMessageAcker.NewAcker(idData.BatchSize));
        }
        else
        {
            messageId = new BatchMessageId((long)idData.ledgerId, (long)idData.entryId, idData.Partition, idData.BatchIndex);
        }
    }
    else
    {
        messageId = new MessageId((long)idData.ledgerId, (long)idData.entryId, idData.Partition);
    }

    return messageId;
}
// batchIndex is -1 for a non-batched message and holds the index within the batch for a batch message
public virtual byte[] ToByteArray(int batchIndex, int batchSize)
{
    var msgId = new MessageIdData
    {
        ledgerId = (ulong)_ledgerId,
        entryId = (ulong)_entryId
    };
    if (_partitionIndex >= 0)
        msgId.Partition = _partitionIndex;
    if (batchIndex != -1)
        msgId.BatchIndex = batchIndex;
    if (batchSize > 0)
        msgId.BatchSize = batchSize;
    return msgId.ToByteArrays();
}
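// Round-trip sketch, under the assumption that FromByteArray (above) actually
// performs the protobuf merge: serialize a batch position, then parse it back.
// 'messageId' stands for any populated MessageId instance.
byte[] bytes = messageId.ToByteArray(batchIndex: 3, batchSize: 10);
IMessageId restored = MessageId.FromByteArray(bytes); // a BatchMessageId, since batchIndex >= 0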
public MessagePackage(MessageIdData messageId, ReadOnlySequence<byte> data)
{
    MessageId = messageId;
    Data = data;
}
internal MessageId(MessageIdData messageIdData) => Data = messageIdData;
private void ReceiveIndividualMessagesFromBatch(MessageMetadata msgMetadata, int redeliveryCount, IList<long> ackSet, byte[] uncompressedPayload, MessageIdData messageId, IActorRef cnx)
{
    _log.Warning($"Closing consumer [{Subscription}]-[{ConsumerName}] due to unsupported received batch-message with zero receiver queue size");
    // Close the connection.
    cnx.GracefulStop(TimeSpan.FromSeconds(1));
}
public LastMessageIdResponse(long ledgerId, long entryId, int partition, int batchIndex, int batchSize, long[] ackSets, MessageIdData deletePosition)
{
    LedgerId = ledgerId;
    EntryId = entryId;
    Partition = partition;
    BatchIndex = batchIndex;
    BatchSize = batchSize;
    AckSets = ackSets;
    if (deletePosition != null)
    {
        MarkDeletePosition = new MarkDeletePosition((long)deletePosition.ledgerId, (long)deletePosition.entryId, deletePosition.Partition, deletePosition.BatchIndex, deletePosition.BatchSize, deletePosition.AckSets);
    }
}
public Builder() { _messageId = new MessageIdData(); }
public static MessageId ToMessageId(this MessageIdData messageIdData) => new MessageId(messageIdData.LedgerId, messageIdData.EntryId, messageIdData.Partition, messageIdData.BatchIndex);
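// Illustrative round trip between the wire type and the domain type using the two
// extensions shown here; 'source' stands for any populated MessageId.
var wire = new MessageIdData();
wire.MapFrom(source);                        // domain -> wire
MessageId roundTripped = wire.ToMessageId(); // wire -> domain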
public Batch(MessageIdData messageId, int numberOfMessages)
{
    MessageId = messageId;
    // One bit per message in the batch; all start unacknowledged.
    _acknowledgementIndex = new BitArray(numberOfMessages, false);
}
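// A minimal sketch (not the library's actual code) of the two Batch members the
// acknowledgement tracker further up relies on, given the BitArray initialized here:
public void Acknowledge(int batchIndex)
{
    if (batchIndex >= 0 && batchIndex < _acknowledgementIndex.Length)
        _acknowledgementIndex[batchIndex] = true;
}

public bool IsAcknowledged()
{
    // The batch is fully acknowledged once every bit is set.
    for (var i = 0; i < _acknowledgementIndex.Length; ++i)
    {
        if (!_acknowledgementIndex[i])
            return false;
    }
    return true;
}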
public MessagePackage(MessageIdData messageId, uint redeliveryCount, ReadOnlySequence<byte> data)
{
    MessageId = messageId;
    RedeliveryCount = redeliveryCount;
    Data = data;
}
public static ReadOnlySequence<byte> NewSubscribe(string topic, string subscription, long consumerId, long requestId, CommandSubscribe.SubType subType, int priorityLevel, string consumerName, bool isDurable, MessageIdData startMessageId, IDictionary<string, string> metadata, bool readCompacted, bool isReplicated, CommandSubscribe.InitialPosition subscriptionInitialPosition, long startMessageRollbackDurationInSec, ISchemaInfo schemaInfo, bool createTopicIfDoesNotExist)
{
    // Overload without a key-shared policy; delegates to the full overload below.
    return NewSubscribe(topic, subscription, consumerId, requestId, subType, priorityLevel, consumerName, isDurable, startMessageId, metadata, readCompacted, isReplicated, subscriptionInitialPosition, startMessageRollbackDurationInSec, schemaInfo, createTopicIfDoesNotExist, null);
}
public static ReadOnlySequence<byte> NewSubscribe(string topic, string subscription, long consumerId, long requestId, CommandSubscribe.SubType subType, int priorityLevel, string consumerName, bool isDurable, MessageIdData startMessageId, IDictionary<string, string> metadata, bool readCompacted, bool isReplicated, CommandSubscribe.InitialPosition subscriptionInitialPosition, long startMessageRollbackDurationInSec, ISchemaInfo schemaInfo, bool createTopicIfDoesNotExist, KeySharedPolicy keySharedPolicy)
{
    var subscribe = new CommandSubscribe
    {
        Topic = topic,
        Subscription = subscription,
        subType = subType,
        ConsumerId = (ulong)consumerId,
        ConsumerName = consumerName,
        RequestId = (ulong)requestId,
        PriorityLevel = priorityLevel,
        Durable = isDurable,
        ReadCompacted = readCompacted,
        initialPosition = subscriptionInitialPosition,
        ReplicateSubscriptionState = isReplicated,
        ForceTopicCreation = createTopicIfDoesNotExist
    };

    if (keySharedPolicy != null)
    {
        var keySharedMeta = new KeySharedMeta
        {
            allowOutOfOrderDelivery = keySharedPolicy.AllowOutOfOrderDelivery,
            keySharedMode = ConvertKeySharedMode(keySharedPolicy.KeySharedMode)
        };
        if (keySharedPolicy is KeySharedPolicy.KeySharedPolicySticky sticky)
        {
            var ranges = sticky.GetRanges().Ranges;
            foreach (var range in ranges)
            {
                keySharedMeta.hashRanges.Add(new IntRange { Start = range.Start, End = range.End });
            }
        }
        subscribe.keySharedMeta = keySharedMeta;
    }

    if (startMessageId != null)
        subscribe.StartMessageId = startMessageId;

    if (startMessageRollbackDurationInSec > 0)
        subscribe.StartMessageRollbackDurationSec = (ulong)startMessageRollbackDurationInSec;

    subscribe.Metadatas.AddRange(CommandUtils.ToKeyValueList(metadata));

    if (schemaInfo != null)
        subscribe.Schema = GetSchema(schemaInfo);

    return Serializer.Serialize(subscribe.ToBaseCommand());
}
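// Hypothetical invocation of the full overload (all literal values illustrative):
// a durable shared subscription starting from the earliest position, with no schema,
// no key-shared policy, and topic auto-creation enabled.
var subscribeFrame = NewSubscribe(
    topic: "persistent://public/default/my-topic",
    subscription: "my-sub",
    consumerId: 1,
    requestId: 2,
    subType: CommandSubscribe.SubType.Shared,
    priorityLevel: 0,
    consumerName: "consumer-1",
    isDurable: true,
    startMessageId: null,
    metadata: new Dictionary<string, string>(),
    readCompacted: false,
    isReplicated: false,
    subscriptionInitialPosition: CommandSubscribe.InitialPosition.Earliest,
    startMessageRollbackDurationInSec: 0,
    schemaInfo: null,
    createTopicIfDoesNotExist: true,
    keySharedPolicy: null);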