Example #1
        public void Store(MessageChunk chunk) =>
            Add(new InMemoryStoredChunk
            {
                MessageId = chunk.OriginalMessageId,
                ChunkId   = chunk.ChunkId,
                Content   = chunk.Content
            });
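
For context, Store above relies on an InMemoryStoredChunk type and an Add helper that are not shown. A minimal sketch of what they might look like, where only the three property names come from the initializer and everything else is assumed:

using System.Collections.Generic;

// Hypothetical backing types for the Store method above; only the property
// names used in the initializer are taken from the original code.
public class InMemoryStoredChunk
{
    public string MessageId { get; set; }   // OriginalMessageId of the chunked message
    public int    ChunkId   { get; set; }   // position of this chunk in the sequence
    public byte[] Content   { get; set; }   // raw payload carried by the chunk
}

public class InMemoryChunkStore
{
    private readonly List<InMemoryStoredChunk> _chunks = new List<InMemoryStoredChunk>();

    // Assumed to be the Add that the expression-bodied Store method calls.
    protected void Add(InMemoryStoredChunk chunk) => _chunks.Add(chunk);
}
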
        /// <summary>
        ///   Breaks down an insert message that may be too large into manageable sizes.
        ///   When inserting only one document there will be only one chunk. However, chances
        ///   are that when inserting thousands of documents at once there will be many.
        /// </summary>
        protected void ChunkMessage(BsonWriter writer)
        {
            var baseSize = CalculateBaseSize(writer);

            var chunk = new MessageChunk {
                Size = baseSize, Documents = new List<object>()
            };

            foreach (var document in Documents)
            {
                var documentSize = writer.CalculateSize(document);

                if (documentSize + baseSize >= MaximumMessageSize)
                {
                    throw new MongoException("Document is too big to fit in a message.");
                }

                if (documentSize + chunk.Size > MaximumMessageSize)
                {
                    _chunks.Add(chunk);
                    chunk = new MessageChunk {
                        Size = baseSize, Documents = new List<object>()
                    };
                }

                chunk.Documents.Add(document);
                chunk.Size += documentSize;
            }

            _chunks.Add(chunk);
        }
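
ChunkMessage only touches two members of MessageChunk, so the shape it relies on can be sketched minimally; anything beyond Size and Documents would be an assumption:

using System.Collections.Generic;

// Minimal sketch of the chunk shape ChunkMessage relies on.
public class MessageChunk
{
    public int          Size      { get; set; }   // running byte size of the chunk
    public List<object> Documents { get; set; }   // documents accumulated into this chunk
}

Starting each chunk's Size at baseSize means the fixed header and collection-name overhead is always accounted for before any document is added, so the size check never undercounts.
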
        public void JoinIfComplete_AllChunks_Joined()
        {
            var originalMessage = new BinaryMessage
            {
                MessageId = Guid.NewGuid(),
                Content   = GetByteArray(500)
            };

            var originalSerializedMessage = _serializer.Serialize(originalMessage);

            var chunks = new MessageChunk[3];

            chunks[0] = new MessageChunk
            {
                MessageId         = Guid.NewGuid(),
                ChunkId           = 0,
                ChunksCount       = 3,
                OriginalMessageId = originalMessage.MessageId.ToString(),
                Content           = originalSerializedMessage.AsMemory().Slice(0, 300).ToArray()
            };
            chunks[1] = new MessageChunk
            {
                MessageId         = Guid.NewGuid(),
                ChunkId           = 1,
                ChunksCount       = 3,
                OriginalMessageId = originalMessage.MessageId.ToString(),
                Content           = originalSerializedMessage.AsMemory().Slice(300, 300).ToArray()
            };
            chunks[2] = new MessageChunk
            {
                MessageId         = Guid.NewGuid(),
                ChunkId           = 2,
                ChunksCount       = 3,
                OriginalMessageId = originalMessage.MessageId.ToString(),
                Content           = originalSerializedMessage.AsMemory().Slice(600).ToArray()
            };

            var result = new ChunkConsumer(_store).JoinIfComplete(chunks[0]);

            result.Should().BeNull();
            result = new ChunkConsumer(_store).JoinIfComplete(chunks[1]);
            result.Should().BeNull();
            result = new ChunkConsumer(_store).JoinIfComplete(chunks[2]);
            result.Should().NotBeNull();

            var deserializedResult = (BinaryMessage)_serializer.Deserialize(result);

            deserializedResult.Content.Should().BeEquivalentTo(originalMessage.Content);
        }
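
The test above only pins down the observable behaviour of JoinIfComplete: it returns null until the last chunk arrives and the reassembled payload afterwards. One way a consumer could satisfy that contract, purely as an illustrative sketch in which the store interface and its methods are invented rather than the library's real API:

using System.Collections.Generic;
using System.Linq;

// Assumed store contract for the sketch below; not the real interface.
public interface IChunkStore
{
    void Store(MessageChunk chunk);
    IReadOnlyList<MessageChunk> GetChunks(string originalMessageId);
    void Remove(string originalMessageId);
}

// Mirrors the ChunkConsumer(_store) calls in the test, but the body is a guess.
public class ChunkConsumerSketch
{
    private readonly IChunkStore _store;

    public ChunkConsumerSketch(IChunkStore store) => _store = store;

    public byte[] JoinIfComplete(MessageChunk chunk)
    {
        _store.Store(chunk);

        var stored = _store.GetChunks(chunk.OriginalMessageId);
        if (stored.Count < chunk.ChunksCount)
            return null;                            // not every chunk has arrived yet

        var payload = stored
            .OrderBy(c => c.ChunkId)                // restore the original byte order
            .SelectMany(c => c.Content)
            .ToArray();

        _store.Remove(chunk.OriginalMessageId);     // drop the stored chunks once joined
        return payload;
    }
}
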
        /// <summary>
        ///   Writes out a header and the chunk of documents.
        /// </summary>
        /// <param name = "stream">The destination stream for the wire message.</param>
        /// <param name = "chunk">The chunk of documents to write.</param>
        protected void WriteChunk(Stream stream, MessageChunk chunk)
        {
            WriteHeader(new BinaryWriter(stream), chunk.Size);

            var writer = new BsonWriter(stream, _bsonWriterSettings);

            writer.WriteValue(BsonType.Integer, 0);
            writer.Write(FullCollectionName, false);

            foreach (var document in chunk.Documents)
            {
                writer.WriteObject(document);
            }

            writer.Flush();
        }
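
ChunkMessage and WriteChunk are presumably composed by the message's write path. A hedged sketch of such a driver, where the Write entry point and the way the BsonWriter is obtained are assumptions; only ChunkMessage, WriteChunk and the _chunks field come from the original code:

// Hypothetical write path tying the two methods above together.
public void Write(Stream stream)
{
    var writer = new BsonWriter(stream, _bsonWriterSettings);

    ChunkMessage(writer);              // split Documents into size-bounded chunks

    foreach (var chunk in _chunks)
        WriteChunk(stream, chunk);     // each chunk goes out as its own wire message
}
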
        /// <summary>
        /// Breaks down an insert message that may be too large into manageable sizes.
        /// When inserting only one document there will be only one chunk. However, chances
        /// are that when inserting thousands of documents at once there will be many.
        /// </summary>
        protected void ChunkMessage(BsonWriter writer)
        {
            int baseSize = CalculateBaseSize(writer);

            MessageChunk chunk = new MessageChunk(){Size = baseSize, Documents = new List<Document>()};
            foreach(Document doc in this.Documents){
                int docSize = writer.CalculateSize(doc);
                if(docSize + baseSize >= MessageBase.MaximumMessageSize) throw new MongoException("Document is too big to fit in a message.");

                if(docSize + chunk.Size > MessageBase.MaximumMessageSize){
                    chunks.Add(chunk);
                    chunk = new MessageChunk(){Size = baseSize, Documents = new List<Document>()};
                }
                chunk.Documents.Add(doc);
                chunk.Size += docSize;
            }
            chunks.Add(chunk);
        }
        /// <summary>
        ///   Breaks down an insert message that may be too large into manageable sizes.
        ///   When inserting only one document there will be only one chunk. However, chances
        ///   are that when inserting thousands of documents at once there will be many.
        /// </summary>
        protected void ChunkMessage(BsonWriter writer){
            var baseSize = CalculateBaseSize(writer);

            var chunk = new MessageChunk{Size = baseSize, Documents = new List<object>()};
            
            foreach(var document in Documents){
                var documentSize = writer.CalculateSize(document);
                
                if(documentSize + baseSize >= MaximumMessageSize)
                    throw new MongoException("Document is too big to fit in a message.");

                if(documentSize + chunk.Size > MaximumMessageSize){
                    _chunks.Add(chunk);
                    chunk = new MessageChunk{Size = baseSize, Documents = new List<object>()};
                }
                
                chunk.Documents.Add(document);
                chunk.Size += documentSize;
            }

            _chunks.Add(chunk);
        }
Example #7
        private void EnsureMessageFetched()
        {
            var maxBytes    = _config.FetchBytes;
            var maxWaitTime = _config.FetchMilliseconds;

            if (_messages == null)
            {
                maxWaitTime = _config.FetchMilliseconds / 2;
            }
            else if (_messages.Count == 0 || _messages.Position == _messages.Count - 1)
            {
                maxWaitTime = _config.FetchMilliseconds * 2;
            }
            else
            {
                return;
            }

            if (_coordinator.State != CoordinatorState.Stable)
            {
                Trace.TraceWarning("{0:HH:mm:ss.fff} [{1:d2}] #6 Rebalance {2}, fetch interrupted",
                                   DateTime.Now, Thread.CurrentThread.ManagedThreadId, _coordinator.State);
                BlockForRebalace(_config.RebalaceBlockMilliseconds);
            }
            if (_coordinator.State != CoordinatorState.Stable)
            {
                throw new InvalidOperationException("Load balancing is still not complete");
            }

            var partition = _partitionDispatcher.SelectParition();
            var offset    = _offsets.GetCurrentOffset(partition);

            Trace.TraceInformation("{0:HH:mm:ss.fff} [{1:d2}] Fetch group '{2}', topic '{3}'[{4}], offset {5}",
                                   DateTime.Now, Thread.CurrentThread.ManagedThreadId, GroupId, Topic, partition, offset);
            var messages = _client.Fetch(Topic, partition, offset, maxBytes: maxBytes, maxWaitTime: maxWaitTime).ToArray();

            _messages = new MessageChunk(messages, partition);
        }
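
EnsureMessageFetched treats _messages as a cursor over the last fetch: it halves the wait before the first fetch, doubles it once the cursor is exhausted, and returns immediately while unread messages remain. A minimal sketch of the cursor type it relies on; note this MessageChunk is the Kafka-side one, only the constructor, Count and Position appear in the code above, and the Message stand-in plus the MoveNext/Current helpers are assumptions:

// Stand-in for whatever element type _client.Fetch returns; fields assumed.
public class Message
{
    public long   Offset { get; set; }
    public byte[] Value  { get; set; }
}

// Minimal sketch of the fetch cursor used by EnsureMessageFetched.
public class MessageChunk
{
    private readonly Message[] _messages;

    public MessageChunk(Message[] messages, int partition)
    {
        _messages = messages;
        Partition = partition;
        Position  = -1;                          // positioned before the first message
    }

    public int Partition { get; }
    public int Count     => _messages.Length;
    public int Position  { get; private set; }

    public bool MoveNext()                       // assumed advance helper
    {
        if (Position >= Count - 1) return false;
        Position++;
        return true;
    }

    public Message Current => _messages[Position];
}
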
        /// <summary>
        /// Writes out a header and the chunk of documents.
        /// </summary>
        /// <param name="stream">The destination stream for the wire message.</param>
        /// <param name="chunk">The chunk of documents to write.</param>
        protected void WriteChunk(Stream stream, MessageChunk chunk)
        {
            WriteHeader(new BinaryWriter(stream), chunk.Size);

            BsonWriter writer = new BsonWriter(stream);
            writer.WriteValue(BsonDataType.Integer,0);
            writer.WriteString(this.FullCollectionName);

            foreach(Document doc in chunk.Documents){
                writer.Write(doc);
            }
            writer.Flush();
        }
        /// <summary>
        ///   Writes out a header and the chunk of documents.
        /// </summary>
        /// <param name = "stream">The destination stream for the wire message.</param>
        /// <param name = "chunk">The chunk of documents to write.</param>
        protected void WriteChunk(Stream stream, MessageChunk chunk){
            WriteHeader(new BinaryWriter(stream), chunk.Size);

            var writer = new BsonWriter(stream, _bsonWriterSettings);
            writer.WriteValue(BsonType.Integer, 0);
            writer.Write(FullCollectionName, false);

            foreach(var document in chunk.Documents)
                writer.WriteObject(document);

            writer.Flush();
        }
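
Both WriteChunk variants delegate the fixed prefix to WriteHeader. A sketch of that helper, under the assumption that it emits the standard MongoDB wire header of four little-endian int32s (messageLength, requestID, responseTo, opCode, with 2002 being OP_INSERT); the request-id counter field is invented for the example:

// Sketch only: assumes chunk.Size already accounts for the header bytes
// (via CalculateBaseSize) and that a _nextRequestId counter exists on the class.
protected void WriteHeader(BinaryWriter writer, int chunkSize)
{
    writer.Write(chunkSize);          // messageLength: total bytes including this header
    writer.Write(_nextRequestId++);   // requestID (assumed counter field)
    writer.Write(0);                  // responseTo: unused for client requests
    writer.Write(2002);               // opCode: OP_INSERT
    writer.Flush();                   // push the header before the BSON body follows
}
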