/// <summary>
/// Serializes this object into the supplied stream by delegating to the
/// <see cref="KafkaBinaryWriter"/>-based overload.
/// </summary>
/// <param name="output">Destination stream; written from its current position.</param>
// NOTE(review): disposing the writer here will also dispose <paramref name="output"/>
// unless KafkaBinaryWriter leaves its stream open — confirm callers do not reuse
// the stream after this call.
public void WriteTo(MemoryStream output)
{
    using (var writer = new KafkaBinaryWriter(output))
    {
        WriteTo(writer);
    }
}
/// <summary>
/// Serializes this offset-commit request into the Kafka wire format:
/// size prefix, common request header, consumer-group fields, then the
/// per-topic / per-partition offset entries.
/// </summary>
/// <returns>The stream's backing buffer (its length equals the pre-computed Size).</returns>
public override byte[] Serialize()
{
    using (var stream = new MemoryStream(this.Size))
    using (var kafkaWriter = new KafkaBinaryWriter(stream))
    {
        // Size prefix excludes its own four bytes.
        kafkaWriter.Write(stream.Capacity - 4);

        // Common request header.
        kafkaWriter.Write((short)this.ApiKey);
        kafkaWriter.Write(ApiVersion);
        kafkaWriter.Write(CorrelationId);
        kafkaWriter.WriteShortString(ClientId);

        // Consumer-group identity for the commit.
        kafkaWriter.WriteShortString(ConsumerGroupId);
        kafkaWriter.Write(ConsumerGroupGenerationId);
        kafkaWriter.WriteShortString(ConsumerId);
        kafkaWriter.Write(RetentionTime);

        // Offsets to commit, grouped by topic.
        kafkaWriter.Write(RequestInfo.Count);
        foreach (var topicEntry in RequestInfo)
        {
            kafkaWriter.WriteShortString(topicEntry.Key);
            kafkaWriter.Write(topicEntry.Value.Count);
            foreach (var offsetDetail in topicEntry.Value)
            {
                kafkaWriter.Write(offsetDetail.PartitionId);
                kafkaWriter.Write(offsetDetail.Offset);
                kafkaWriter.WriteShortString(offsetDetail.Metadata);
            }
        }

        // Raw backing buffer; assumes Size matches the bytes written.
        return stream.GetBuffer();
    }
}
/// <summary>
/// Serializes this sync-group request: size prefix, common request header,
/// group/generation/member identity, then each member's assignment blob.
/// </summary>
/// <returns>The stream's backing buffer (its length equals the pre-computed Size).</returns>
public override byte[] Serialize()
{
    using (var ms = new MemoryStream(this.Size))
    {
        using (var writer = new KafkaBinaryWriter(ms))
        {
            // Size prefix excludes its own four bytes.
            writer.Write(ms.Capacity - 4);
            writer.Write((short)this.ApiKey);
            writer.Write(ApiVersion);
            writer.Write(CorrelationId);
            writer.WriteShortString(ClientId);
            // Group membership identity.
            writer.WriteShortString(this.GroupId);
            writer.Write(this.GenerationId);
            writer.WriteShortString(this.MemberId);
            writer.Write(this.GroupAssignmentInfos.Count);
            for (int i = 0; i < this.GroupAssignmentInfos.Count; i++)
            {
                writer.WriteShortString(this.GroupAssignmentInfos[i].MemberId);
                // NOTE(review): when MemberAssignment is null, no length field is written
                // at all. Kafka's "bytes" encoding normally carries an explicit int32
                // length (-1 for null); this only round-trips if Size reserves the slot
                // and GetBuffer's zero padding reads back as an empty assignment — confirm
                // against the Size computation before relying on null assignments here.
                if (this.GroupAssignmentInfos[i].MemberAssignment != null)
                {
                    writer.Write(this.GroupAssignmentInfos[i].MemberAssignment.Length);
                    writer.Write(this.GroupAssignmentInfos[i].MemberAssignment);
                }
            }
            return(ms.GetBuffer());
        }
    }
}
/// <summary>
/// Serializes this request: size prefix, common request header, replica id,
/// then the per-topic entries, each of which writes its own partition details.
/// </summary>
/// <returns>The stream's backing buffer (its length equals the pre-computed Size).</returns>
public override byte[] Serialize()
{
    using (var buffer = new MemoryStream(this.Size))
    {
        using (var protocolWriter = new KafkaBinaryWriter(buffer))
        {
            // Total message size, excluding the size field itself.
            protocolWriter.Write(buffer.Capacity - DefaultRequestSizeSize);

            // Standard request header.
            protocolWriter.Write((short)this.ApiKey);
            protocolWriter.Write(ApiVersion);
            protocolWriter.Write(CorrelationId);
            protocolWriter.WriteShortString(ClientId);
            protocolWriter.Write(ReplicaId);

            // Per-topic entries; each partition entry serializes itself.
            protocolWriter.Write(RequestInfo.Count);
            foreach (var topicEntry in RequestInfo)
            {
                protocolWriter.WriteShortString(topicEntry.Key);
                protocolWriter.Write(topicEntry.Value.Count);
                foreach (var partitionRequest in topicEntry.Value)
                {
                    partitionRequest.WriteTo(protocolWriter);
                }
            }

            return buffer.GetBuffer();
        }
    }
}
/// <summary>
/// Writes this partition's metadata: partition id, an optional leader
/// (preceded by a presence-flag byte), the replica list, the in-sync
/// replica list, and a trailing zero byte.
/// </summary>
/// <param name="writer">Destination writer; assumed non-null.</param>
public void WriteTo(KafkaBinaryWriter writer)
{
    writer.Write(this.PartitionId);

    // Leader presence flag: a bare 0, or 1 followed by the leader broker.
    if (this.Leader == null)
    {
        writer.Write((byte)0);
    }
    else
    {
        writer.Write((byte)1);
        this.Leader.WriteTo(writer);
    }

    // Replica set: count, then each broker.
    writer.Write((short)this.Replicas.Count());
    foreach (var replicaBroker in this.Replicas)
    {
        replicaBroker.WriteTo(writer);
    }

    // In-sync replica set: count, then each broker.
    writer.Write((short)this.Isr.Count());
    foreach (var inSyncBroker in this.Isr)
    {
        inSyncBroker.WriteTo(writer);
    }

    // Trailing marker byte (always zero in this format).
    writer.Write((byte)0);
}
/// <summary>
/// Serializes this fetch request: size prefix, common request header,
/// replica/wait/min-bytes fields, then per-topic fetch descriptors.
/// </summary>
/// <returns>The stream's backing buffer (its length equals the pre-computed Size).</returns>
public override byte[] Serialize()
{
    using (var buffer = new MemoryStream(this.Size))
    using (var requestWriter = new KafkaBinaryWriter(buffer))
    {
        // Size prefix excludes the size field itself.
        requestWriter.Write(buffer.Capacity - DefaultRequestSizeSize);

        // Common request header.
        requestWriter.Write((short)this.ApiKey);
        requestWriter.Write(this.ApiVersion);
        requestWriter.Write(this.CorrelationId);
        requestWriter.WriteShortString(this.ClientId);

        // Fetch-specific fields.
        requestWriter.Write(this.ReplicaId);
        requestWriter.Write(this.MaxWait);
        requestWriter.Write(this.MinBytes);

        // Per-topic fetch descriptors; each partition entry serializes itself.
        requestWriter.Write(this.OffsetInfo.Count);
        foreach (var topicOffsets in this.OffsetInfo)
        {
            requestWriter.WriteShortString(topicOffsets.Key);
            requestWriter.Write(topicOffsets.Value.Count);
            foreach (var partitionFetch in topicOffsets.Value)
            {
                partitionFetch.WriteTo(requestWriter);
            }
        }

        return buffer.GetBuffer();
    }
}
/// <summary>
/// Writes this broker to the wire: id, host as a short string
/// (default encoding), then port.
/// </summary>
/// <param name="writer">Destination writer; assumed non-null.</param>
public void WriteTo(KafkaBinaryWriter writer)
{
    // Fixed field order: id, host, port.
    writer.Write(this.Id);
    writer.WriteShortString(this.Host, KafkaRequest.DefaultEncoding);
    writer.Write(this.Port);
}
/// <summary>
/// Serializes this assignment: version, the partition-assignment list
/// (topic name as a short-length-prefixed UTF-8 string plus its partition
/// ids), then the length-prefixed opaque user data. Absent lists/data are
/// encoded as a zero count.
/// </summary>
/// <returns>A copy of the bytes written (exact length).</returns>
public byte[] Serialize()
{
    byte[] bytes;

    // serialize this object
    using (var ms = new MemoryStream())
    {
        var writer = new KafkaBinaryWriter(ms);
        writer.Write(this.Version);

        if (this.PartitionAssignmentInfos != null && this.PartitionAssignmentInfos.Length > 0)
        {
            writer.Write(this.PartitionAssignmentInfos.Length);
            foreach (var pi in this.PartitionAssignmentInfos)
            {
                if (string.IsNullOrEmpty(pi.Topic))
                {
                    writer.Write((short)0);
                }
                else
                {
                    // FIX: the short length prefix must be the UTF-8 *byte* count,
                    // not the char count — they differ for non-ASCII topic names.
                    // (Matches the correct pattern used by the sibling Serialize
                    // that encodes the subscription's topic list.)
                    var topicBytes = Encoding.UTF8.GetBytes(pi.Topic);
                    writer.Write((short)topicBytes.Length);
                    writer.Write(topicBytes);
                }

                // Partition id list: zero count when absent.
                if (pi.Partitions == null || pi.Partitions.Length == 0)
                {
                    writer.Write(0);
                }
                else
                {
                    writer.Write(pi.Partitions.Length);
                    foreach (var ppi in pi.Partitions)
                    {
                        writer.Write(ppi);
                    }
                }
            }
        }
        else
        {
            writer.Write(0);
        }

        // Opaque user data: length-prefixed, zero when absent.
        if (this.UserData != null && this.UserData.Length > 0)
        {
            writer.Write(this.UserData.Length);
            writer.Write(this.UserData);
        }
        else
        {
            writer.Write(0);
        }

        bytes = ms.ToArray();
    }

    return(bytes);
}
/// <summary>
/// Serializes this object into the supplied stream by wrapping it in a
/// <see cref="KafkaBinaryWriter"/> and delegating to the writer overload.
/// </summary>
/// <param name="output">Destination stream; written from its current position.</param>
public void WriteTo(System.IO.MemoryStream output)
{
    using (var kafkaWriter = new KafkaBinaryWriter(output))
    {
        this.WriteTo(kafkaWriter);
    }
}
/// <summary>
/// Writes this topic's metadata: topic name as a short string, then the
/// partition count followed by each partition's own serialization.
/// </summary>
/// <param name="writer">Destination writer; assumed non-null.</param>
public void WriteTo(KafkaBinaryWriter writer)
{
    writer.WriteShortString(this.Topic, KafkaRequest.DefaultEncoding);

    // Partition list: count, then each partition serializes itself.
    writer.Write(this.PartitionsMetadata.Count());
    foreach (var partition in this.PartitionsMetadata)
    {
        partition.WriteTo(writer);
    }
}
/// <summary>
/// Serializes this request: size prefix, common request header, and the
/// target group id.
/// </summary>
/// <returns>The stream's backing buffer (its length equals the pre-computed Size).</returns>
public override byte[] Serialize()
{
    using (var buffer = new MemoryStream(this.Size))
    {
        using (var requestWriter = new KafkaBinaryWriter(buffer))
        {
            // Size prefix excludes its own four bytes.
            requestWriter.Write(buffer.Capacity - 4);

            // Request header followed by the group id payload.
            requestWriter.Write((short)this.ApiKey);
            requestWriter.Write(ApiVersion);
            requestWriter.Write(CorrelationId);
            requestWriter.WriteShortString(ClientId);
            requestWriter.WriteShortString(this.GroupId);

            return buffer.GetBuffer();
        }
    }
}
/// <summary>
/// Serializes this topic-metadata request: size prefix, common request
/// header, then the topic name list.
/// </summary>
/// <returns>The stream's backing buffer (its length equals the pre-computed Size).</returns>
public override byte[] Serialize()
{
    using (var buffer = new MemoryStream(this.Size))
    using (var metadataWriter = new KafkaBinaryWriter(buffer))
    {
        // Size prefix excludes the size field itself.
        metadataWriter.Write(buffer.Capacity - DefaultRequestSizeSize);

        // Common request header.
        metadataWriter.Write((short)this.ApiKey);
        metadataWriter.Write(this.ApiVersion);
        metadataWriter.Write(this.CorrelationId);
        metadataWriter.WriteShortString(this.ClientId, DefaultEncoding);

        // Topic list: count, then each name as a short string.
        metadataWriter.Write(this.Topics.Count());
        foreach (var topicName in this.Topics)
        {
            metadataWriter.WriteShortString(topicName, DefaultEncoding);
        }

        return buffer.GetBuffer();
    }
}
/// <summary>
/// Serializes this offset-fetch request: size prefix, common request
/// header, consumer group, then the requested topic/partition lists.
/// A null request map or a null partition list is encoded as a zero count.
/// </summary>
/// <returns>The stream's backing buffer (its length equals the pre-computed Size).</returns>
public override byte[] Serialize()
{
    using (var buffer = new MemoryStream(this.Size))
    using (var fetchWriter = new KafkaBinaryWriter(buffer))
    {
        // Size prefix excludes its own four bytes.
        fetchWriter.Write(buffer.Capacity - 4);

        // Common request header.
        fetchWriter.Write((short)this.ApiKey);
        fetchWriter.Write(ApiVersion);
        fetchWriter.Write(CorrelationId);
        fetchWriter.WriteShortString(ClientId);
        fetchWriter.WriteShortString(this.ConsumerGroup);

        if (this.OffsetFetchRequestInfo == null)
        {
            // No topics requested at all.
            fetchWriter.Write(0);
        }
        else
        {
            fetchWriter.Write(this.OffsetFetchRequestInfo.Count);
            foreach (var topicPartitions in this.OffsetFetchRequestInfo)
            {
                fetchWriter.WriteShortString(topicPartitions.Key);

                // A null partition list is encoded as a zero count.
                if (topicPartitions.Value == null)
                {
                    fetchWriter.Write(0);
                }
                else
                {
                    fetchWriter.Write(topicPartitions.Value.Count);
                    foreach (var partitionId in topicPartitions.Value)
                    {
                        fetchWriter.Write(partitionId);
                    }
                }
            }
        }

        return buffer.GetBuffer();
    }
}
/// <summary>
/// Serializes this subscription: version, the topic name list (each name
/// as a short-length-prefixed UTF-8 string), then the length-prefixed
/// opaque user data. Absent lists/data are encoded as a zero count.
/// </summary>
/// <returns>A copy of the bytes written (exact length).</returns>
public byte[] Serialize()
{
    byte[] serialized;

    using (var stream = new MemoryStream())
    {
        var protocolWriter = new KafkaBinaryWriter(stream);
        protocolWriter.Write(this.Version);

        // Topic subscription list: count, then each UTF-8 name length-prefixed.
        if (this.Topics != null && this.Topics.Count > 0)
        {
            protocolWriter.Write(this.Topics.Count);
            foreach (var topic in this.Topics)
            {
                var encodedTopic = Encoding.UTF8.GetBytes(topic);
                protocolWriter.Write((short)encodedTopic.Length);
                protocolWriter.Write(encodedTopic);
            }
        }
        else
        {
            protocolWriter.Write(0);
        }

        // Opaque user data: length-prefixed, zero when absent.
        if (this.UserData != null && this.UserData.Length > 0)
        {
            protocolWriter.Write(this.UserData.Length);
            protocolWriter.Write(this.UserData);
        }
        else
        {
            protocolWriter.Write(0);
        }

        serialized = stream.ToArray();
    }

    return(serialized);
}
/// <summary>
/// Serializes this join-group request: size prefix, common request header,
/// group/session/member/protocol-type fields, then the list of supported
/// group protocols (name plus opaque metadata blob).
/// </summary>
/// <returns>The stream's backing buffer (its length equals the pre-computed Size).</returns>
public override byte[] Serialize()
{
    using (var buffer = new MemoryStream(this.Size))
    {
        using (var joinWriter = new KafkaBinaryWriter(buffer))
        {
            // Size prefix excludes its own four bytes.
            joinWriter.Write(buffer.Capacity - 4);

            // Common request header.
            joinWriter.Write((short)this.ApiKey);
            joinWriter.Write(ApiVersion);
            joinWriter.Write(CorrelationId);
            joinWriter.WriteShortString(ClientId);

            // Group membership fields.
            joinWriter.WriteShortString(this.GroupId);
            joinWriter.Write(this.SessionTimeout);
            // Version 1 of the API would also carry a rebalance timeout here.
            joinWriter.WriteShortString(this.MemberId);
            joinWriter.WriteShortString(this.ProtocolType);

            // Supported protocols: name plus length-prefixed metadata blob.
            joinWriter.Write(this.GroupProtocols.Count);
            foreach (var groupProtocol in this.GroupProtocols)
            {
                joinWriter.WriteShortString(groupProtocol.ProtocolName);
                joinWriter.Write(groupProtocol.ProtocolMetadata.Length);
                joinWriter.Write(groupProtocol.ProtocolMetadata);
            }

            return buffer.GetBuffer();
        }
    }
}
/// <summary>
/// Writes this partition fetch descriptor: partition id, starting offset,
/// then maximum fetch size.
/// </summary>
/// <param name="writer">Destination writer; assumed non-null.</param>
public void WriteTo(KafkaBinaryWriter writer)
{
    writer.Write(this.PartitionId);
    writer.Write(this.Offset);
    writer.Write(this.FetchSize);
}
/// <summary>
/// Writes this partition offset descriptor: partition id, target time,
/// then the maximum number of offsets requested.
/// </summary>
/// <param name="writer">Destination writer; assumed non-null.</param>
public void WriteTo(KafkaBinaryWriter writer)
{
    writer.Write(this.PartitionId);
    writer.Write(this.Time);
    writer.Write(this.MaxNumOffsets);
}