public override byte[] Serialize(Change<V> data, SerializationContext context)
{
    byte[] oldValueBytes = innerSerdes.Serialize(data.OldValue, context);
    byte[] newValueBytes = innerSerdes.Serialize(data.NewValue, context);

    // Frame each value with a 4-byte length prefix so old/new can be split apart on read.
    return ByteBuffer
        .Build(CalculateCapacity(oldValueBytes, newValueBytes))
        .PutInt(oldValueBytes?.Length ?? 0)
        .Put(oldValueBytes)
        .PutInt(newValueBytes?.Length ?? 0)
        .Put(newValueBytes)
        .ToArray();
}

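// A minimal sketch of the matching read side (not part of the original source):
// it assumes ByteBuffer writes Java-style big-endian Int32 length prefixes, that
// Put(null) writes nothing, and that a zero length prefix denotes a null value.
// BinaryPrimitives lives in System.Buffers.Binary; CreateChange is a hypothetical
// stand-in for however Change<V> instances are constructed.
public override Change<V> Deserialize(byte[] data, SerializationContext context)
{
    var span = new ReadOnlySpan<byte>(data);

    // Read the 4-byte length prefix, then slice out that many bytes.
    int oldLength = BinaryPrimitives.ReadInt32BigEndian(span);
    var oldBytes = span.Slice(4, oldLength).ToArray();

    int offset = 4 + oldLength;
    int newLength = BinaryPrimitives.ReadInt32BigEndian(span.Slice(offset));
    var newBytes = span.Slice(offset + 4, newLength).ToArray();

    V oldValue = oldLength > 0 ? innerSerdes.Deserialize(oldBytes, context) : default;
    V newValue = newLength > 0 ? innerSerdes.Deserialize(newBytes, context) : default;
    return CreateChange(oldValue, newValue);
}
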
public void Send<K, V>(string topic, K key, V value, Headers headers, long timestamp, ISerDes<K> keySerializer, ISerDes<V> valueSerializer)
{
    var k = key != null ? keySerializer.Serialize(key, new SerializationContext(MessageComponentType.Key, topic, headers)) : null;
    var v = value != null ? valueSerializer.Serialize(value, new SerializationContext(MessageComponentType.Value, topic, headers)) : null;

    producer?.Produce(
        topic,
        new Message<byte[], byte[]> { Key = k, Value = v });

    // Not used for the moment:
    //producer?.Produce(
    //    topic,
    //    new Message<byte[], byte[]> { Key = k, Value = v },
    //    (report) => {
    //        if (report.Error.Code == ErrorCode.NoError && report.Status == PersistenceStatus.Persisted)
    //        {
    //            if (offsets.ContainsKey(report.TopicPartition) && offsets[report.TopicPartition] <= report.Offset)
    //                offsets[report.TopicPartition] = report.Offset;
    //            else
    //                offsets.Add(report.TopicPartition, report.Offset);
    //        }
    //    });
}

public void Send<K, V>(string topic, K key, V value, Headers headers, long timestamp, ISerDes<K> keySerializer, ISerDes<V> valueSerializer)
{
    var k = key != null ? keySerializer.Serialize(key) : null;
    var v = value != null ? valueSerializer.Serialize(value) : null;

    producer?.Produce(
        topic,
        new Message<byte[], byte[]> { Key = k, Value = v },
        (report) =>
        {
            if (report.Error.Code == ErrorCode.NoError && report.Status == PersistenceStatus.Persisted)
            {
                // Track the highest persisted offset per partition. (The original
                // called Add in the else branch even when the key already existed,
                // which throws once an out-of-order offset arrives.)
                if (offsets.ContainsKey(report.TopicPartition))
                {
                    if (offsets[report.TopicPartition] < report.Offset)
                    {
                        offsets[report.TopicPartition] = report.Offset;
                    }
                }
                else
                {
                    offsets.Add(report.TopicPartition, report.Offset);
                }
            }
        });
}

private (byte[], byte[]) GetBytes(K key, V value)
{
    byte[] k = keySerdes != null ? keySerdes.Serialize(key) : configuration.DefaultKeySerDes.SerializeObject(key);
    byte[] v = valueSerdes != null ? valueSerdes.Serialize(value) : configuration.DefaultValueSerDes.SerializeObject(value);
    return (k, v);
}

public static byte[] ToBinary<K>(Windowed<K> timeKey, ISerDes<K> serializer, string topic)
{
    byte[] bytes = serializer.Serialize(timeKey.Key, new Confluent.Kafka.SerializationContext(Confluent.Kafka.MessageComponentType.Key, topic));
    ByteBuffer buf = ByteBuffer.Build(bytes.Length + TIMESTAMP_SIZE);
    buf.Put(bytes);
    buf.PutLong(timeKey.Window.StartMs);
    return buf.ToArray();
}

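// A minimal sketch of the inverse (not part of the original source), assuming
// TIMESTAMP_SIZE is 8 and PutLong writes a big-endian timestamp after the raw
// key bytes. BinaryPrimitives lives in System.Buffers.Binary.
public static (K Key, long StartMs) FromBinary<K>(byte[] binary, ISerDes<K> deserializer, string topic)
{
    int keyLength = binary.Length - TIMESTAMP_SIZE;
    var keyBytes = new byte[keyLength];
    Array.Copy(binary, keyBytes, keyLength);

    K key = deserializer.Deserialize(keyBytes, new Confluent.Kafka.SerializationContext(Confluent.Kafka.MessageComponentType.Key, topic));
    long startMs = BinaryPrimitives.ReadInt64BigEndian(new ReadOnlySpan<byte>(binary, keyLength, TIMESTAMP_SIZE));
    return (key, startMs);
}
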
private Bytes GetKeyBytes(K key)
{
    if (keySerdes != null)
    {
        return new Bytes(keySerdes.Serialize(key, GetSerializationContext(true)));
    }
    else
    {
        throw new StreamsException($"The serializer is not compatible with the actual key (key type: {typeof(K).FullName}). Change the default Serdes in StreamConfig or provide the correct Serdes via method parameters (using the DSL).");
    }
}

private byte[] GetValueBytes(V value)
{
    if (valueSerdes != null)
    {
        return valueSerdes.Serialize(value, GetSerializationContext(false));
    }
    else
    {
        throw new StreamsException($"The serializer is not compatible with the actual value (value type: {typeof(V).FullName}). Change the default Serdes in StreamConfig or provide the correct Serdes via method parameters (using the DSL).");
    }
}

public void Publish<TMessage>(TMessage message) where TMessage : class
{
    var body = serdes.Serialize(message);
    var routingKey = message.GetType().FullName;

    channel.BasicPublish(exchange: EXCHANGE_NAME,
                         routingKey: routingKey,
                         basicProperties: null,
                         body: body);
}

/// <summary>
/// Serialize a <see cref="Windowed{K}"/> instance to a byte array
/// </summary>
/// <param name="data">Instance to serialize</param>
/// <param name="context">Serialization context</param>
/// <returns>Returns an array of bytes</returns>
public override byte[] Serialize(Windowed<T> data, SerializationContext context)
{
    if (data == null)
    {
        return null;
    }

    var bytesKey = innerSerdes.Serialize(data.Key, context);
    var bytes = WindowKeyHelper.ToStoreKeyBinary(bytesKey, data.Window.StartMs, 0);
    return bytes.Get;
}

public async Task AppendEventsToStreamAsync(string stream, IEnumerable<IEvent> events, int? expectedVersion, CancellationToken cancellationToken = default)
{
    var stopWatch = new Stopwatch();
    stopWatch.Start();

    var adaptedEvents = new List<NewStreamMessage>();
    foreach (var e in events)
    {
        var metadata = _serDes.Serialize(new EventMetadata(e.GetType(), Correlation.CorrelationManager.GetCorrelationId()));
        var data = _serDes.Serialize(e);
        var ae = new NewStreamMessage(e.EventId, GetFullTypeName(e.GetType()), data, metadata);
        adaptedEvents.Add(ae);
    }

    await _streamStore.AppendToStream(stream, expectedVersion ?? ExpectedVersion.Any, adaptedEvents.ToArray(), cancellationToken);

    stopWatch.Stop();
    _logger.LogDebug("SqlStreamStore.AppendEventsToStreamAsync for {Stream} took {ElapsedMilliseconds} ms", stream, stopWatch.ElapsedMilliseconds);
}

public void Publish<TMessage>(TMessage message) where TMessage : class
{
    var body = serdes.Serialize(message);

    var properties = channel.CreateBasicProperties();
    properties.Persistent = true; // mark the message persistent so it survives a broker restart

    channel.BasicPublish(exchange: "",
                         routingKey: QUEUE_NAME,
                         basicProperties: properties,
                         body: body);
}

private byte[] GetKeyBytes(K key)
{
    if (key != null)
    {
        if (keySerdes != null)
        {
            return keySerdes.Serialize(key, new Confluent.Kafka.SerializationContext(Confluent.Kafka.MessageComponentType.Key, pipe.TopicName));
        }

        return configuration.DefaultKeySerDes.SerializeObject(key, new Confluent.Kafka.SerializationContext(Confluent.Kafka.MessageComponentType.Key, pipe.TopicName));
    }

    return null;
}

private byte[] GetValueBytes(V value)
{
    if (value != null)
    {
        if (valueSerdes != null)
        {
            return valueSerdes.Serialize(value, new Confluent.Kafka.SerializationContext(Confluent.Kafka.MessageComponentType.Value, pipe.TopicName));
        }

        return configuration.DefaultValueSerDes.SerializeObject(value, new Confluent.Kafka.SerializationContext(Confluent.Kafka.MessageComponentType.Value, pipe.TopicName));
    }

    return null;
}

public async Task AppendEventsToStreamAsync(string stream, IEnumerable<IEvent> events, int? expectedVersion, CancellationToken cancellationToken = default)
{
    var stopWatch = new Stopwatch();
    stopWatch.Start();

    var gregsEvents = new List<EventData>();
    foreach (var e in events)
    {
        var metadata = _serDes.Serialize(new EventMetadata(e.GetType(), CorrelationManager.GetCorrelationId()));
        var data = _serDes.Serialize(e);
        var ge = new EventData(e.EventId, GetFullTypeName(e.GetType()), true, data, metadata);
        gregsEvents.Add(ge);
    }

    using (var connection = await GetConnectionAsync())
    {
        await connection.AppendToStreamAsync(stream, expectedVersion ?? ExpectedVersion.Any, gregsEvents.ToArray());
    }

    stopWatch.Stop();
    _logger.LogDebug("GetEventStoreClient.AppendEventsToStreamAsync for {Stream} took {ElapsedMilliseconds} ms", stream, stopWatch.ElapsedMilliseconds);
}

/// <summary>
/// Serialize a <see cref="Windowed{K}"/> instance to a byte array
/// </summary>
/// <param name="data">Instance to serialize</param>
/// <param name="context">Serialization context</param>
/// <returns>Returns an array of bytes</returns>
public override byte[] Serialize(Windowed<T> data, SerializationContext context)
{
    if (data == null)
    {
        return null;
    }

    using var mStream = new MemoryStream();
    using (var bufferStream = new BufferedStream(mStream))
    {
        bufferStream.Write(innerSerdes.Serialize(data.Key, context));
        // NB: BitConverter.GetBytes uses the machine's native endianness
        // (little-endian on most platforms).
        bufferStream.Write(BitConverter.GetBytes(data.Window.StartMs));
    }
    return mStream.ToArray();
}

private byte[] GetKeyBytes(string topic, K key)
{
    if (key != null)
    {
        if (keySerdes != null)
        {
            return keySerdes.Serialize(key, new Confluent.Kafka.SerializationContext(Confluent.Kafka.MessageComponentType.Key, topic));
        }
        else
        {
            return configuration.DefaultKeySerDes.SerializeObject(key, new Confluent.Kafka.SerializationContext(Confluent.Kafka.MessageComponentType.Key, topic));
        }
    }
    else
    {
        return null;
    }
}

private byte[] GetValueBytes(V value)
{
    if (value != null)
    {
        if (valueSerdes != null)
        {
            return valueSerdes.Serialize(value);
        }
        else
        {
            return configuration.DefaultValueSerDes.SerializeObject(value);
        }
    }
    else
    {
        return null;
    }
}

private byte[] GetKeyBytes(K key)
{
    if (key != null)
    {
        if (keySerdes != null)
        {
            return keySerdes.Serialize(key);
        }
        else
        {
            return configuration.DefaultKeySerDes.SerializeObject(key);
        }
    }
    else
    {
        return null;
    }
}

private byte[] GetValueBytes(string topic, V value)
{
    if (value != null)
    {
        if (valueSerdes != null)
        {
            return valueSerdes.Serialize(value, new Confluent.Kafka.SerializationContext(Confluent.Kafka.MessageComponentType.Value, topic));
        }
        else
        {
            return configuration.DefaultValueSerDes.SerializeObject(value, new Confluent.Kafka.SerializationContext(Confluent.Kafka.MessageComponentType.Value, topic));
        }
    }
    else
    {
        return null;
    }
}

public static Bytes ToStoreKeyBinary<K>(Windowed<K> timeKey, int seqnum, ISerDes<K> keySerdes)
{
    byte[] serializedKey = keySerdes.Serialize(timeKey.Key, new Confluent.Kafka.SerializationContext());
    return ToStoreKeyBinary(serializedKey, timeKey.Window.StartMs, seqnum);
}

public static Bytes ToStoreKeyBinary<K>(K key, long timestamp, int seqnum, ISerDes<K> keySerdes)
{
    byte[] serializedKey = keySerdes.Serialize(key, new Confluent.Kafka.SerializationContext());
    return ToStoreKeyBinary(serializedKey, timestamp, seqnum);
}

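// A minimal sketch of the byte-level overload the two helpers above delegate to
// (not part of the original source). It assumes the Kafka Streams window store
// key layout [key bytes][8-byte timestamp][4-byte seqnum], written big-endian,
// and that Bytes.Wrap wraps a byte array. BinaryPrimitives lives in
// System.Buffers.Binary.
private static Bytes ToStoreKeyBinary(byte[] serializedKey, long timestamp, int seqnum)
{
    var buffer = new byte[serializedKey.Length + 8 + 4];
    serializedKey.CopyTo(buffer, 0);
    BinaryPrimitives.WriteInt64BigEndian(new Span<byte>(buffer, serializedKey.Length, 8), timestamp);
    BinaryPrimitives.WriteInt32BigEndian(new Span<byte>(buffer, serializedKey.Length + 8, 4), seqnum);
    return Bytes.Wrap(buffer);
}
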
public void Send<K, V>(string topic, K key, V value, Headers headers, long timestamp, ISerDes<K> keySerializer, ISerDes<V> valueSerializer)
{
    var k = key != null ? keySerializer.Serialize(key, new SerializationContext(MessageComponentType.Key, topic, headers)) : null;
    var v = value != null ? valueSerializer.Serialize(value, new SerializationContext(MessageComponentType.Value, topic, headers)) : null;

    try
    {
        producer?.Produce(
            topic,
            new Message<byte[], byte[]> { Key = k, Value = v },
            (report) =>
            {
                if (report.Error.IsError)
                {
                    StringBuilder sb = new StringBuilder();
                    sb.AppendLine($"{logPrefix}Error encountered sending record to topic {topic} for task {id} due to:");
                    sb.AppendLine($"{logPrefix}Error Code : {report.Error.Code}");
                    sb.AppendLine($"{logPrefix}Message : {report.Error.Reason}");

                    if (IsFatalError(report))
                    {
                        sb.AppendLine($"{logPrefix}Written offsets will not be recorded and no more records will be sent since this is a fatal error.");
                        log.Error(sb.ToString());
                        throw new StreamsException(sb.ToString());
                    }
                    else if (IsRecoverableError(report))
                    {
                        sb.AppendLine($"{logPrefix}Written offsets will not be recorded and no more records will be sent since the producer is fenced, indicating the task may be migrated out.");
                        log.Error(sb.ToString());
                        throw new TaskMigratedException(sb.ToString());
                    }
                    else
                    {
                        if (configuration.ProductionExceptionHandler(report) == ExceptionHandlerResponse.FAIL)
                        {
                            sb.AppendLine($"{logPrefix}Exception handler chose to FAIL the processing; no more records will be sent.");
                            log.Error(sb.ToString());
                            throw new ProductionException(sb.ToString());
                        }
                        else
                        {
                            sb.AppendLine($"{logPrefix}Exception handler chose to CONTINUE processing in spite of this error, but written offsets will not be recorded.");
                            log.Error(sb.ToString());
                        }
                    }
                }
                else if (report.Status == PersistenceStatus.NotPersisted || report.Status == PersistenceStatus.PossiblyPersisted)
                {
                    log.Warn($"{logPrefix}Record not persisted or possibly persisted: (timestamp {report.Message.Timestamp.UnixTimestampMs}) topic=[{topic}] partition=[{report.Partition}] offset=[{report.Offset}]. Consider tuning the retry configuration, depending on your use case.");
                }
                else if (report.Status == PersistenceStatus.Persisted)
                {
                    log.Debug($"{logPrefix}Record persisted: (timestamp {report.Message.Timestamp.UnixTimestampMs}) topic=[{topic}] partition=[{report.Partition}] offset=[{report.Offset}]");
                }
            });
    }
    catch (ProduceException<byte[], byte[]> produceException)
    {
        if (IsRecoverableError(produceException.Error))
        {
            throw new TaskMigratedException($"Producer got fenced trying to send a record [{logPrefix}] : {produceException.Message}");
        }
        else
        {
            throw new StreamsException($"Error encountered trying to send record to topic {topic} [{logPrefix}] : {produceException.Message}");
        }
    }
}