// Example #1
 /// <summary>
 /// Constructor for KafkaAvroSerializer.
 /// </summary>
 /// <param name="schemaRegistryUrl">URL endpoint for the Azure Schema Registry instance.</param>
 /// <param name="credential">TokenCredential implementation for OAuth2 authentication.</param>
 /// <param name="schemaGroup">Name of the schema group within the Schema Registry.</param>
 /// <param name="autoRegisterSchemas">When true, schemas not yet known to the registry are registered automatically during serialization; defaults to false.</param>
 public KafkaAvroSerializer(string schemaRegistryUrl, TokenCredential credential, string schemaGroup, bool autoRegisterSchemas = false)
 {
     // Wrap a SchemaRegistryClient in the Avro serializer; the serializer embeds the
     // registry schema ID in each payload instead of the full schema.
     this.serializer = new SchemaRegistryAvroObjectSerializer(
         new SchemaRegistryClient(
             schemaRegistryUrl,
             credential),
         schemaGroup,
         new SchemaRegistryAvroObjectSerializerOptions()
     {
         AutoRegisterSchemas = autoRegisterSchemas
     });
 }
        public async Task CannotDeserializeUnsupportedType()
        {
            var schemaRegistryClient = CreateClient();
            var schemaGroup = TestEnvironment.SchemaRegistryGroup;

            var avroSerializer = new SchemaRegistryAvroObjectSerializer(
                schemaRegistryClient,
                schemaGroup,
                new SchemaRegistryAvroObjectSerializerOptions { AutoRegisterSchemas = true });

            using var payloadStream = new MemoryStream();

            // TimeZoneInfo has no Avro schema mapping, so deserialization is expected to reject it.
            Assert.ThrowsAsync<ArgumentException>(async () => await avroSerializer.DeserializeAsync(payloadStream, typeof(TimeZoneInfo), CancellationToken.None));
            await Task.CompletedTask;
        }
        // Sample: deserializes an Avro payload from a stream back into an Employee.
        // NOTE(review): memoryStream is empty here, so this is illustrative only —
        // a real stream would hold a previously serialized payload; confirm against
        // the companion Serialize sample.
        // The #region below is extracted verbatim into published documentation;
        // code inside it is left untouched.
        public void Deserialize()
        {
            var client = new SchemaRegistryClient(TestEnvironment.SchemaRegistryUri, TestEnvironment.Credential);

            using var memoryStream = new MemoryStream();

            #region Snippet:Deserialize
            string groupName = "<schema_group_name>";

            var serializer = new SchemaRegistryAvroObjectSerializer(client, groupName, new SchemaRegistryAvroObjectSerializerOptions {
                AutoRegisterSchemas = true
            });
            memoryStream.Position = 0;
            Employee employee = (Employee)serializer.Deserialize(memoryStream, typeof(Employee), CancellationToken.None);
            #endregion
        }
// Example #4
        // Sample: round-trip check — deserializes the bytes produced by the
        // companion Serialize sample (_memoryStreamBytes) into an Employee and
        // verifies the field values survive the trip.
        // The #region below is extracted verbatim into published documentation;
        // code inside it is left untouched.
        public void Deserialize()
        {
            using var memoryStream = new MemoryStream(_memoryStreamBytes);
            string groupName = TestEnvironment.SchemaRegistryGroup;

            #region Snippet:SchemaRegistryAvroDeserialize
            var serializer = new SchemaRegistryAvroObjectSerializer(schemaRegistryClient, groupName, new SchemaRegistryAvroObjectSerializerOptions {
                AutoRegisterSchemas = true
            });
            memoryStream.Position = 0;
            Employee employee = (Employee)serializer.Deserialize(memoryStream, typeof(Employee), CancellationToken.None);
            #endregion

            Assert.AreEqual(42, employee.Age);
            Assert.AreEqual("John Doe", employee.Name);
        }
        // Sample: serializes an Employee into a MemoryStream using the Avro
        // serializer; with AutoRegisterSchemas enabled the Employee schema is
        // registered with the Schema Registry on first use.
        // The #region below is extracted verbatim into published documentation;
        // code inside it is left untouched.
        public void Serialize()
        {
            var client = new SchemaRegistryClient(TestEnvironment.SchemaRegistryUri, TestEnvironment.Credential);

            #region Snippet:Serialize
            var employee = new Employee {
                Age = 42, Name = "John Doe"
            };
            string groupName = "<schema_group_name>";

            using var memoryStream = new MemoryStream();
            var serializer = new SchemaRegistryAvroObjectSerializer(client, groupName, new SchemaRegistryAvroObjectSerializerOptions {
                AutoRegisterSchemas = true
            });
            serializer.Serialize(memoryStream, employee, typeof(Employee), CancellationToken.None);
            #endregion
        }
// Example #6
        // Sample: serializes an Employee and stashes the resulting bytes in
        // _memoryStreamBytes so the companion Deserialize sample can read them back.
        // The #region below is extracted verbatim into published documentation;
        // code inside it is left untouched.
        public void Serialize()
        {
            string groupName = TestEnvironment.SchemaRegistryGroup;

            #region Snippet:SchemaRegistryAvroSerialize
            var employee = new Employee {
                Age = 42, Name = "John Doe"
            };

            using var memoryStream = new MemoryStream();
            var serializer = new SchemaRegistryAvroObjectSerializer(schemaRegistryClient, groupName, new SchemaRegistryAvroObjectSerializerOptions {
                AutoRegisterSchemas = true
            });
            serializer.Serialize(memoryStream, employee, typeof(Employee), CancellationToken.None);
            #endregion

            // A non-empty stream proves something was written; capture the bytes
            // for the deserialization sample.
            Assert.IsTrue(memoryStream.Length > 0);
            _memoryStreamBytes = memoryStream.ToArray();
        }
        public async Task CanSerializeAndDeserialize()
        {
            // Round-trip a strongly typed Employee through the Avro serializer.
            var schemaRegistryClient = CreateClient();
            var schemaGroup = TestEnvironment.SchemaRegistryGroup;
            var original = new Employee {
                Age = 42, Name = "Caketown"
            };

            var avroSerializer = new SchemaRegistryAvroObjectSerializer(
                schemaRegistryClient,
                schemaGroup,
                new SchemaRegistryAvroObjectSerializerOptions { AutoRegisterSchemas = true });

            using var payloadStream = new MemoryStream();
            await avroSerializer.SerializeAsync(payloadStream, original, typeof(Employee), CancellationToken.None);

            // Rewind so deserialization reads the payload from the beginning.
            payloadStream.Position = 0;
            var roundTripped = await avroSerializer.DeserializeAsync(payloadStream, typeof(Employee), CancellationToken.None) as Employee;

            Assert.IsNotNull(roundTripped);
            Assert.AreEqual("Caketown", roundTripped.Name);
            Assert.AreEqual(42, roundTripped.Age);
        }
        public async Task CanSerializeAndDeserializeGenericRecord()
        {
            // Round-trip a GenericRecord built from the Employee schema.
            var schemaRegistryClient = CreateClient();
            var schemaGroup = TestEnvironment.SchemaRegistryGroup;

            var original = new GenericRecord((RecordSchema)Employee._SCHEMA);
            original.Add("Name", "Caketown");
            original.Add("Age", 42);

            var avroSerializer = new SchemaRegistryAvroObjectSerializer(
                schemaRegistryClient,
                schemaGroup,
                new SchemaRegistryAvroObjectSerializerOptions { AutoRegisterSchemas = true });

            using var payloadStream = new MemoryStream();
            await avroSerializer.SerializeAsync(payloadStream, original, typeof(GenericRecord), CancellationToken.None);

            // Rewind so deserialization reads the payload from the beginning.
            payloadStream.Position = 0;
            var roundTripped = await avroSerializer.DeserializeAsync(payloadStream, typeof(GenericRecord), CancellationToken.None) as GenericRecord;

            Assert.IsNotNull(roundTripped);
            // Fields come back by ordinal position: 0 = Name, 1 = Age.
            Assert.AreEqual("Caketown", roundTripped.GetValue(0));
            Assert.AreEqual(42, roundTripped.GetValue(1));
        }
        /// <summary>
        ///   Runs the sample using the specified Event Hubs connection information.
        /// </summary>
        ///
        /// <param name="fullyQualifiedNamespace">The fully qualified Event Hubs namespace.  This is likely to be similar to <c>{yournamespace}.servicebus.windows.net</c>.</param>
        /// <param name="eventHubName">The name of the Event Hub, sometimes known as its path, that the sample should run against.</param>
        /// <param name="schemaGroupName">The name of the schema group in the Schema Registry.</param>
        /// <param name="tenantId">The Azure Active Directory tenant that holds the service principal.</param>
        /// <param name="clientId">The Azure Active Directory client identifier of the service principal.</param>
        /// <param name="secret">The Azure Active Directory secret of the service principal.</param>
        ///
        public async Task RunAsync(string fullyQualifiedNamespace,
                                   string eventHubName,
                                   string schemaGroupName,
                                   string tenantId,
                                   string clientId,
                                   string secret)
        {
            // Service principal authentication is a means for applications to authenticate against Azure Active
            // Directory and consume Azure services. This is advantageous compared to using a connection string for
            // authorization, as it offers a far more robust mechanism for transparently updating credentials in place,
            // without an application being explicitly aware or involved.
            //
            // For this example, we'll take advantage of a service principal to publish and receive events.  To do so, we'll make
            // use of the ClientSecretCredential from the Azure.Identity library to enable the Event Hubs clients to perform authorization
            // using a service principal.

            ClientSecretCredential credential = new ClientSecretCredential(tenantId, clientId, secret);

            // This client is used to connect to Azure Schema Registry. This will allow us to serialize Avro messages without encoding
            // the schema into the message. Instead, the schema ID from the Azure Schema Registry will be used.

            SchemaRegistryClient schemaRegistryClient = new SchemaRegistryClient(fullyQualifiedNamespace, credential);

            // Creating the Avro serializer requires the SchemaRegistryClient. Setting AutoRegisterSchemas to true allows any schema
            // provided to Azure Schema Registry to be added to the registry automatically.

            SchemaRegistryAvroObjectSerializer avroSerializer = new SchemaRegistryAvroObjectSerializer(schemaRegistryClient, schemaGroupName,
                                                                                                       new SchemaRegistryAvroObjectSerializerOptions {
                AutoRegisterSchemas = true
            });

            // This defines the Message schema a single time to be used for all of our generic records.

            RecordSchema exampleSchema = (RecordSchema)Schema.Parse(
                "{\"type\":\"record\",\"name\":\"Message\",\"namespace\":\"ExampleSchema\",\"fields\":[{\"name\":\"Message\",\"type\":\"string\"}]}");

            // To start, we'll publish a small number of events using a producer client.  To ensure that our client is appropriately closed, we'll
            // take advantage of the asynchronous dispose when we are done or when an exception is encountered.

            await using (var producerClient = new EventHubProducerClient(fullyQualifiedNamespace, eventHubName, credential))
            {
                using EventDataBatch eventBatch = await producerClient.CreateBatchAsync();

                // Each event body is the Avro-serialized form of a GenericRecord; only the schema ID
                // (not the schema itself) travels with the payload.

                GenericRecord firstEventRecord = new GenericRecord(exampleSchema);
                firstEventRecord.Add("Message", "Hello, Event Hubs!");
                EventData firstEvent = new EventData(avroSerializer.SerializeToBinaryData(firstEventRecord, typeof(GenericRecord)));
                eventBatch.TryAdd(firstEvent);

                GenericRecord secondEventRecord = new GenericRecord(exampleSchema);
                secondEventRecord.Add("Message", "The middle event is this one");
                EventData secondEvent = new EventData(avroSerializer.SerializeToBinaryData(secondEventRecord, typeof(GenericRecord)));
                eventBatch.TryAdd(secondEvent);

                GenericRecord thirdEventRecord = new GenericRecord(exampleSchema);
                thirdEventRecord.Add("Message", "Goodbye, Event Hubs!");
                EventData thirdEvent = new EventData(avroSerializer.SerializeToBinaryData(thirdEventRecord, typeof(GenericRecord)));
                eventBatch.TryAdd(thirdEvent);

                // NOTE(review): TryAdd return values are ignored here; these three small events are
                // presumed to fit in one batch — a production application should check the result.

                await producerClient.SendAsync(eventBatch);

                Console.WriteLine("The event batch has been published.");
            }

            // Now that the events have been published, we'll read back all events from the Event Hub using a consumer client.
            // It's important to note that because events are not removed from the partition when consuming, that if you're using
            // an existing Event Hub for the sample, you will see events that were published prior to running this sample as well
            // as those from the batch that we just sent.
            //
            // An Event Hub consumer is associated with a specific Event Hub and consumer group.  The consumer group is
            // a label that identifies one or more consumers as a set.  Often, consumer groups are named after the responsibility
            // of the consumer in an application, such as "Telemetry" or "OrderProcessing".  When an Event Hub is created, a default
            // consumer group is created with it, called "$Default."
            //
            // Each consumer has a unique view of the events in a partition that it reads from, meaning that events are available to all
            // consumers and are not removed from the partition when a consumer reads them.  This allows for one or more consumers to read and
            // process events from the partition at different speeds and beginning with different events without interfering with
            // one another.
            //
            // When events are published, they will continue to exist in the partition and be available for consuming until they
            // reach an age where they are older than the retention period.
            // (see: https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-faq#what-is-the-maximum-retention-period-for-events)
            //
            // In this example, we will create our consumer client using the default consumer group that is created with an Event Hub.
            // Our consumer will begin watching the partition at the very end, reading only new events that we will publish for it.

            await using (var consumerClient = new EventHubConsumerClient(EventHubConsumerClient.DefaultConsumerGroupName, fullyQualifiedNamespace, eventHubName, credential))
            {
                // To ensure that we do not wait for an indeterminate length of time, we'll stop reading after we receive three events.  For a
                // fresh Event Hub, those will be the three that we had published.  We'll also ask for cancellation after 90 seconds, just to be
                // safe.

                using CancellationTokenSource cancellationSource = new CancellationTokenSource();
                cancellationSource.CancelAfter(TimeSpan.FromSeconds(90));

                int eventsRead    = 0;
                int maximumEvents = 3;

                await foreach (PartitionEvent partitionEvent in consumerClient.ReadEventsAsync(cancellationSource.Token))
                {
                    // Deserialize the body back into a GenericRecord; ToObject delegates to the
                    // Avro serializer, which resolves the schema from the registry by its ID.
                    BinaryData    binaryData  = partitionEvent.Data.EventBody;
                    GenericRecord eventRecord = binaryData.ToObject <GenericRecord>(avroSerializer);
                    string        messageText = (string)eventRecord["Message"];
                    Console.WriteLine($"Event Read: { messageText }");
                    eventsRead++;

                    if (eventsRead >= maximumEvents)
                    {
                        break;
                    }
                }
            }

            // At this point, our clients have both passed their "using" scopes and have safely been disposed of.  We
            // have no further obligations.

            Console.WriteLine();
        }
 /// <summary>
 /// Constructor for KafkaAvroDeserializer.
 /// </summary>
 /// <param name="schemaRegistryUrl">URL endpoint for Azure Schema Registry instance</param>
 /// <param name="credential">TokenCredential implementation for OAuth2 authentication</param>
 public KafkaAvroDeserializer(string schemaRegistryUrl, TokenCredential credential)
 {
     // "$default" is passed as the schema group; deserialization resolves schemas by the
     // ID embedded in the payload, so no caller-supplied group is required here.
     this.serializer = new SchemaRegistryAvroObjectSerializer(new SchemaRegistryClient(schemaRegistryUrl, credential), "$default");
 }