// Verifies that RequireAllPartitionsForTopic reports every partition of a topic,
// including partitions whose leader is currently unavailable.
public async Task TestFetchAllPartitionsForTopic()
{
    var metadataForSingleTopic = new MetadataResponse
    {
        BrokersMeta = new[]
        {
            new BrokerMeta { Id = 1, Host = "localhost", Port = 1 },
            new BrokerMeta { Id = 2, Host = "localhost", Port = 2 },
            new BrokerMeta { Id = 3, Host = "localhost", Port = 3 }
        },
        TopicsMeta = new[]
        {
            new TopicMeta
            {
                TopicName = "topic1",
                ErrorCode = ErrorCode.NoError,
                Partitions = new[]
                {
                    new PartitionMeta { ErrorCode = ErrorCode.NoError, Id = 1, Leader = 1, Replicas = TestData.Isr1, Isr = TestData.Isr1 },
                    new PartitionMeta { ErrorCode = ErrorCode.LeaderNotAvailable, Id = 2, Leader = 2, Replicas = TestData.Isr1, Isr = TestData.Isr1 },
                    new PartitionMeta { ErrorCode = ErrorCode.NoError, Id = 3, Leader = 3, Replicas = TestData.Isr1, Isr = TestData.Isr1 },
                }
            }
        }
    };

    // Every mocked node serves the same metadata for "topic1".
    foreach (var mock in _nodeMocks)
    {
        mock.Setup(n => n.FetchMetadata(new[] { "topic1" })).Returns(Task.FromResult(metadataForSingleTopic));
    }

    _cluster.Start();

    var partitions = await _cluster.RequireAllPartitionsForTopic("topic1");

    CollectionAssert.AreEqual(new[] { 1, 2, 3 }, partitions);
    Assert.AreEqual(0, _internalErrors);
}
// Rebuilds the broker connection index and the topic index from a metadata
// response, failing fast if any partition has no elected leader.
private void UpdateInternalMetadataCache(MetadataResponse metadata)
{
    var leaderless = metadata.Topics
        .Select(t => new { topic = t.Name, partition = t.Partitions.FirstOrDefault(p => p.LeaderId == -1) })
        .FirstOrDefault(t => t.partition != null);
    if (leaderless != null)
    {
        // NOTE(review): this formats the partition object itself, not its id —
        // relies on the partition type's ToString(); confirm intended.
        throw new NoLeaderElectedForPartition(string.Format("topic:{0} partition:{1}", leaderless.topic, leaderless.partition));
    }

    // Resolve every advertised broker address to a concrete endpoint.
    var resolved = metadata.Brokers.Select(b => new
    {
        Broker = b,
        Endpoint = _kafkaOptions.KafkaConnectionFactory.Resolve(b.Address, _kafkaOptions.Log)
    });

    foreach (var entry in resolved)
    {
        // Reuse a connection from the default index when one exists for this
        // endpoint; otherwise the factory lazily creates a fresh one.
        IKafkaConnection existing;
        _defaultConnectionIndex.TryRemove(entry.Endpoint, out existing);
        Func<int, IKafkaConnection> connectionFactory =
            i => existing ?? _kafkaOptions.KafkaConnectionFactory.Create(entry.Endpoint, _kafkaOptions.ResponseTimeoutMs, _kafkaOptions.Log, _kafkaOptions.MaxRetry);
        UpsertConnectionToBrokerConnectionIndex(entry.Broker.BrokerId, entry.Endpoint, connectionFactory);
    }

    // Refresh the topic cache, stamping each entry with the update time.
    foreach (var topic in metadata.Topics)
    {
        var cached = new Tuple<Topic, DateTime>(topic, DateTime.Now);
        _topicIndex.AddOrUpdate(topic.Name, key => cached, (key, old) => cached);
    }
}
// Verifies that when the last live node dies, the cluster refreshes its
// metadata by reloading a node from the seed list.
public async Task TestAllNodesDead()
{
    var singleNodeMetadata = new MetadataResponse
    {
        BrokersMeta = new[] { new BrokerMeta { Id = 1, Host = "localhost", Port = 1 } },
        TopicsMeta = new[]
        {
            new TopicMeta
            {
                TopicName = "topic1",
                ErrorCode = ErrorCode.NoError,
                Partitions = new[]
                {
                    new PartitionMeta { ErrorCode = ErrorCode.NoError, Id = 1, Leader = 1, Isr = TestData.Isr1 },
                }
            }
        }
    };

    foreach (var mock in _nodeMocks)
    {
        mock.Setup(n => n.FetchMetadata()).Returns(Task.FromResult(singleNodeMetadata));
    }

    _cluster.Start();
    _nodeMocks[0].Verify(n => n.FetchMetadata(), Times.Once());

    // Kill the only available node and check that it is reloaded from seeds
    // in order to refresh the metadata.
    _nodeMocks[1].Raise(n => n.Dead += null, _nodeMocks[1].Object);
    await _cluster.RequireNewRoutingTable();

    _nodeMocks[0].Verify(n => n.FetchMetadata(), Times.Exactly(2));
    Assert.AreEqual(0, _internalErrors);
}
/// <summary>
/// Creates a random public mutable-data container pre-populated with its metadata
/// entry, a stub "index.html" page, and read/insert/manage permissions for the
/// app's public sign key.
/// </summary>
/// <param name="session">Authenticated session used for all MData operations.</param>
/// <returns>The <c>MDataInfo</c> describing the newly created public container.</returns>
public static async Task<MDataInfo> PreparePublicDirectory(Session session)
{
    var mDataInfo = await session.MDataInfoActions.RandomPublicAsync(16000);
    using (var signPubKey = await session.Crypto.AppPubSignKeyAsync())
    using (var entryhandle = await session.MDataEntries.NewAsync())
    using (var permissionHandle = await session.MDataPermissions.NewAsync())
    {
        var metadata = new MetadataResponse
        {
            // Fixed typo: "Pubic" -> "Public" in the container's display name.
            Name = "Random Public Container",
            Description = "Public container for web files",
            TypeTag = mDataInfo.TypeTag,
            XorName = mDataInfo.Name
        };
        var encMetaData = await session.MData.EncodeMetadata(metadata);
        var permissions = new PermissionSet { Read = true, ManagePermissions = true, Insert = true };

        // Store the encoded metadata under the well-known metadata key, plus a stub page.
        await session.MDataEntries.InsertAsync(entryhandle, Encoding.UTF8.GetBytes(AppConstants.MDataMetaDataKey).ToList(), encMetaData);
        await session.MDataEntries.InsertAsync(
            entryhandle,
            Encoding.UTF8.GetBytes("index.html").ToList(),
            Encoding.UTF8.GetBytes("<html><body>Hello</body></html>").ToList());
        await session.MDataPermissions.InsertAsync(permissionHandle, signPubKey, permissions);
        await session.MData.PutAsync(mDataInfo, permissionHandle, entryhandle);
    }
    return mDataInfo;
}
// Merges the given metadata into the topic cache, after first rejecting the
// update if any partition is still electing a leader.
private void UpdateTopicCache(MetadataResponse metadata)
{
    if (metadata == null) return;

    var electing = metadata.topic_metadata
        .SelectMany(t => t.partition_metadata
            .Where(p => p.IsElectingLeader)
            .Select(p => new TopicPartition(t.topic, p.partition_id)))
        .ToList();
    if (electing.Any())
    {
        throw GetPartitionElectionException(electing);
    }

    // Build the new cache from the current snapshot; publish whatever was
    // accumulated even if an entry update throws part-way through.
    var updated = _topicCache;
    try
    {
        foreach (var topic in metadata.topic_metadata)
        {
            updated = updated.SetItem(topic.topic, new Tuple<MetadataResponse.Topic, DateTimeOffset>(topic, DateTimeOffset.UtcNow));
        }
    }
    finally
    {
        _topicCache = updated;
    }
}
/// <summary>
/// Resolves decompiled/metadata source for the requested type by searching every
/// project in the current solution. Returns an empty response when the type is
/// not found or no document could be produced within the timeout.
/// </summary>
/// <param name="request">Carries the type name, assembly name, and lookup timeout (ms).</param>
public async Task<MetadataResponse> Metadata(MetadataRequest request)
{
    var response = new MetadataResponse();
    foreach (var project in _workspace.CurrentSolution.Projects)
    {
        var compilation = await project.GetCompilationAsync();
        var symbol = compilation.GetTypeByMetadataName(request.TypeName);
        if (symbol != null && symbol.ContainingAssembly.Name == request.AssemblyName)
        {
            // Fixed: dispose the CancellationTokenSource (it owns a timer) when the
            // lookup completes instead of leaking it.
            using (var cancellationSource = new CancellationTokenSource(TimeSpan.FromMilliseconds(request.Timeout)))
            {
                var document = await MetadataHelper.GetDocumentFromMetadata(project, symbol, cancellationSource.Token);
                if (document != null)
                {
                    var source = await document.GetTextAsync();
                    response.SourceName = MetadataHelper.GetFilePathForSymbol(project, symbol);
                    response.Source = source.ToString();
                    return response;
                }
            }
        }
    }
    return response;
}
// Extracts a pano id and/or a lat-lng point from a free-form search string.
// Returns true when the parsed location differs from the currently loaded
// metadata (different pano, or more than 3 meters away), i.e. a refetch is needed.
private bool ParseSearchParams(string searchLocation, out string searchPano, out LatLngPoint searchPoint)
{
    searchPano = null;
    searchPoint = null;

    if (!string.IsNullOrEmpty(searchLocation))
    {
        var panoMatch = GMAPS_URL_PANO_PATTERN.Match(searchLocation);
        var latLngMatch = GMAPS_URL_LATLNG_PATTERN.Match(searchLocation);

        // Prefer a pano id embedded in a Google Maps URL, then a bare pano id.
        if (panoMatch.Success && MetadataResponse.IsPano(panoMatch.Groups[1].Value))
        {
            searchPano = panoMatch.Groups[1].Value;
        }
        else if (MetadataResponse.IsPano(searchLocation))
        {
            searchPano = searchLocation;
        }

        // Likewise prefer a URL-embedded lat/lng, then a bare lat/lng string.
        if (latLngMatch.Success && LatLngPoint.TryParse(latLngMatch.Groups[1].Value, out var urlPoint))
        {
            searchPoint = urlPoint;
        }
        else if (LatLngPoint.TryParse(searchLocation, out var barePoint))
        {
            searchPoint = barePoint;
        }
    }

    return metadata == null
        || (searchPano != null && metadata.Pano_ID != searchPano)
        || (searchPoint != null && searchPoint.Distance(metadata.Location) > 3f);
}
// Refreshes the broker-connection and topic caches from a metadata response.
private void UpdateInternalMetadataCache(MetadataResponse metadata)
{
    // Resolve every broker address to a concrete endpoint.
    var endpoints = metadata.Brokers.Select(b => new
    {
        Broker = b,
        Endpoint = _kafkaOptions.KafkaConnectionFactory.Resolve(b.Address, _kafkaOptions.Log)
    });

    foreach (var entry in endpoints)
    {
        // Prefer moving an existing default-index connection over to the broker
        // index; only create a new connection when none exists for this endpoint.
        IKafkaConnection connection;
        if (!_defaultConnectionIndex.TryRemove(entry.Endpoint, out connection))
        {
            connection = _kafkaOptions.KafkaConnectionFactory.Create(entry.Endpoint, _kafkaOptions.ResponseTimeoutMs, _kafkaOptions.Log, _kafkaOptions.MaxRetry);
        }
        UpsertConnectionToBrokerConnectionIndex(entry.Broker.BrokerId, connection);
    }

    // Refresh the topic cache, stamping each entry with the update time.
    foreach (var topic in metadata.Topics)
    {
        var cached = new Tuple<Topic, DateTime>(topic, DateTime.Now);
        _topicIndex.AddOrUpdate(topic.Name, key => cached, (key, old) => cached);
    }
}
// Fetches the reverse-geocode result and the street-view image for the given
// pano metadata and pushes them to the form; failures are reported to the form
// instead of being thrown.
private static async Task GetImageDataAsync(MetadataResponse metadata)
{
    if (metadata.Status != System.Net.HttpStatusCode.OK)
    {
        form.SetError();
        return;
    }

    var geo = await gmaps.ReverseGeocodeAsync(metadata.Location)
        .ConfigureAwait(false);
    try
    {
        using var stream = await gmaps.GetImageStreamAsync(metadata.Pano_id, 20, 0, 0)
            .ConfigureAwait(false);
        var image = imageDecoder.Deserialize(stream);
        form.SetImage(metadata, geo, image);
    }
    catch (Exception exp)
    {
        form.SetError(exp);
    }
}
/// <summary>
/// Entry point: connects to the host:port given in args[0], optionally restricts
/// the query to the topic named in args[1], and prints the cluster metadata.
/// Returns 0 on success, -1 on bad usage.
/// </summary>
static int Main(string[] args)
{
    string topicName = null;
    const int correlationId = 0;
    if (args.Length < 1)
    {
        Usage();
        return -1;
    }
    string serverAddress = args[0].Split(':')[0];
    int serverPort = Convert.ToInt32(args[0].Split(':')[1]);
    if (args.Length > 1)
    {
        topicName = args[1];
    }
    var connector = new Connector(serverAddress, serverPort);
    MetadataResponse metadataResponse = connector.Metadata(correlationId, "C# KafkaMetadata util", topicName);
    // Fixed typo in console output: "Brookers" -> "Brokers".
    Console.WriteLine("Brokers:");
    foreach (var broker in metadataResponse.Brokers)
    {
        Console.WriteLine("\t" + broker);
    }
    Console.WriteLine(metadataResponse);
    return 0;
}
// Synchronizes the broker connection index and topic index with fresh metadata,
// recreating (and disposing) connections whose broker URI has changed.
private void UpdateInternalMetadataCache(MetadataResponse metadata)
{
    foreach (var broker in metadata.Brokers)
    {
        var current = broker;
        _brokerConnectionIndex.AddOrUpdate(
            broker.BrokerId,
            id => _kafkaOptions.KafkaConnectionFactory.Create(current.Address, _kafkaOptions.ResponseTimeoutMs, _kafkaOptions.Log),
            (id, existing) =>
            {
                // Keep the current connection when the broker URI is unchanged.
                if (existing.KafkaUri == current.Address)
                {
                    return existing;
                }
                // The URI moved: dispose the stale connection and open a new one.
                _kafkaOptions.Log.WarnFormat("Broker:{0} Uri changed from:{1} to {2}", current.BrokerId, existing.KafkaUri, current.Address);
                using (existing)
                {
                    return _kafkaOptions.KafkaConnectionFactory.Create(current.Address, _kafkaOptions.ResponseTimeoutMs, _kafkaOptions.Log);
                }
            });
    }

    foreach (var topic in metadata.Topics)
    {
        var cached = topic;
        _topicIndex.AddOrUpdate(topic.Name, key => cached, (key, old) => cached);
    }
}
// Empty metadata must produce an empty routing table, and the cluster must
// still be able to refresh its metadata from seed nodes afterwards.
public async Task TestEmptyResponseMetadata()
{
    var emptyMetadata = new MetadataResponse
    {
        BrokersMeta = new BrokerMeta[0],
        TopicsMeta = new TopicMeta[0]
    };
    foreach (var mock in _nodeMocks)
    {
        mock.Setup(n => n.FetchMetadata()).Returns(Task.FromResult(emptyMetadata));
    }

    _cluster.Start();
    _nodeMocks[0].Verify(n => n.FetchMetadata(), Times.Once());

    var expectedEmptyTable = new RoutingTable(new Dictionary<string, Partition[]>());
    AssertRouting(_routingTable, expectedEmptyTable);

    // Next we check that even if the routing table is empty we can still
    // refresh metadata by reloading the node from seeds.
    var populatedMetadata = new MetadataResponse
    {
        BrokersMeta = new[]
        {
            new BrokerMeta { Id = 1, Host = "localhost", Port = 1 },
            new BrokerMeta { Id = 2, Host = "localhost", Port = 2 }
        },
        TopicsMeta = new[]
        {
            new TopicMeta
            {
                TopicName = "topic2",
                ErrorCode = ErrorCode.NoError,
                Partitions = new[]
                {
                    new PartitionMeta { ErrorCode = ErrorCode.NoError, Id = 1, Leader = 1, Isr = TestData.Isr1 },
                    new PartitionMeta { ErrorCode = ErrorCode.NoError, Id = 2, Leader = 2, Isr = TestData.Isr1 },
                }
            }
        }
    };
    foreach (var mock in _nodeMocks)
    {
        mock.Setup(n => n.FetchMetadata()).Returns(Task.FromResult(populatedMetadata));
    }

    var routing = await _cluster.RequireNewRoutingTable();
    _nodeMocks[0].Verify(n => n.FetchMetadata(), Times.Exactly(2));

    var expectedPopulatedTable = new RoutingTable(new Dictionary<string, Partition[]>
    {
        {
            "topic2", new[]
            {
                new Partition { Id = 1, Leader = _nodeMocks[0].Object },
                new Partition { Id = 2, Leader = _nodeMocks[1].Object }
            }
        }
    });
    AssertRouting(routing, expectedPopulatedTable);
    Assert.AreEqual(0, _internalErrors);
}
// Round-trips a MetadataResponse through Serialize/Deserialize and checks that
// broker and topic fields survive intact.
public void TestDeserializeMetadataResponse()
{
    var meta = new MetadataResponse
    {
        BrokersMeta = new[]
        {
            new BrokerMeta { Host = "Host", Id = 100, Port = 18909 },
            new BrokerMeta { Host = "tsoH", Id = 28, Port = 1 }
        },
        TopicsMeta = new[]
        {
            new TopicMeta
            {
                ErrorCode = ErrorCode.NoError,
                TopicName = "tropique",
                Partitions = new[]
                {
                    new PartitionMeta
                    {
                        ErrorCode = ErrorCode.LeaderNotAvailable,
                        Id = 0,
                        Leader = -1,
                        Replicas = new[] { 100 },
                        Isr = new int[0]
                    }
                }
            }
        }
    };

    using (var serialized = new ReusableMemoryStream(null))
    {
        meta.Serialize(serialized, null);
        Assert.AreEqual(74, serialized.Length); // TODO: better check that serialization is correct?

        serialized.Position = 0;
        var roundTripped = MetadataResponse.Deserialize(serialized, null);

        Assert.AreEqual(meta.BrokersMeta.Length, roundTripped.BrokersMeta.Length);
        Assert.AreEqual(meta.TopicsMeta.Length, roundTripped.TopicsMeta.Length);
        for (var i = 0; i < meta.BrokersMeta.Length; ++i)
        {
            Assert.AreEqual(meta.BrokersMeta[i].Host, roundTripped.BrokersMeta[i].Host);
            Assert.AreEqual(meta.BrokersMeta[i].Id, roundTripped.BrokersMeta[i].Id);
            Assert.AreEqual(meta.BrokersMeta[i].Port, roundTripped.BrokersMeta[i].Port);
        }
        Assert.AreEqual("tropique", roundTripped.TopicsMeta[0].TopicName);
        Assert.AreEqual(ErrorCode.NoError, roundTripped.TopicsMeta[0].ErrorCode);
        Assert.AreEqual(ErrorCode.LeaderNotAvailable, roundTripped.TopicsMeta[0].Partitions[0].ErrorCode);
        Assert.AreEqual(0, roundTripped.TopicsMeta[0].Partitions[0].Id);
        Assert.AreEqual(-1, roundTripped.TopicsMeta[0].Partitions[0].Leader);
    }
}
// Lazily yields one validation result per broker, followed by one per topic,
// in the order they appear in the response.
private IEnumerable<MetadataValidationResult> ValidateResponse(MetadataResponse metadata)
{
    foreach (var brokerMeta in metadata.Brokers)
    {
        yield return ValidateBroker(brokerMeta);
    }

    foreach (var topicMeta in metadata.Topics)
    {
        yield return ValidateTopic(topicMeta);
    }
}
// Returns the requested metadata when available, otherwise propagates the
// upstream status code and error message as the HTTP response.
public async Task<IHttpActionResult> GetMetadata(MetadataRequest request)
{
    MetadataResponse response = await request.GetResponse();

    if (response.Metadata != null)
    {
        return Ok(response.Metadata);
    }

    var failure = new HttpResponseMessage(response.StatusCode)
    {
        ReasonPhrase = response.ErrorMessage
    };
    return ResponseMessage(failure);
}
/// <summary>
/// Refreshes the endpoint->connection and brokerId->connection caches from metadata.
/// Connections whose broker endpoint changed are replaced and the stale ones disposed;
/// both cache fields are republished even if an update throws part-way through.
/// </summary>
private void UpdateConnectionCache(MetadataResponse metadata)
{
    if (metadata == null) { return; }

    // Work on local snapshots of the immutable maps; publish them in the finally block.
    var allConnections = _allConnections;
    var brokerConnections = _brokerConnections;
    var connectionsToDispose = ImmutableList<IConnection>.Empty;
    try
    {
        foreach (var broker in metadata.Brokers)
        {
            var endpoint = _connectionFactory.Resolve(new Uri($"http://{broker.Host}:{broker.Port}"), Log);

            IConnection connection;
            if (brokerConnections.TryGetValue(broker.BrokerId, out connection))
            {
                if (connection.Endpoint.Equals(endpoint))
                {
                    // existing connection, nothing to change
                }
                else
                {
                    // ReSharper disable once AccessToModifiedClosure
                    Log.Warn(() => LogEvent.Create($"Broker {broker.BrokerId} Uri changed from {connection.Endpoint} to {endpoint}"));

                    // A connection changed for a broker, so close the old connection and create a new one
                    connectionsToDispose = connectionsToDispose.Add(connection);
                    connection = _connectionFactory.Create(endpoint, ConnectionConfiguration, Log);
                    // important that we create it here rather than set to null or we'll get it again from allConnections
                }
            }

            // No broker-specific connection yet: fall back to an existing connection
            // for this endpoint, else create one.
            if (connection == null && !allConnections.TryGetValue(endpoint, out connection))
            {
                connection = _connectionFactory.Create(endpoint, ConnectionConfiguration, Log);
            }

            allConnections = allConnections.SetItem(endpoint, connection);
            brokerConnections = brokerConnections.SetItem(broker.BrokerId, connection);
        }
    }
    finally
    {
        // Publish whatever progress was made and dispose superseded connections.
        _allConnections = allConnections;
        _brokerConnections = brokerConnections;
        DisposeConnections(connectionsToDispose);
    }
}
// Updates the UI controls with the given metadata/image/address, marshaling
// onto the UI thread when invoked from another thread.
private void SetControls(MetadataResponse metadata, Image image, string address)
{
    if (InvokeRequired)
    {
        _ = Invoke(new Action<MetadataResponse, Image, string>(SetControls), metadata, image, address);
        return;
    }

    locationTextBox.Text = address ?? string.Empty;
    panoTextbox.Text = metadata.Pano_ID;
    latLngTextbox.Text = metadata.Location.ToString(CultureInfo.InvariantCulture);

    // Release the previous image before swapping in the new one.
    cubeMapPictureBox.Image?.Dispose();
    cubeMapPictureBox.Image = image;
}
// Builds a fresh routing table from a metadata response, keeping only topics
// and partitions that are usable by clients and have a known leader.
private void ResponseToRoutingTable(MetadataResponse response)
{
    var routes = new Dictionary<string, Partition[]>();
    foreach (var topicMeta in response.TopicsMeta)
    {
        if (!Error.IsPartitionOkForClients(topicMeta.ErrorCode))
        {
            continue;
        }

        routes[topicMeta.TopicName] = topicMeta.Partitions
            .Where(p => Error.IsPartitionOkForClients(p.ErrorCode) && p.Leader >= 0)
            .Select(p => new Partition
            {
                Id = p.Id,
                Leader = _nodesById[p.Leader],
                NbIsr = p.Isr.Length
            })
            .OrderBy(p => p)
            .ToArray();
    }
    _routingTable = new RoutingTable(routes);
}
// Asking for an exact partition route must throw when the cached metadata
// contains topics but no brokers.
public async Task SelectExactPartitionShouldThrowWhenBrokerCollectionIsEmpty()
{
    var metadataResponse = await BrokerRouterProxy.CreateMetadataResponseWithMultipleBrokers();
    // Strip the brokers, keeping only the topics.
    metadataResponse = new MetadataResponse(topics: metadataResponse.Topics);

    var routerProxy = new BrokerRouterProxy();
#pragma warning disable 1998
    routerProxy.Connection1.MetadataResponseFunction = async () => metadataResponse;
#pragma warning restore 1998
    var router = routerProxy.Create();

    await router.GetTopicMetadataAsync(TestTopic, CancellationToken.None);

    Assert.Throws<CachedMetadataException>(() => router.GetBrokerRoute(TestTopic, 1));
}
// Requesting a topic connection for an exact partition must throw when the
// cached metadata contains topics but no servers.
public async Task SelectExactPartitionShouldThrowWhenServerCollectionIsEmpty()
{
    var metadataResponse = await RoutingScenario.DefaultMetadataResponse();
    // Keep the topics but drop every server from the response.
    metadataResponse = new MetadataResponse(topics: metadataResponse.topic_metadata);

    var scenario = new RoutingScenario();
#pragma warning disable 1998
    scenario.Connection1.Add(ApiKey.Metadata, async _ => metadataResponse);
#pragma warning restore 1998
    var router = scenario.CreateRouter();
    var testTopic = RoutingScenario.TestTopic;

    await router.GetTopicMetadataAsync(testTopic, CancellationToken.None);

    Assert.Throws<RoutingException>(() => router.GetTopicConnection(testTopic, 1));
}
// Sends one message to the given topic/partition. On first use of a topic,
// fetches (and possibly auto-creates) its metadata and remembers a default
// partition for it. Returns the Kafka error code of the produce request.
public short Produce(string topicName, int partitionId, string data)
{
    try
    {
        if (!topicPartitionDictionary.ContainsKey(topicName))
        {
            // Check if the topic exists and on what partition. This call will
            // automatically create the topic if the broker is set up to auto-create.
            MetadataResponse metadataResponse = connector.Metadata(DefaultCorrelationId, clientId, topicName);
            short errorCode = metadataResponse.TopicErrorCode(topicName);

            if (errorCode == (short)KafkaErrorCode.NoError)
            {
                topicPartitionDictionary.Add(topicName, metadataResponse.Partitions(topicName)[0]);
            }
            else if (errorCode == (short)KafkaErrorCode.LeaderNotAvailable)
            {
                // The topic may have just been auto-created; re-fetch the metadata once.
                metadataResponse = connector.Metadata(DefaultCorrelationId, clientId, topicName);
                errorCode = metadataResponse.TopicErrorCode(topicName);
                if (errorCode != (short)KafkaErrorCode.NoError)
                {
                    return errorCode;
                }
                topicPartitionDictionary.Add(topicName, metadataResponse.Partitions(topicName)[0]);
            }
            else
            {
                return errorCode;
            }
        }

        if (partitionId == -1)
        {
            partitionId = topicPartitionDictionary[topicName];
        }

        var message = Encoding.UTF8.GetBytes(data);
        ProduceResponse response = connector.Produce(DefaultCorrelationId, clientId, 500, topicName, partitionId, message);
        // NOTE(review): the error is read for partition 0 regardless of the
        // partition produced to — confirm this is intended.
        return response.ErrorCode(topicName, 0);
    }
    catch (SocketException ex)
    {
        throw new KafkaException(ex.Message);
    }
}
/// <summary>
/// Verifies that Add-AzureRMEnvironment, given only an ARM endpoint, discovers the
/// remaining endpoints via the metadata service and registers the environment with them.
/// </summary>
public void AddsAzureEnvironmentUsingARMEndpoint()
{
    Mock<ICommandRuntime> commandRuntimeMock = new Mock<ICommandRuntime>();
    SetupConfirmation(commandRuntimeMock);
    var cmdlet = new AddAzureRMEnvironmentCommand()
    {
        CommandRuntime = commandRuntimeMock.Object,
        Name = "Stack",
        ARMEndpoint = "https://management.local.azurestack.external/"
    };

    // Stub the metadata-endpoint discovery so no network call is made.
    Mock<EnvironmentHelper> envHelperMock = new Mock<EnvironmentHelper>();
    MetadataResponse metadataEndpoints = new MetadataResponse
    {
        GalleryEndpoint = "https://galleryendpoint",
        GraphEndpoint = "https://graphendpoint",
        PortalEndpoint = "https://portalendpoint",
        authentication = new Authentication
        {
            Audiences = new[] { "audience1", "audience2" },
            LoginEndpoint = "https://loginendpoint"
        }
    };
    envHelperMock.Setup(f => f.RetrieveMetaDataEndpoints(It.IsAny<string>())).ReturnsAsync(metadataEndpoints);
    envHelperMock.Setup(f => f.RetrieveDomain(It.IsAny<string>())).Returns("domain");
    cmdlet.EnvHelper = envHelperMock.Object;

    // Run the cmdlet lifecycle for the "ARMEndpoint" parameter set.
    cmdlet.SetParameterSet("ARMEndpoint");
    cmdlet.InvokeBeginProcessing();
    cmdlet.ExecuteCmdlet();
    cmdlet.InvokeEndProcessing();

    commandRuntimeMock.Verify(f => f.WriteObject(It.IsAny<PSAzureEnvironment>()), Times.Once());
    var profileClient = new RMProfileClient(AzureRmProfileProvider.Instance.GetProfile<AzureRmProfile>());
    IAzureEnvironment env = AzureRmProfileProvider.Instance.Profile.GetEnvironment("Stack");

    // The registered environment must carry the discovered endpoints.
    Assert.Equal(env.Name, cmdlet.Name);
    Assert.Equal(cmdlet.ARMEndpoint, env.GetEndpoint(AzureEnvironment.Endpoint.ResourceManager));
    Assert.Equal("https://loginendpoint/", env.GetEndpoint(AzureEnvironment.Endpoint.ActiveDirectory));
    Assert.Equal("audience1", env.GetEndpoint(AzureEnvironment.Endpoint.ActiveDirectoryServiceEndpointResourceId));
    Assert.Equal("https://graphendpoint", env.GetEndpoint(AzureEnvironment.Endpoint.GraphEndpointResourceId));
    envHelperMock.Verify(f => f.RetrieveDomain(It.IsAny<string>()), Times.Once);
    envHelperMock.Verify(f => f.RetrieveMetaDataEndpoints(It.IsAny<string>()), Times.Once);
}
/// <summary>
/// Given a collection of server connections, query for the topic metadata.
/// </summary>
/// <param name="connections">The server connections to query. Will cycle through the collection, starting at zero until a response is received.</param>
/// <param name="topics">The collection of topics to get metadata for.</param>
/// <returns>MetadataResponse validated to be complete, or null when no topics were requested or no response was received.</returns>
public async Task<MetadataResponse> Get(IKafkaConnection[] connections, IEnumerable<string> topics)
{
    var request = new MetadataRequest { Topics = topics.ToList() };
    if (request.Topics.Count <= 0)
    {
        return null;
    }

    var maxRetryAttempt = 2;
    var performRetry = false;
    var retryAttempt = 0;
    MetadataResponse metadataResponse = null;

    do
    {
        performRetry = false;
        metadataResponse = await GetMetadataResponse(connections, request);
        if (metadataResponse == null)
        {
            return null;
        }

        foreach (var validation in ValidateResponse(metadataResponse))
        {
            switch (validation.Status)
            {
                case ValidationResult.Retry:
                    performRetry = true;
                    // Fixed: pass the message as a format ARGUMENT, not as the
                    // format string — a message containing '{' would otherwise
                    // throw a FormatException inside the logger.
                    _log.WarnFormat("{0}", validation.Message);
                    break;

                case ValidationResult.Error:
                    throw validation.Exception;
            }
        }

        await BackoffOnRetry(++retryAttempt, performRetry).ConfigureAwait(false);
    } while (retryAttempt < maxRetryAttempt && _interrupted == false && performRetry);

    return metadataResponse;
}
/// <summary>
/// Synchronizes the node bookkeeping (_nodesByHostPort, _nodes, _nodesById) with a
/// metadata response: registers newly seen brokers, refreshes broker ids, and
/// stops/removes nodes that are no longer part of the cluster.
/// </summary>
private void ResponseToTopology(MetadataResponse response)
{
    // New stuff
    foreach (var bm in response.BrokersMeta)
    {
        var hostPort = BuildKey(bm.Host, bm.Port);
        // Track which host:port keys and broker ids appear in this response,
        // so the cleanup phase below can remove everything else.
        _tmpNewNodes.Add(hostPort);
        _tmpNewNodeIds.Add(bm.Id);
        INode node;
        if (!_nodesByHostPort.TryGetValue(hostPort, out node))
        {
            node = _nodeFactory(bm.Host, bm.Port);
            _nodesByHostPort[hostPort] = node;
        }
        if (!_nodes.ContainsKey(node))
        {
            _nodes[node] = bm;
        }
        // The broker id may change for an existing host:port; keep it current.
        _nodes[node].Id = bm.Id;
        _nodesById[bm.Id] = node;
    }

    // Clean old
    var idToClean = _nodesById.Keys.Where(id => !_tmpNewNodeIds.Contains(id)).ToList();
    foreach (var id in idToClean)
    {
        _nodesById.Remove(id);
    }
    var hostToClean = _nodesByHostPort.Keys.Where(host => !_tmpNewNodes.Contains(host)).ToList();
    foreach (var host in hostToClean)
    {
        // Fully retire nodes that disappeared from the cluster.
        var node = _nodesByHostPort[host];
        _nodesByHostPort.Remove(host);
        _nodes.Remove(node);
        node.Stop();
    }

    // Reset the scratch sets for the next response.
    _tmpNewNodes.Clear();
    _tmpNewNodeIds.Clear();
}
// Validates its inputs, picks the most detailed (longest) formatted address
// from the geocode results, and forwards everything to the UI controls.
public void SetImage(MetadataResponse metadata, GeocodingResponse geocode, Image image)
{
    if (metadata is null)
    {
        throw new ArgumentNullException(nameof(metadata));
    }

    if (geocode is null)
    {
        throw new ArgumentNullException(nameof(geocode));
    }

    var address = geocode.Results
        .OrderByDescending(result => result.Formatted_Address.Length)
        .Select(result => result.Formatted_Address)
        .FirstOrDefault();

    SetControls(metadata, image, address);
}
/// <summary>
/// Connect to the cluster. Connects to all seed addresses, and fetches initial metadata for the cluster.
/// </summary>
/// <returns></returns>
public async Task ConnectAsync()
{
    await Scheduler.Ask(() =>
    {
        // we cannot reconnect if we have closed already.
        if (_state == ClusterState.Closed)
        {
            throw new BrokerException("Cluster is already closed. Cannot reconnect. Please create a new Cluster.");
        }

        // Already connected (or connecting): nothing to do.
        if (_state != ClusterState.Disconnected)
        {
            return (false);
        }

        _log.Debug("Connecting");

        // Seed brokers get the sentinel NodeId -99 until real ids arrive with metadata.
        var initBrokers = Connection.ParseAddress(_seedBrokers).
                          Select(seed => new BrokerMeta
        {
            Host = seed.Item1,
            Port = seed.Item2,
            NodeId = -99
        }).ToArray();
        EtwTrace.Log.ClusterStarting(_id);

        // Seed an empty-topic metadata snapshot so the broker list is populated
        // before the first real metadata fetch.
        var initMeta = new MetadataResponse { Topics = new TopicMeta[0], Brokers = initBrokers };
        MergeTopicMeta(initMeta);
        _state = ClusterState.Connected;

        // start up a recovery monitor to watch for recovered partitions
        _partitionRecoveryMonitor = new PartitionRecoveryMonitor(this, _protocol, _cancel.Token);
        // Merge metadata that recovery monitor discovers
        _partitionRecoveryMonitor.NewMetadataEvents.Subscribe(MergeTopicMeta, ex => _log.Error(ex, "Error thrown by RecoveryMonitor.NewMetadataEvents!"));
        _log.Debug("Connected");
        EtwTrace.Log.ClusterStarted(_id);
        return (true);
    }).ConfigureAwait(false);
}
// Round-trip (encode/decode) check of MetadataResponse across protocol
// versions, broker counts, topic counts, partition counts, and error codes.
public void MetadataResponse(
    [Values(0, 1, 2)] short version,
    [Values(1, 15)] int brokersPerRequest,
    [Values("testTopic")] string topicName,
    [Values(1, 10)] int topicsPerRequest,
    [Values(1, 5)] int partitionsPerTopic,
    [Values(
         ErrorCode.NONE,
         ErrorCode.UNKNOWN_TOPIC_OR_PARTITION
     )] ErrorCode errorCode)
{
    var brokers = new List<KafkaClient.Protocol.Server>();
    for (var brokerId = 0; brokerId < brokersPerRequest; brokerId++)
    {
        // Rack information only exists from version 1 onwards.
        var rack = version >= 1 ? "Rack" + brokerId : null;
        brokers.Add(new KafkaClient.Protocol.Server(brokerId, "broker-" + brokerId, 9092 + brokerId, rack));
    }

    var topics = new List<MetadataResponse.Topic>();
    for (var t = 0; t < topicsPerRequest; t++)
    {
        var partitions = new List<MetadataResponse.Partition>();
        for (var partitionId = 0; partitionId < partitionsPerTopic; partitionId++)
        {
            var leader = _randomizer.Next(0, brokersPerRequest - 1);
            var replica = 0;
            var replicas = _randomizer.Next(0, brokersPerRequest - 1).Repeat(() => replica++);
            var isr = 0;
            var isrs = _randomizer.Next(0, replica).Repeat(() => isr++);
            partitions.Add(new MetadataResponse.Partition(partitionId, leader, errorCode, replicas, isrs));
        }
        topics.Add(new MetadataResponse.Topic(topicName + t, errorCode, partitions, version >= 1 ? topicsPerRequest % 2 == 0 : (bool?)null));
    }

    var response = new MetadataResponse(brokers, topics, version >= 1 ? brokersPerRequest : (int?)null, version >= 2 ? $"cluster-{version}" : null);

    response.AssertCanEncodeDecodeResponse(version);
}
// Exercises MetadataResponse encode/decode round-trips across protocol
// versions, broker/topic/partition counts, and error codes.
public void MetadataResponse(
    [Values(0, 1, 2)] short version,
    [Values(1, 15)] int brokersPerRequest,
    [Values("test", "a really long name, with spaces and punctuation!")] string topicName,
    [Values(1, 10)] int topicsPerRequest,
    [Values(1, 5)] int partitionsPerTopic,
    [Values(
         ErrorResponseCode.None,
         ErrorResponseCode.UnknownTopicOrPartition
     )] ErrorResponseCode errorCode)
{
    var brokers = new List<Broker>();
    for (var brokerId = 0; brokerId < brokersPerRequest; brokerId++)
    {
        // Rack information only exists from version 1 onwards.
        var rack = version >= 1 ? "Rack" + brokerId : null;
        brokers.Add(new Broker(brokerId, "broker-" + brokerId, 9092 + brokerId, rack));
    }

    var topics = new List<MetadataResponse.Topic>();
    for (var t = 0; t < topicsPerRequest; t++)
    {
        var partitions = new List<MetadataResponse.Partition>();
        for (var partitionId = 0; partitionId < partitionsPerTopic; partitionId++)
        {
            var leader = _randomizer.Next(0, brokersPerRequest - 1);
            var replica = 0;
            var replicas = _randomizer.Next(0, brokersPerRequest - 1).Repeat(() => replica++);
            var isr = 0;
            var isrs = _randomizer.Next(0, replica).Repeat(() => isr++);
            partitions.Add(new MetadataResponse.Partition(partitionId, leader, errorCode, replicas, isrs));
        }
        topics.Add(new MetadataResponse.Topic(topicName + t, errorCode, partitions, version >= 1 ? topicsPerRequest % 2 == 0 : (bool?)null));
    }

    var response = new MetadataResponse(brokers, topics, version >= 1 ? brokersPerRequest : (int?)null, version >= 2 ? $"cluster-{version}" : null);

    response.AssertCanEncodeDecodeResponse(version);
}
/// <summary>
/// Fetches metadata for a topic over a fresh connection.
/// </summary>
/// <param name="correlationId">Id used by the client to identify this transaction. Returned in the response.</param>
/// <param name="clientId">Name to identify the client. Used in server logs.</param>
/// <param name="topicName">Name of the requested topic. If topic name is null metadata for all topics will be returned.</param>
/// <returns>The parsed metadata response, or null when the broker returned an empty payload.</returns>
public MetadataResponse Metadata(int correlationId, string clientId, String topicName)
{
    MetadataRequest request = new MetadataRequest(correlationId, clientId, topicName);
    using (var connection = new KafkaConnection(server, port))
    {
        connection.Write(request.GetRequestBytes().ToArray());

        // The first four bytes carry the big-endian length of the payload.
        int dataLength = BitConverter.ToInt32(BitWorks.ReverseBytes(connection.Read(4)), 0);
        if (dataLength == 0)
        {
            return null;
        }

        byte[] payload = connection.Read(dataLength);
        var response = new MetadataResponse();
        response.Parse(payload, 0);
        return response;
    }
}
/// <summary>
/// Creates a random private mutable-data container with full permissions for the
/// app's sign key, a metadata entry, and one random encrypted key/value entry.
/// </summary>
/// <returns>A <c>ShareMDataModel</c> carrying the container's type tag and XOR name.</returns>
public async Task<ShareMDataModel> AddRandomPrivateMDataAsync()
{
    var typeTag = 150001;
    var mdInfo = await _session.MDataInfoActions.RandomPrivateAsync((ulong)typeTag);
    var metadata = new MetadataResponse
    {
        Name = "Random Private Mdata",
        Description = "Random Description",
        TypeTag = mdInfo.TypeTag,
        XorName = mdInfo.Name
    };
    var actKey = Utilities.GetRandomString(10).ToUtfBytes();
    var actValue = Utilities.GetRandomString(10).ToUtfBytes();
    using (var userSignKeyHandle = await _session.Crypto.AppPubSignKeyAsync())
    using (var permissionsHandle = await _session.MDataPermissions.NewAsync())
    {
        var permissionSet = new PermissionSet { Read = true, Insert = true, Delete = true, Update = true, ManagePermissions = true };
        await _session.MDataPermissions.InsertAsync(permissionsHandle, userSignKeyHandle, permissionSet);
        using (var entriesHandle = await _session.MDataEntries.NewAsync())
        {
            var encMetaData = await _session.MData.EncodeMetadata(metadata);
            await _session.MDataEntries.InsertAsync(entriesHandle, Encoding.UTF8.GetBytes(SafeApp.Utilities.AppConstants.MDataMetaDataKey).ToList(), encMetaData);

            var key = await _session.MDataInfoActions.EncryptEntryKeyAsync(mdInfo, actKey);
            // Fixed: the entry VALUE must be encrypted with EncryptEntryValueAsync;
            // the original encrypted it with EncryptEntryKeyAsync, producing a value
            // that cannot be decrypted as an entry value for this private MData.
            var value = await _session.MDataInfoActions.EncryptEntryValueAsync(mdInfo, actValue);
            await _session.MDataEntries.InsertAsync(entriesHandle, key, value);
            await _session.MData.PutAsync(mdInfo, permissionsHandle, entriesHandle);
        }
    }
    return new ShareMDataModel((ulong)typeTag, mdInfo.Name);
}
// Handles an "upload aggregate-stat metadata" request: checks the caller's
// permission, extracts the uploaded file from the multipart body, forwards it
// to the OpsConsole service, and writes the JSON outcome into the response body.
public void UploadAggStatMetadata(IDictionary<string, object> context)
{
    var user = ((IAaaUser)context["AS.Services.ThomsonReuters.Eikon.Toolkit.Interfaces.IAaaUser"]);
    string body = (string)context["AS.RequestBody"];
    HttpFormFileContentPaarser httpParser = new HttpFormFileContentPaarser(body, "metadata");

    // Product is currently pinned to "est" rather than taken from the upload.
    var product = new List<string> { "est" };

    MetadataResponse response;
    if (!Permission.IsAllowToUploadMetadata(user, product))
    {
        response = new MetadataResponse { success = false, description = "Failed - no permission to upload the metadata" };
    }
    else
    {
        using (var opsConsoleSvc = new OpsConsoleServiceClient(RouterBindings.Local, RouterAddresses.Local.RequestReply))
        {
            // Normalize the uploaded content before forwarding it.
            string data = httpParser.FileContents.Replace("\r\n", string.Empty).Replace("\t", string.Empty).Trim();
            if (string.IsNullOrEmpty(data))
            {
                response = new MetadataResponse { success = false, description = "Failed - no metadata content" };
            }
            else
            {
                try
                {
                    var req = JsonConvert.DeserializeObject<SetAggStatMetadataRequest>(data);
                    req.product = "est";
                    response = opsConsoleSvc.SetAggStatMetadata(req);
                }
                catch (Exception ex)
                {
                    response = new MetadataResponse { success = false, description = "Failed - " + ex.Message };
                }
            }
        }
    }

    context["AS.ResponseBody"] = JsonConvert.SerializeObject(response);
}
// Produces a lazy sequence of validation results: brokers first, then topics.
private IEnumerable<MetadataValidationResult> ValidateResponse(MetadataResponse metadata)
{
    foreach (var serverMeta in metadata.Brokers)
    {
        yield return ValidateBroker(serverMeta);
    }

    foreach (var topicInfo in metadata.Topics)
    {
        yield return ValidateTopic(topicInfo);
    }
}
/// <summary>
/// Handles a metadata upload: verifies the caller's permission, validates the uploaded
/// file content (non-empty, at most 4MB, JSON-object shaped), forwards it to the
/// OpsConsole service, and writes the JSON-serialized result into the response body.
/// </summary>
/// <param name="context">Pipeline context; reads "AS.RequestBody" and the authenticated
/// IAaaUser, writes "AS.ResponseBody".</param>
public void UploadMetadata(IDictionary<string, object> context)
{
    var user = (IAaaUser)context["AS.Services.ThomsonReuters.Eikon.Toolkit.Interfaces.IAaaUser"];
    MetadataResponse response = null;
    string body = (string)context["AS.RequestBody"];
    HttpFormFileContentPaarser httpParser = new HttpFormFileContentPaarser(body, "metadata");
    //var product = string.IsNullOrEmpty(httpParser.Product) ? null : new List<string>() { httpParser.Product };
    // NOTE(review): product is hard-coded to "est"; the value parsed from the upload is ignored.
    var product = new List<string> { "est" };
    if (!Permission.IsAllowToUploadMetadata(user, product))
    {
        response = new MetadataResponse { success = false, description = "Failed - no permission to upload the metadata" };
    }
    else
    {
        using (var opsConsoleSvc = new OpsConsoleServiceClient(RouterBindings.Local, RouterAddresses.Local.RequestReply))
        {
            // Strip formatting noise before validating.
            string data = httpParser.FileContents.Replace("\r\n", string.Empty).Replace("\t", string.Empty).Trim();
            if (string.IsNullOrEmpty(data))
            {
                response = new MetadataResponse { success = false, description = "Failed - no metadata content" };
            }
            else if (data.Length > 4194304) // limit to 4MB
            {
                response = new MetadataResponse { success = false, description = "Failed - the file is too big" };
            }
            else if (data[0] != '{' || data[data.Length - 1] != '}')
            {
                // Cheap sanity check that the payload at least looks like a JSON object.
                response = new MetadataResponse { success = false, description = "Failed - the content is invalid" };
            }
            else
            {
                var setMreq = new SetMetadataRequest();
                setMreq.metadata = data;
                //setMreq.product = httpParser.Product;
                setMreq.product = "est";
                response = opsConsoleSvc.SetMetadataEx(setMreq);
                if (response == null)
                {
                    response = new MetadataResponse { success = false, description = "Failed - no response from the OpsConsole service" };
                }
            }
        }
    }
    // Serialize directly into the response body (removed the dead string.Empty
    // initialization of an intermediate variable that was immediately overwritten);
    // object-initializer style normalized to omit redundant parentheses throughout.
    context["AS.ResponseBody"] = JsonConvert.SerializeObject(response);
}
/// <summary>
/// Refreshes the broker connection index and the topic index from a metadata response.
/// Fails fast if any partition currently has no elected leader (LeaderId == -1),
/// since requests could not be routed for that partition.
/// </summary>
/// <param name="metadata">Metadata response returned by a Kafka broker.</param>
/// <exception cref="NoLeaderElectedForPartition">A partition reported LeaderId -1.</exception>
private void UpdateInternalMetadataCache(MetadataResponse metadata)
{
    var noLeaderElectedForPartition = metadata.Topics
        .Select(x => new { topic = x.Name, partition = x.Partitions.FirstOrDefault(i => i.LeaderId == -1) })
        .FirstOrDefault(x => x.partition != null);

    if (noLeaderElectedForPartition != null)
    {
        // BUG FIX: formatting the Partition object itself printed its type name via
        // the default ToString; report the partition id instead.
        throw new NoLeaderElectedForPartition(string.Format("topic:{0} partition:{1}",
            noLeaderElectedForPartition.topic, noLeaderElectedForPartition.partition.PartitionId));
    }

    // Resolve each broker's address to an endpoint up front.
    var brokerEndpoints = metadata.Brokers.Select(broker => new
    {
        Broker = broker,
        Endpoint = _kafkaOptions.KafkaConnectionFactory.Resolve(broker.Address, _kafkaOptions.Log)
    });

    foreach (var broker in brokerEndpoints)
    {
        // If the connection is already in the default connection index, remove it and
        // reuse it under the broker index instead of opening a second connection.
        IKafkaConnection connection;
        _defaultConnectionIndex.TryRemove(broker.Endpoint, out connection);
        Func<int, IKafkaConnection> connectionFactory = i => connection
            ?? _kafkaOptions.KafkaConnectionFactory.Create(broker.Endpoint, _kafkaOptions.ResponseTimeoutMs, _kafkaOptions.Log, _kafkaOptions.MaxRetry);
        UpsertConnectionToBrokerConnectionIndex(broker.Broker.BrokerId, broker.Endpoint, connectionFactory);
    }

    foreach (var topic in metadata.Topics)
    {
        // Stamp the topic with the refresh time so staleness can be detected later.
        var localTopic = new Tuple<Topic, DateTime>(topic, DateTime.Now);
        _topicIndex.AddOrUpdate(topic.Name, s => localTopic, (s, existing) => localTopic);
    }
}
// Refreshes the broker connection index and the topic index from a metadata response.
// NOTE(review): the AddOrUpdate factories create connections as a side effect; under
// contention ConcurrentDictionary may invoke a factory more than once, which could
// leak a connection — confirm whether that is acceptable here.
private void UpdateInternalMetadataCache(MetadataResponse metadata)
{
    foreach (var broker in metadata.Brokers)
    {
        // Capture the loop variable before handing it to the lambdas (pre-C#5 foreach semantics).
        var localBroker = broker;
        _brokerConnectionIndex.AddOrUpdate(broker.BrokerId,
            i =>
            {
                // No connection known for this broker id yet: create one.
                return _kafkaOptions.KafkaConnectionFactory.Create(localBroker.Address, _kafkaOptions.ResponseTimeoutMs, _kafkaOptions.Log);
            },
            (i, connection) =>
            {
                //if a connection changes for a broker close old connection and create a new one
                if (connection.KafkaUri == localBroker.Address) return connection;
                _kafkaOptions.Log.WarnFormat("Broker:{0} Uri changed from:{1} to {2}", localBroker.BrokerId, connection.KafkaUri, localBroker.Address);
                // The stale connection is disposed after the replacement has been created.
                using (connection)
                {
                    return _kafkaOptions.KafkaConnectionFactory.Create(localBroker.Address, _kafkaOptions.ResponseTimeoutMs, _kafkaOptions.Log);
                }
            });
    }
    foreach (var topic in metadata.Topics)
    {
        // Capture the loop variable before handing it to the update lambdas.
        var localTopic = topic;
        _topicIndex.AddOrUpdate(topic.Name, s => localTopic, (s, existing) => localTopic);
    }
}
/// <summary>
/// Updates the broker connection index and the topic index from a metadata response.
/// A connection already sitting in the default index is re-homed under its broker id;
/// otherwise a fresh connection is created for the broker's resolved endpoint.
/// </summary>
private void UpdateInternalMetadataCache(MetadataResponse metadata)
{
    // Pair each broker with its resolved endpoint before touching the indexes.
    var brokerEndpoints = metadata.Brokers.Select(b => new
    {
        Broker = b,
        Endpoint = _kafkaOptions.KafkaConnectionFactory.Resolve(b.Address, _kafkaOptions.Log)
    });

    foreach (var entry in brokerEndpoints)
    {
        IKafkaConnection existing;
        // Prefer re-using a connection already opened under the default index.
        var connection = _defaultConnectionIndex.TryRemove(entry.Endpoint, out existing)
            ? existing
            : _kafkaOptions.KafkaConnectionFactory.Create(entry.Endpoint, _kafkaOptions.ResponseTimeoutMs, _kafkaOptions.Log);
        UpsertConnectionToBrokerConnectionIndex(entry.Broker.BrokerId, connection);
    }

    foreach (var topic in metadata.Topics)
    {
        // Capture the loop variable before handing it to the update lambdas.
        var cached = topic;
        _topicIndex.AddOrUpdate(topic.Name, key => cached, (key, old) => cached);
    }
}