public async Task SourceWithOffsetContext_at_least_once_consuming_should_work()
{
    var topic = CreateTopic(1);
    var settings = CreateConsumerSettings<string>(CreateGroup(1));
    var elementCount = 10;
    var batchSize = 2;
    var messages = Enumerable.Range(1, elementCount).ToList();

    await ProduceStrings(topic, messages, ProducerSettings);

    var committerSettings = CommitterSettings.WithMaxBatch(batchSize);

    var (control, probe) = KafkaConsumer.SourceWithOffsetContext(settings, Subscriptions.Topics(topic))
        .SelectAsync(10, message => Task.FromResult(Done.Instance))
        .Via(Committer.FlowWithOffsetContext<Done>(committerSettings))
        .AsSource()
        .ToMaterialized(this.SinkProbe<(NotUsed, ICommittableOffsetBatch)>(), Keep.Both)
        .Run(Materializer);

    probe.Request(10);
    var committedBatches = probe.Within(TimeSpan.FromSeconds(10), () => probe.ExpectNextN(elementCount / batchSize));

    probe.Cancel();
    AwaitCondition(() => control.IsShutdown.IsCompletedSuccessfully, TimeSpan.FromSeconds(10));

    committedBatches.Select(r => r.Item2).Sum(batch => batch.BatchSize).Should().Be(10);
}
private GitHubCommit CreateCommit(int offsetDays, Repository repository)
{
    var origin = new DateTime(1970, 1, 1, 0, 0, 0, 0, DateTimeKind.Utc);
    var diff = (DateTime.UtcNow - TimeSpan.FromDays(offsetDays)) - origin;
    var author = new Committer("", "", DateTimeOffset.FromUnixTimeSeconds((long)Math.Floor(diff.TotalSeconds)));
    var commit = new Commit("", "", "", "", "mockshalol", null, repository, "", author, null, null,
        new List<GitReference>().AsReadOnly(), 0, null);

    return new GitHubCommit("", "", "", "", "mockshalol", null, repository, null, "", commit, null, "", null,
        new List<GitReference>().AsReadOnly(), null);
}
public (object, string[]) Resolve(string[] path)
{
    if (path == null || path.Length == 0)
    {
        return (null, Array.Empty<string>());
    }

    switch (path[0])
    {
        case "parents" when path.Length == 1:
            return (Parents, Array.Empty<string>());
        case "parents":
            if (!int.TryParse(path[1], out int i))
            {
                return (null, Array.Empty<string>());
            }
            if (i < 0 || i >= Parents.Length)
            {
                return (null, Array.Empty<string>());
            }
            return (new Link { Cid = Parents[i] }, path.Skip(2).ToArray());
        case "author" when path.Length == 1:
            return (Author, Array.Empty<string>());
        case "author":
            return Author.Resolve(path.Skip(1).ToArray());
        case "committer" when path.Length == 1:
            return (Committer, Array.Empty<string>());
        case "committer":
            return Committer.Resolve(path.Skip(1).ToArray());
        case "signature":
            return (Signature.Text, path.Skip(1).ToArray());
        case "message":
            return (Message, path.Skip(1).ToArray());
        case "tree":
            return (new Link { Cid = GitTree }, path.Skip(1).ToArray());
        default:
            return (null, Array.Empty<string>());
    }
}
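A minimal usage sketch for the resolver above; the `commit` instance and path values are hypothetical and not part of the original snippet:

// Hypothetical usage: assumes `commit` is an instance of the class above with at least one parent.
var (author, rest) = commit.Resolve(new[] { "author", "name" });        // delegates to Author.Resolve(new[] { "name" })
var (parentLink, remaining) = commit.Resolve(new[] { "parents", "0" }); // Link to parent 0; remaining is empty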
public void Then_the_provided_data_should_be_set_in_properties()
{
    var committer = new Committer("John Doe", 42, "http://www.foo.com/img.png");

    var name = committer.Name;
    var commits = committer.Commits;
    var uri = committer.ImageUri;

    Assert.AreEqual("John Doe", name);
    Assert.AreEqual(42, commits);
    Assert.AreEqual("http://www.foo.com/img.png", uri.AbsoluteUri);
}
public void BindDataToCell(Committer committer, float percent)
{
    cell.SelectionStyle = UITableViewCellSelectionStyle.None;
    nameLabel.Text = committer.Name;
    commitLabel.Text = committer.Commits.ToString();
    nameLabel.TextColor = StyleExtensions.lightGrayText;
    StyleProgressBar(percent);
    DisplayImage(committer.ImageUri);
}
public static async Task Main(string[] args)
{
    Config fallbackConfig = ConfigurationFactory.ParseString(@"
        akka.suppress-json-serializer-warning=true
        akka.loglevel = DEBUG
    ").WithFallback(ConfigurationFactory.FromResource<ConsumerSettings<object, object>>("Akka.Streams.Kafka.reference.conf"));

    var system = ActorSystem.Create("TestKafka", fallbackConfig);
    var materializer = system.Materializer();

    var consumerSettings = ConsumerSettings<Null, string>.Create(system, null, null)
        .WithBootstrapServers($"{EventHubNamespace}.servicebus.windows.net:9093")
        .WithGroupId(EventHubConsumerGroup)
        .WithProperties(new Dictionary<string, string>
        {
            { "security.protocol", "SASL_SSL" },
            { "sasl.mechanism", "PLAIN" },
            { "sasl.username", "$ConnectionString" },
            { "sasl.password", EventHubConnectionString },
        });

    var subscription = Subscriptions.Topics(EventHubName);
    var committerDefaults = CommitterSettings.Create(system);

    // Comment for simple no-commit consumer
    DrainingControl<NotUsed> control = KafkaConsumer.CommittableSource(consumerSettings, subscription)
        .SelectAsync(1, msg => Business(msg.Record).ContinueWith(done => (ICommittable)msg.CommitableOffset))
        .ToMaterialized(
            Committer.Sink(committerDefaults.WithMaxBatch(1)),
            DrainingControl<NotUsed>.Create)
        .Run(materializer);

    // Uncomment for simple no-commit consumer
    /*
    await KafkaConsumer.PlainSource(consumerSettings, subscription)
        .RunForeach(result =>
        {
            Console.WriteLine($"Consumer: {result.Topic}/{result.Partition} {result.Offset}: {result.Value}");
        }, materializer);
    */

    Console.WriteLine("Press any key to stop consumer.");
    Console.ReadKey();

    // Comment for simple no-commit consumer
    await control.Stop();
    await system.Terminate();
}
public string[] GetTree(string path, int depth)
{
    if (depth != -1)
    {
        throw new NotImplementedException("Proper tree not yet implemented");
    }

    return new[] { "tree", "parents", "message", "gpgsig" }
        .Concat(Author.GetTree("author", depth))
        .Concat(Committer.GetTree("committer", depth))
        .Concat(Parents.Select((p, i) => $"parents/{i}"))
        .ToArray();
}
public async Task CommitterFlow_commits_offsets_from_CommittableSource(int batchSize)
{
    var topic1 = CreateTopic(1);
    var topicPartition1 = new TopicPartition(topic1, 0);
    var group1 = CreateGroup(1);

    await GivenInitializedTopic(topicPartition1);

    await Source
        .From(Enumerable.Range(1, 100))
        .Select(elem => new ProducerRecord<Null, string>(topicPartition1, elem.ToString()))
        .RunWith(KafkaProducer.PlainSink(ProducerSettings), Materializer);

    var consumerSettings = CreateConsumerSettings<string>(group1);
    var committedElements = new ConcurrentQueue<string>();
    var committerSettings = CommitterSettings.WithMaxBatch(batchSize);

    var (task, probe1) = KafkaConsumer.CommittableSource(consumerSettings, Subscriptions.Assignment(topicPartition1))
        .WhereNot(c => c.Record.Value == InitialMsg)
        .SelectAsync(10, elem =>
        {
            committedElements.Enqueue(elem.Record.Value);
            return Task.FromResult(elem.CommitableOffset as ICommittable);
        })
        .Via(Committer.Flow(committerSettings))
        .ToMaterialized(this.SinkProbe<Done>(), Keep.Both)
        .Run(Materializer);

    probe1.Request(25 / batchSize);

    foreach (var _ in Enumerable.Range(1, 25 / batchSize))
    {
        probe1.ExpectNext(Done.Instance, TimeSpan.FromSeconds(10));
    }

    probe1.Cancel();
    AwaitCondition(() => task.IsShutdown.IsCompletedSuccessfully);

    var probe2 = KafkaConsumer.PlainSource(consumerSettings, Subscriptions.Assignment(new TopicPartition(topic1, 0)))
        .Select(_ => _.Value)
        .RunWith(this.SinkProbe<string>(), Materializer);

    probe2.Request(75);
    foreach (var i in Enumerable.Range(committedElements.Count + 1, 75).Select(c => c.ToString()))
    {
        probe2.ExpectNext(i, TimeSpan.FromSeconds(10));
    }

    probe2.Cancel();
}
/// <summary>Format this builder's state as a commit object.</summary>
/// <remarks>Format this builder's state as a commit object.</remarks>
/// <returns>
/// this object in the canonical commit format, suitable for storage
/// in a repository.
/// </returns>
/// <exception cref="Sharpen.UnsupportedEncodingException">
/// the encoding specified by
/// <see cref="Encoding()">Encoding()</see>
/// is not supported by this Java runtime.
/// </exception>
public virtual byte[] Build()
{
    ByteArrayOutputStream os = new ByteArrayOutputStream();
    OutputStreamWriter w = new OutputStreamWriter(os, Encoding);
    try
    {
        os.Write(htree);
        os.Write(' ');
        TreeId.CopyTo(os);
        os.Write('\n');
        foreach (ObjectId p in ParentIds)
        {
            os.Write(hparent);
            os.Write(' ');
            p.CopyTo(os);
            os.Write('\n');
        }
        os.Write(hauthor);
        os.Write(' ');
        w.Write(Author.ToExternalString());
        w.Flush();
        os.Write('\n');
        os.Write(hcommitter);
        os.Write(' ');
        w.Write(Committer.ToExternalString());
        w.Flush();
        os.Write('\n');
        if (Encoding != Constants.CHARSET)
        {
            os.Write(hencoding);
            os.Write(' ');
            os.Write(Constants.EncodeASCII(Encoding.Name()));
            os.Write('\n');
        }
        os.Write('\n');
        if (Message != null)
        {
            w.Write(Message);
            w.Flush();
        }
    }
    catch (IOException err)
    {
        // This should never occur, the only way to get it above is
        // for the ByteArrayOutputStream to throw, but it doesn't.
        //
        throw new RuntimeException(err);
    }
    return os.ToByteArray();
}
public async Task DeleteDistrict()
{
    if (District == null)
    {
        return;
    }

    // retrieve entire list of histories
    var histories = await Db.DataSyncHistories
        .Where(h => h.DistrictId == DistrictId)
        .ToListAsync();

    // delete the histories in chunks
    await histories
        .AsQueryable()
        .ForEachInChunksAsync(
            chunkSize: ChunkSize,
            action: async history =>
            {
                // for each history, delete the details associated with it as well
                var details = await Db.DataSyncHistoryDetails
                    .Where(d => d.DataSyncHistoryId == history.DataSyncHistoryId)
                    .ToListAsync();
                Db.DataSyncHistoryDetails.RemoveRange(details);
            },
            // commit changes after each chunk
            onChunkComplete: async () => await Committer.Invoke());

    Db.DataSyncHistories.RemoveRange(histories);
    await Committer.Invoke();

    // now delete the lines
    for (;;)
    {
        var lines = await Lines().Take(ChunkSize).ToListAsync();
        if (!lines.Any())
        {
            break;
        }
        Db.DataSyncLines.RemoveRange(lines);
        await Committer.Invoke();
    }

    // finally, delete the district itself!
    Db.Districts.Remove(District);
    await Committer.Invoke();
}
public async Task<bool> Output(IList<IFileModel> files, CancellationToken token)
{
    var client = await GetGithubClient(OutputDetails);
    var user = await client.User.Current();

    switch (user.Type)
    {
        case AccountType.Organization:
        case AccountType.User:
            var clientRepo = client.Repository;
            var repo = await clientRepo.Create(new NewRepository($"AutoCodeGeneration_{_systemId}_{files.First().FileExt}")
            {
                Private = true,
                Homepage = "https://autocodegen.danbuxton.co.uk",
                Description = "Created using AutomatedCodeGeneration",
                HasDownloads = false,
                HasIssues = false,
                HasWiki = false
            });

            var author = new Committer("Automated Code Generator", "*****@*****.**",
                new DateTimeOffset(new DateTime(2021, 5, 25, 23, 59, 59)));

            await Task.Run(async () =>
            {
                foreach (var file in files)
                {
                    await clientRepo.Content.CreateFile(repo.Id, $"{file.FileName}.{file.FileExt}",
                        new CreateFileRequest($"Generated {file.FileName}", file.Generate().ToString())
                        {
                            Author = author
                        });
                }
            }, token);

            return true;
        case AccountType.Bot:
            throw new IOException("Account type not supported");
        default:
            return false;
    }
}
public void MergeFrom(CommitWire other)
{
    if (other == null)
    {
        return;
    }
    parents_.Add(other.parents_);
    if (other.treeId_ != null)
    {
        if (treeId_ == null)
        {
            treeId_ = new global::SourceCode.Chasm.IO.Proto.Wire.Sha1Wire();
        }
        TreeId.MergeFrom(other.TreeId);
    }
    if (other.author_ != null)
    {
        if (author_ == null)
        {
            author_ = new global::SourceCode.Chasm.IO.Proto.Wire.AuditWire();
        }
        Author.MergeFrom(other.Author);
    }
    if (other.committer_ != null)
    {
        if (committer_ == null)
        {
            committer_ = new global::SourceCode.Chasm.IO.Proto.Wire.AuditWire();
        }
        Committer.MergeFrom(other.Committer);
    }
    if (other.message_ != null)
    {
        if (message_ == null)
        {
            message_ = new global::Google.Protobuf.WellKnownTypes.StringValue();
        }
        Message.MergeFrom(other.Message);
    }
}
public override int GetHashCode()
{
    int hash = 1;
    hash ^= parents_.GetHashCode();
    if (treeId_ != null)
    {
        hash ^= TreeId.GetHashCode();
    }
    if (author_ != null)
    {
        hash ^= Author.GetHashCode();
    }
    if (committer_ != null)
    {
        hash ^= Committer.GetHashCode();
    }
    if (Message.Length != 0)
    {
        hash ^= Message.GetHashCode();
    }
    return hash;
}
//JAVA TO C# CONVERTER TODO TASK: Most Java annotations will not have direct .NET equivalent attributes:
//ORIGINAL LINE: @Test public void shouldWithstandHighStressAndStillKeepOrder() throws Exception
//JAVA TO C# CONVERTER WARNING: Method 'throws' clauses are not available in C#:
public virtual void ShouldWithstandHighStressAndStillKeepOrder()
{
    // GIVEN an ordering queue w/ low initial size as to also exercise resize under stress
    VerifyingIdOrderingQueue queue = new VerifyingIdOrderingQueue(new SynchronizedArrayIdOrderingQueue());
    Committer[] committers = new Committer[20];
    System.Threading.CountdownEvent readySignal = new System.Threading.CountdownEvent(committers.Length);
    AtomicBoolean end = new AtomicBoolean();
    System.Threading.CountdownEvent startSignal = new System.Threading.CountdownEvent(1);
    LongIterator idSource = NeverEndingIdStream();
    for (int i = 0; i < committers.Length; i++)
    {
        committers[i] = new Committer(queue, idSource, end, readySignal, startSignal);
    }

    // WHEN GO!
    readySignal.Wait();
    startSignal.Signal();
    long startTime = currentTimeMillis();
    long endTime = startTime + SECONDS.toMillis(20); // worst-case
    while (currentTimeMillis() < endTime && queue.NumberOfOrderlyRemovedIds < THRESHOLD)
    {
        Thread.Sleep(100);
    }
    end.set(true);
    foreach (Committer committer in committers)
    {
        committer.AwaitFinish();
    }

    // THEN there should have been at least a few ids processed. The order of those
    // are verified as they go, by the VerifyingIdOrderingQueue
    assertTrue("Would have wanted at least a few ids to be processed, but only saw " + queue.NumberOfOrderlyRemovedIds,
        queue.NumberOfOrderlyRemovedIds >= THRESHOLD);
}
public void MergeFrom(CommitWire other)
{
    if (other == null)
    {
        return;
    }
    parents_.Add(other.parents_);
    if (other.treeId_ != null)
    {
        if (treeId_ == null)
        {
            treeId_ = new global::SourceCode.Chasm.IO.Proto.Wire.Sha1Wire();
        }
        TreeId.MergeFrom(other.TreeId);
    }
    if (other.author_ != null)
    {
        if (author_ == null)
        {
            author_ = new global::SourceCode.Chasm.IO.Proto.Wire.AuditWire();
        }
        Author.MergeFrom(other.Author);
    }
    if (other.committer_ != null)
    {
        if (committer_ == null)
        {
            committer_ = new global::SourceCode.Chasm.IO.Proto.Wire.AuditWire();
        }
        Committer.MergeFrom(other.Committer);
    }
    if (other.Message.Length != 0)
    {
        Message = other.Message;
    }
}
private static string ToStr(this Committer user, string prefix) => user != null ? $" ({prefix} {user.Name})" : "";
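A quick usage sketch for the `ToStr` helper; the `commit` variable is hypothetical. Because the extension checks `user != null` itself, it is safe to invoke even when the `Committer` property is null:

// Hypothetical usage: yields " (committed by <name>)", or "" when the committer is null.
var suffix = commit.Committer.ToStr("committed by");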
public void Then_assure_a_default_image_url_is_inserted_for_null_images()
{
    var committer = new Committer("John Doe", 42, null);

    Assert.IsNotNull(committer.ImageUri);
}
public UpdateFileContentModel(string message, Committer committer, string content, string sha, string branch) =>
    (CommitMessage, Committer, ContentAsBase64String, Sha, Branch) = (message, committer, content, sha, branch);
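A construction sketch for `UpdateFileContentModel`; all argument values are hypothetical, and the `Committer(name, email, date)` constructor is assumed to be the Octokit-style one used in the GitHub example above. Note that the `content` argument maps to `ContentAsBase64String`, so the caller is expected to base64-encode up front:

// Hypothetical usage: `existingSha` would come from a prior contents lookup against the repository.
var model = new UpdateFileContentModel(
    message: "Update README",
    committer: new Committer("Jane Doe", "jane@example.com", DateTimeOffset.UtcNow),
    content: Convert.ToBase64String(System.Text.Encoding.UTF8.GetBytes("# Hello")),
    sha: existingSha,
    branch: "main");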
public async Task ProducerFlowWithContext_should_work_with_source_with_context()
{
    bool Duplicate(string value) => value == "1";
    bool Ignore(string value) => value == "2";

    var consumerSettings = CreateConsumerSettings<string, string>(CreateGroup(1));
    var topic1 = CreateTopic(1);
    var topic2 = CreateTopic(2);
    var topic3 = CreateTopic(3);
    var topic4 = CreateTopic(4);
    var producerSettings = BuildProducerSettings<string, string>();
    var committerSettings = CommitterSettings;
    var totalMessages = 10;
    var totalConsumed = 0;

    await ProduceStrings(topic1, Enumerable.Range(1, totalMessages), producerSettings);

    var (control2, result) = KafkaConsumer.PlainSource(consumerSettings, Subscriptions.Topics(topic2, topic3, topic4))
        .Scan(0, (c, _) => c + 1)
        .Select(consumed =>
        {
            totalConsumed = consumed;
            return consumed;
        })
        .ToMaterialized(Sink.Last<int>(), Keep.Both)
        .Run(Materializer);

    var control = KafkaConsumer.SourceWithOffsetContext(consumerSettings, Subscriptions.Topics(topic1))
        .Select(record =>
        {
            IEnvelope<string, string, NotUsed> output;
            if (Duplicate(record.Message.Value))
            {
                output = ProducerMessage.Multi(new[]
                {
                    new ProducerRecord<string, string>(topic2, record.Message.Key, record.Message.Value),
                    new ProducerRecord<string, string>(topic3, record.Message.Key, record.Message.Value)
                }.ToImmutableSet());
            }
            else if (Ignore(record.Message.Value))
            {
                output = ProducerMessage.PassThrough<string, string>();
            }
            else
            {
                output = ProducerMessage.Single(new ProducerRecord<string, string>(topic4, record.Message.Key, record.Message.Value));
            }

            Log.Debug($"Giving message of type {output.GetType().Name}");
            return output;
        })
        .Via(KafkaProducer.FlowWithContext<string, string, ICommittableOffset>(producerSettings))
        .AsSource()
        .Log("Produced messages", r => $"Committing {r.Item2.Offset.Topic}:{r.Item2.Offset.Partition}[{r.Item2.Offset.Offset}]")
        .ToMaterialized(Committer.SinkWithOffsetContext<IResults<string, string, ICommittableOffset>>(committerSettings), Keep.Both)
        .MapMaterializedValue(tuple => DrainingControl<NotUsed>.Create(tuple.Item1, tuple.Item2))
        .Run(Materializer);

    // One by one, wait until all `totalMessages` have been consumed
    for (var i = 1; i < totalMessages; ++i)
    {
        var consumedExpect = i;
        Log.Info($"Waiting for {consumedExpect} to be consumed...");
        try
        {
            await AwaitConditionAsync(() => totalConsumed >= consumedExpect, TimeSpan.FromSeconds(30));
        }
        finally
        {
            Log.Info($"Finished waiting for {consumedExpect} messages. Total: {totalConsumed}");
        }
        Log.Info($"Confirmed that {consumedExpect} messages are consumed");
    }

    AssertTaskCompletesWithin(TimeSpan.FromSeconds(10), control.DrainAndShutdown());
    AssertTaskCompletesWithin(TimeSpan.FromSeconds(10), control2.Shutdown());
    AssertTaskCompletesWithin(TimeSpan.FromSeconds(10), result).Should().Be(totalConsumed);
}
public async Task SupervisionStrategy_Decider_on_complex_stream_should_work()
{
    var topic = CreateTopic(1);
    var group = CreateGroup(1);
    var topicPartition = new TopicPartition(topic, 0);
    var committedTopicPartition = new TopicPartition($"{topic}-done", 0);
    var callCount = 0;

    Directive Decider(Exception cause)
    {
        callCount++;
        return Directive.Resume;
    }

    var committerSettings = CommitterSettings.Create(Sys);
    var consumerSettings = CreateConsumerSettings<string>(group);
    var counter = 0;

    // arrange
    await Source.From(Enumerable.Range(1, 10))
        .Select(elem => new ProducerRecord<Null, string>(topicPartition, elem.ToString()))
        .RunWith(KafkaProducer.PlainSink(ProducerSettings), Materializer);

    // act
    var drainingControl = KafkaConsumer.CommittableSource(consumerSettings, Subscriptions.Assignment(topicPartition))
        .Via(Flow.Create<CommittableMessage<Null, string>>().Select(x =>
        {
            counter++;
            // the exception happens here: fail once, when the counter reaches 5
            if (counter == 5)
            {
                throw new Exception("BOOM!");
            }
            return x;
        }))
        .WithAttributes(Attributes.CreateName("CommitableSource")
            .And(ActorAttributes.CreateSupervisionStrategy(Decider)))
        .Select(c => (c.Record.Topic, c.Record.Message.Value, c.CommitableOffset))
        .SelectAsync(1, async t =>
        {
            Log.Info($"[{t.Topic}]: {t.Value}");
            // simulate a request-response call that takes 10ms to complete here
            await Task.Delay(10);
            return t;
        })
        .Select(t => ProducerMessage.Single(new ProducerRecord<Null, string>(committedTopicPartition, t.Value), t.CommitableOffset))
        .Via(KafkaProducer.FlexiFlow<Null, string, ICommittableOffset>(ProducerSettings))
        .WithAttributes(Attributes.CreateName("FlexiFlow"))
        .Select(m => (ICommittable)m.PassThrough)
        .AlsoToMaterialized(Committer.Sink(committerSettings), DrainingControl<NotUsed>.Create)
        .To(Flow.Create<ICommittable>()
            .Async()
            .GroupedWithin(1000, TimeSpan.FromSeconds(1))
            .Select(c => c.Count())
            .Log("MsgCount").AddAttributes(Attributes.CreateLogLevels(LogLevel.InfoLevel))
            .To(Sink.Ignore<int>()))
        .Run(Sys.Materializer());

    await Task.Delay(TimeSpan.FromSeconds(5));
    await GuardWithTimeoutAsync(drainingControl.DrainAndShutdown(), TimeSpan.FromSeconds(10));

    // There should be only 1 decider call
    callCount.Should().Be(1);

    // Assert that all of the messages, except for those that failed in the stage, got committed
    var settings = CreateConsumerSettings<Null, string>(group);
    var probe = KafkaConsumer
        .PlainSource(settings, Subscriptions.Assignment(committedTopicPartition))
        .Select(c => c.Message.Value)
        .RunWith(this.SinkProbe<string>(), Materializer);

    probe.Request(9);
    var messages = new List<string>();
    for (var i = 0; i < 9; ++i)
    {
        var message = probe.RequestNext();
        messages.Add(message);
    }

    // Message "5" is missing because the exception happened downstream of the source and we chose to
    // ignore it in the decider
    messages.Should().BeEquivalentTo(new[] { "1", "2", "3", "4", "6", "7", "8", "9", "10" });

    probe.Cancel();
}
public override int GetHashCode()
{
    unchecked
    {
        return (Url != null ? Url.GetHashCode() : 0)
            ^ (Sha != null ? Sha.GetHashCode() : 0)
            ^ (Author != null ? Author.GetHashCode() : 0)
            ^ (Committer != null ? Committer.GetHashCode() : 0)
            ^ (Message != null ? Message.GetHashCode() : 0)
            ^ (Tree != null ? Tree.GetHashCode() : 0);
    }
}
public override int GetHashCode()
{
    unchecked
    {
        return (Url != null ? Url.GetHashCode() : 0)
            ^ (HtmlUrl != null ? HtmlUrl.GetHashCode() : 0)
            ^ (CommentsUrl != null ? CommentsUrl.GetHashCode() : 0)
            ^ (Sha != null ? Sha.GetHashCode() : 0)
            ^ (Commit != null ? Commit.GetHashCode() : 0)
            ^ (Author != null ? Author.GetHashCode() : 0)
            ^ (Committer != null ? Committer.GetHashCode() : 0)
            ^ (Parents != null ? Parents.GetHashCode() : 0)
            ^ (Stats != null ? Stats.GetHashCode() : 0)
            ^ (Files != null ? Files.GetHashCode() : 0);
    }
}