/// <summary>
/// Emit a batch of log events to Kinesis, running to completion synchronously.
/// </summary>
/// <param name="events">The events to be logged to Kinesis.</param>
protected override void EmitBatch(IEnumerable<LogEvent> events)
{
    var request = new PutRecordsRequest
    {
        StreamName = _state.Options.StreamName
    };

    foreach (var logEvent in events)
    {
        // Render the event with the configured formatter, then ship its UTF-8 bytes.
        using (var json = new StringWriter())
        {
            _state.Formatter.Format(logEvent, json);
            var bytes = Encoding.UTF8.GetBytes(json.ToString());
            var entry = new PutRecordsRequestEntry
            {
                // A random partition key spreads records evenly across shards.
                PartitionKey = Guid.NewGuid().ToString(),
                Data = new MemoryStream(bytes),
            };
            request.Records.Add(entry);
        }
    }

    // PutRecords rejects requests containing zero records, so skip empty batches.
    if (request.Records.Count == 0)
    {
        return;
    }

    // NOTE(review): the response (including FailedRecordCount) is discarded here,
    // so partial failures are silent — confirm this is intentional.
    _state.KinesisClient.PutRecords(request);
}
/// <summary>
/// Transfers data to Realeyes services via AWS Kinesis.
/// </summary>
/// <param name="events">Events to serialize as JSON and send in a single PutRecords batch.</param>
/// <returns><c>true</c> when every record was accepted by Kinesis; otherwise <c>false</c>.</returns>
private bool SendEvents(List<BaseEvent> events)
{
    // PutRecords rejects zero-record requests, so an empty batch is trivially successful.
    if (events == null || events.Count == 0)
    {
        return true;
    }

    var batch = new PutRecordsRequest();
    batch.StreamName = "live-outside-data-ingestion";

    foreach (var ev in events)
    {
        var entity = new PutRecordsRequestEntry();
        // Partition by account/participant so records for one participant stay ordered.
        entity.PartitionKey = $"{ev.AccountHash}-{ev.ParticipantId}";
        string dataAsJson = JsonConvert.SerializeObject(ev);
        byte[] dataAsBytes = Encoding.UTF8.GetBytes(dataAsJson);
        entity.Data = new MemoryStream(dataAsBytes);
        batch.Records.Add(entity);
    }

    var result = true;
    try
    {
        // GetAwaiter().GetResult() surfaces the original exception; .Result would
        // wrap it in an AggregateException and the catch below would only log
        // "One or more errors occurred".
        var response = _kinesisClient.PutRecordsAsync(batch).GetAwaiter().GetResult();

        // PutRecords can partially fail: only claim success when every record landed.
        result = response.FailedRecordCount == 0;
        if (result)
        {
            Console.WriteLine("Successfully sent to Kinesis");
        }
        else
        {
            Console.WriteLine("Failed to send record: '{0}'",
                $"{response.FailedRecordCount} of {batch.Records.Count} records failed");
        }
    }
    catch (Exception ex)
    {
        Console.WriteLine("Failed to send record: '{0}'", ex.Message);
        result = false;
    }
    finally
    {
        // Release the MemoryStream backing each record.
        batch.Records.ForEach(r => r.Data?.Dispose());
    }

    return result;
}
/// <summary>
/// Demonstrates uploading a small batch of records to a DIS stream.
/// </summary>
/// <param name="streamName">Name of the target stream.</param>
/// <param name="shardId">Identifier of the partition (shard) to write to.</param>
public static PutRecordsResult RunProducerDemo(string streamName, string shardId)
{
    var client = new DISIngestionClient();

    // Build three demo records whose payload is the shard id plus a sequence suffix.
    var entries = new List<PutRecordsRequestEntry>();
    for (int index = 0; index < 3; index++)
    {
        entries.Add(new PutRecordsRequestEntry
        {
            // The data to upload.
            Data = Encoding.UTF8.GetBytes(shardId + index),
            // ExplicitHashKey would override the hash derived from PartitionKey.
            //ExplicitHashKey = "0",
            // When PartitionId is supplied it takes precedence over PartitionKey.
            //PartitionKey = new Random().Next().ToString(),
            // Unique identifier of the target partition.
            PartitionId = shardId,
        });
    }

    var request = new PutRecordsRequest
    {
        StreamName = streamName,
        Records = entries,
    };

    PutRecordsResult response = client.PutRecords(request);
    foreach (var item in response.Records)
    {
        Console.WriteLine(item);
    }

    return response;
}
/// <inheritdoc/>
public override async Task<IList<string>> PublishAsync<TEvent>(IList<EventContext<TEvent>> events,
                                                               EventRegistration registration,
                                                               DateTimeOffset? scheduled = null,
                                                               CancellationToken cancellationToken = default)
{
    // log warning when trying to publish scheduled message
    if (scheduled != null)
    {
        Logger.LogWarning("Amazon Kinesis does not support delay or scheduled publish");
    }

    using var scope = CreateScope();
    var records = new List<PutRecordsRequestEntry>();

    // work on each event
    foreach (var @event in events)
    {
        using var ms = new MemoryStream();
        await SerializeAsync(body: ms,
                             @event: @event,
                             registration: registration,
                             scope: scope,
                             cancellationToken: cancellationToken);

        // Copy the serialized payload into a fresh stream: `ms` is disposed at the
        // end of this loop iteration, but the record data is only consumed when the
        // request is sent below — handing the SDK the disposed stream risks an
        // ObjectDisposedException at read time.
        var record = new PutRecordsRequestEntry
        {
            Data = new MemoryStream(ms.ToArray()),
            PartitionKey = TransportOptions.PartitionKeyResolver(@event),
        };
        records.Add(record);
    }

    // prepare the request
    var streamName = registration.EventName;
    var request = new PutRecordsRequest
    {
        StreamName = streamName,
        Records = records,
    };

    // send the events
    Logger.LogInformation("Sending {EventsCount} messages to '{StreamName}'. Scheduled: {Scheduled}. Events:\r\n- {Ids}",
                          events.Count,
                          streamName,
                          scheduled,
                          string.Join("\r\n- ", events.Select(e => e.Id)));
    var response = await kinesisClient.PutRecordsAsync(request, cancellationToken);
    response.EnsureSuccess();

    // Should we check for failed records and throw exception?

    // return the sequence numbers
    return response.Records.Select(m => m.SequenceNumber.ToString()).ToList();
}
/// <summary>
/// Uploads a file to the stream in fixed-size chunks, asynchronously.
/// </summary>
/// <param name="streamName">Name of the target stream.</param>
/// <param name="file">Path of the local file to upload.</param>
/// <param name="fileName">Destination file name recorded with each chunk.</param>
/// <returns>Total number of records the service failed to accept.</returns>
private async Task<int> UploadFile(string streamName, string file, string fileName)
{
    // The service caps a single file at 128 MB; we use 500 KB chunks here.
    const int chunkSize = 500 * 1024;

    var fileInfo = new FileInfo(file);
    // Number of chunks needed to cover the whole file.
    int chunkCount = (int)Math.Ceiling((decimal)fileInfo.Length / chunkSize);
    // One delivery id ties all chunks of this file together.
    string deliverDataId = Guid.NewGuid().ToString("N");

    int failedRecordCount = 0;
    using (var stream = new FileStream(file, FileMode.Open, FileAccess.Read))
    using (var reader = new BinaryReader(stream))
    {
        for (int chunkIndex = 0; chunkIndex < chunkCount; chunkIndex++)
        {
            // The final chunk may be shorter than chunkSize; ReadBytes handles that.
            byte[] payload = reader.ReadBytes(chunkSize);

            var request = new PutRecordsRequest
            {
                StreamName = streamName,
                Records = new List<PutRecordsRequestEntry>
                {
                    new PutRecordsRequestEntry
                    {
                        Data = payload,
                        ExtenedInfo = new PutRecordsRequestEntryExtendedInfo()
                        {
                            FileName = fileName,
                            DeliverDataId = deliverDataId,
                            SequenceNumber = chunkIndex,
                            // Mark the last chunk so the service can finish reassembly.
                            EndFlag = chunkIndex == chunkCount - 1,
                        }
                    }
                },
            };

            PutRecordsResult response = await this.UploadFile(request);
            failedRecordCount += response.FailedRecordCount;
            //Thread.Sleep(500);
            foreach (var item in response.Records)
            {
                Console.WriteLine("异步" + item);
            }
        }
    }

    return failedRecordCount;
}
/// <summary>
/// Uploads a file to the stream in fixed-size chunks, synchronously.
/// </summary>
/// <param name="streamName">Name of the target stream.</param>
/// <param name="file">Path of the local file to upload.</param>
/// <returns>Total number of records the service failed to accept.</returns>
public static int UploadFileDemo(string streamName, string file)
{
    var client = new DISIngestionClient();

    // The service caps a single file at 128 MB; we use 500 KB chunks here.
    const int chunkSize = 500 * 1024;

    // One delivery id ties all chunks of this file together.
    string deliverDataId = Guid.NewGuid().ToString("N");
    var fileInfo = new FileInfo(file);
    // Number of chunks needed to cover the whole file.
    int chunkCount = (int)Math.Ceiling((decimal)fileInfo.Length / chunkSize);

    int failedRecordCount = 0;
    using (var stream = new FileStream(file, FileMode.Open, FileAccess.Read))
    using (var reader = new BinaryReader(stream))
    {
        for (int chunkIndex = 0; chunkIndex < chunkCount; chunkIndex++)
        {
            // The final chunk may be shorter than chunkSize; ReadBytes handles that.
            byte[] payload = reader.ReadBytes(chunkSize);

            var request = new PutRecordsRequest
            {
                StreamName = streamName,
                Records = new List<PutRecordsRequestEntry>
                {
                    new PutRecordsRequestEntry
                    {
                        Data = payload,
                        ExtenedInfo = new PutRecordsRequestEntryExtendedInfo()
                        {
                            // NOTE(review): destination name is hard-coded and ignores
                            // the `file` argument — presumably intentional for this
                            // demo; confirm before reusing outside of it.
                            FileName = @"thisisshawnfolder/2018/07/04/14/56/Penguins2.jpg",
                            DeliverDataId = deliverDataId,
                            SequenceNumber = chunkIndex,
                            // Mark the last chunk so the service can finish reassembly.
                            EndFlag = chunkIndex == chunkCount - 1,
                        }
                    }
                },
            };

            PutRecordsResult response = client.PutFileRecords(request);
            failedRecordCount += response.FailedRecordCount;
            //Thread.Sleep(500);
            foreach (var item in response.Records)
            {
                Console.WriteLine("同步" + item);
            }
        }
    }

    return failedRecordCount;
}
/// <summary>
/// Timer callback: ships buffered log records to Kinesis in batches, advancing a
/// persisted bookmark (byte offset + buffer-file path) so each record is shipped
/// once even across process restarts. Loops while full batches keep coming.
/// </summary>
void OnTick()
{
    try
    {
        var count = 0;
        do
        {
            // Locking the bookmark ensures that though there may be multiple instances of this
            // class running, only one will ship logs at a time.
            using (var bookmark = File.Open(_bookmarkFilename, FileMode.OpenOrCreate, FileAccess.ReadWrite, FileShare.Read))
            {
                long startingOffset;
                long nextLineBeginsAtOffset;
                string currentFilePath;

                TryReadBookmark(bookmark, out nextLineBeginsAtOffset, out currentFilePath);
                SelfLog.WriteLine("Bookmark is currently at offset {0} in '{1}'", nextLineBeginsAtOffset, currentFilePath);

                var fileSet = GetFileSet();

                // If the bookmarked file vanished (rotated/cleaned up), restart at
                // the oldest buffer file.
                if (currentFilePath == null || !File.Exists(currentFilePath))
                {
                    nextLineBeginsAtOffset = 0;
                    currentFilePath = fileSet.FirstOrDefault();
                }

                if (currentFilePath != null)
                {
                    count = 0;

                    // Read up to one batch of lines from the current buffer file,
                    // tracking the offset just past the last line consumed.
                    var records = new List<PutRecordsRequestEntry>();
                    using (var current = File.Open(currentFilePath, FileMode.Open, FileAccess.Read, FileShare.ReadWrite))
                    {
                        startingOffset = current.Position = nextLineBeginsAtOffset;

                        string nextLine;
                        while (count < _batchPostingLimit && TryReadLine(current, ref nextLineBeginsAtOffset, out nextLine))
                        {
                            ++count;
                            var bytes = Encoding.UTF8.GetBytes(nextLine);
                            var record = new PutRecordsRequestEntry
                            {
                                // A random partition key spreads records across shards.
                                PartitionKey = Guid.NewGuid().ToString(),
                                Data = new MemoryStream(bytes)
                            };
                            records.Add(record);
                        }
                    }

                    if (count > 0)
                    {
                        var request = new PutRecordsRequest
                        {
                            StreamName = _state.Options.StreamName,
                            Records = records
                        };

                        SelfLog.WriteLine("Writing {0} records to kinesis", count);
                        PutRecordsResponse response = _state.KinesisClient.PutRecords(request);

                        // The bookmark advances even on partial failure; failed
                        // records are reported below rather than retried.
                        SelfLog.WriteLine("Advancing bookmark from '{0}' to '{1}'", startingOffset, nextLineBeginsAtOffset);
                        WriteBookmark(bookmark, nextLineBeginsAtOffset, currentFilePath);

                        if (response.FailedRecordCount > 0)
                        {
                            foreach (var record in response.Records)
                            {
                                // Fixed: the original format string contained a raw
                                // newline inside a non-verbatim literal (a compile
                                // error); use an escaped \n instead.
                                SelfLog.WriteLine("Kinesis failed to index record in stream '{0}'.\n{1} {2} ", _state.Options.StreamName, record.ErrorCode, record.ErrorMessage);
                            }
                            // fire event
                            OnLogSendError(new LogSendErrorEventArgs(string.Format("Error writing records to {0} ({1} of {2} records failed)", _state.Options.StreamName, response.FailedRecordCount, count), null));
                        }
                    }
                    else
                    {
                        SelfLog.WriteLine("Found no records to process");

                        // Only advance the bookmark if no other process has the
                        // current file locked, and its length is as we found it.
                        var bufferedFilesCount = fileSet.Length;
                        var isProcessingFirstFile = fileSet.First().Equals(currentFilePath, StringComparison.InvariantCultureIgnoreCase);
                        var isFirstFileUnlocked = IsUnlockedAtLength(currentFilePath, nextLineBeginsAtOffset);
                        //SelfLog.WriteLine("BufferedFilesCount: {0}; IsProcessingFirstFile: {1}; IsFirstFileUnlocked: {2}", bufferedFilesCount, isProcessingFirstFile, isFirstFileUnlocked);

                        if (bufferedFilesCount == 2 && isProcessingFirstFile && isFirstFileUnlocked)
                        {
                            SelfLog.WriteLine("Advancing bookmark from '{0}' to '{1}'", currentFilePath, fileSet[1]);
                            WriteBookmark(bookmark, 0, fileSet[1]);
                        }

                        if (bufferedFilesCount > 2)
                        {
                            // Once there's a third file waiting to ship, we do our
                            // best to move on, though a lock on the current file
                            // will delay this.
                            SelfLog.WriteLine("Deleting '{0}'", fileSet[0]);
                            File.Delete(fileSet[0]);
                        }
                    }
                }
            }
        } while (count == _batchPostingLimit);
    }
    catch (Exception ex)
    {
        SelfLog.WriteLine("Exception while emitting periodic batch from {0}: {1}", this, ex);
        OnLogSendError(new LogSendErrorEventArgs(string.Format("Error in shipping logs to '{0}' stream)", _state.Options.StreamName), ex));
    }
    finally
    {
        // Re-arm the timer unless the sink is shutting down.
        lock (_stateLock)
        {
            if (!_unloading)
            {
                SetTimer();
            }
        }
    }
}
/// <summary>
/// Computes the number of payload bytes a record contributes toward the Kinesis
/// PutRecords request-size limit: the UTF-8 byte count of the partition key plus
/// the full length of the data stream.
/// </summary>
/// <param name="request">Record entry whose size is being measured.</param>
/// <returns>Payload size of the entry in bytes.</returns>
private static int GetPayloadByteSize(PutRecordsRequestEntry request) =>
    // Kinesis size limits are byte-based, so count the partition key's encoded
    // bytes (PartitionKey.Length counts UTF-16 chars) and use Data.Length rather
    // than Data.Position, which reads 0 for a freshly rewound stream.
    Encoding.UTF8.GetByteCount(request.PartitionKey) + (int)request.Data.Length;