/// <summary>
/// Uploads three sample records to the given stream partition and prints
/// each per-record result.
/// </summary>
/// <param name="streamName">Name of the target stream (channel).</param>
/// <param name="shardId">Partition identifier to write to.</param>
public static PutRecordsResult RunProducerDemo(string streamName, string shardId)
{
    var client = new DISIngestionClient();
    var entries = new List<PutRecordsRequestEntry>();

    // Build three sample records, all targeting the same partition.
    for (int index = 0; index < 3; index++)
    {
        string payload = shardId + index;
        entries.Add(new PutRecordsRequestEntry
        {
            // The data to upload.
            Data = Encoding.UTF8.GetBytes(payload),
            // Optional: a hash that would override the hash of "PartitionKey".
            //ExplicitHashKey = "0",
            // Optional: used only when PartitionId is not supplied.
            //PartitionKey = new Random().Next().ToString(),
            // Unique identifier of the partition to write to.
            PartitionId = shardId,
        });
    }

    var request = new PutRecordsRequest
    {
        StreamName = streamName,
        Records = entries,
    };

    PutRecordsResult response = client.PutRecords(request);
    foreach (var record in response.Records)
    {
        Console.WriteLine(record);
    }
    return response;
}
/// <summary>
/// Uploads records via <see cref="InnerPutRecordsWithRetry"/> and, when the
/// local data cache is enabled, persists records that could not be uploaded
/// to the local cache file.
/// </summary>
/// <param name="putRecordsParam">The batch of records to upload.</param>
/// <param name="putRecordMethod">Delegate that performs the actual upload.</param>
/// <returns>The upload result, possibly reporting partial failures.</returns>
protected PutRecordsResult InnerPutRecordsSupportingCache(PutRecordsRequest putRecordsParam, PutRecordMethod putRecordMethod)
{
    if (!_disConfig.IsDataCacheEnabled())
    {
        // Caching disabled: plain upload with retry.
        return InnerPutRecordsWithRetry(putRecordsParam, putRecordMethod);
    }

    // Local data cache is enabled.
    PutRecordsResult putRecordsResult;
    try
    {
        putRecordsResult = InnerPutRecordsWithRetry(putRecordsParam, putRecordMethod);
    }
    catch (Exception e)
    {
        // The retry layer reports the HTTP status code on the first line of the
        // inner exception's message; guard against a missing inner exception or
        // a non-numeric first line instead of masking the original failure with
        // a NullReferenceException/FormatException.
        string errorMsg = e.InnerException?.Message ?? e.Message;
        if (int.TryParse(errorMsg.Split('\n')[0], out int statusCode)
            && Utils.Utils.IsCacheData(statusCode))
        {
            // Network-level failure: the whole batch failed, cache every record.
            logger.Info("Local data cache is enabled, try to put failed records to local.");
            CacheUtils.PutToCache(putRecordsParam, _disConfig);
        }
        // "throw;" keeps the original stack trace ("throw e;" would reset it).
        throw;
    }

    // The call succeeded but some individual records may still have failed.
    if (putRecordsResult.FailedRecordCount > 0)
    {
        // Filter out the request entries whose result carries an error code.
        List<PutRecordsResultEntry> putRecordsResultEntries = putRecordsResult.Records;
        List<PutRecordsRequestEntry> failedPutRecordsRequestEntries = new List<PutRecordsRequestEntry>();
        int index = 0;
        foreach (PutRecordsResultEntry putRecordsResultEntry in putRecordsResultEntries)
        {
            if (!String.IsNullOrEmpty(putRecordsResultEntry.ErrorCode))
            {
                failedPutRecordsRequestEntries.Add(putRecordsParam.Records[index]);
            }
            index++;
        }
        putRecordsParam.Records = failedPutRecordsRequestEntries;
        logger.Info("Local data cache is enabled, try to put failed records to local.");
        // Persist the failed subset to the local cache file.
        CacheUtils.PutToCache(putRecordsParam, _disConfig);
    }
    return putRecordsResult;
}
/// <summary>
/// Emit a batch of log events, running to completion asynchronously.
/// </summary>
/// <param name="events">The events to be logged to Kinesis</param>
protected override void EmitBatch(IEnumerable<LogEvent> events)
{
    var request = new PutRecordsRequest { StreamName = _state.Options.StreamName };

    foreach (var logEvent in events)
    {
        // Render the event to text and wrap its UTF-8 bytes in a stream.
        var writer = new StringWriter();
        _state.Formatter.Format(logEvent, writer);
        var payload = Encoding.UTF8.GetBytes(writer.ToString());

        request.Records.Add(new PutRecordsRequestEntry
        {
            // A random key spreads records across shards.
            PartitionKey = Guid.NewGuid().ToString(),
            Data = new MemoryStream(payload),
        });
    }

    _state.KinesisClient.PutRecords(request);
}
/// <summary>
/// Transfers data to Realeyes services via AWS Kinesis
/// </summary>
/// <param name="events">Events to serialize and ship as one batch.</param>
/// <returns>True when the batch was sent successfully; false otherwise.</returns>
private bool SendEvents(List<BaseEvent> events)
{
    var result = true;
    var batch = new PutRecordsRequest();
    batch.StreamName = "live-outside-data-ingestion";

    foreach (var ev in events)
    {
        var entity = new PutRecordsRequestEntry();
        // Partition by account/participant so one participant's events stay together.
        entity.PartitionKey = $"{ev.AccountHash}-{ev.ParticipantId}";
        string dataAsJson = JsonConvert.SerializeObject(ev);
        byte[] dataAsBytes = Encoding.UTF8.GetBytes(dataAsJson);
        entity.Data = new MemoryStream(dataAsBytes);
        batch.Records.Add(entity);
    }

    try
    {
        // GetAwaiter().GetResult() (rather than .Result) unwraps the
        // AggregateException, so the catch below logs the real failure message.
        var response = _kinesisClient.PutRecordsAsync(batch).GetAwaiter().GetResult();
        Console.WriteLine("Successfully sent to Kinesis");
        result = true;
    }
    catch (Exception ex)
    {
        Console.WriteLine("Failed to send record: '{0}'", ex.Message);
        result = false;
    }
    finally
    {
        // Release the per-record memory streams.
        batch.Records.ForEach(r => r.Data?.Dispose());
    }
    return result;
}
/// <summary>
/// ThreadPool callback: unwraps the <c>ThreadInfo</c> state object and
/// persists its request to the local cache.
/// </summary>
/// <param name="a">Expected to be a <c>ThreadInfo</c> instance.</param>
private static void ProcessFile(object a)
{
    // Direct cast fails fast with InvalidCastException instead of the later
    // NullReferenceException the old "as" cast + dereference produced.
    ThreadInfo threadInfo = (ThreadInfo)a;
    PutRecordsRequest putRecordsRequest = threadInfo.putRecordsRequest;
    DISConfig disConfig = threadInfo.disConfig;
    PutToCache(putRecordsRequest, disConfig);
}
/// <summary>
/// Verifies the stream name reported by a request containing one record.
/// </summary>
public void PutRecordsRequetsUnitTest_StreamName()
{
    var request = new PutRecordsRequest();
    var record = make_kinesis_record();

    request.add(record);

    Assert.AreEqual(request.stream(), "myStream");
}
/// <inheritdoc/>
public override async Task<IList<string>> PublishAsync<TEvent>(IList<EventContext<TEvent>> events, EventRegistration registration, DateTimeOffset? scheduled = null, CancellationToken cancellationToken = default)
{
    // log warning when trying to publish scheduled message
    if (scheduled != null)
    {
        Logger.LogWarning("Amazon Kinesis does not support delay or scheduled publish");
    }

    using var scope = CreateScope();
    var records = new List<PutRecordsRequestEntry>();

    try
    {
        // work on each event
        foreach (var @event in events)
        {
            // BUG FIX: the stream was previously declared with a per-iteration
            // "using var", disposing it before PutRecordsAsync could read it.
            // Keep each stream alive for the request and dispose in "finally".
            var ms = new MemoryStream();
            await SerializeAsync(body: ms, @event: @event, registration: registration, scope: scope, cancellationToken: cancellationToken);

            var record = new PutRecordsRequestEntry
            {
                Data = ms,
                PartitionKey = TransportOptions.PartitionKeyResolver(@event),
            };
            records.Add(record);
        }

        // prepare the request
        var streamName = registration.EventName;
        var request = new PutRecordsRequest
        {
            StreamName = streamName,
            Records = records,
        };

        // send the events
        Logger.LogInformation("Sending {EventsCount} messages to '{StreamName}'. Scheduled: {Scheduled}. Events:\r\n- {Ids}", events.Count, streamName, scheduled, string.Join("\r\n- ", events.Select(e => e.Id)));
        var response = await kinesisClient.PutRecordsAsync(request, cancellationToken);
        response.EnsureSuccess();

        // Should we check for failed records and throw exception?

        // return the sequence numbers
        return response.Records.Select(m => m.SequenceNumber.ToString()).ToList();
    }
    finally
    {
        // Dispose the payload streams now that the request has completed.
        foreach (var record in records)
        {
            record.Data?.Dispose();
        }
    }
}
/// <summary>
/// Queues a cache-write of the given request onto the thread pool.
/// </summary>
/// <param name="putRecordsRequest">Request whose records should be cached.</param>
/// <param name="disConfig">Configuration used to locate the cache.</param>
public static void Work(PutRecordsRequest putRecordsRequest, DISConfig disConfig)
{
    var threadInfo = new ThreadInfo()
    {
        putRecordsRequest = putRecordsRequest,
        disConfig = disConfig
    };

    // Make sure enough worker threads are available before queuing the item.
    ThreadPool.SetMinThreads(DEFAULT_THREAD_POOL_SIZE, 10);
    ThreadPool.QueueUserWorkItem(new WaitCallback(ProcessFile), threadInfo);
}
/// <summary>
/// Creates a task describing a single Kinesis request to be executed by the
/// given executor within the supplied deadline/expiration window.
/// </summary>
public KinesisTask(Executor executor, PutRecordsRequest request, DescribeStreamRequest dsRequest,
                   KinesisRquestType reqType, object context, ResponseCallback response_cb,
                   KinesisClientReturn kinesisClientReturn, DateTime deadline, DateTime expiration,
                   int timeout)
    : base(deadline, expiration)
{
    executor_ = executor;
    prRequest_ = request;
    dsRequest_ = dsRequest;
    reqType_ = reqType;
    context_ = context;
    response_cb_ = response_cb;
    timeout_ = timeout;
    // Not finished until run() completes.
    finished_ = false;
    // NOTE(review): kinesisClientReturn is accepted but never stored, matching
    // the original behavior — confirm whether it should be used.
}
/// <summary>
/// Best-effort persistence of failed records into the local cache file; any
/// error is logged and swallowed so the caller is never interrupted.
/// </summary>
/// <param name="putRecordsRequest">Records to persist.</param>
/// <param name="disConfig">Configuration used to locate the cache.</param>
public static void PutToCache(PutRecordsRequest putRecordsRequest, DISConfig disConfig)
{
    try
    {
        CacheManager.GetInstance(disConfig).PutToCache(putRecordsRequest);
    }
    catch (Exception e)
    {
        // Failed to put failed records to local cache file, continue.
        LOGGER.Error("Failed to write failed records to local cache file.", e);
    }
}
/// <summary>
/// Sends a batch of records to Kinesis, blocking until the call completes.
/// </summary>
/// <param name="records">Entries to send.</param>
/// <param name="successful">Set to true when no record in the batch failed.</param>
/// <returns>The raw PutRecords response.</returns>
protected override PutRecordsResponse SendRecords(List<PutRecordsRequestEntry> records, out bool successful)
{
    var request = new PutRecordsRequest
    {
        StreamName = _streamName,
        Records = records
    };

    SelfLog.WriteLine("Writing {0} records to kinesis", records.Count);

    // Materialize the result exactly once: the old code blocked with
    // GetAwaiter().GetResult() and then read .Result a second time, which
    // would also wrap any fault in an AggregateException on that access.
    var response = _kinesisClient.PutRecordsAsync(request).GetAwaiter().GetResult();
    successful = response.FailedRecordCount == 0;
    return response;
}
/// <summary>
/// Synchronously writes a batch of records to Kinesis.
/// </summary>
/// <param name="records">Entries to send.</param>
/// <param name="successful">Set to true when every record was accepted.</param>
/// <returns>The PutRecords response.</returns>
protected override PutRecordsResponse SendRecords(List<PutRecordsRequestEntry> records, out bool successful)
{
    Logger.TraceFormat("Writing {0} records to kinesis", records.Count);

    var response = _kinesisClient.PutRecords(new PutRecordsRequest
    {
        StreamName = _streamName,
        Records = records
    });

    successful = response.FailedRecordCount == 0;
    return response;
}
/// <summary>
/// Splits a local file into chunks and uploads each chunk asynchronously.
/// </summary>
/// <param name="streamName">Target stream name.</param>
/// <param name="file">Path of the local file to upload.</param>
/// <param name="fileName">Remote file name recorded in the extended info.</param>
/// <returns>Total number of records that failed to upload.</returns>
private async Task<int> UploadFile(string streamName, string file, string fileName)
{
    int failedRecordCount = 0;
    // The service accepts files up to 128 MB; cap each chunk at 500 KB for now.
    int splitFileSize = 500 * 1024;
    FileInfo fileInfo = new FileInfo(file);
    // Number of chunks required.
    int steps = (int)Math.Ceiling((decimal)fileInfo.Length / splitFileSize);
    // A single id ties all chunks of this file together on the server side.
    string deliverDataId = Guid.NewGuid().ToString("N");
    using (FileStream fs = new FileStream(file, FileMode.Open, FileAccess.Read))
    {
        using (BinaryReader br = new BinaryReader(fs))
        {
            for (int i = 0; i < steps; i++)
            {
                byte[] input = br.ReadBytes(splitFileSize);
                {
                    PutRecordsRequest putRecordsRequest = new PutRecordsRequest();
                    putRecordsRequest.StreamName = streamName;
                    var putRecordsRequestEntries = new List<PutRecordsRequestEntry>();
                    var putRecordsRequestEntry = new PutRecordsRequestEntry
                    {
                        Data = input,
                        ExtenedInfo = new PutRecordsRequestEntryExtendedInfo()
                        {
                            FileName = fileName,
                            DeliverDataId = deliverDataId,
                            SequenceNumber = i,
                            // Mark the final chunk so the server can reassemble
                            // the file (redundant "? true : false" removed).
                            EndFlag = i == steps - 1,
                        }
                    };
                    putRecordsRequestEntries.Add(putRecordsRequestEntry);
                    putRecordsRequest.Records = putRecordsRequestEntries;
                    PutRecordsResult response = await this.UploadFile(putRecordsRequest);
                    failedRecordCount += response.FailedRecordCount;
                    //Thread.Sleep(500);
                    foreach (var item in response.Records)
                    {
                        Console.WriteLine("异步" + item);
                    }
                }
            }
        }
    }
    return failedRecordCount;
}
/// <summary>
/// Uploads each string in <paramref name="data"/> as one Kinesis record
/// under the given partition key.
/// </summary>
/// <param name="data">Payloads to upload (UTF-8 encoded).</param>
/// <param name="partitionKey">Partition key applied to every record.</param>
public async Task<PutRecordsResponse> PutKinesisRecords(IEnumerable<string> data, string partitionKey)
{
    var entries = new List<PutRecordsRequestEntry>();
    foreach (var item in data)
    {
        entries.Add(new PutRecordsRequestEntry
        {
            Data = new MemoryStream(Encoding.UTF8.GetBytes(item)),
            PartitionKey = partitionKey
        });
    }

    var putRecordsRequest = new PutRecordsRequest
    {
        StreamName = _StreamName,
        Records = entries
    };

    return await _AmazonKinesisClient.PutRecordsAsync(putRecordsRequest);
}
/// <summary>
/// Synchronously uploads a file in chunks.
/// </summary>
/// <param name="streamName">Stream (channel) name.</param>
/// <param name="file">Path of the local file to upload.</param>
/// <returns>Total number of records that failed to upload.</returns>
public static int UploadFileDemo(string streamName, string file)
{
    int failedRecordCount = 0;
    var dic = new DISIngestionClient();
    // The service accepts files up to 128 MB; cap each chunk at 500 KB for now.
    int splitFileSize = 500 * 1024;
    // A single id ties all chunks of this file together on the server side.
    string deliverDataId = Guid.NewGuid().ToString("N");
    FileInfo fileInfo = new FileInfo(file);
    // Number of chunks required.
    int steps = (int)Math.Ceiling((decimal)fileInfo.Length / splitFileSize);
    using (FileStream fs = new FileStream(file, FileMode.Open, FileAccess.Read))
    {
        using (BinaryReader br = new BinaryReader(fs))
        {
            for (int i = 0; i < steps; i++)
            {
                byte[] input = br.ReadBytes(splitFileSize);
                {
                    PutRecordsRequest putRecordsRequest = new PutRecordsRequest();
                    putRecordsRequest.StreamName = streamName;
                    var putRecordsRequestEntries = new List<PutRecordsRequestEntry>();
                    var putRecordsRequestEntry = new PutRecordsRequestEntry
                    {
                        Data = input,
                        ExtenedInfo = new PutRecordsRequestEntryExtendedInfo()
                        {
                            // NOTE(review): remote name is hard-coded in this demo.
                            FileName = @"thisisshawnfolder/2018/07/04/14/56/Penguins2.jpg",
                            DeliverDataId = deliverDataId,
                            SequenceNumber = i,
                            // Mark the final chunk so the server can reassemble
                            // the file (redundant "? true : false" removed).
                            EndFlag = i == steps - 1,
                        }
                    };
                    putRecordsRequestEntries.Add(putRecordsRequestEntry);
                    putRecordsRequest.Records = putRecordsRequestEntries;
                    PutRecordsResult response = dic.PutFileRecords(putRecordsRequest);
                    failedRecordCount += response.FailedRecordCount;
                    //Thread.Sleep(500);
                    foreach (var item in response.Records)
                    {
                        Console.WriteLine("同步" + item);
                    }
                }
            }
        }
    }
    return failedRecordCount;
}
/// <summary>
/// Builds the low-level HTTP request for a file-upload PutRecords call and
/// executes it asynchronously.
/// </summary>
/// <param name="putRecordsParam">The records request to execute.</param>
private async Task<PutRecordsResult> InnerUploadFile(PutRecordsRequest putRecordsParam)
{
    ObsWebServiceRequest obsWebServiceRequest = new DISWebServiceRequest();
    OBS.Runtime.Internal.IRequest requestobs = new DISDefaultRequest(obsWebServiceRequest, Constants.SERVICENAME)
    {
        HttpMethod = HttpMethodName.POST.ToString()
    };

    // POST to the project-scoped records resource.
    requestobs.ResourcePath = ResourcePathBuilder.Standard()
        .WithProjectId(_disConfig.GetProjectId())
        .WithResource(new RecordResource(null))
        .Build();

    return await Request<PutRecordsResult>(putRecordsParam, requestobs);
}
/// <summary>
/// Writes each log message as one record to the given Kinesis stream.
/// </summary>
/// <param name="logMessages">Messages to ship.</param>
/// <param name="streamName">Target stream.</param>
/// <returns>The PutRecords response.</returns>
public PutRecordsResponse Write(List<string> logMessages, string streamName)
{
    var request = new PutRecordsRequest
    {
        StreamName = streamName
    };

    // BUG FIX: the streams were previously created inside per-iteration
    // "using" blocks, so they were disposed before PutRecordsAsync could read
    // them. Keep them alive for the call and dispose afterwards.
    try
    {
        foreach (string message in logMessages)
        {
            request.Records.Add(new PutRecordsRequestEntry
            {
                Data = GenerateStreamFromString(message),
                PartitionKey = Guid.NewGuid().ToString()
            });
        }

        return m_client.PutRecordsAsync(request).Result;
    }
    finally
    {
        // Release the payload streams once the request has completed.
        foreach (var record in request.Records)
        {
            record.Data?.Dispose();
        }
    }
}
/// <summary>
/// Builds the low-level HTTP request for a PutRecords call and executes it
/// synchronously, applying any configured record decoration first.
/// </summary>
/// <param name="putRecordsParam">The records request to execute.</param>
protected PutRecordsResult InnerPutRecords(PutRecordsRequest putRecordsParam)
{
    // Apply optional encryption/compression to the payloads first.
    putRecordsParam = DecorateRecords(putRecordsParam);

    ObsWebServiceRequest obsWebServiceRequest = new DISWebServiceRequest();
    OBS.Runtime.Internal.IRequest requestobs = new DISDefaultRequest(obsWebServiceRequest, Constants.SERVICENAME)
    {
        HttpMethod = HttpMethodName.POST.ToString()
    };

    // POST to the project-scoped records resource.
    requestobs.ResourcePath = ResourcePathBuilder.Standard()
        .WithProjectId(_disConfig.GetProjectId())
        .WithResource(new RecordResource(null))
        .Build();

    return Request<PutRecordsResult>(putRecordsParam, requestobs);
}
/// <summary>
/// Applies the configured record transformations — encryption first, then
/// compression — to each record's payload in place.
/// </summary>
/// <param name="putRecordsParam">Request whose record payloads are transformed.</param>
/// <returns>The same request instance with transformed payloads.</returns>
protected PutRecordsRequest DecorateRecords(PutRecordsRequest putRecordsParam)
{
    // Encryption
    if (IsEncrypt() && putRecordsParam.Records != null)
    {
        for (int i = 0; i < putRecordsParam.Records.Count; i++)
        {
            putRecordsParam.Records[i].Data = Encrypt(new MemoryStream(putRecordsParam.Records[i].Data));
        }
    }

    // Compression
    if (_disConfig.IsDataCompressEnabled() && putRecordsParam.Records != null)
    {
        for (int i = 0; i < putRecordsParam.Records.Count; i++)
        {
            byte[] input = putRecordsParam.Records[i].Data;
            try
            {
                byte[] compressedInput = CompressUtils.Compress(input);
                putRecordsParam.Records[i].Data = compressedInput;
            }
            catch (IOException e)
            {
                logger.Error(e.Message, e);
                // Preserve the original failure as the inner exception so the
                // stack trace is not lost (the old code threw a bare Exception
                // carrying only the message).
                throw new Exception(e.Message, e);
            }
        }
    }
    return putRecordsParam;
}
/// <summary>
/// Splits the generated records into batches of <c>BatchSize</c> and writes
/// each batch to the Kinesis stream.
/// </summary>
/// <param name="request">The batch request to expand into records.</param>
private async Task SendBatchToStream(BatchRequest request)
{
    var items = GenerateRecords(request);

    for (var i = 0; i < items.Count; i += BatchSize)
    {
        // BUG FIX: the previous code skipped a constant 0 items on every pass
        // (its "skip" variable was never incremented), so it re-sent the first
        // batch repeatedly. Skip by the loop offset instead.
        var chunk = items.Skip(i).Take(BatchSize).ToList();
        var batchRequest = new PutRecordsRequest
        {
            StreamName = StreamName,
            Records = GetStreamRecords(chunk)
        };

        var result = await _stream.PutRecordsAsync(batchRequest);
        if (result.FailedRecordCount > 0)
        {
            LogInfo($"Failed Items: {result.FailedRecordCount}");
        }
    }
    LogInfo($"Completed. {DateTime.UtcNow}");
}
/// <summary>
/// Checks that accurate_size/Estimated_size track additions and removals of
/// records, using a stack of expected sizes as the oracle.
/// </summary>
public void PutRecordsRequetsUnitTest_SizePrediction()
{
    PutRecordsRequest prr = new PutRecordsRequest();
    Assert.AreEqual((int)prr.accurate_size(), 0);
    Assert.AreEqual(prr.Estimated_size(), prr.accurate_size());

    var expectedSizes = new Stack<int>();
    expectedSizes.Push(0);

    const int iterations = 100;

    // Grow the request one record at a time, checking the size after each add.
    for (int i = 0; i < iterations; i++)
    {
        var record = make_kinesis_record();
        prr.add(record);

        int growth = record.serialize().Length + record.partition_key().Length;
        // The 16-byte adjustment matches the original oracle — presumably a
        // per-record overhead the request does not count; confirm against the
        // accurate_size implementation.
        int expected = expectedSizes.Peek() + growth - 16;

        Assert.AreEqual((int)prr.accurate_size(), expected);
        Assert.AreEqual(prr.Estimated_size(), prr.accurate_size());
        expectedSizes.Push(expected);
    }

    // Shrink back down, verifying the size before each removal.
    for (int i = 0; i < iterations; i++)
    {
        Assert.AreEqual((int)prr.accurate_size(), expectedSizes.Peek());
        Assert.AreEqual(prr.Estimated_size(), prr.accurate_size());
        expectedSizes.Pop();
        prr.remove_last();
    }

    Assert.AreEqual((int)prr.accurate_size(), 0);
    Assert.AreEqual(prr.Estimated_size(), prr.accurate_size());
}
/// <summary>
/// Converts the native request's items into AWS SDK PutRecordsRequestEntry
/// objects; returns null when the request itself is null.
/// </summary>
/// <param name="request_">The native request to convert.</param>
private List<Amazon.Kinesis.Model.PutRecordsRequestEntry> BuildPutRecordsRequestEntries(PutRecordsRequest request_)
{
    if (request_ == null)
    {
        StdErrorOut.Instance.StdOut(LogLevel.debug, "NULL PutRecordsRequest");
        return null;
    }

    StdErrorOut.Instance.StdOut(LogLevel.debug, "request_.serialize() = " + request_.serialize());

    var entries = new List<Amazon.Kinesis.Model.PutRecordsRequestEntry>();
    foreach (var item in request_.Items())
    {
        var entry = new Amazon.Kinesis.Model.PutRecordsRequestEntry
        {
            PartitionKey = item.partition_key(),
            ExplicitHashKey = item.explicit_hash_key(),
            // Aggregated items carry a pre-serialized payload; plain items are
            // serialized to UTF-8 here.
            Data = item.is_aggregated
                ? new System.IO.MemoryStream(item.SerializedAggregatedRecord)
                : new System.IO.MemoryStream(Encoding.UTF8.GetBytes(item.serialize()))
        };
        entries.Add(entry);
    }
    return entries;
}
// Timer callback: ships buffered log lines to Kinesis in batches, tracking
// progress through the buffer files via a persistent "bookmark" file.
void OnTick()
{
    try
    {
        var count = 0;
        do
        {
            // Locking the bookmark ensures that though there may be multiple instances of this
            // class running, only one will ship logs at a time.
            using (var bookmark = File.Open(_bookmarkFilename, FileMode.OpenOrCreate, FileAccess.ReadWrite, FileShare.Read))
            {
                long startingOffset;
                long nextLineBeginsAtOffset;
                string currentFilePath;
                // Resume from the last recorded position, if any.
                TryReadBookmark(bookmark, out nextLineBeginsAtOffset, out currentFilePath);
                SelfLog.WriteLine("Bookmark is currently at offset {0} in '{1}'", nextLineBeginsAtOffset, currentFilePath);
                var fileSet = GetFileSet();
                // If the bookmarked file vanished, restart at the first buffer file.
                if (currentFilePath == null || !File.Exists(currentFilePath))
                {
                    nextLineBeginsAtOffset = 0;
                    currentFilePath = fileSet.FirstOrDefault();
                }
                if (currentFilePath != null)
                {
                    count = 0;
                    var records = new List<PutRecordsRequestEntry>();
                    // Read up to _batchPostingLimit lines from the current buffer file,
                    // one record per line.
                    using (var current = File.Open(currentFilePath, FileMode.Open, FileAccess.Read, FileShare.ReadWrite))
                    {
                        startingOffset = current.Position = nextLineBeginsAtOffset;
                        string nextLine;
                        while (count < _batchPostingLimit && TryReadLine(current, ref nextLineBeginsAtOffset, out nextLine))
                        {
                            ++count;
                            var bytes = Encoding.UTF8.GetBytes(nextLine);
                            var record = new PutRecordsRequestEntry
                            {
                                // A random key spreads records across shards.
                                PartitionKey = Guid.NewGuid().ToString(),
                                Data = new MemoryStream(bytes)
                            };
                            records.Add(record);
                        }
                    }
                    if (count > 0)
                    {
                        var request = new PutRecordsRequest
                        {
                            StreamName = _state.Options.StreamName,
                            Records = records
                        };
                        SelfLog.WriteLine("Writing {0} records to kinesis", count);
                        PutRecordsResponse response = _state.KinesisClient.PutRecords(request);
                        // Advance the bookmark only after the send call returned.
                        SelfLog.WriteLine("Advancing bookmark from '{0}' to '{1}'", startingOffset, nextLineBeginsAtOffset);
                        WriteBookmark(bookmark, nextLineBeginsAtOffset, currentFilePath);
                        if (response.FailedRecordCount > 0)
                        {
                            foreach (var record in response.Records)
                            {
                                SelfLog.WriteLine("Kinesis failed to index record in stream '{0}'. 
{1} {2} ", _state.Options.StreamName, record.ErrorCode, record.ErrorMessage);
                            }
                            // fire event
                            OnLogSendError(new LogSendErrorEventArgs(string.Format("Error writing records to {0} ({1} of {2} records failed)", _state.Options.StreamName, response.FailedRecordCount, count), null));
                        }
                    }
                    else
                    {
                        SelfLog.WriteLine("Found no records to process");
                        // Only advance the bookmark if no other process has the
                        // current file locked, and its length is as we found it.
                        var bufferedFilesCount = fileSet.Length;
                        var isProcessingFirstFile = fileSet.First().Equals(currentFilePath, StringComparison.InvariantCultureIgnoreCase);
                        var isFirstFileUnlocked = IsUnlockedAtLength(currentFilePath, nextLineBeginsAtOffset);
                        //SelfLog.WriteLine("BufferedFilesCount: {0}; IsProcessingFirstFile: {1}; IsFirstFileUnlocked: {2}", bufferedFilesCount, isProcessingFirstFile, isFirstFileUnlocked);
                        if (bufferedFilesCount == 2 && isProcessingFirstFile && isFirstFileUnlocked)
                        {
                            // The first file is fully shipped and unlocked: move on to the next one.
                            SelfLog.WriteLine("Advancing bookmark from '{0}' to '{1}'", currentFilePath, fileSet[1]);
                            WriteBookmark(bookmark, 0, fileSet[1]);
                        }
                        if (bufferedFilesCount > 2)
                        {
                            // Once there's a third file waiting to ship, we do our
                            // best to move on, though a lock on the current file
                            // will delay this.
                            SelfLog.WriteLine("Deleting '{0}'", fileSet[0]);
                            File.Delete(fileSet[0]);
                        }
                    }
                }
            }
        }while (count == _batchPostingLimit);
    }
    catch (Exception ex)
    {
        SelfLog.WriteLine("Exception while emitting periodic batch from {0}: {1}", this, ex);
        OnLogSendError(new LogSendErrorEventArgs(string.Format("Error in shipping logs to '{0}' stream)", _state.Options.StreamName), ex));
    }
    finally
    {
        // Re-arm the timer unless the sink is shutting down.
        lock (_stateLock)
        {
            if (!_unloading)
            {
                SetTimer();
            }
        }
    }
}
/// <summary>
/// Asynchronously uploads the file described by the request and returns the
/// number of failed records.
/// </summary>
/// <param name="putFilesRequest">Request naming the stream, local path and remote name.</param>
public async Task<int> PutFilesAsync(PutRecordsRequest putFilesRequest)
{
    return await this.UploadFile(putFilesRequest.StreamName,
                                 putFilesRequest.FilePath,
                                 putFilesRequest.FileName);
}
/// <summary>
/// Uploads a file-backed request, routing failures through the local-cache
/// support when it is enabled.
/// </summary>
/// <param name="putRecordsParam">The records request to upload.</param>
private async Task<PutRecordsResult> UploadFile(PutRecordsRequest putRecordsParam)
{
    var uploadMethod = new PutRecordAsyncMethod(InnerUploadFile);
    return await InnerPutRecordsSupportingCache(putRecordsParam, uploadMethod);
}
/// <summary>
/// Uploads records with exponential-backoff retries for the failed subset.
/// Only failed records are re-sent on each retry; per-record results are
/// merged back into their original positions so the caller receives one
/// aggregate result covering the whole batch.
/// </summary>
/// <param name="putRecordsParam">The original batch of records.</param>
/// <param name="putRecordMethod">Delegate that performs one upload attempt.</param>
/// <returns>Aggregate result covering every record of the original batch.</returns>
protected PutRecordsResult InnerPutRecordsWithRetry(PutRecordsRequest putRecordsParam, PutRecordMethod putRecordMethod)
{
    // Result of the most recent upload attempt.
    PutRecordsResult putRecordsResult = null;
    // Aggregates the per-record results across attempts, in original order.
    PutRecordsResultEntry[] putRecordsResultEntryList = null;
    // Original indices of the records that failed on the previous attempt.
    int[] retryIndex = null;
    // Request sent on the current attempt (shrinks to the failed subset).
    PutRecordsRequest retryPutRecordsRequest = putRecordsParam;
    int retryCount = -1;
    int currentFailed = 0;
    ExponentialBackOff backOff = null;
    try
    {
        do
        {
            retryCount++;
            if (retryCount > 0)
            {
                // Wait for a while before retrying.
                if (backOff == null)
                {
                    // First retry: serialize retrying threads via the shared lock.
                    Monitor.Enter(objlock);
                    logger.Info("Put records retry lock.");
                    backOff = new ExponentialBackOff(ExponentialBackOff.DEFAULT_INITIAL_INTERVAL,
                        ExponentialBackOff.DEFAULT_MULTIPLIER, _disConfig.GetBackOffMaxIntervalMs(),
                        ExponentialBackOff.DEFAULT_MAX_ELAPSED_TIME);
                }

                if (putRecordsResult != null && currentFailed != putRecordsResult.Records.Count)
                {
                    // Partial success: reset the backoff interval.
                    backOff.resetCurrentInterval();
                }

                long sleepMs = backOff.getNextBackOff();
                if (retryPutRecordsRequest.Records.Count > 0)
                {
                    logger.DebugFormat(
                        "Put {0} records but {1} failed, will re-try after backoff {2} ms, current retry count is {3}.",
                        putRecordsResult != null ? putRecordsResult.Records.Count : putRecordsParam.Records.Count,
                        currentFailed, sleepMs, retryCount);
                }
                backOff.backOff(sleepMs);
            }

            try
            {
                putRecordsResult = putRecordMethod(retryPutRecordsRequest);
            }
            catch (Exception t)
            {
                if (putRecordsResultEntryList != null)
                {
                    // Earlier attempts produced partial results; keep them and stop retrying.
                    logger.Error(t.Message, t);
                    break;
                }
                // First attempt failed outright: rethrow preserving the stack
                // trace ("throw t;" would have reset it).
                throw;
            }

            if (putRecordsResult != null)
            {
                currentFailed = putRecordsResult.FailedRecordCount;

                if (putRecordsResultEntryList == null && currentFailed == 0 || _disConfig.GetRecordsRetries() == 0)
                {
                    // Everything succeeded on the first attempt, or retries are
                    // disabled: return the raw result directly.
                    return putRecordsResult;
                }

                if (putRecordsResultEntryList == null)
                {
                    // Some records failed and retries are enabled: start
                    // aggregating per-record results across attempts.
                    putRecordsResultEntryList = new PutRecordsResultEntry[putRecordsParam.Records.Count];
                }

                // Original indices of the records that must be retried next round.
                List<int> retryIndexTemp = new List<int>(currentFailed);

                if (currentFailed > 0)
                {
                    // Build the request for the next attempt (failed subset only).
                    retryPutRecordsRequest = new PutRecordsRequest();
                    retryPutRecordsRequest.StreamName = putRecordsParam.StreamName;
                    retryPutRecordsRequest.Records = new List<PutRecordsRequestEntry>(currentFailed);
                }

                // Merge this attempt's results back into their original slots.
                for (int i = 0; i < putRecordsResult.Records.Count; i++)
                {
                    // Map the position in this attempt back to the original index.
                    int originalIndex = retryIndex == null ? i : retryIndex[i];
                    PutRecordsResultEntry putRecordsResultEntry = putRecordsResult.Records[i];

                    // Retry every record that carries an error code.
                    // (Originally considered: && "DIS.4303".equals(putRecordsResultEntry.getErrorCode()))
                    if (!string.IsNullOrEmpty(putRecordsResultEntry.ErrorCode))
                    {
                        retryIndexTemp.Add(originalIndex);
                        retryPutRecordsRequest.Records.Add(putRecordsParam.Records[originalIndex]);
                    }
                    putRecordsResultEntryList[originalIndex] = putRecordsResultEntry;
                }

                retryIndex = retryIndexTemp.Count > 0 ? retryIndexTemp.ToArray() : new int[0];
            }
        } while ((retryIndex == null || retryIndex.Length > 0) && retryCount < _disConfig.GetRecordsRetries());
    }
    finally
    {
        if (retryCount > 0)
        {
            // Release the retry lock taken on the first retry.
            Monitor.Exit(objlock);
            logger.Info("Put records retry unlock.");
        }
    }

    // Build the aggregate result from the merged per-record entries.
    putRecordsResult = new PutRecordsResult();
    if (retryIndex == null)
    {
        // Should be unreachable: a total send failure throws above.
        putRecordsResult.FailedRecordCount = putRecordsParam.Records.Count;
    }
    else
    {
        putRecordsResult.FailedRecordCount = retryIndex.Length;
        putRecordsResult.Records = new List<PutRecordsResultEntry>(putRecordsResultEntryList);
    }
    return putRecordsResult;
}
// Demo driver: exercises the DIS client end to end — stream lifecycle,
// record/file upload and download, app management, checkpoints, transfer
// tasks and monitoring queries. Each call is wrapped in its own try/catch
// so one failing demo does not stop the rest.
public static void Main(string[] args)
{
    string streamName = "streamName";
    //string streamName = "dis_file_test";
    const string shardId = "shardId-0000000000";
    string appName = "testAppName";

    #region MyRegion
    // Create the stream.
    try { DISClient.CreateStream(streamName, 1); } catch (Exception e) { Console.WriteLine(e.Message); }

    // Upload records.
    try { DISClient.RunProducerDemo(streamName, shardId); } catch (Exception e) { Console.WriteLine(e.Message); }

    // Download records.
    try { DISClient.RunConsumerDemo(streamName, shardId); } catch (Exception e) { Console.WriteLine(e.Message); }

    // Synchronously upload a small file.
    try
    {
        int result = DISClient.UploadFileDemo("dis_file_test", @"C:\Users\Public\Pictures\Sample Pictures\Penguins.jpg");
        if (result == 0) { Console.WriteLine("Success."); } else { Console.WriteLine("Fail."); }
    }
    catch (Exception e) { Console.WriteLine(e.Message); }

    // Asynchronously upload a small file.
    try
    {
        var dic = new DISAsync();
        PutRecordsRequest putRecordsRequest = new PutRecordsRequest();
        putRecordsRequest.StreamName = "dis_file_test";
        putRecordsRequest.FileName = @"thisisshawnfolder/2018/Penguins1110.jpg";
        putRecordsRequest.FilePath = @"C:\Users\Public\Pictures\Sample Pictures\Penguins.jpg";
        var taskResult = dic.PutFilesAsync(putRecordsRequest);
        if (taskResult.GetAwaiter().GetResult() == 0) { Console.WriteLine("Success."); } else { Console.WriteLine("Fail."); }
    }
    catch (Exception e) { Console.WriteLine(e.Message); }

    // List the streams.
    try { DISClient.DescribeStreamList(null, 100); } catch (Exception e) { Console.WriteLine(e.Message); }

    // Describe one stream.
    try { DISClient.DescribeStream(streamName, null, 100); } catch (Exception e) { Console.WriteLine(e.Message); }

    // Get a data cursor.
    try { DISClient.GetCursorDemo(streamName, shardId); } catch (Exception e) { Console.Out.WriteLine(e.Message); }

    // Create an APP.
    try { ResponseResult response = DISClient.CreateAPPDemo(appName); Console.Out.WriteLine(response); } catch (Exception e) { Console.WriteLine(e.Message); }

    // List the APPs.
    try
    {
        DescribeAppListResult response = DISClient.DescribeAppList("", null);
        var reqJson = JsonConvert.SerializeObject(response);
        Console.WriteLine(reqJson);
    }
    catch (Exception e) { Console.WriteLine(e.Message); }

    // Describe one APP.
    try { DISClient.DescribeApp("testAppName"); } catch (Exception e) { Console.WriteLine(e.Message); }

    // Add a checkpoint.
    try { DISClient.AddCheckPointDemo(streamName, appName); } catch (Exception e) { Console.WriteLine(e.Message); }

    // Query a checkpoint.
    try { DISClient.GetCheckPointDemo(streamName, appName); } catch (Exception e) { Console.WriteLine(e.Message); }

    // Change the partition count.
    //Custom file stream is not suitable for operating.
    try { DISClient.UpdatePartitionCount(streamName, 1); } catch (Exception e) { Console.WriteLine(e.Message); }

    // Add an OBS dump (transfer) task.
    try { DISClient.CreateTransferTaskWithOBS(streamName); } catch (Exception e) { Console.WriteLine(e.Message); }

    // Add a DLI dump (transfer) task.
    try { DISClient.CreateTransferTaskWithDLI(streamName); } catch (Exception e) { Console.WriteLine(e.Message); }

    // Query stream-level monitoring metrics (last 3 days).
    try { DISClient.GetStreamMetricInfo(streamName, "total_put_bytes_per_stream", DISClient.GetTimeStamp() - 3 * 24 * 60 * 60, DISClient.GetTimeStamp()); } catch (Exception e) { Console.WriteLine(e.Message); }

    // Query partition-level monitoring metrics (last 3 days).
    try { DISClient.GetPartitionMetricInfo(streamName, "0", "total_get_records_per_partition", DISClient.GetTimeStamp() - 3 * 24 * 60 * 60, DISClient.GetTimeStamp()); } catch (Exception e) { Console.WriteLine(e.Message); }

    // List the transfer tasks.
    try { DISClient.GetStreamTransferTaskList(streamName); } catch (Exception e) { Console.WriteLine(e.Message); }

    // Describe one transfer task.
    try { DISClient.GetStreamTransferTaskDetail(streamName, "task_1234"); } catch (Exception e) { Console.WriteLine(e.Message); }

    // Query the stream consuming info.
    try { DISClient.GetStreamConsumingInfo(streamName, appName, "", null); } catch (Exception e) { Console.WriteLine(e.Message); }

    // Delete a checkpoint.
    try { DISClient.DeleteCheckpoint(streamName, appName, "0", "LAST_READ"); } catch (Exception e) { Console.WriteLine(e.Message); }

    // Delete the APP.
    try { ResponseResult response = DISClient.DeleteAPPDemo(appName); Console.Out.WriteLine(response); } catch (Exception e) { Console.WriteLine(e.Message); }

    // Delete the transfer task.
    try { DISClient.DeleteTransferTask(streamName, "task_1234"); } catch (Exception e) { Console.WriteLine(e.Message); }

    // Delete the stream.
    try { DISClient.DeleteStream(streamName); } catch (Exception e) { Console.WriteLine(e.Message); }
    #endregion
}
/// <summary>
/// Not implemented in this client.
/// </summary>
public void PutRecordsAsync(PutRecordsRequest request, AmazonServiceCallback<PutRecordsRequest, PutRecordsResponse> callback, AsyncOptions options = null)
    => throw new System.NotImplementedException();
/// <summary>
/// File upload entry point: wraps <c>InnerPutFileRecords</c> with local-cache
/// support for failed records.
/// </summary>
/// <param name="putRecordsParam">Request describing the file records.</param>
/// <returns>The upload result.</returns>
public PutRecordsResult PutFileRecords(PutRecordsRequest putRecordsParam)
{
    var uploadMethod = new PutRecordMethod(InnerPutFileRecords);
    return InnerPutRecordsSupportingCache(putRecordsParam, uploadMethod);
}
/// <summary>
/// Schedules a PutRecords request on a dedicated thread against the
/// least-used client.
/// </summary>
public void PutRecordsRequest(PutRecordsRequest request, ResponseCallback cb, object context, DateTime deadline, DateTime expiration)
{
    var task = new KinesisTask(
        executor_,
        request,
        null,
        KinesisRquestType.PutRecordsRequest,
        context,
        cb,
        (client) => { },
        deadline,
        expiration,
        (int)requestTimeout);

    var worker = new Thread(() => task.run(GetNextLeastUsedClient()));
    worker.Start();
}