/// <summary>
/// Unmarshaller the response from the service to the response class.
/// </summary>
/// <param name="context">JSON unmarshalling context positioned at the start of the response document.</param>
/// <returns>A populated <see cref="PutRecordsResponse"/>.</returns>
public override AmazonWebServiceResponse Unmarshall(JsonUnmarshallerContext context)
{
    PutRecordsResponse response = new PutRecordsResponse();

    context.Read();
    int targetDepth = context.CurrentDepth;

    // Visit every property at the top level of the response document and
    // copy the recognised ones onto the response object; anything else is
    // skipped by ReadAtDepth.
    while (context.ReadAtDepth(targetDepth))
    {
        if (context.TestExpression("EncryptionType", targetDepth))
        {
            response.EncryptionType = StringUnmarshaller.Instance.Unmarshall(context);
            continue;
        }
        if (context.TestExpression("FailedRecordCount", targetDepth))
        {
            response.FailedRecordCount = IntUnmarshaller.Instance.Unmarshall(context);
            continue;
        }
        if (context.TestExpression("Records", targetDepth))
        {
            var recordsUnmarshaller = new ListUnmarshaller<PutRecordsResultEntry, PutRecordsResultEntryUnmarshaller>(PutRecordsResultEntryUnmarshaller.Instance);
            response.Records = recordsUnmarshaller.Unmarshall(context);
            continue;
        }
    }

    return response;
}
/// <summary>
/// Timer callback: drains up to <c>BatchSize</c> queued log messages and
/// writes them to the Kinesis stream, re-queuing any records the service
/// reports as failed.
/// </summary>
/// <param name="sender">The timer raising the event (unused).</param>
/// <param name="args">Elapsed-event data (unused).</param>
private void WriteToAmazon(object sender, ElapsedEventArgs args)
{
    // Nothing queued — nothing to do.
    if (m_logMessages.Count == 0)
    {
        return;
    }

    // Batch not yet full and the batch interval has not elapsed — wait.
    if (m_logMessages.Count < BatchSize && m_lastProcessed.AddSeconds(BatchInterval) > DateTime.Now)
    {
        return;
    }

    // Stop the timer so overlapping ticks cannot ship concurrently.
    m_timer.Stop();
    try
    {
        // Lazily create the uploader on first use.
        if (m_upload == null)
        {
            m_upload = new ConnatixKinesisUpload(AwsKey, AwsSecret, AwsRegion);
        }

        if (m_logMessages.Count > 0)
        {
            // Pull at most BatchSize messages off the queue.
            List<string> batch = new List<string>();
            for (int i = 0; i < BatchSize; i++)
            {
                if (!m_logMessages.TryDequeue(out var message))
                {
                    break;
                }
                batch.Add(message);
            }

            if (batch.Count > 0)
            {
                PutRecordsResponse response = m_upload.Write(batch, Stream);
                if (response.FailedRecordCount > 0)
                {
                    // The response reports per-record outcomes in submission
                    // order, so index i in the response lines up with index i
                    // in the batch; re-queue only the records that failed.
                    for (int i = 0; i < response.Records.Count; i++)
                    {
                        if (!string.IsNullOrEmpty(response.Records[i].ErrorCode))
                        {
                            m_logMessages.Enqueue(batch[i]);
                        }
                    }
                }
            }
        }
    }
    catch
    {
        // Deliberately swallowed: a failure to ship logs must never take the
        // host application down. NOTE(review): consider surfacing this via a
        // fallback sink so shipping failures are not completely invisible.
    }
    finally
    {
        m_lastProcessed = DateTime.Now;
        m_timer.Start();
    }
}
/// <summary>
/// Timer tick: ships buffered log lines to Kinesis in batches, persisting
/// progress (file path + byte offset) in a bookmark file so multiple shipper
/// instances cooperate via an exclusive lock on that file.
/// </summary>
void OnTick()
{
    try
    {
        var count = 0;
        do
        {
            // Locking the bookmark ensures that though there may be multiple instances of this
            // class running, only one will ship logs at a time.
            using (var bookmark = File.Open(_bookmarkFilename, FileMode.OpenOrCreate, FileAccess.ReadWrite, FileShare.Read))
            {
                long startingOffset;
                long nextLineBeginsAtOffset;
                string currentFilePath;
                TryReadBookmark(bookmark, out nextLineBeginsAtOffset, out currentFilePath);
                SelfLog.WriteLine("Bookmark is currently at offset {0} in '{1}'", nextLineBeginsAtOffset, currentFilePath);
                var fileSet = GetFileSet();
                // No bookmark yet, or the bookmarked buffer file has vanished:
                // restart from the beginning of the oldest buffered file.
                if (currentFilePath == null || !File.Exists(currentFilePath))
                {
                    nextLineBeginsAtOffset = 0;
                    currentFilePath = fileSet.FirstOrDefault();
                }
                if (currentFilePath != null)
                {
                    count = 0;
                    var records = new List<PutRecordsRequestEntry>();
                    // Read up to _batchPostingLimit lines from the current buffer
                    // file, turning each UTF-8 line into one Kinesis record with a
                    // random partition key (spreads load across shards).
                    using (var current = File.Open(currentFilePath, FileMode.Open, FileAccess.Read, FileShare.ReadWrite))
                    {
                        startingOffset = current.Position = nextLineBeginsAtOffset;
                        string nextLine;
                        while (count < _batchPostingLimit && TryReadLine(current, ref nextLineBeginsAtOffset, out nextLine))
                        {
                            ++count;
                            var bytes = Encoding.UTF8.GetBytes(nextLine);
                            var record = new PutRecordsRequestEntry
                            {
                                PartitionKey = Guid.NewGuid().ToString(),
                                Data = new MemoryStream(bytes)
                            };
                            records.Add(record);
                        }
                    }
                    if (count > 0)
                    {
                        var request = new PutRecordsRequest
                        {
                            StreamName = _state.Options.StreamName,
                            Records = records
                        };
                        SelfLog.WriteLine("Writing {0} records to kinesis", count);
                        PutRecordsResponse response = _state.KinesisClient.PutRecords(request);
                        // NOTE(review): the bookmark is advanced even when the
                        // response reports failed records, so failed records are
                        // logged and reported via OnLogSendError but never
                        // retried — confirm this loss is intended.
                        SelfLog.WriteLine("Advancing bookmark from '{0}' to '{1}'", startingOffset, nextLineBeginsAtOffset);
                        WriteBookmark(bookmark, nextLineBeginsAtOffset, currentFilePath);
                        if (response.FailedRecordCount > 0)
                        {
                            foreach (var record in response.Records)
                            {
                                SelfLog.WriteLine("Kinesis failed to index record in stream '{0}'. {1} {2} ", _state.Options.StreamName, record.ErrorCode, record.ErrorMessage);
                            }
                            // fire event
                            OnLogSendError(new LogSendErrorEventArgs(string.Format("Error writing records to {0} ({1} of {2} records failed)", _state.Options.StreamName, response.FailedRecordCount, count), null));
                        }
                    }
                    else
                    {
                        SelfLog.WriteLine("Found no records to process");
                        // Only advance the bookmark if no other process has the
                        // current file locked, and its length is as we found it.
                        var bufferedFilesCount = fileSet.Length;
                        var isProcessingFirstFile = fileSet.First().Equals(currentFilePath, StringComparison.InvariantCultureIgnoreCase);
                        var isFirstFileUnlocked = IsUnlockedAtLength(currentFilePath, nextLineBeginsAtOffset);
                        //SelfLog.WriteLine("BufferedFilesCount: {0}; IsProcessingFirstFile: {1}; IsFirstFileUnlocked: {2}", bufferedFilesCount, isProcessingFirstFile, isFirstFileUnlocked);
                        if (bufferedFilesCount == 2 && isProcessingFirstFile && isFirstFileUnlocked)
                        {
                            SelfLog.WriteLine("Advancing bookmark from '{0}' to '{1}'", currentFilePath, fileSet[1]);
                            WriteBookmark(bookmark, 0, fileSet[1]);
                        }
                        if (bufferedFilesCount > 2)
                        {
                            // Once there's a third file waiting to ship, we do our
                            // best to move on, though a lock on the current file
                            // will delay this.
                            SelfLog.WriteLine("Deleting '{0}'", fileSet[0]);
                            File.Delete(fileSet[0]);
                        }
                    }
                }
            }
        }
        // A full batch suggests more lines are waiting — loop immediately.
        while (count == _batchPostingLimit);
    }
    catch (Exception ex)
    {
        SelfLog.WriteLine("Exception while emitting periodic batch from {0}: {1}", this, ex);
        OnLogSendError(new LogSendErrorEventArgs(string.Format("Error in shipping logs to '{0}' stream)", _state.Options.StreamName), ex));
    }
    finally
    {
        // Re-arm the timer unless the sink is unloading.
        lock (_stateLock)
        {
            if (!_unloading)
            {
                SetTimer();
            }
        }
    }
}
/// <summary>
/// Runs one synchronization pass against the remote Cognito store: pulls
/// remote updates since the last sync count, resolves conflicts (via
/// <c>OnSyncConflict</c> or the default resolution), saves merged records
/// locally, then pushes local modifications back to the remote store.
/// Retries itself (decrementing <paramref name="retry"/>) after a dataset
/// merge or a remote write conflict.
/// </summary>
/// <param name="retry">Remaining retry attempts; when negative the operation fails immediately.</param>
/// <param name="callback">Invoked with the overall success/failure of the pass.</param>
private void RunSyncOperationAsync(int retry, Action<RunSyncOperationResponse> callback)
{
    if (retry < 0)
    {
        callback(new RunSyncOperationResponse(false, null));
        return;
    }

    long lastSyncCount = _local.GetLastSyncCount(GetIdentityId(), _datasetName);

    // if dataset is deleted locally, push it to remote
    if (lastSyncCount == -1)
    {
#if DELETE_METHOD_SUPPORT
        // NOTE(review): when DELETE_METHOD_SUPPORT is defined, the "invalid
        // scenario" code below the #endif still runs immediately after this
        // async call is issued — confirm whether a 'return' inside this
        // #if block was intended.
        _remote.DeleteDatasetAsync(_datasetName, delegate(AmazonCognitoResult result)
        {
            if (result.Exception != null)
            {
                var e = result.Exception as DataStorageException;
                AmazonLogging.LogError(AmazonLogging.AmazonLoggingLevel.Errors, "CognitoSyncManager", "OnSyncFailure" + e.Message);
                this.FireSyncFailureEvent(e);
                callback(new RunSyncOperationResponse(false, null));
                return;
            }
            _local.PurgeDataset(GetIdentityId(), _datasetName);
            AmazonLogging.Log(AmazonLogging.AmazonLoggingLevel.Verbose, "CognitoSyncManager", "OnSyncSuccess: dataset delete is pushed to remote");
            this.FireSyncSuccessEvent(new List<Record>());
            callback(new RunSyncOperationResponse(true, null));
            return;
        }, null);
#endif
        // invalid scenario
        AmazonLogging.LogError(AmazonLogging.AmazonLoggingLevel.Critical, "CognitoSyncManager", "OnSyncFailure: DeleteDataset is an invalid operation");
        FireSyncFailureEvent(new DataStorageException("DeleteDataset is an invalid operation"));
        callback(new RunSyncOperationResponse(false, null));
        return;
    }

    // get latest modified records from remote
    AmazonLogging.Log(AmazonLogging.AmazonLoggingLevel.Verbose, "CognitoSyncManager", "get latest modified records since " + lastSyncCount);
    _remote.ListUpdatesAsync(_datasetName, lastSyncCount, delegate(AmazonCognitoResult listUpdatesResult)
    {
        RemoteDataStorage.DatasetUpdates datasetUpdates = null;
        if (listUpdatesResult == null || listUpdatesResult.Exception != null)
        {
            // FIX: the previous code dereferenced listUpdatesResult here even
            // though the guard above admits a null result, which would throw
            // NullReferenceException on the very path meant to report failure.
            Exception resultException = listUpdatesResult == null ? null : listUpdatesResult.Exception;
            var e = resultException as DataStorageException;
            AmazonLogging.LogException(AmazonLogging.AmazonLoggingLevel.Verbose, "CognitoSyncManager", e);
            FireSyncFailureEvent(e);
            callback(new RunSyncOperationResponse(false, resultException));
            return;
        }

        ListUpdatesResponse listUpdatesResponse = listUpdatesResult.Response as ListUpdatesResponse;
        datasetUpdates = listUpdatesResponse.DatasetUpdates;

        // A dataset merge requires the caller's consent (OnDatasetMerged)
        // before syncing can continue; resuming restarts the whole pass.
        if (datasetUpdates.MergedDatasetNameList.Count != 0 && this.OnDatasetMerged != null)
        {
            bool resume = this.OnDatasetMerged(this, datasetUpdates.MergedDatasetNameList);
            if (resume)
            {
                this.RunSyncOperationAsync(--retry, callback);
                return;
            }
            else
            {
                AmazonLogging.Log(AmazonLogging.AmazonLoggingLevel.Verbose, "CognitoSyncManager", "OnSyncFailure: Manual cancel");
                FireSyncFailureEvent(new DataStorageException("Manual cancel"));
                callback(new RunSyncOperationResponse(false, null));
                return;
            }
        }

        // if the dataset doesn't exist or is deleted, trigger onDelete
        // (precedence: (lastSyncCount != 0 && !Exists) || (Deleted && OnDatasetDeleted != null))
        // NOTE(review): if the first clause holds while OnDatasetDeleted is
        // null, the invocation below throws NullReferenceException — confirm
        // whether OnDatasetDeleted is guaranteed non-null on that path.
        if (lastSyncCount != 0 && !datasetUpdates.Exists || datasetUpdates.Deleted && this.OnDatasetDeleted != null)
        {
            bool resume = this.OnDatasetDeleted(this);
            if (resume)
            {
                // remove both records and metadata
                _local.DeleteDataset(GetIdentityId(), _datasetName);
                _local.PurgeDataset(GetIdentityId(), _datasetName);
                AmazonLogging.Log(AmazonLogging.AmazonLoggingLevel.Verbose, "CognitoSyncManager", "OnSyncSuccess");
                FireSyncSuccessEvent(new List<Record>());
                callback(new RunSyncOperationResponse(true, null));
                return;
            }
            else
            {
                AmazonLogging.Log(AmazonLogging.AmazonLoggingLevel.Verbose, "CognitoSyncManager", "OnSyncFailure");
                FireSyncFailureEvent(new DataStorageException("Manual cancel"));
                callback(new RunSyncOperationResponse(false, null));
                return;
            }
        }

        List<Record> remoteRecords = datasetUpdates.Records;
        if (remoteRecords.Count != 0)
        {
            // if conflict, prompt developer/user with callback
            List<SyncConflict> conflicts = new List<SyncConflict>();
            List<Record> conflictRecords = new List<Record>();
            foreach (Record remoteRecord in remoteRecords)
            {
                Record localRecord = _local.GetRecord(GetIdentityId(), _datasetName, remoteRecord.Key);
                // only when local is changed and its value is different
                if (localRecord != null && localRecord.Modified && !StringUtils.Equals(localRecord.Value, remoteRecord.Value))
                {
                    conflicts.Add(new SyncConflict(remoteRecord, localRecord));
                    conflictRecords.Add(remoteRecord);
                }
            }
            // retaining only non-conflict records
            remoteRecords.RemoveAll(t => conflictRecords.Contains(t));

            if (conflicts.Count > 0)
            {
                AmazonLogging.Log(AmazonLogging.AmazonLoggingLevel.Verbose, "CognitoSyncManager", String.Format("{0} records in conflict!", conflicts.Count));
                bool syncConflictResult = false;
                if (this.OnSyncConflict == null)
                {
                    // delegate is not implemented so the conflict resolution is applied
                    syncConflictResult = this.DefaultConflictResolution(conflicts);
                }
                else
                {
                    syncConflictResult = this.OnSyncConflict(this, conflicts);
                }
                if (!syncConflictResult)
                {
                    AmazonLogging.Log(AmazonLogging.AmazonLoggingLevel.Verbose, "CognitoSyncManager", "User cancelled conflict resolution");
                    callback(new RunSyncOperationResponse(false, null));
                    return;
                }
            }

            // save to local
            if (remoteRecords.Count > 0)
            {
                AmazonLogging.Log(AmazonLogging.AmazonLoggingLevel.Verbose, "CognitoSyncManager", String.Format("save {0} records to local", remoteRecords.Count));
                _local.PutRecords(GetIdentityId(), _datasetName, remoteRecords);
            }

            // new last sync count
            AmazonLogging.Log(AmazonLogging.AmazonLoggingLevel.Verbose, "CognitoSyncManager", String.Format("updated sync count {0}", datasetUpdates.SyncCount));
            _local.UpdateLastSyncCount(GetIdentityId(), _datasetName, datasetUpdates.SyncCount);
        }

        // push changes to remote
        List<Record> localChanges = this.GetModifiedRecords();
        if (localChanges.Count != 0)
        {
            AmazonLogging.Log(AmazonLogging.AmazonLoggingLevel.Verbose, "CognitoSyncManager", String.Format("push {0} records to remote", localChanges.Count));
            _remote.PutRecordsAsync(_datasetName, localChanges, datasetUpdates.SyncSessionToken, delegate(AmazonCognitoResult putRecordsResult)
            {
                if (putRecordsResult.Exception != null)
                {
                    if (putRecordsResult.Exception.GetType() == typeof(DataConflictException))
                    {
                        // remote changed underneath us; retry the whole pass
                        AmazonLogging.LogError(AmazonLogging.AmazonLoggingLevel.Warnings, "CognitoSyncManager", "Conflicts detected when pushing changes to remote: " + putRecordsResult.Exception.Message);
                        this.RunSyncOperationAsync(--retry, callback);
                        return;
                    }
                    else if (putRecordsResult.Exception.GetType() == typeof(DataStorageException))
                    {
                        AmazonLogging.LogError(AmazonLogging.AmazonLoggingLevel.Verbose, "CognitoSyncManager", "OnSyncFailure" + putRecordsResult.Exception.Message);
                        FireSyncFailureEvent(putRecordsResult.Exception);
                        callback(new RunSyncOperationResponse(false, null));
                        return;
                    }
                }

                PutRecordsResponse putRecordsResponse = putRecordsResult.Response as PutRecordsResponse;
                List<Record> result = putRecordsResponse.UpdatedRecords;

                // update local meta data
                _local.PutRecords(GetIdentityId(), _datasetName, result);

                // verify the server sync count is increased exactly by one, aka no
                // other updates were made during this update.
                long newSyncCount = 0;
                foreach (Record record in result)
                {
                    newSyncCount = newSyncCount < record.SyncCount ? record.SyncCount : newSyncCount;
                }
                if (newSyncCount == lastSyncCount + 1)
                {
                    // FIX: the format string previously used C-style "%d", which
                    // String.Format does not interpret, so the count never appeared
                    // in the log message; use a composite-format placeholder.
                    AmazonLogging.Log(AmazonLogging.AmazonLoggingLevel.Info, "DefaultDataset", String.Format("updated sync count {0}", newSyncCount));
                    _local.UpdateLastSyncCount(GetIdentityId(), _datasetName, newSyncCount);
                }

                AmazonLogging.Log(AmazonLogging.AmazonLoggingLevel.Verbose, "CognitoSyncManager", "OnSyncSuccess");
                // call back
                FireSyncSuccessEvent(remoteRecords);
                callback(new RunSyncOperationResponse(true, null));
                return;
            }, null);
            return;
        }

        AmazonLogging.Log(AmazonLogging.AmazonLoggingLevel.Verbose, "CognitoSyncManager", "OnSyncSuccess");
        // call back
        FireSyncSuccessEvent(remoteRecords);
        callback(new RunSyncOperationResponse(true, null));
        return;
    }, null);
}