public void LoadCsv(S3EventNotification s3Event, ILambdaContext context)
{
    LambdaLogger.Log("Processing request started");
    try
    {
        var connection = _databaseActions.SetupDatabase(context);
        foreach (var record in s3Event.Records)
        {
            LambdaLogger.Log("Inside of the s3 events loop");
            var s3 = record.S3;
            try
            {
                // Truncate the target table before reloading it
                _databaseActions.TruncateTable(context, Environment.GetEnvironmentVariable("DB_TABLE_NAME"));
                // Load the CSV data into the table
                _databaseActions.CopyDataToDatabase(context, record.AwsRegion, s3.Bucket.Name, s3.Object.Key);
            }
            catch (NpgsqlException ex)
            {
                LambdaLogger.Log($"Npgsql Exception has occurred - {ex.Message} {ex.InnerException} {ex.StackTrace}");
                throw; // rethrow without resetting the stack trace
            }
        }
        // Close the DB connection once all records are processed,
        // not inside the loop where later records would see a closed connection
        connection.Close();
        LambdaLogger.Log("End of function");
    }
    catch (Exception ex)
    {
        LambdaLogger.Log($"Exception has occurred - {ex.Message} {ex.InnerException} {ex.StackTrace}");
        throw;
    }
}
public async Task TransformImageAsync(S3EventNotification request)
{
    foreach (var record in request.Records)
    {
        await TransformOneAsync(record.S3.Object.Key);
    }
}
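// A minimal fan-out variant of the handler above (an illustrative sketch, not from the
// original source): it assumes TransformOneAsync is safe to run concurrently per object.
public async Task TransformImagesConcurrentlyAsync(S3EventNotification request)
{
    // Kick off one transform per record, then await all of them together.
    // Requires System.Linq and System.Threading.Tasks.
    var transforms = request.Records.Select(record => TransformOneAsync(record.S3.Object.Key));
    await Task.WhenAll(transforms);
}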
public void ParseS3EventTest()
{
    var eventRecords = S3EventNotification.ParseJson(SAMPLE_EVENT_JSON);
    Assert.AreEqual(1, eventRecords.Records.Count);

    var record = eventRecords.Records[0];
    Assert.AreEqual("2.0", record.EventVersion);
    Assert.AreEqual("aws:s3", record.EventSource);
    Assert.AreEqual("us-east-1", record.AwsRegion);
    Assert.AreEqual(1970, record.EventTime.ToUniversalTime().Year);
    Assert.AreEqual(EventType.ObjectCreatedPut, record.EventName);
    Assert.AreEqual("AIDAJDPLRKLG7UEXAMPLE", record.UserIdentity.PrincipalId);
    Assert.AreEqual("127.0.0.1", record.RequestParameters.SourceIPAddress);
    Assert.AreEqual("C3D13FE58DE4C810", record.ResponseElements.XAmzRequestId);
    Assert.AreEqual("FMyUVURIY8/IgAtTv8xRjskZQpcIZ9KG4V5Wp6S7S/JRWeUWerMUE5JgHvANOjpD", record.ResponseElements.XAmzId2);
    Assert.AreEqual("1.0", record.S3.S3SchemaVersion);
    Assert.AreEqual("testConfigRule", record.S3.ConfigurationId);
    Assert.AreEqual("mybucket", record.S3.Bucket.Name);
    Assert.AreEqual("A3NL1KOZZKExample", record.S3.Bucket.OwnerIdentity.PrincipalId);
    Assert.AreEqual("arn:aws:s3:::mybucket", record.S3.Bucket.Arn);
    Assert.AreEqual("HappyFace.jpg", record.S3.Object.Key);
    Assert.AreEqual(1024, record.S3.Object.Size);
    Assert.AreEqual("d41d8cd98f00b204e9800998ecf8427e", record.S3.Object.ETag);
    Assert.AreEqual("096fKKXTRTtl3on89fVO.nfljtsv6qko", record.S3.Object.VersionId);
}
public static List<S3SourceInfo> ToS3SourceInfos(this SQSEvent sqsEvent, string requestId)
{
    return sqsEvent.Records
        .Select(message => Tuple.Create(S3EventNotification.ParseJson(message.Body), message.MessageId))
        .SelectMany(pair => pair.Item1.Records.Select(record => Tuple.Create(record, pair.Item2)))
        .Select(pair => new S3SourceInfo(
            pair.Item1.S3.Bucket.Name,
            pair.Item1.S3.Object.Key,
            pair.Item1.S3.Object.Size,
            pair.Item2,
            requestId))
        .ToList();
}
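// Hypothetical call site for the extension above: the message body is an abbreviated
// S3 notification payload, and "request-123" is an illustrative request id.
var sqsEvent = new SQSEvent
{
    Records = new List<SQSEvent.SQSMessage>
    {
        new SQSEvent.SQSMessage
        {
            MessageId = "msg-1",
            Body = "{\"Records\":[{\"s3\":{\"bucket\":{\"name\":\"my-bucket\"},\"object\":{\"key\":\"data.csv\",\"size\":42}}}]}"
        }
    }
};
List<S3SourceInfo> sources = sqsEvent.ToS3SourceInfos("request-123");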
public void TestDynamoDBPut()
{
    TestLambdaContext context = new TestLambdaContext();
    var startupProgram = new StartupProgram(s3Client);
    string testJsonInput = "{\"Records\":[{\"eventVersion\":\"2.0\",\"eventSource\":\"aws:s3\",\"awsRegion\":\"ap-south-1\",\"eventTime\":\"1970-01-01T00:00:00.000Z\",\"eventName\":\"ObjectCreated:Put\",\"userIdentity\":{\"principalId\":\"AIDAJDPLRKLG7UEXAMPLE\"},\"requestParameters\":{\"sourceIPAddress\":\"127.0.0.1\"},\"responseElements\":{\"x-amz-request-id\":\"C3D13FE58DE4C810\",\"x-amz-id-2\":\"FMyUVURIY8/IgAtTv8xRjskZQpcIZ9KG4V5Wp6S7S/JRWeUWerMUE5JgHvANOjpD\"},\"s3\":{\"s3SchemaVersion\":\"1.0\",\"configurationId\":\"testConfigRule\",\"bucket\":{\"name\":\"pwc-logfilesource\",\"ownerIdentity\":{\"principalId\":\"A3NL1KOZZKExample\"},\"arn\":\"arn:aws:s3:::pwc-logfilesource\"},\"object\":{\"key\":\"README.md\",\"size\":1024,\"eTag\":\"d41d8cd98f00b204e9800998ecf8427e\",\"versionId\":\"096fKKXTRTtl3on89fVO.nfljtsv6qko\",\"sequencer\":\"0055AED6DCD90281E5\"}}}]}";
    S3Event testEvent = new S3Event();
    S3EventNotification s3EventNotification = S3EventNotification.ParseJson(testJsonInput);
    testEvent.Records = s3EventNotification.Records;
    startupProgram.ProcessS3Events(testEvent, context);
}
public void CanLoadACsvIntoTheDatabase()
{
    var mockDatabaseActions = new Mock<IDatabaseActions>();
    var handler = new Handler(mockDatabaseActions.Object);
    var tableName = "test";
    Environment.SetEnvironmentVariable("DB_TABLE_NAME", tableName);
    CreateTable("test");

    var bucketData = new S3EventNotification.S3Entity
    {
        Bucket = new S3EventNotification.S3BucketEntity { Name = "testBucket" },
        Object = new S3EventNotification.S3ObjectEntity { Key = "test/key.csv" }
    };

    // S3 record mock
    var testRecord = new S3EventNotification.S3EventNotificationRecord();
    testRecord.AwsRegion = "eu-west-2";
    testRecord.S3 = bucketData;

    var s3EventMock = new S3EventNotification();
    s3EventMock.Records = new List<S3EventNotification.S3EventNotificationRecord> { testRecord };
    var contextMock = new Mock<ILambdaContext>();

    // Set up database actions
    mockDatabaseActions.Setup(x => x.CopyDataToDatabase(tableName, contextMock.Object, testRecord.AwsRegion, bucketData.Bucket.Name, bucketData.Object.Key));
    mockDatabaseActions.Setup(x => x.AddExtension(contextMock.Object));
    mockDatabaseActions.Setup(x => x.CreateTable(contextMock.Object, It.IsAny<string>()));
    mockDatabaseActions.Setup(x => x.TruncateTable(contextMock.Object, It.IsAny<string>()));
    mockDatabaseActions.Setup(x => x.SetupDatabase(contextMock.Object)).Returns(() => new NpgsqlConnection());

    Assert.DoesNotThrow(() => handler.LoadCsv(s3EventMock, contextMock.Object));

    mockDatabaseActions.Verify(y => y.SetupDatabase(contextMock.Object), Times.Once);
    mockDatabaseActions.Verify(y => y.AddExtension(contextMock.Object), Times.Once);
    mockDatabaseActions.Verify(y => y.TruncateTable(contextMock.Object, It.IsAny<string>()), Times.Once);
    mockDatabaseActions.Verify(y => y.CreateTable(contextMock.Object, It.IsAny<string>()), Times.Once);
    mockDatabaseActions.Verify(y => y.CopyDataToDatabase(tableName, contextMock.Object, testRecord.AwsRegion, bucketData.Bucket.Name, bucketData.Object.Key), Times.Once);
}
public void TransformImage(S3EventNotification request)
{
    Console.Out.WriteLine($"Env: bucketName={bucketName} transformKeyPrefix={transformKeyPrefix}");
    Console.Out.WriteLine("Start to transform");
    try
    {
        TransformImageAsync(request).Wait();
    }
    catch (Exception e)
    {
        Console.Out.WriteLine($"Error in transformation: {e.Message}");
    }
    finally
    {
        CleanupTempDirectory();
    }
    Console.Out.WriteLine("End of transform");
}
public async Task<string> FunctionHandler(S3EventNotification s3event, ILambdaContext context)
{
    try
    {
        LambdaLogger.Log($"Calling function name: {context.FunctionName}\n");
        var record = s3event.Records[0];
        var srcBucket = record.S3.Bucket.Name;
        var key = record.S3.Object.Key;
        LambdaLogger.Log($"Bucket: {srcBucket} Key: {key}");
        var tempFilePath = "/tmp/" + key;
        var resizedFilePath = "/tmp/resized-" + key;
        await DownloadFileAsync(srcBucket, key, tempFilePath);
        // TODO: take into account the folder structure???
        using (var image = new MagickImage(tempFilePath))
        {
            image.Resize(size, size);
            image.Strip();
            image.Quality = quality;
            image.Write(resizedFilePath);
        }
        await UploadFileAsync(_destinationBucket, resizedFilePath);
        return resizedFilePath;
    }
    catch (Exception ex)
    {
        LambdaLogger.Log($"Exception: {ex.Message}");
    }
    return string.Empty;
}
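// Object keys in S3 event payloads arrive URL-encoded (a space becomes '+'), so handlers
// like the one above often decode the key before building file paths. A minimal sketch;
// DecodeS3Key is an illustrative helper, not part of the original handler:
private static string DecodeS3Key(string rawKey) =>
    System.Net.WebUtility.UrlDecode(rawKey); // UrlDecode also maps '+' back to a space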
public static Task<StringResponse> Execute(S3EventNotification request) =>
    logTerraformS3Event.Execute(request);
public async Task SetQueueConfigurationTests()
{
    var filterRule = new FilterRule("Prefix", "test/");
    using (var sqsClient = new AmazonSQSClient())
    {
        string queueName = UtilityMethods.GenerateName("events-test");
        var createResponse = await sqsClient.CreateQueueAsync(queueName);
        var bucketName = await UtilityMethods.CreateBucketAsync(Client, "SetQueueConfigurationTests");
        try
        {
            var queueArn = await sqsClient.AuthorizeS3ToSendMessageAsync(createResponse.QueueUrl, bucketName);
            PutBucketNotificationRequest putRequest = new PutBucketNotificationRequest
            {
                BucketName = bucketName,
                QueueConfigurations = new List<QueueConfiguration>
                {
                    new QueueConfiguration
                    {
                        Id = "the-queue-test",
                        Queue = queueArn,
                        Events = { EventType.ObjectCreatedPut },
                        Filter = new Filter
                        {
                            S3KeyFilter = new S3KeyFilter
                            {
                                FilterRules = new List<FilterRule> { filterRule }
                            }
                        }
                    }
                }
            };
            await Client.PutBucketNotificationAsync(putRequest);

            var getResponse = await Client.GetBucketNotificationAsync(bucketName);
            Assert.Equal(1, getResponse.QueueConfigurations.Count);
            Assert.Equal(1, getResponse.QueueConfigurations[0].Events.Count);
            Assert.Equal(EventType.ObjectCreatedPut, getResponse.QueueConfigurations[0].Events[0]);
            Assert.NotNull(getResponse.QueueConfigurations[0].Filter);
            Assert.NotNull(getResponse.QueueConfigurations[0].Filter.S3KeyFilter);
            Assert.NotNull(getResponse.QueueConfigurations[0].Filter.S3KeyFilter.FilterRules);
            Assert.Equal(1, getResponse.QueueConfigurations[0].Filter.S3KeyFilter.FilterRules.Count);
            Assert.Equal(filterRule.Name, getResponse.QueueConfigurations[0].Filter.S3KeyFilter.FilterRules[0].Name);
            Assert.Equal(filterRule.Value, getResponse.QueueConfigurations[0].Filter.S3KeyFilter.FilterRules[0].Value);
            Assert.Equal("the-queue-test", getResponse.QueueConfigurations[0].Id);
            Assert.Equal(queueArn, getResponse.QueueConfigurations[0].Queue);

            // Purge the queue to remove the test message sent while the configuration was being set up.
            await sqsClient.PurgeQueueAsync(createResponse.QueueUrl);
            Thread.Sleep(TimeSpan.FromSeconds(1));

            var putObjectRequest = new PutObjectRequest
            {
                BucketName = bucketName,
                Key = "test/data.txt",
                ContentBody = "Important Data"
            };
            await Client.PutObjectAsync(putObjectRequest);

            string messageBody = null;
            for (int i = 0; i < 5 && messageBody == null; i++)
            {
                var receiveResponse = await sqsClient.ReceiveMessageAsync(new ReceiveMessageRequest
                {
                    QueueUrl = createResponse.QueueUrl,
                    WaitTimeSeconds = 20
                });
                if (receiveResponse.Messages.Count != 0)
                {
                    messageBody = receiveResponse.Messages[0].Body;
                }
            }

            var evnt = S3EventNotification.ParseJson(messageBody);
            Assert.Equal(1, evnt.Records.Count);
            Assert.Equal(putObjectRequest.BucketName, evnt.Records[0].S3.Bucket.Name);
            Assert.Equal(putObjectRequest.Key, evnt.Records[0].S3.Object.Key);
            Assert.Equal(putObjectRequest.ContentBody.Length, evnt.Records[0].S3.Object.Size);
        }
        finally
        {
            await sqsClient.DeleteQueueAsync(createResponse.QueueUrl);
            await UtilityMethods.DeleteBucketWithObjectsAsync(Client, bucketName);
        }
    }
}
private readonly int writeThreshold = 6 * (int)Math.Pow(2, 20); // 6 MiB, safely above S3's 5 MB minimum part size
#endregion

/// <summary>
/// You can modify this code as you wish.
///
/// Make sure not to change the decryption API interface logic, to ensure your data decrypts successfully.
/// </summary>
/// <param name="input"></param>
/// <param name="context"></param>
/// <returns></returns>
/// <exception cref="InvalidOperationException"></exception>
/// <exception cref="FileNotFoundException"></exception>
public async Task<bool> FunctionHandler(S3EventNotification input, ILambdaContext context)
{
    try
    {
        ivArray = ArrayPool<byte>.Shared.Rent(12); // 96-bit IV buffer for AES-GCM
        this.targetFileName = this.sourceFileName = input.Records[0].S3.Object.Key;
        LambdaLogger.Log("Loading and checking source/destination buckets, file name...");
        this.sourceBucketName = Environment.GetEnvironmentVariable("sourcebucket");
        this.targetBucketName = Environment.GetEnvironmentVariable("targetbucket");
        this.keyId = Environment.GetEnvironmentVariable("privatekeyid");

        // Validate configuration before touching S3
        ValidateEnvironment();
        LambdaLogger.Log(Environment.GetEnvironmentVariable("AWS_REGION"));

        LambdaLogger.Log("Loading ciphertext...");
        GetObjectRequest readRequest = new GetObjectRequest
        {
            BucketName = this.sourceBucketName,
            Key = this.sourceFileName,
        };
        this.client = new AmazonS3Client(RegionEndpoint.USEast1);
        using GetObjectResponse response = await this.client.GetObjectAsync(readRequest);
        if (response.HttpStatusCode != HttpStatusCode.OK)
        {
            throw new FileNotFoundException("Could not retrieve file from source bucket.");
        }

        LambdaLogger.Log("Loading metadata...");
        using CsvProcessor csvProcessor = new CsvProcessor(await GetMetaDataAsync());

        // Decrypt the AES session key
        byte[] sessionKey = await DecryptSessionKey(csvProcessor.EncryptedSessionKey);

        LambdaLogger.Log($"Preparing multipart upload with a minimal part size of {this.writeThreshold} bytes");
        this.outputStream = new MemoryStream(this.partSize);
        await InitPartUploadAsync();

        LambdaLogger.Log("Decrypting...");
        using (this.aesGcm = new AesGcm(sessionKey))
        {
            await csvProcessor.ProcessDataAsync(DecryptCell, response.ResponseStream.ReadAsync, WritePartAsync, CancellationToken.None);
        }

        LambdaLogger.Log("Completing multipart upload...");
        if (this.outputStream.Length > 0)
        {
            await WritePartInternalAsync();
        }
        await CompleteMultipartUploadAsync();
        return true;
    }
    catch (Exception ex)
    {
        LambdaLogger.Log($"Exception in PutS3Object: {ex}");
        if (!string.IsNullOrWhiteSpace(this.uploadId))
        {
            // Abort the in-flight multipart upload so S3 does not keep orphaned parts
            AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest
            {
                BucketName = this.targetBucketName,
                Key = this.targetFileName,
                UploadId = this.uploadId
            };
            await this.client.AbortMultipartUploadAsync(abortRequest);
        }
        return false;
    }
    finally
    {
        // Clean up the pooled buffer and the client
        ArrayPool<byte>.Shared.Return(ivArray);
        this.client?.Dispose();
    }
}
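// For reference, a self-contained sketch of the AES-GCM decryption pattern the handler
// delegates to DecryptCell (all names below are illustrative, not from the handler):
private static byte[] DecryptWithAesGcm(AesGcm aes, byte[] nonce, byte[] ciphertext, byte[] tag)
{
    // AesGcm.Decrypt throws CryptographicException if the authentication tag does not match.
    var plaintext = new byte[ciphertext.Length];
    aes.Decrypt(nonce, ciphertext, tag, plaintext);
    return plaintext;
}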
public async Task<string> FunctionHandler(S3EventNotification evnt, ILambdaContext context)
{
    // S3 client and the associated call variables
    IAmazonS3 S3Client = new AmazonS3Client();
    S3EventNotification.S3Entity s3Event;
    GetObjectResponse ParameterFileResponse;
    GetPreSignedUrlRequest PresignedKeyRequest = new GetPreSignedUrlRequest();
    GetObjectResponse CommitInfoResponse;

    // CloudFormation client and associated variables
    AmazonCloudFormationClient CloudformationClient = new AmazonCloudFormationClient();
    List<Parameter> Parameters = new List<Parameter>();
    DescribeStacksRequest CurrentStacksRequest = new DescribeStacksRequest();

    // A flag to determine whether to create a new stack or update an existing one
    bool newstack;

    // The name of the stack which will be generated by the deployment process.
    string TargetStackName;

    // The fixed file names the deployment process requires. If all three files are not present, no deployment takes place.
    string DeploymentFileName = "master.template";
    string ParameterFileName = "Parameters.txt";
    string CommitInfoFileName = "commitinfo.txt";

    // Write details of the S3 event to the logger
    s3Event = evnt.Records?[0].S3;
    context.Logger.Log("S3 CloudFormation template upload event received. Processing potential deployment.");
    context.Logger.Log("Bucket: " + s3Event.Bucket.Name);
    context.Logger.Log("Key: " + s3Event.Object.Key);

    // Check that the file name matches the required convention.
    if (s3Event.Object.Key.EndsWith(DeploymentFileName))
    {
        context.Logger.Log("S3 event corresponds to a properly formatted master.template CloudFormation document. Commencing deployment.");
    }
    else
    {
        context.Logger.Log("S3 event does not match deployment requirements. Candidates for deployment must contain the primary CloudFormation template in a master.template file.");
        return "Improper filename. No deployment processed.";
    }

    // Display the commitinfo from the deployment
    string CommitInfoKeyName = s3Event.Object.Key.Replace(DeploymentFileName, CommitInfoFileName);
    context.Logger.Log($"Looking for accompanying commitinfo file: {CommitInfoKeyName}");
    try
    {
        CommitInfoResponse = await S3Client.GetObjectAsync(s3Event.Bucket.Name, CommitInfoKeyName);
        using (StreamReader reader = new StreamReader(CommitInfoResponse.ResponseStream))
        {
            string contents = reader.ReadToEnd();
            context.Logger.Log(contents);
        }
    }
    catch (Exception e)
    {
        context.Logger.Log(e.Message);
        context.Logger.Log("No commitinfo.txt file detected. Aborting deployment.");
        return "No accompanying commitinfo.txt. No deployment processed.";
    }

    // Get and set associated parameters
    string ParameterKeyName = s3Event.Object.Key.Replace(DeploymentFileName, ParameterFileName);
    context.Logger.Log($"Looking for accompanying parameter file: {ParameterKeyName}");
    try
    {
        ParameterFileResponse = await S3Client.GetObjectAsync(s3Event.Bucket.Name, ParameterKeyName);
        StreamReader reader = new StreamReader(ParameterFileResponse.ResponseStream);
        string paramline = reader.ReadLine();
        context.Logger.Log("Parameter file line being processed: " + paramline);
        while (!string.IsNullOrWhiteSpace(paramline))
        {
            string[] paramstrings = paramline.Split(':');
            if (paramstrings.Length == 2)
            {
                Parameters.Add(new Parameter() { ParameterKey = paramstrings[0], ParameterValue = paramstrings[1] });
            }
            paramline = reader.ReadLine();
            context.Logger.Log("Parameter file line being processed: " + paramline);
        }
    }
    catch (Exception e)
    {
        context.Logger.Log(e.Message);
        context.Logger.Log("No parameter file detected. Aborting deployment.");
        return "No accompanying Parameters.txt. No deployment processed.";
    }

    // The name of the stack is based on the folder structure containing the master.template document.
    // For example, a template deployed to the S3 key Knect/RCC/master.template generates the stack Knect-RCC.
    TargetStackName = s3Event.Object.Key.Replace("/", "-");
    TargetStackName = TargetStackName.Replace("-" + DeploymentFileName, "");
    context.Logger.Log("CloudFormation stack name: " + TargetStackName);

    // Get a presigned URL so the CloudFormation client can access the master.template document.
    PresignedKeyRequest.BucketName = s3Event.Bucket.Name;
    PresignedKeyRequest.Key = s3Event.Object.Key;
    PresignedKeyRequest.Expires = DateTime.Now.AddMinutes(5);
    string PresignedS3Key = S3Client.GetPreSignedURL(PresignedKeyRequest);

    // If a stack with the target name already exists, update it; otherwise create a new stack.
    try
    {
        CurrentStacksRequest.StackName = TargetStackName;
        await CloudformationClient.DescribeStacksAsync(CurrentStacksRequest);
        context.Logger.Log("A stack for the target name already exists. The existing stack will be updated.");
        newstack = false;
    }
    catch
    {
        context.Logger.Log("No stack with the target name exists. A new stack will be created.");
        newstack = true;
    }

    foreach (Parameter param in Parameters)
    {
        context.Logger.Log($"Parameter is set Key: {param.ParameterKey} with value {param.ParameterValue}");
    }

    if (newstack)
    {
        // Create a new stack
        CreateStackRequest CreateStack = new CreateStackRequest();
        CreateStack.StackName = TargetStackName;
        CreateStack.TemplateURL = PresignedS3Key;
        CreateStack.Parameters = Parameters;
        CreateStack.Capabilities.Add("CAPABILITY_NAMED_IAM");
        await CloudformationClient.CreateStackAsync(CreateStack);
        return "A stack creation request was successfully generated.";
    }
    else
    {
        // Update the existing stack
        UpdateStackRequest updatereq = new UpdateStackRequest();
        updatereq.StackName = TargetStackName;
        updatereq.TemplateURL = PresignedS3Key;
        updatereq.Parameters = Parameters;
        updatereq.Capabilities.Add("CAPABILITY_NAMED_IAM");
        await CloudformationClient.UpdateStackAsync(updatereq);
        return "A stack update request was successfully generated.";
    }
}
public void ImportFormData(S3EventNotification s3Event, ILambdaContext context)
{
    // Add proper triggers and env vars once the POC phase is done
    List<BsonDocument> csvToBsonRecords = new List<BsonDocument>();

    // Check the events
    foreach (var record in s3Event.Records)
    {
        // Only handle a single, predefined import file for now
        if (record.S3.Object.Key.ToUpper() == _importFileName.ToUpper())
        {
            LambdaLogger.Log($"File to be processed: {record.AwsRegion}/{record.S3.Bucket.Name}/{record.S3.Object.Key}, processing");
            // Load the file from the bucket
            using (var s3Client = new AmazonS3Client(Amazon.RegionEndpoint.EUWest2))
            {
                GetObjectRequest request = new GetObjectRequest
                {
                    BucketName = record.S3.Bucket.Name,
                    Key = record.S3.Object.Key
                };
                LambdaLogger.Log($"getting object: {request.BucketName}/{request.Key}");
                GetObjectResponse response = null;
                try
                {
                    response = s3Client.GetObjectAsync(request).Result;
                    LambdaLogger.Log("response:" + response.HttpStatusCode.ToString());
                }
                catch (Exception ex)
                {
                    LambdaLogger.Log("s3 client connection error:" + ex.Message);
                }
                if (response?.ResponseStream != null)
                {
                    try
                    {
                        using (var reader = new StreamReader(response.ResponseStream))
                        using (var csv = new CsvReader(reader, CultureInfo.InvariantCulture))
                        {
                            csvToBsonRecords = csv.GetRecords<FormRecord>().Select(x => x.ToBsonDocument()).ToList();
                        }
                        LambdaLogger.Log($"{csvToBsonRecords.Count} records to be processed");
                    }
                    catch (Exception ex)
                    {
                        LambdaLogger.Log("csv stream handling error:" + ex.Message);
                    }
                }
                else
                {
                    LambdaLogger.Log("stream from S3 is null");
                }
            }
        }
        else
        {
            LambdaLogger.Log("nothing to process");
        }
    }

    // Check if there is anything to process
    if (csvToBsonRecords.Count > 0)
    {
        // Import the records, upserting on the unique id so reruns do not duplicate data
        try
        {
            var mongoClient = new MongoClient(Environment.GetEnvironmentVariable("SCCV_MONGO_CONN_STRING"));
            var database = mongoClient.GetDatabase(Environment.GetEnvironmentVariable("SCCV_MONGO_DB_NAME"));
            var collection = database.GetCollection<BsonDocument>(_collectionName);
            var records = csvToBsonRecords.Select(record =>
                new ReplaceOneModel<BsonDocument>(new BsonDocument(_uniqueId, record.GetValue(_uniqueId)), record) { IsUpsert = true });
            var bulkUpsertResult = collection.BulkWrite(records, new BulkWriteOptions { IsOrdered = false });
            Console.WriteLine($"Bulk upsert operation successful? {bulkUpsertResult.IsAcknowledged}");
            Console.WriteLine($"Updated: {bulkUpsertResult.ModifiedCount} records");
            Console.WriteLine($"Added: {bulkUpsertResult.Upserts.Count} records");
        }
        catch (Exception ex)
        {
            LambdaLogger.Log($"Import failed: {ex.Message}");
        }
    }
}
public void SetQueueConfigurationTests()
{
    var s3Config = new AmazonS3Config();
    using (var s3Client = new AmazonS3Client(s3Config))
    using (var sqsClient = new AmazonSQSClient())
    {
        var createResponse = sqsClient.CreateQueue("events-test-" + DateTime.Now.Ticks);
        var bucketName = S3TestUtils.CreateBucket(s3Client);
        try
        {
            var queueArn = sqsClient.AuthorizeS3ToSendMessage(createResponse.QueueUrl, bucketName);
            PutBucketNotificationRequest putRequest = new PutBucketNotificationRequest
            {
                BucketName = bucketName,
                QueueConfigurations = new List<QueueConfiguration>
                {
                    new QueueConfiguration
                    {
                        Id = "the-queue-test",
                        Queue = queueArn,
                        Events = { EventType.ObjectCreatedPut }
                    }
                }
            };
            s3Client.PutBucketNotification(putRequest);

            var getResponse = s3Client.GetBucketNotification(bucketName);
            Assert.AreEqual(1, getResponse.QueueConfigurations.Count);
            Assert.AreEqual(1, getResponse.QueueConfigurations[0].Events.Count);
            Assert.AreEqual(EventType.ObjectCreatedPut, getResponse.QueueConfigurations[0].Events[0]);
            Assert.AreEqual("the-queue-test", getResponse.QueueConfigurations[0].Id);
            Assert.AreEqual(queueArn, getResponse.QueueConfigurations[0].Queue);

            // Purge the queue to remove the test message sent while the configuration was being set up.
            sqsClient.PurgeQueue(createResponse.QueueUrl);
            Thread.Sleep(1000);

            var putObjectRequest = new PutObjectRequest
            {
                BucketName = bucketName,
                Key = "data.txt",
                ContentBody = "Important Data"
            };
            s3Client.PutObject(putObjectRequest);

            string messageBody = null;
            for (int i = 0; i < 5 && messageBody == null; i++)
            {
                var receiveResponse = sqsClient.ReceiveMessage(new ReceiveMessageRequest
                {
                    QueueUrl = createResponse.QueueUrl,
                    WaitTimeSeconds = 20
                });
                if (receiveResponse.Messages.Count != 0)
                {
                    messageBody = receiveResponse.Messages[0].Body;
                }
            }

            var evnt = S3EventNotification.ParseJson(messageBody);
            Assert.AreEqual(1, evnt.Records.Count);
            Assert.AreEqual(putObjectRequest.BucketName, evnt.Records[0].S3.Bucket.Name);
            Assert.AreEqual(putObjectRequest.Key, evnt.Records[0].S3.Object.Key);
            Assert.AreEqual(putObjectRequest.ContentBody.Length, evnt.Records[0].S3.Object.Size);
        }
        finally
        {
            sqsClient.DeleteQueue(createResponse.QueueUrl);
            AmazonS3Util.DeleteS3BucketWithObjects(s3Client, bucketName);
        }
    }
}
public static Task<IEnumerable<GetObjectResponse>> GetBucketObjectsAsync(this IAmazonS3 s3, S3EventNotification s3EventNotification) =>
    s3.GetBucketObjects(Observable.Return(s3EventNotification))
        .ToList()
        .Cast<IEnumerable<GetObjectResponse>>()
        .ToTask();
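// A hypothetical caller for the Rx-to-TPL bridge above, assuming GetBucketObjects emits one
// GetObjectResponse per object referenced by the notification:
public static async Task LogObjectSizesAsync(IAmazonS3 s3, S3EventNotification notification)
{
    foreach (var response in await s3.GetBucketObjectsAsync(notification))
    {
        Console.WriteLine($"{response.Key}: {response.ContentLength} bytes");
    }
}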
public void ParseS3EventInvalidJson()
{
    // "{" is not valid JSON; ParseJson is expected to throw here
    // (the test presumably carries an expected-exception attribute).
    var eventRecords = S3EventNotification.ParseJson("{");
}
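// A defensive-parsing sketch (not from the original tests): ParseJson throws on malformed
// input, so callers outside of tests typically guard it like this.
public static S3EventNotification TryParseS3Event(string json)
{
    try
    {
        return S3EventNotification.ParseJson(json);
    }
    catch (Exception ex) // the SDK surfaces malformed JSON as an exception
    {
        LambdaLogger.Log($"Could not parse S3 event payload: {ex.Message}");
        return null;
    }
}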
public void SetQueueConfigurationTests()
{
    var filterRule = new FilterRule("Prefix", "test/");
    var s3Config = new AmazonS3Config();
    using (var s3Client = new AmazonS3Client(s3Config))
    using (var sqsClient = new AmazonSQSClient())
    using (var stsClient = new AmazonSecurityTokenServiceClient())
    {
        var createResponse = sqsClient.CreateQueue("events-test-" + DateTime.Now.Ticks);
        var bucketName = S3TestUtils.CreateBucketWithWait(s3Client);
        try
        {
            var queueArn = sqsClient.AuthorizeS3ToSendMessage(createResponse.QueueUrl, bucketName);
            PutBucketNotificationRequest putRequest = new PutBucketNotificationRequest
            {
                BucketName = bucketName,
                QueueConfigurations = new List<QueueConfiguration>
                {
                    new QueueConfiguration
                    {
                        Id = "the-queue-test",
                        Queue = queueArn,
                        Events = { EventType.ObjectCreatedPut },
                        Filter = new Filter
                        {
                            S3KeyFilter = new S3KeyFilter
                            {
                                FilterRules = new List<FilterRule> { filterRule }
                            }
                        }
                    }
                }
            };
            s3Client.PutBucketNotification(putRequest);

            var getResponse = S3TestUtils.WaitForConsistency(() =>
            {
                var res = s3Client.GetBucketNotification(bucketName);
                return res.QueueConfigurations?.Count > 0 && res.QueueConfigurations[0].Id == "the-queue-test" ? res : null;
            });

            var getAttributeResponse = sqsClient.GetQueueAttributes(new GetQueueAttributesRequest
            {
                QueueUrl = createResponse.QueueUrl,
                AttributeNames = new List<string> { "All" }
            });
            var policy = Policy.FromJson(getAttributeResponse.Policy);
            var conditions = policy.Statements[0].Conditions;
            Assert.AreEqual(2, conditions.Count);

            var accountCondition = conditions.FirstOrDefault(x => string.Equals(x.ConditionKey, ConditionFactory.SOURCE_ACCOUNT_KEY));
            Assert.IsNotNull(accountCondition);
            Assert.AreEqual(ConditionFactory.StringComparisonType.StringEquals.ToString(), accountCondition.Type);
            Assert.AreEqual(12, accountCondition.Values[0].Length);
            var currentAccountId = stsClient.GetCallerIdentity(new GetCallerIdentityRequest()).Account;
            Assert.AreEqual(currentAccountId, accountCondition.Values[0]);

            Assert.AreEqual(1, getResponse.QueueConfigurations.Count);
            Assert.AreEqual(1, getResponse.QueueConfigurations[0].Events.Count);
            Assert.AreEqual(EventType.ObjectCreatedPut, getResponse.QueueConfigurations[0].Events[0]);
            Assert.IsNotNull(getResponse.QueueConfigurations[0].Filter);
            Assert.IsNotNull(getResponse.QueueConfigurations[0].Filter.S3KeyFilter);
            Assert.IsNotNull(getResponse.QueueConfigurations[0].Filter.S3KeyFilter.FilterRules);
            Assert.AreEqual(1, getResponse.QueueConfigurations[0].Filter.S3KeyFilter.FilterRules.Count);
            Assert.AreEqual(filterRule.Name, getResponse.QueueConfigurations[0].Filter.S3KeyFilter.FilterRules[0].Name);
            Assert.AreEqual(filterRule.Value, getResponse.QueueConfigurations[0].Filter.S3KeyFilter.FilterRules[0].Value);
            Assert.AreEqual("the-queue-test", getResponse.QueueConfigurations[0].Id);
            Assert.AreEqual(queueArn, getResponse.QueueConfigurations[0].Queue);

            // Purge the queue to remove the test message sent while the configuration was being set up.
            sqsClient.PurgeQueue(createResponse.QueueUrl);
            // We must wait 60 seconds, or the next message sent to the queue could be deleted while the queue is still being purged.
            Thread.Sleep(TimeSpan.FromSeconds(60));

            var putObjectRequest = new PutObjectRequest
            {
                BucketName = bucketName,
                Key = "test/data.txt",
                ContentBody = "Important Data"
            };
            s3Client.PutObject(putObjectRequest);

            string messageBody = null;
            for (int i = 0; i < 5 && messageBody == null; i++)
            {
                var receiveResponse = sqsClient.ReceiveMessage(new ReceiveMessageRequest
                {
                    QueueUrl = createResponse.QueueUrl,
                    WaitTimeSeconds = 20
                });
                if (receiveResponse.Messages.Count != 0)
                {
                    messageBody = receiveResponse.Messages[0].Body;
                }
            }

            var evnt = S3EventNotification.ParseJson(messageBody);
            Assert.AreEqual(1, evnt.Records.Count);
            Assert.AreEqual(putObjectRequest.BucketName, evnt.Records[0].S3.Bucket.Name);
            Assert.AreEqual(putObjectRequest.Key, evnt.Records[0].S3.Object.Key);
            Assert.AreEqual(putObjectRequest.ContentBody.Length, evnt.Records[0].S3.Object.Size);
            Assert.IsNotNull(evnt.Records[0].S3.Object.Sequencer);
        }
        finally
        {
            sqsClient.DeleteQueue(createResponse.QueueUrl);
            AmazonS3Util.DeleteS3BucketWithObjects(s3Client, bucketName);
        }
    }
}