From e610d056ec55db0f47d2eb09fff86c25806a0898 Mon Sep 17 00:00:00 2001 From: Mohamed ElAsmar Date: Sun, 15 Sep 2024 21:31:21 -0700 Subject: [PATCH] add integ test to test too many sources --- .../integ.bucket-deployment-big-response.ts | 70 +++++++++++++++++++ .../bucket-deployment-handler/index.py | 3 +- .../aws-cdk-lib/aws-s3-deployment/README.md | 2 +- 3 files changed, 72 insertions(+), 3 deletions(-) create mode 100644 packages/@aws-cdk-testing/framework-integ/test/aws-s3-deployment/test/integ.bucket-deployment-big-response.ts diff --git a/packages/@aws-cdk-testing/framework-integ/test/aws-s3-deployment/test/integ.bucket-deployment-big-response.ts b/packages/@aws-cdk-testing/framework-integ/test/aws-s3-deployment/test/integ.bucket-deployment-big-response.ts new file mode 100644 index 0000000000000..aa66025e050b8 --- /dev/null +++ b/packages/@aws-cdk-testing/framework-integ/test/aws-s3-deployment/test/integ.bucket-deployment-big-response.ts @@ -0,0 +1,70 @@ +import * as os from 'os'; +import * as fs from 'fs'; +import * as path from 'path'; +import * as s3 from 'aws-cdk-lib/aws-s3'; +import * as cdk from 'aws-cdk-lib'; +import * as integ from '@aws-cdk/integ-tests-alpha'; +import { Construct } from 'constructs'; +import * as s3deploy from 'aws-cdk-lib/aws-s3-deployment'; +import { CfnOutput, Fn } from 'aws-cdk-lib'; +import { ExpectedResult } from '@aws-cdk/integ-tests-alpha'; + +const numFiles = 50; + +class TestBucketDeployment extends cdk.Stack { + public readonly destinationBucket: s3.IBucket; + constructor(scope: Construct, id: string, props?: cdk.StackProps) { + super(scope, id, props); + + this.destinationBucket = new s3.Bucket(this, 'Destination', { + removalPolicy: cdk.RemovalPolicy.DESTROY, + autoDeleteObjects: true, // needed for integration test cleanup + }); + + const sources = []; + for (let i = 0; i < numFiles; i++) { + const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'tmpcdk')); + fs.mkdirSync(tempDir, { recursive: true }); + const fileName 
= `${i+1}.txt`; + const filePath = path.join(tempDir, fileName); + fs.writeFileSync(filePath, `This is file number ${i + 1}`); + sources.push(s3deploy.Source.asset(tempDir)); + } + + const deploymentBucket = new s3deploy.BucketDeployment(this, 'DeployMe', { + sources: sources, + destinationBucket: this.destinationBucket, + memoryLimit: 2048, + retainOnDelete: false, // default is true, which will block the integration test cleanup + outputObjectKeys: false, + }); + + new CfnOutput(this, 'customResourceData', { + value: Fn.join(',', deploymentBucket.objectKeys), + }); + } +} + +const app = new cdk.App(); +const testCase = new TestBucketDeployment(app, 'test-bucket-deployments-too-many-sources'); + +// Assert that every one of the 50 deployed files exists in the destination bucket with its expected content +const integTest = new integ.IntegTest(app, 'integ-test-bucket-deployments', { + testCases: [testCase], + diffAssets: true, +}); + +for (let i = 0; i < numFiles; i++) { + const apiCall = integTest.assertions.awsApiCall('S3', 'getObject', { + Bucket: testCase.destinationBucket.bucketName, + Key: `${i+1}.txt`, + }); + apiCall.provider.addToRolePolicy({ + Effect: 'Allow', + Action: ['s3:GetObject', 's3:ListBucket'], + Resource: ['*'], + }); + apiCall.assertAtPath('Body', ExpectedResult.stringLikeRegexp(`This is file number ${i + 1}`)); +} + +app.synth(); \ No newline at end of file diff --git a/packages/@aws-cdk/custom-resource-handlers/lib/aws-s3-deployment/bucket-deployment-handler/index.py b/packages/@aws-cdk/custom-resource-handlers/lib/aws-s3-deployment/bucket-deployment-handler/index.py index 3c8ad63e8c18c..8a7928a4c350d 100644 --- a/packages/@aws-cdk/custom-resource-handlers/lib/aws-s3-deployment/bucket-deployment-handler/index.py +++ b/packages/@aws-cdk/custom-resource-handlers/lib/aws-s3-deployment/bucket-deployment-handler/index.py @@ -136,7 +136,7 @@ def cfn_error(message=None): cfn_send(event, context, CFN_SUCCESS, physicalResourceId=physical_id, responseData={ # Passing
through the ARN sequences dependencees on the deployment 'DestinationBucketArn': props.get('DestinationBucketArn'), - **({'SourceObjectKeys': props.get('SourceObjectKeys')} if output_object_keys else {}) + **({'SourceObjectKeys': props.get('SourceObjectKeys')} if output_object_keys else {'SourceObjectKeys': []}) }) except KeyError as e: cfn_error("invalid request. Missing key %s" % str(e)) @@ -334,4 +334,3 @@ def replace_markers(filename, markers): # # delete the original file and rename the new one to the original os.remove(filename) os.rename(outfile, filename) - \ No newline at end of file diff --git a/packages/aws-cdk-lib/aws-s3-deployment/README.md b/packages/aws-cdk-lib/aws-s3-deployment/README.md index e196e357fa492..fd4b28dbfcb0c 100644 --- a/packages/aws-cdk-lib/aws-s3-deployment/README.md +++ b/packages/aws-cdk-lib/aws-s3-deployment/README.md @@ -471,7 +471,7 @@ new cdk.CfnOutput(this, 'ObjectKey', { ## Controlling the Output of Source Object Keys -By default, the keys of the source objects copied to the destination bucket are returned in the Data property of the custom resource. However, you can disable this behavior by setting the outputObjectKeys property to false. This is particularly useful when the number of objects is too large and might exceed the size limit of the data property. +By default, the keys of the source objects copied to the destination bucket are returned in the Data property of the custom resource. However, you can disable this behavior by setting the outputObjectKeys property to false. This is particularly useful when the number of objects is too large and might exceed the size limit of the responseData property. ```ts import * as cdk from 'aws-cdk-lib';