diff --git a/packages/@aws-cdk/aws-ec2/lib/connections.ts b/packages/@aws-cdk/aws-ec2/lib/connections.ts index 0ecccea97fdb2..b68c04299cdf3 100644 --- a/packages/@aws-cdk/aws-ec2/lib/connections.ts +++ b/packages/@aws-cdk/aws-ec2/lib/connections.ts @@ -20,6 +20,9 @@ import { ISecurityGroup } from './security-group'; * An object that has a Connections object */ export interface IConnectable { + /** + * The network connections associated with this resource. + */ readonly connections: Connections; } diff --git a/packages/@aws-cdk/aws-kinesisfirehose-destinations/.eslintrc.js b/packages/@aws-cdk/aws-kinesisfirehose-destinations/.eslintrc.js new file mode 100644 index 0000000000000..61dd8dd001f63 --- /dev/null +++ b/packages/@aws-cdk/aws-kinesisfirehose-destinations/.eslintrc.js @@ -0,0 +1,3 @@ +const baseConfig = require('cdk-build-tools/config/eslintrc'); +baseConfig.parserOptions.project = __dirname + '/tsconfig.json'; +module.exports = baseConfig; diff --git a/packages/@aws-cdk/aws-kinesisfirehose-destinations/.gitignore b/packages/@aws-cdk/aws-kinesisfirehose-destinations/.gitignore new file mode 100644 index 0000000000000..147448f7df4fe --- /dev/null +++ b/packages/@aws-cdk/aws-kinesisfirehose-destinations/.gitignore @@ -0,0 +1,19 @@ +*.js +tsconfig.json +*.js.map +*.d.ts +*.generated.ts +dist +lib/generated/resources.ts +.jsii + +.LAST_BUILD +.nyc_output +coverage +nyc.config.js +.LAST_PACKAGE +*.snk +!.eslintrc.js +!jest.config.js + +junit.xml \ No newline at end of file diff --git a/packages/@aws-cdk/aws-kinesisfirehose-destinations/.npmignore b/packages/@aws-cdk/aws-kinesisfirehose-destinations/.npmignore new file mode 100644 index 0000000000000..aaabf1df59065 --- /dev/null +++ b/packages/@aws-cdk/aws-kinesisfirehose-destinations/.npmignore @@ -0,0 +1,28 @@ +# Don't include original .ts files when doing `npm pack` +*.ts +!*.d.ts +coverage +.nyc_output +*.tgz + +dist +.LAST_PACKAGE +.LAST_BUILD +!*.js + +# Include .jsii +!.jsii + +*.snk + +*.tsbuildinfo + +tsconfig.json +.eslintrc.js +jest.config.js + +# exclude cdk artifacts +**/cdk.out +junit.xml +test/ +!*.lit.ts \ No newline at end of file diff --git a/packages/@aws-cdk/aws-kinesisfirehose-destinations/LICENSE b/packages/@aws-cdk/aws-kinesisfirehose-destinations/LICENSE new file mode 100644 index 0000000000000..28e4bdcec77ec --- /dev/null +++ b/packages/@aws-cdk/aws-kinesisfirehose-destinations/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/packages/@aws-cdk/aws-kinesisfirehose-destinations/NOTICE b/packages/@aws-cdk/aws-kinesisfirehose-destinations/NOTICE new file mode 100644 index 0000000000000..5fc3826926b5b --- /dev/null +++ b/packages/@aws-cdk/aws-kinesisfirehose-destinations/NOTICE @@ -0,0 +1,2 @@ +AWS Cloud Development Kit (AWS CDK) +Copyright 2018-2021 Amazon.com, Inc. or its affiliates. 
All Rights Reserved.
diff --git a/packages/@aws-cdk/aws-kinesisfirehose-destinations/README.md b/packages/@aws-cdk/aws-kinesisfirehose-destinations/README.md
new file mode 100644
index 0000000000000..efe753ddbd6a6
--- /dev/null
+++ b/packages/@aws-cdk/aws-kinesisfirehose-destinations/README.md
@@ -0,0 +1,22 @@
+# Amazon Kinesis Data Firehose Destinations Library
+
+
+---
+
+![cdk-constructs: Experimental](https://img.shields.io/badge/cdk--constructs-experimental-important.svg?style=for-the-badge)
+
+> The APIs of higher level constructs in this module are experimental and under active development.
+> They are subject to non-backward compatible changes or removal in any future version. These are
+> not subject to the [Semantic Versioning](https://semver.org/) model and breaking changes will be
+> announced in the release notes. This means that while you may use them, you may need to update
+> your source code when upgrading to a newer version of this package.
+
+---
+
+
+
+This library provides constructs for adding destinations to an Amazon Kinesis Data Firehose
+delivery stream. Destinations are added by specifying the `destinations` prop when
+defining a delivery stream.
+
+See the [Amazon Kinesis Data Firehose module README](https://docs.aws.amazon.com/cdk/api/latest/docs/aws-kinesisfirehose-readme.html) for usage examples.
diff --git a/packages/@aws-cdk/aws-kinesisfirehose-destinations/jest.config.js b/packages/@aws-cdk/aws-kinesisfirehose-destinations/jest.config.js
new file mode 100644
index 0000000000000..54e28beb9798b
--- /dev/null
+++ b/packages/@aws-cdk/aws-kinesisfirehose-destinations/jest.config.js
@@ -0,0 +1,2 @@
+const baseConfig = require('cdk-build-tools/config/jest.config');
+module.exports = baseConfig;
diff --git a/packages/@aws-cdk/aws-kinesisfirehose-destinations/lib/index.ts b/packages/@aws-cdk/aws-kinesisfirehose-destinations/lib/index.ts
new file mode 100644
index 0000000000000..cb717f27167ea
--- /dev/null
+++ b/packages/@aws-cdk/aws-kinesisfirehose-destinations/lib/index.ts
@@ -0,0 +1 @@
+export * from './s3';
diff --git a/packages/@aws-cdk/aws-kinesisfirehose-destinations/lib/s3.ts b/packages/@aws-cdk/aws-kinesisfirehose-destinations/lib/s3.ts
new file mode 100644
index 0000000000000..4425e02865593
--- /dev/null
+++ b/packages/@aws-cdk/aws-kinesisfirehose-destinations/lib/s3.ts
@@ -0,0 +1,64 @@
+import * as iam from '@aws-cdk/aws-iam';
+import * as firehose from '@aws-cdk/aws-kinesisfirehose';
+import { CfnDeliveryStream } from '@aws-cdk/aws-kinesisfirehose';
+import * as s3 from '@aws-cdk/aws-s3';
+import { Construct } from 'constructs';
+
+/**
+ * Props for defining an S3 destination of a Kinesis Data Firehose delivery stream.
+ */
+export interface S3Props extends firehose.DestinationProps, firehose.CommonS3Props { }
+
+/**
+ * An S3 bucket destination for data from a Kinesis Data Firehose delivery stream.
+ */ +export class S3 extends firehose.DestinationBase { + constructor(private readonly bucket: s3.IBucket, private readonly s3Props: S3Props = {}) { + super(s3Props); + } + + bind(scope: Construct, options: firehose.DestinationBindOptions): firehose.DestinationConfig { + return { + properties: { + extendedS3DestinationConfiguration: this.createExtendedS3DestinationConfiguration(scope, options.deliveryStream), + }, + }; + } + + private createExtendedS3DestinationConfiguration( + scope: Construct, + deliveryStream: firehose.IDeliveryStream, + ): CfnDeliveryStream.ExtendedS3DestinationConfigurationProperty { + this.bucket.grantReadWrite(deliveryStream); + return { + cloudWatchLoggingOptions: this.createLoggingOptions(scope, deliveryStream, 'S3Destination'), + processingConfiguration: this.createProcessingConfig(deliveryStream), + roleArn: (deliveryStream.grantPrincipal as iam.IRole).roleArn, + s3BackupConfiguration: this.createBackupConfig(scope, deliveryStream), + s3BackupMode: this.getS3BackupMode(), + bufferingHints: this.createBufferingHints(this.s3Props.bufferingInterval, this.s3Props.bufferingSize), + bucketArn: this.bucket.bucketArn, + compressionFormat: this.s3Props.compression?.value, + encryptionConfiguration: this.createEncryptionConfig(deliveryStream, this.s3Props.encryptionKey), + errorOutputPrefix: this.s3Props.errorOutputPrefix, + prefix: this.s3Props.prefix, + }; + } + + private getS3BackupMode(): string | undefined { + if (this.s3Props.backupConfiguration?.backupBucket && !this.s3Props.backupConfiguration.backupMode) { + return 'Enabled'; + } + + switch (this.s3Props.backupConfiguration?.backupMode) { + case firehose.BackupMode.ALL: + return 'Enabled'; + case firehose.BackupMode.DISABLED: + return 'Disabled'; + case firehose.BackupMode.FAILED: + throw new Error('S3 destinations do not support BackupMode.FAILED'); + default: + return undefined; + } + } +} diff --git a/packages/@aws-cdk/aws-kinesisfirehose-destinations/package.json b/packages/@aws-cdk/aws-kinesisfirehose-destinations/package.json new file mode 100644 index 0000000000000..70d67a1dc9103 --- /dev/null +++ b/packages/@aws-cdk/aws-kinesisfirehose-destinations/package.json @@ -0,0 +1,115 @@ +{ + "name": "@aws-cdk/aws-kinesisfirehose-destinations", + "version": "0.0.0", + "description": "CDK Destinations Constructs for AWS Kinesis Firehose", + "main": "lib/index.js", + "types": "lib/index.d.ts", + "jsii": { + "outdir": "dist", + "targets": { + "java": { + "package": "software.amazon.awscdk.services.kinesisfirehose.destinations", + "maven": { + "groupId": "software.amazon.awscdk", + "artifactId": "kinesisfirehose-destinations" + } + }, + "dotnet": { + "namespace": "Amazon.CDK.AWS.KinesisFirehose.Destinations", + "packageId": "Amazon.CDK.AWS.KinesisFirehose.Destinations", + "iconUrl": "https://raw.githubusercontent.com/aws/aws-cdk/master/logo/default-256-dark.png" + }, + "python": { + "distName": "aws-cdk.aws-kinesisfirehose-destinations", + "module": "aws_cdk.aws_kinesisfirehose_destinations", + "classifiers": [ + "Framework :: AWS CDK", + "Framework :: AWS CDK :: 1" + ] + } + }, + "projectReferences": true + }, + "repository": { + "type": "git", + "url": "https://github.com/aws/aws-cdk.git", + "directory": "packages/@aws-cdk/aws-kinesisfirehose-destinations" + }, + "scripts": { + "build": "cdk-build", + "watch": "cdk-watch", + "lint": "cdk-lint", + "test": "cdk-test", + "integ": "cdk-integ", + "pkglint": "pkglint -f", + "package": "cdk-package", + "awslint": "cdk-awslint", + "build+test+package": "yarn build+test && 
yarn package", + "build+test": "yarn build && yarn test", + "compat": "cdk-compat", + "rosetta:extract": "yarn --silent jsii-rosetta extract", + "build+extract": "yarn build && yarn rosetta:extract", + "build+test+extract": "yarn build+test && yarn rosetta:extract" + }, + "keywords": [ + "aws", + "cdk", + "constructs", + "kinesisfirehose" + ], + "author": { + "name": "Amazon Web Services", + "url": "https://aws.amazon.com", + "organization": true + }, + "license": "Apache-2.0", + "devDependencies": { + "@types/aws-lambda": "^8.10.77", + "@types/jest": "^26.0.23", + "cdk-build-tools": "0.0.0", + "cdk-integ-tools": "0.0.0", + "cfn2ts": "0.0.0", + "jest": "^26.6.3", + "pkglint": "0.0.0", + "@aws-cdk/assert-internal": "0.0.0", + "@aws-cdk/aws-kms": "0.0.0", + "@aws-cdk/aws-lambda": "0.0.0", + "@aws-cdk/aws-lambda-nodejs": "0.0.0", + "@aws-cdk/aws-logs": "0.0.0" + }, + "dependencies": { + "@aws-cdk/aws-iam": "0.0.0", + "@aws-cdk/aws-kinesisfirehose": "0.0.0", + "@aws-cdk/aws-s3": "0.0.0", + "@aws-cdk/core": "0.0.0", + "constructs": "^3.3.69" + }, + "homepage": "https://github.com/aws/aws-cdk", + "peerDependencies": { + "@aws-cdk/aws-iam": "0.0.0", + "@aws-cdk/aws-kinesisfirehose": "0.0.0", + "@aws-cdk/aws-s3": "0.0.0", + "@aws-cdk/core": "0.0.0", + "constructs": "^3.3.69" + }, + "engines": { + "node": ">= 10.13.0 <13 || >=13.7.0" + }, + "stability": "experimental", + "maturity": "experimental", + "awslint": { + "exclude": [] + }, + "awscdkio": { + "announce": false + }, + "cdk-build": { + "jest": true, + "env": { + "AWSLINT_BASE_CONSTRUCT": true + } + }, + "publishConfig": { + "tag": "latest" + } +} diff --git a/packages/@aws-cdk/aws-kinesisfirehose-destinations/rosetta/default.ts-fixture b/packages/@aws-cdk/aws-kinesisfirehose-destinations/rosetta/default.ts-fixture new file mode 100644 index 0000000000000..8c002eb1618f7 --- /dev/null +++ b/packages/@aws-cdk/aws-kinesisfirehose-destinations/rosetta/default.ts-fixture @@ -0,0 +1,11 @@ +// Fixture with packages imported, but nothing else +import { Construct } from '@aws-cdk/core'; +import { S3 } from '@aws-cdk/aws-kinesisfirehose-destinations'; + +class Fixture extends Construct { + constructor(scope: Construct, id: string) { + super(scope, id); + + /// here + } +} diff --git a/packages/@aws-cdk/aws-kinesisfirehose-destinations/test/integ.s3-all-properties.expected.json b/packages/@aws-cdk/aws-kinesisfirehose-destinations/test/integ.s3-all-properties.expected.json new file mode 100644 index 0000000000000..828e541c2193e --- /dev/null +++ b/packages/@aws-cdk/aws-kinesisfirehose-destinations/test/integ.s3-all-properties.expected.json @@ -0,0 +1,842 @@ +{ + "Resources": { + "Bucket83908E77": { + "Type": "AWS::S3::Bucket", + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "BucketPolicyE9A3008A": { + "Type": "AWS::S3::BucketPolicy", + "Properties": { + "Bucket": { + "Ref": "Bucket83908E77" + }, + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "s3:GetBucket*", + "s3:List*", + "s3:DeleteObject*" + ], + "Effect": "Allow", + "Principal": { + "AWS": { + "Fn::GetAtt": [ + "CustomS3AutoDeleteObjectsCustomResourceProviderRole3B1BD092", + "Arn" + ] + } + }, + "Resource": [ + { + "Fn::GetAtt": [ + "Bucket83908E77", + "Arn" + ] + }, + { + "Fn::Join": [ + "", + [ + { + "Fn::GetAtt": [ + "Bucket83908E77", + "Arn" + ] + }, + "/*" + ] + ] + } + ] + } + ], + "Version": "2012-10-17" + } + } + }, + "BucketAutoDeleteObjectsCustomResourceBAFD23C2": { + "Type": "Custom::S3AutoDeleteObjects", + "Properties": { + "ServiceToken": { + 
"Fn::GetAtt": [ + "CustomS3AutoDeleteObjectsCustomResourceProviderHandler9D90184F", + "Arn" + ] + }, + "BucketName": { + "Ref": "Bucket83908E77" + } + }, + "DependsOn": [ + "BucketPolicyE9A3008A" + ], + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "CustomS3AutoDeleteObjectsCustomResourceProviderRole3B1BD092": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Version": "2012-10-17", + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "lambda.amazonaws.com" + } + } + ] + }, + "ManagedPolicyArns": [ + { + "Fn::Sub": "arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + } + ] + } + }, + "CustomS3AutoDeleteObjectsCustomResourceProviderHandler9D90184F": { + "Type": "AWS::Lambda::Function", + "Properties": { + "Code": { + "S3Bucket": { + "Ref": "AssetParameters1a8becf42c48697a059094af1e94aa6bc6df0512d30433db8c22618ca02dfca1S3BucketF01ADF6B" + }, + "S3Key": { + "Fn::Join": [ + "", + [ + { + "Fn::Select": [ + 0, + { + "Fn::Split": [ + "||", + { + "Ref": "AssetParameters1a8becf42c48697a059094af1e94aa6bc6df0512d30433db8c22618ca02dfca1S3VersionKey6FC34F51" + } + ] + } + ] + }, + { + "Fn::Select": [ + 1, + { + "Fn::Split": [ + "||", + { + "Ref": "AssetParameters1a8becf42c48697a059094af1e94aa6bc6df0512d30433db8c22618ca02dfca1S3VersionKey6FC34F51" + } + ] + } + ] + } + ] + ] + } + }, + "Timeout": 900, + "MemorySize": 128, + "Handler": "__entrypoint__.handler", + "Role": { + "Fn::GetAtt": [ + "CustomS3AutoDeleteObjectsCustomResourceProviderRole3B1BD092", + "Arn" + ] + }, + "Runtime": "nodejs12.x", + "Description": { + "Fn::Join": [ + "", + [ + "Lambda function for auto-deleting objects in ", + { + "Ref": "Bucket83908E77" + }, + " S3 bucket." 
+ ] + ] + } + }, + "DependsOn": [ + "CustomS3AutoDeleteObjectsCustomResourceProviderRole3B1BD092" + ] + }, + "LogGroupF5B46931": { + "Type": "AWS::Logs::LogGroup", + "Properties": { + "RetentionInDays": 731 + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "LogGroupS3Destination70CE1003": { + "Type": "AWS::Logs::LogStream", + "Properties": { + "LogGroupName": { + "Ref": "LogGroupF5B46931" + } + }, + "UpdateReplacePolicy": "Retain", + "DeletionPolicy": "Retain" + }, + "DataProcessorFunctionServiceRole12E05500": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "lambda.amazonaws.com" + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + ] + ] + } + ] + } + }, + "DataProcessorFunctionAD472B9A": { + "Type": "AWS::Lambda::Function", + "Properties": { + "Code": { + "S3Bucket": { + "Ref": "AssetParameters66e1a8cc6cff75df045ec65bb22ef80b4c00485bab6397c62b4da1a538f19c99S3BucketD955E0E0" + }, + "S3Key": { + "Fn::Join": [ + "", + [ + { + "Fn::Select": [ + 0, + { + "Fn::Split": [ + "||", + { + "Ref": "AssetParameters66e1a8cc6cff75df045ec65bb22ef80b4c00485bab6397c62b4da1a538f19c99S3VersionKey55241E28" + } + ] + } + ] + }, + { + "Fn::Select": [ + 1, + { + "Fn::Split": [ + "||", + { + "Ref": "AssetParameters66e1a8cc6cff75df045ec65bb22ef80b4c00485bab6397c62b4da1a538f19c99S3VersionKey55241E28" + } + ] + } + ] + } + ] + ] + } + }, + "Role": { + "Fn::GetAtt": [ + "DataProcessorFunctionServiceRole12E05500", + "Arn" + ] + }, + "Environment": { + "Variables": { + "AWS_NODEJS_CONNECTION_REUSE_ENABLED": "1" + } + }, + "Handler": "index.handler", + "Runtime": "nodejs14.x", + "Timeout": 60 + }, + "DependsOn": [ + "DataProcessorFunctionServiceRole12E05500" + ] + }, + "BackupBucket26B8E51C": { + "Type": "AWS::S3::Bucket", + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "BackupBucketPolicy8C403F71": { + "Type": "AWS::S3::BucketPolicy", + "Properties": { + "Bucket": { + "Ref": "BackupBucket26B8E51C" + }, + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "s3:GetBucket*", + "s3:List*", + "s3:DeleteObject*" + ], + "Effect": "Allow", + "Principal": { + "AWS": { + "Fn::GetAtt": [ + "CustomS3AutoDeleteObjectsCustomResourceProviderRole3B1BD092", + "Arn" + ] + } + }, + "Resource": [ + { + "Fn::GetAtt": [ + "BackupBucket26B8E51C", + "Arn" + ] + }, + { + "Fn::Join": [ + "", + [ + { + "Fn::GetAtt": [ + "BackupBucket26B8E51C", + "Arn" + ] + }, + "/*" + ] + ] + } + ] + } + ], + "Version": "2012-10-17" + } + } + }, + "BackupBucketAutoDeleteObjectsCustomResourceD2F511C5": { + "Type": "Custom::S3AutoDeleteObjects", + "Properties": { + "ServiceToken": { + "Fn::GetAtt": [ + "CustomS3AutoDeleteObjectsCustomResourceProviderHandler9D90184F", + "Arn" + ] + }, + "BucketName": { + "Ref": "BackupBucket26B8E51C" + } + }, + "DependsOn": [ + "BackupBucketPolicy8C403F71" + ], + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "Key961B73FD": { + "Type": "AWS::KMS::Key", + "Properties": { + "KeyPolicy": { + "Statement": [ + { + "Action": "kms:*", + "Effect": "Allow", + "Principal": { + "AWS": { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::", + { + "Ref": "AWS::AccountId" + }, + ":root" + ] + ] + } + }, + "Resource": "*" + } + ], + "Version": "2012-10-17" + } 
+ }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "BackupKey60B97760": { + "Type": "AWS::KMS::Key", + "Properties": { + "KeyPolicy": { + "Statement": [ + { + "Action": "kms:*", + "Effect": "Allow", + "Principal": { + "AWS": { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::", + { + "Ref": "AWS::AccountId" + }, + ":root" + ] + ] + } + }, + "Resource": "*" + } + ], + "Version": "2012-10-17" + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "DeliveryStreamServiceRole964EEBCC": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "firehose.amazonaws.com" + } + } + ], + "Version": "2012-10-17" + } + } + }, + "DeliveryStreamServiceRoleDefaultPolicyB87D9ACF": { + "Type": "AWS::IAM::Policy", + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "s3:GetObject*", + "s3:GetBucket*", + "s3:List*", + "s3:DeleteObject*", + "s3:PutObject", + "s3:Abort*" + ], + "Effect": "Allow", + "Resource": [ + { + "Fn::GetAtt": [ + "Bucket83908E77", + "Arn" + ] + }, + { + "Fn::Join": [ + "", + [ + { + "Fn::GetAtt": [ + "Bucket83908E77", + "Arn" + ] + }, + "/*" + ] + ] + } + ] + }, + { + "Action": [ + "logs:CreateLogStream", + "logs:PutLogEvents" + ], + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "LogGroupF5B46931", + "Arn" + ] + } + }, + { + "Action": "lambda:InvokeFunction", + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "DataProcessorFunctionAD472B9A", + "Arn" + ] + } + }, + { + "Action": [ + "s3:GetObject*", + "s3:GetBucket*", + "s3:List*", + "s3:DeleteObject*", + "s3:PutObject", + "s3:Abort*" + ], + "Effect": "Allow", + "Resource": [ + { + "Fn::GetAtt": [ + "BackupBucket26B8E51C", + "Arn" + ] + }, + { + "Fn::Join": [ + "", + [ + { + "Fn::GetAtt": [ + "BackupBucket26B8E51C", + "Arn" + ] + }, + "/*" + ] + ] + } + ] + }, + { + "Action": [ + "kms:Decrypt", + "kms:Encrypt", + "kms:ReEncrypt*", + "kms:GenerateDataKey*" + ], + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "BackupKey60B97760", + "Arn" + ] + } + }, + { + "Action": [ + "logs:CreateLogStream", + "logs:PutLogEvents" + ], + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "DeliveryStreamBackupLogGroup3F700BD5", + "Arn" + ] + } + }, + { + "Action": [ + "kms:Decrypt", + "kms:Encrypt", + "kms:ReEncrypt*", + "kms:GenerateDataKey*" + ], + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "Key961B73FD", + "Arn" + ] + } + } + ], + "Version": "2012-10-17" + }, + "PolicyName": "DeliveryStreamServiceRoleDefaultPolicyB87D9ACF", + "Roles": [ + { + "Ref": "DeliveryStreamServiceRole964EEBCC" + } + ] + } + }, + "DeliveryStreamBackupLogGroup3F700BD5": { + "Type": "AWS::Logs::LogGroup", + "Properties": { + "RetentionInDays": 731 + }, + "UpdateReplacePolicy": "Retain", + "DeletionPolicy": "Retain" + }, + "DeliveryStreamBackupLogGroupS3Backup2970341F": { + "Type": "AWS::Logs::LogStream", + "Properties": { + "LogGroupName": { + "Ref": "DeliveryStreamBackupLogGroup3F700BD5" + } + }, + "UpdateReplacePolicy": "Retain", + "DeletionPolicy": "Retain" + }, + "DeliveryStreamF6D5572D": { + "Type": "AWS::KinesisFirehose::DeliveryStream", + "Properties": { + "DeliveryStreamType": "DirectPut", + "ExtendedS3DestinationConfiguration": { + "BucketARN": { + "Fn::GetAtt": [ + "Bucket83908E77", + "Arn" + ] + }, + "BufferingHints": { + "IntervalInSeconds": 60, + "SizeInMBs": 1 + }, + "CloudWatchLoggingOptions": { + 
"Enabled": true, + "LogGroupName": { + "Ref": "LogGroupF5B46931" + }, + "LogStreamName": { + "Ref": "LogGroupS3Destination70CE1003" + } + }, + "CompressionFormat": "ZIP", + "EncryptionConfiguration": { + "KMSEncryptionConfig": { + "AWSKMSKeyARN": { + "Fn::GetAtt": [ + "Key961B73FD", + "Arn" + ] + } + } + }, + "ErrorOutputPrefix": "errorPrefix", + "Prefix": "regularPrefix", + "ProcessingConfiguration": { + "Enabled": true, + "Processors": [ + { + "Parameters": [ + { + "ParameterName": "RoleArn", + "ParameterValue": { + "Fn::GetAtt": [ + "DeliveryStreamServiceRole964EEBCC", + "Arn" + ] + } + }, + { + "ParameterName": "LambdaArn", + "ParameterValue": { + "Fn::GetAtt": [ + "DataProcessorFunctionAD472B9A", + "Arn" + ] + } + }, + { + "ParameterName": "BufferIntervalInSeconds", + "ParameterValue": "60" + }, + { + "ParameterName": "BufferSizeInMBs", + "ParameterValue": "1" + }, + { + "ParameterName": "NumberOfRetries", + "ParameterValue": "1" + } + ], + "Type": "Lambda" + } + ] + }, + "RoleARN": { + "Fn::GetAtt": [ + "DeliveryStreamServiceRole964EEBCC", + "Arn" + ] + }, + "S3BackupConfiguration": { + "BucketARN": { + "Fn::GetAtt": [ + "BackupBucket26B8E51C", + "Arn" + ] + }, + "BufferingHints": { + "IntervalInSeconds": 60, + "SizeInMBs": 1 + }, + "CloudWatchLoggingOptions": { + "Enabled": true, + "LogGroupName": { + "Ref": "DeliveryStreamBackupLogGroup3F700BD5" + }, + "LogStreamName": { + "Ref": "DeliveryStreamBackupLogGroupS3Backup2970341F" + } + }, + "CompressionFormat": "GZIP", + "EncryptionConfiguration": { + "KMSEncryptionConfig": { + "AWSKMSKeyARN": { + "Fn::GetAtt": [ + "BackupKey60B97760", + "Arn" + ] + } + } + }, + "ErrorOutputPrefix": "backupErrorPrefix", + "Prefix": "backupPrefix", + "RoleARN": { + "Fn::GetAtt": [ + "DeliveryStreamServiceRole964EEBCC", + "Arn" + ] + } + }, + "S3BackupMode": "Enabled" + } + }, + "DependsOn": [ + "DeliveryStreamServiceRoleDefaultPolicyB87D9ACF", + "DeliveryStreamServiceRole964EEBCC" + ] + } + }, + "Parameters": { + "AssetParameters1a8becf42c48697a059094af1e94aa6bc6df0512d30433db8c22618ca02dfca1S3BucketF01ADF6B": { + "Type": "String", + "Description": "S3 bucket for asset \"1a8becf42c48697a059094af1e94aa6bc6df0512d30433db8c22618ca02dfca1\"" + }, + "AssetParameters1a8becf42c48697a059094af1e94aa6bc6df0512d30433db8c22618ca02dfca1S3VersionKey6FC34F51": { + "Type": "String", + "Description": "S3 key for asset version \"1a8becf42c48697a059094af1e94aa6bc6df0512d30433db8c22618ca02dfca1\"" + }, + "AssetParameters1a8becf42c48697a059094af1e94aa6bc6df0512d30433db8c22618ca02dfca1ArtifactHash9ECACDFD": { + "Type": "String", + "Description": "Artifact hash for asset \"1a8becf42c48697a059094af1e94aa6bc6df0512d30433db8c22618ca02dfca1\"" + }, + "AssetParameters66e1a8cc6cff75df045ec65bb22ef80b4c00485bab6397c62b4da1a538f19c99S3BucketD955E0E0": { + "Type": "String", + "Description": "S3 bucket for asset \"66e1a8cc6cff75df045ec65bb22ef80b4c00485bab6397c62b4da1a538f19c99\"" + }, + "AssetParameters66e1a8cc6cff75df045ec65bb22ef80b4c00485bab6397c62b4da1a538f19c99S3VersionKey55241E28": { + "Type": "String", + "Description": "S3 key for asset version \"66e1a8cc6cff75df045ec65bb22ef80b4c00485bab6397c62b4da1a538f19c99\"" + }, + "AssetParameters66e1a8cc6cff75df045ec65bb22ef80b4c00485bab6397c62b4da1a538f19c99ArtifactHash998CCC14": { + "Type": "String", + "Description": "Artifact hash for asset \"66e1a8cc6cff75df045ec65bb22ef80b4c00485bab6397c62b4da1a538f19c99\"" + } + }, + "Mappings": { + "DeliveryStreamFirehoseCIDRMappingE9233479": { + "af-south-1": { + "FirehoseCidrBlock": 
"13.244.121.224/27" + }, + "ap-east-1": { + "FirehoseCidrBlock": "18.162.221.32/27" + }, + "ap-northeast-1": { + "FirehoseCidrBlock": "13.113.196.224/27" + }, + "ap-northeast-2": { + "FirehoseCidrBlock": "13.209.1.64/27" + }, + "ap-northeast-3": { + "FirehoseCidrBlock": "13.208.177.192/27" + }, + "ap-south-1": { + "FirehoseCidrBlock": "13.232.67.32/27" + }, + "ap-southeast-1": { + "FirehoseCidrBlock": "13.228.64.192/27" + }, + "ap-southeast-2": { + "FirehoseCidrBlock": "13.210.67.224/27" + }, + "ca-central-1": { + "FirehoseCidrBlock": "35.183.92.128/27" + }, + "cn-north-1": { + "FirehoseCidrBlock": "52.81.151.32/27" + }, + "cn-northwest-1": { + "FirehoseCidrBlock": "161.189.23.64/27" + }, + "eu-central-1": { + "FirehoseCidrBlock": "35.158.127.160/27" + }, + "eu-north-1": { + "FirehoseCidrBlock": "13.53.63.224/27" + }, + "eu-south-1": { + "FirehoseCidrBlock": "15.161.135.128/27" + }, + "eu-west-1": { + "FirehoseCidrBlock": "52.19.239.192/27" + }, + "eu-west-2": { + "FirehoseCidrBlock": "18.130.1.96/27" + }, + "eu-west-3": { + "FirehoseCidrBlock": "35.180.1.96/27" + }, + "me-south-1": { + "FirehoseCidrBlock": "15.185.91.0/27" + }, + "sa-east-1": { + "FirehoseCidrBlock": "18.228.1.128/27" + }, + "us-east-1": { + "FirehoseCidrBlock": "52.70.63.192/27" + }, + "us-east-2": { + "FirehoseCidrBlock": "13.58.135.96/27" + }, + "us-gov-east-1": { + "FirehoseCidrBlock": "18.253.138.96/27" + }, + "us-gov-west-1": { + "FirehoseCidrBlock": "52.61.204.160/27" + }, + "us-west-1": { + "FirehoseCidrBlock": "13.57.135.192/27" + }, + "us-west-2": { + "FirehoseCidrBlock": "52.89.255.224/27" + } + } + } +} \ No newline at end of file diff --git a/packages/@aws-cdk/aws-kinesisfirehose-destinations/test/integ.s3-all-properties.ts b/packages/@aws-cdk/aws-kinesisfirehose-destinations/test/integ.s3-all-properties.ts new file mode 100644 index 0000000000000..a7b91aef819f9 --- /dev/null +++ b/packages/@aws-cdk/aws-kinesisfirehose-destinations/test/integ.s3-all-properties.ts @@ -0,0 +1,70 @@ +#!/usr/bin/env node +import * as path from 'path'; +import * as firehose from '@aws-cdk/aws-kinesisfirehose'; +import * as kms from '@aws-cdk/aws-kms'; +import * as lambdanodejs from '@aws-cdk/aws-lambda-nodejs'; +import * as logs from '@aws-cdk/aws-logs'; +import * as s3 from '@aws-cdk/aws-s3'; +import * as cdk from '@aws-cdk/core'; +import * as destinations from '../lib'; + +const app = new cdk.App(); + +const stack = new cdk.Stack(app, 'aws-cdk-firehose-delivery-stream-s3-all-properties'); + +const bucket = new s3.Bucket(stack, 'Bucket', { + removalPolicy: cdk.RemovalPolicy.DESTROY, + autoDeleteObjects: true, +}); + +const logGroup = new logs.LogGroup(stack, 'LogGroup', { + removalPolicy: cdk.RemovalPolicy.DESTROY, +}); + +const processor = new firehose.LambdaFunctionProcessor(new lambdanodejs.NodejsFunction(stack, 'DataProcessorFunction', { + entry: path.join(__dirname, 'processor.js'), + timeout: cdk.Duration.minutes(1), +}), { + bufferInterval: cdk.Duration.seconds(60), + bufferSize: cdk.Size.mebibytes(1), + retries: 1, +}); + +const backupBucket = new s3.Bucket(stack, 'BackupBucket', { + removalPolicy: cdk.RemovalPolicy.DESTROY, + autoDeleteObjects: true, +}); + +const key = new kms.Key(stack, 'Key', { + removalPolicy: cdk.RemovalPolicy.DESTROY, +}); + +const backupKey = new kms.Key(stack, 'BackupKey', { + removalPolicy: cdk.RemovalPolicy.DESTROY, +}); + +new firehose.DeliveryStream(stack, 'Delivery Stream', { + destinations: [new destinations.S3(bucket, { + logging: true, + logGroup: logGroup, + processors: [processor], + 
backupConfiguration: { + backupMode: firehose.BackupMode.ALL, + backupBucket: backupBucket, + prefix: 'backupPrefix', + errorOutputPrefix: 'backupErrorPrefix', + compression: firehose.Compression.GZIP, + bufferingInterval: cdk.Duration.seconds(60), + bufferingSize: cdk.Size.mebibytes(1), + encryptionKey: backupKey, + }, + prefix: 'regularPrefix', + errorOutputPrefix: 'errorPrefix', + compression: firehose.Compression.ZIP, + bufferingInterval: cdk.Duration.seconds(60), + bufferingSize: cdk.Size.mebibytes(1), + encryptionKey: key, + })], +}); + +app.synth(); \ No newline at end of file diff --git a/packages/@aws-cdk/aws-kinesisfirehose-destinations/test/integ.s3-basic.expected.json b/packages/@aws-cdk/aws-kinesisfirehose-destinations/test/integ.s3-basic.expected.json new file mode 100644 index 0000000000000..00550457040d9 --- /dev/null +++ b/packages/@aws-cdk/aws-kinesisfirehose-destinations/test/integ.s3-basic.expected.json @@ -0,0 +1,395 @@ +{ + "Resources": { + "Bucket83908E77": { + "Type": "AWS::S3::Bucket", + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "BucketPolicyE9A3008A": { + "Type": "AWS::S3::BucketPolicy", + "Properties": { + "Bucket": { + "Ref": "Bucket83908E77" + }, + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "s3:GetBucket*", + "s3:List*", + "s3:DeleteObject*" + ], + "Effect": "Allow", + "Principal": { + "AWS": { + "Fn::GetAtt": [ + "CustomS3AutoDeleteObjectsCustomResourceProviderRole3B1BD092", + "Arn" + ] + } + }, + "Resource": [ + { + "Fn::GetAtt": [ + "Bucket83908E77", + "Arn" + ] + }, + { + "Fn::Join": [ + "", + [ + { + "Fn::GetAtt": [ + "Bucket83908E77", + "Arn" + ] + }, + "/*" + ] + ] + } + ] + } + ], + "Version": "2012-10-17" + } + } + }, + "BucketAutoDeleteObjectsCustomResourceBAFD23C2": { + "Type": "Custom::S3AutoDeleteObjects", + "Properties": { + "ServiceToken": { + "Fn::GetAtt": [ + "CustomS3AutoDeleteObjectsCustomResourceProviderHandler9D90184F", + "Arn" + ] + }, + "BucketName": { + "Ref": "Bucket83908E77" + } + }, + "DependsOn": [ + "BucketPolicyE9A3008A" + ], + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "CustomS3AutoDeleteObjectsCustomResourceProviderRole3B1BD092": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Version": "2012-10-17", + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "lambda.amazonaws.com" + } + } + ] + }, + "ManagedPolicyArns": [ + { + "Fn::Sub": "arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + } + ] + } + }, + "CustomS3AutoDeleteObjectsCustomResourceProviderHandler9D90184F": { + "Type": "AWS::Lambda::Function", + "Properties": { + "Code": { + "S3Bucket": { + "Ref": "AssetParameters1a8becf42c48697a059094af1e94aa6bc6df0512d30433db8c22618ca02dfca1S3BucketF01ADF6B" + }, + "S3Key": { + "Fn::Join": [ + "", + [ + { + "Fn::Select": [ + 0, + { + "Fn::Split": [ + "||", + { + "Ref": "AssetParameters1a8becf42c48697a059094af1e94aa6bc6df0512d30433db8c22618ca02dfca1S3VersionKey6FC34F51" + } + ] + } + ] + }, + { + "Fn::Select": [ + 1, + { + "Fn::Split": [ + "||", + { + "Ref": "AssetParameters1a8becf42c48697a059094af1e94aa6bc6df0512d30433db8c22618ca02dfca1S3VersionKey6FC34F51" + } + ] + } + ] + } + ] + ] + } + }, + "Timeout": 900, + "MemorySize": 128, + "Handler": "__entrypoint__.handler", + "Role": { + "Fn::GetAtt": [ + "CustomS3AutoDeleteObjectsCustomResourceProviderRole3B1BD092", + "Arn" + ] + }, + "Runtime": "nodejs12.x", + "Description": { + "Fn::Join": [ + "", + [ + 
"Lambda function for auto-deleting objects in ", + { + "Ref": "Bucket83908E77" + }, + " S3 bucket." + ] + ] + } + }, + "DependsOn": [ + "CustomS3AutoDeleteObjectsCustomResourceProviderRole3B1BD092" + ] + }, + "DeliveryStreamServiceRole964EEBCC": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "firehose.amazonaws.com" + } + } + ], + "Version": "2012-10-17" + } + } + }, + "DeliveryStreamServiceRoleDefaultPolicyB87D9ACF": { + "Type": "AWS::IAM::Policy", + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "s3:GetObject*", + "s3:GetBucket*", + "s3:List*", + "s3:DeleteObject*", + "s3:PutObject", + "s3:Abort*" + ], + "Effect": "Allow", + "Resource": [ + { + "Fn::GetAtt": [ + "Bucket83908E77", + "Arn" + ] + }, + { + "Fn::Join": [ + "", + [ + { + "Fn::GetAtt": [ + "Bucket83908E77", + "Arn" + ] + }, + "/*" + ] + ] + } + ] + }, + { + "Action": [ + "logs:CreateLogStream", + "logs:PutLogEvents" + ], + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "DeliveryStreamLogGroup9D8FA3BB", + "Arn" + ] + } + } + ], + "Version": "2012-10-17" + }, + "PolicyName": "DeliveryStreamServiceRoleDefaultPolicyB87D9ACF", + "Roles": [ + { + "Ref": "DeliveryStreamServiceRole964EEBCC" + } + ] + } + }, + "DeliveryStreamLogGroup9D8FA3BB": { + "Type": "AWS::Logs::LogGroup", + "Properties": { + "RetentionInDays": 731 + }, + "UpdateReplacePolicy": "Retain", + "DeletionPolicy": "Retain" + }, + "DeliveryStreamLogGroupS3DestinationE25573DB": { + "Type": "AWS::Logs::LogStream", + "Properties": { + "LogGroupName": { + "Ref": "DeliveryStreamLogGroup9D8FA3BB" + } + }, + "UpdateReplacePolicy": "Retain", + "DeletionPolicy": "Retain" + }, + "DeliveryStreamF6D5572D": { + "Type": "AWS::KinesisFirehose::DeliveryStream", + "Properties": { + "DeliveryStreamType": "DirectPut", + "ExtendedS3DestinationConfiguration": { + "BucketARN": { + "Fn::GetAtt": [ + "Bucket83908E77", + "Arn" + ] + }, + "CloudWatchLoggingOptions": { + "Enabled": true, + "LogGroupName": { + "Ref": "DeliveryStreamLogGroup9D8FA3BB" + }, + "LogStreamName": { + "Ref": "DeliveryStreamLogGroupS3DestinationE25573DB" + } + }, + "EncryptionConfiguration": { + "NoEncryptionConfig": "NoEncryption" + }, + "RoleARN": { + "Fn::GetAtt": [ + "DeliveryStreamServiceRole964EEBCC", + "Arn" + ] + } + } + }, + "DependsOn": [ + "DeliveryStreamServiceRoleDefaultPolicyB87D9ACF", + "DeliveryStreamServiceRole964EEBCC" + ] + } + }, + "Parameters": { + "AssetParameters1a8becf42c48697a059094af1e94aa6bc6df0512d30433db8c22618ca02dfca1S3BucketF01ADF6B": { + "Type": "String", + "Description": "S3 bucket for asset \"1a8becf42c48697a059094af1e94aa6bc6df0512d30433db8c22618ca02dfca1\"" + }, + "AssetParameters1a8becf42c48697a059094af1e94aa6bc6df0512d30433db8c22618ca02dfca1S3VersionKey6FC34F51": { + "Type": "String", + "Description": "S3 key for asset version \"1a8becf42c48697a059094af1e94aa6bc6df0512d30433db8c22618ca02dfca1\"" + }, + "AssetParameters1a8becf42c48697a059094af1e94aa6bc6df0512d30433db8c22618ca02dfca1ArtifactHash9ECACDFD": { + "Type": "String", + "Description": "Artifact hash for asset \"1a8becf42c48697a059094af1e94aa6bc6df0512d30433db8c22618ca02dfca1\"" + } + }, + "Mappings": { + "DeliveryStreamFirehoseCIDRMappingE9233479": { + "af-south-1": { + "FirehoseCidrBlock": "13.244.121.224/27" + }, + "ap-east-1": { + "FirehoseCidrBlock": "18.162.221.32/27" + }, + "ap-northeast-1": { + "FirehoseCidrBlock": "13.113.196.224/27" + }, + 
"ap-northeast-2": { + "FirehoseCidrBlock": "13.209.1.64/27" + }, + "ap-northeast-3": { + "FirehoseCidrBlock": "13.208.177.192/27" + }, + "ap-south-1": { + "FirehoseCidrBlock": "13.232.67.32/27" + }, + "ap-southeast-1": { + "FirehoseCidrBlock": "13.228.64.192/27" + }, + "ap-southeast-2": { + "FirehoseCidrBlock": "13.210.67.224/27" + }, + "ca-central-1": { + "FirehoseCidrBlock": "35.183.92.128/27" + }, + "cn-north-1": { + "FirehoseCidrBlock": "52.81.151.32/27" + }, + "cn-northwest-1": { + "FirehoseCidrBlock": "161.189.23.64/27" + }, + "eu-central-1": { + "FirehoseCidrBlock": "35.158.127.160/27" + }, + "eu-north-1": { + "FirehoseCidrBlock": "13.53.63.224/27" + }, + "eu-south-1": { + "FirehoseCidrBlock": "15.161.135.128/27" + }, + "eu-west-1": { + "FirehoseCidrBlock": "52.19.239.192/27" + }, + "eu-west-2": { + "FirehoseCidrBlock": "18.130.1.96/27" + }, + "eu-west-3": { + "FirehoseCidrBlock": "35.180.1.96/27" + }, + "me-south-1": { + "FirehoseCidrBlock": "15.185.91.0/27" + }, + "sa-east-1": { + "FirehoseCidrBlock": "18.228.1.128/27" + }, + "us-east-1": { + "FirehoseCidrBlock": "52.70.63.192/27" + }, + "us-east-2": { + "FirehoseCidrBlock": "13.58.135.96/27" + }, + "us-gov-east-1": { + "FirehoseCidrBlock": "18.253.138.96/27" + }, + "us-gov-west-1": { + "FirehoseCidrBlock": "52.61.204.160/27" + }, + "us-west-1": { + "FirehoseCidrBlock": "13.57.135.192/27" + }, + "us-west-2": { + "FirehoseCidrBlock": "52.89.255.224/27" + } + } + } +} \ No newline at end of file diff --git a/packages/@aws-cdk/aws-kinesisfirehose-destinations/test/integ.s3-basic.ts b/packages/@aws-cdk/aws-kinesisfirehose-destinations/test/integ.s3-basic.ts new file mode 100644 index 0000000000000..3bc9a03b254ac --- /dev/null +++ b/packages/@aws-cdk/aws-kinesisfirehose-destinations/test/integ.s3-basic.ts @@ -0,0 +1,20 @@ +#!/usr/bin/env node +import * as firehose from '@aws-cdk/aws-kinesisfirehose'; +import * as s3 from '@aws-cdk/aws-s3'; +import * as cdk from '@aws-cdk/core'; +import * as destinations from '../lib'; + +const app = new cdk.App(); + +const stack = new cdk.Stack(app, 'aws-cdk-firehose-delivery-stream-s3-basic'); + +const bucket = new s3.Bucket(stack, 'Bucket', { + removalPolicy: cdk.RemovalPolicy.DESTROY, + autoDeleteObjects: true, +}); + +new firehose.DeliveryStream(stack, 'Delivery Stream', { + destinations: [new destinations.S3(bucket)], +}); + +app.synth(); \ No newline at end of file diff --git a/packages/@aws-cdk/aws-kinesisfirehose-destinations/test/processor.js b/packages/@aws-cdk/aws-kinesisfirehose-destinations/test/processor.js new file mode 100644 index 0000000000000..05838cca8796d --- /dev/null +++ b/packages/@aws-cdk/aws-kinesisfirehose-destinations/test/processor.js @@ -0,0 +1,11 @@ +export async function handler(event, context) { + /* Process the list of records and transform them */ + const output = event.records.map((record) => ({ + /* This transformation is the "identity" transformation, the data is left intact */ + recordId: record.recordId, + result: 'Ok', + data: record.data, + })); + console.log(`Processing completed. 
Successful records ${output.length}.`); + return { records: output }; +}; \ No newline at end of file diff --git a/packages/@aws-cdk/aws-kinesisfirehose-destinations/test/s3-destination.test.ts b/packages/@aws-cdk/aws-kinesisfirehose-destinations/test/s3-destination.test.ts new file mode 100644 index 0000000000000..2bc5dad13f6c9 --- /dev/null +++ b/packages/@aws-cdk/aws-kinesisfirehose-destinations/test/s3-destination.test.ts @@ -0,0 +1,301 @@ +import { arrayWith } from '@aws-cdk/assert-internal/'; +import '@aws-cdk/assert-internal/jest'; +import * as iam from '@aws-cdk/aws-iam'; +import * as firehose from '@aws-cdk/aws-kinesisfirehose'; +import * as lambda from '@aws-cdk/aws-lambda'; +import * as s3 from '@aws-cdk/aws-s3'; +import * as cdk from '@aws-cdk/core'; +import * as firehosedestinations from '../lib'; + +describe('S3 destination', () => { + let stack: cdk.Stack; + let bucket: s3.IBucket; + let deliveryStreamRole: iam.IRole; + let deliveryStream: firehose.IDeliveryStream; + + beforeEach(() => { + stack = new cdk.Stack(); + bucket = new s3.Bucket(stack, 'destination'); + deliveryStreamRole = iam.Role.fromRoleArn(stack, 'Delivery Stream Role', 'arn:aws:iam::111122223333:role/DeliveryStreamRole'); + deliveryStream = firehose.DeliveryStream.fromDeliveryStreamAttributes(stack, 'Delivery Stream', { + deliveryStreamName: 'mydeliverystream', + role: deliveryStreamRole, + }); + }); + + it('provides defaults when no configuration is provided', () => { + const destination = new firehosedestinations.S3(bucket); + + const destinationProperties = destination.bind(stack, { deliveryStream }).properties; + + expect(stack.resolve(destinationProperties)).toStrictEqual({ + extendedS3DestinationConfiguration: { + bucketArn: stack.resolve(bucket.bucketArn), + cloudWatchLoggingOptions: { + enabled: true, + logGroupName: { + Ref: 'LogGroupF5B46931', + }, + logStreamName: { + Ref: 'LogGroupS3Destination70CE1003', + }, + }, + encryptionConfiguration: { + noEncryptionConfig: 'NoEncryption', + }, + roleArn: stack.resolve(deliveryStreamRole.roleArn), + }, + }); + }); + + it('allows full configuration', () => { + const processor = lambda.Function.fromFunctionAttributes(stack, 'Processor', { + functionArn: 'arn:aws:lambda:us-west-2:111122223333:function/Processor', + sameEnvironment: true, + }); + const destination = new firehosedestinations.S3(bucket, { + logging: true, + processors: [ + new firehose.LambdaFunctionProcessor(processor, { + bufferInterval: cdk.Duration.seconds(60), + bufferSize: cdk.Size.mebibytes(1), + retries: 1, + }), + ], + backupConfiguration: { + backupMode: firehose.BackupMode.ALL, + prefix: 'backupPrefix', + errorOutputPrefix: 'backupErrorPrefix', + compression: firehose.Compression.GZIP, + bufferingInterval: cdk.Duration.seconds(60), + bufferingSize: cdk.Size.mebibytes(1), + }, + prefix: 'regularPrefix', + errorOutputPrefix: 'errorPrefix', + compression: firehose.Compression.ZIP, + bufferingInterval: cdk.Duration.seconds(60), + bufferingSize: cdk.Size.mebibytes(1), + }); + + const destinationProperties = destination.bind(stack, { deliveryStream }).properties; + + expect(stack.resolve(destinationProperties)).toStrictEqual({ + extendedS3DestinationConfiguration: { + bucketArn: stack.resolve(bucket.bucketArn), + bufferingHints: { + intervalInSeconds: 60, + sizeInMBs: 1, + }, + cloudWatchLoggingOptions: { + enabled: true, + logGroupName: { + Ref: 'LogGroupF5B46931', + }, + logStreamName: { + Ref: 'LogGroupS3Destination70CE1003', + }, + }, + compressionFormat: 'ZIP', + 
encryptionConfiguration: { + noEncryptionConfig: 'NoEncryption', + }, + errorOutputPrefix: 'errorPrefix', + prefix: 'regularPrefix', + processingConfiguration: { + enabled: true, + processors: [{ + parameters: [{ + parameterName: 'RoleArn', + parameterValue: deliveryStreamRole.roleArn, + }, { + parameterName: 'LambdaArn', + parameterValue: processor.functionArn, + }, { + parameterName: 'BufferIntervalInSeconds', + parameterValue: '60', + }, { + parameterName: 'BufferSizeInMBs', + parameterValue: '1', + }, { + parameterName: 'NumberOfRetries', + parameterValue: '1', + }], + type: 'Lambda', + }], + }, + roleArn: deliveryStreamRole.roleArn, + s3BackupConfiguration: { + bucketArn: { + 'Fn::GetAtt': [ + 'BackupBucket26B8E51C', + 'Arn', + ], + }, + bufferingHints: { + intervalInSeconds: 60, + sizeInMBs: 1, + }, + compressionFormat: 'GZIP', + encryptionConfiguration: { + noEncryptionConfig: 'NoEncryption', + }, + errorOutputPrefix: 'backupErrorPrefix', + prefix: 'backupPrefix', + roleArn: deliveryStreamRole.roleArn, + cloudWatchLoggingOptions: { + enabled: true, + logGroupName: { + Ref: 'BackupLogGroupB15A0768', + }, + logStreamName: { + Ref: 'BackupLogGroupS3BackupA7B3FB1E', + }, + }, + }, + s3BackupMode: 'Enabled', + }, + }); + }); + + it('grants read/write access to the bucket', () => { + const destination = new firehosedestinations.S3(bucket); + + destination.bind(stack, { deliveryStream }); + + expect(stack).toHaveResourceLike('AWS::IAM::Policy', { + Roles: ['DeliveryStreamRole'], + PolicyDocument: { + Statement: [ + { + Action: [ + 's3:GetObject*', + 's3:GetBucket*', + 's3:List*', + 's3:DeleteObject*', + 's3:PutObject*', + 's3:Abort*', + ], + Effect: 'Allow', + Resource: [ + { + 'Fn::GetAtt': [ + 'destinationDB878FB5', + 'Arn', + ], + }, + { + 'Fn::Join': [ + '', + [ + { + 'Fn::GetAtt': [ + 'destinationDB878FB5', + 'Arn', + ], + }, + '/*', + ], + ], + }, + ], + }, + ], + }, + }); + }); + + it('grants read/write access to the backup bucket', () => { + const destination = new firehosedestinations.S3(bucket, { + backupConfiguration: { + backupMode: firehose.BackupMode.ALL, + }, + }); + + destination.bind(stack, { deliveryStream }); + + expect(stack).toHaveResourceLike('AWS::IAM::Policy', { + Roles: ['DeliveryStreamRole'], + PolicyDocument: { + Statement: arrayWith( + { + Action: [ + 's3:GetObject*', + 's3:GetBucket*', + 's3:List*', + 's3:DeleteObject*', + 's3:PutObject*', + 's3:Abort*', + ], + Effect: 'Allow', + Resource: [ + { + 'Fn::GetAtt': [ + 'BackupBucket26B8E51C', + 'Arn', + ], + }, + { + 'Fn::Join': [ + '', + [ + { + 'Fn::GetAtt': [ + 'BackupBucket26B8E51C', + 'Arn', + ], + }, + '/*', + ], + ], + }, + ], + }, + ), + }, + }); + }); + + it('test BackupMode.ALL is converted to Enabled', () => { + const destination = new firehosedestinations.S3(bucket, { + backupConfiguration: { + backupMode: firehose.BackupMode.ALL, + }, + }); + + const destinationConfig = destination.bind(stack, { deliveryStream }); + expect(stack.resolve(destinationConfig)).toHaveProperty('properties.extendedS3DestinationConfiguration.s3BackupMode', 'Enabled'); + }); + + it('test BackupMode.DISABLED', () => { + const destination = new firehosedestinations.S3(bucket, { + backupConfiguration: { + backupMode: firehose.BackupMode.DISABLED, + }, + }); + + const destinationConfig = destination.bind(stack, { deliveryStream }); + expect(stack.resolve(destinationConfig)).toHaveProperty('properties.extendedS3DestinationConfiguration.s3BackupMode', 'Disabled'); + }); + + it('test BackupMode.FAILED throws error', () => { + const 
destination = new firehosedestinations.S3(bucket, {
+      backupConfiguration: {
+        backupMode: firehose.BackupMode.FAILED,
+      },
+    });
+
+    expect(() => destination.bind(stack, { deliveryStream })).toThrowError('S3 destinations do not support BackupMode.FAILED');
+  });
+
+  it('s3BackupMode is enabled when a backup bucket is provided', () => {
+    const backup = new s3.Bucket(stack, 'Backup');
+    const destination = new firehosedestinations.S3(bucket, {
+      backupConfiguration: {
+        backupBucket: backup,
+      },
+    });
+
+    const destinationConfig = destination.bind(stack, { deliveryStream });
+    expect(stack.resolve(destinationConfig)).toHaveProperty('properties.extendedS3DestinationConfiguration.s3BackupMode', 'Enabled');
+  });
+});
diff --git a/packages/@aws-cdk/aws-kinesisfirehose/README.md b/packages/@aws-cdk/aws-kinesisfirehose/README.md
index 9c4d9f96c6f36..c3a1fcf4b42c0 100644
--- a/packages/@aws-cdk/aws-kinesisfirehose/README.md
+++ b/packages/@aws-cdk/aws-kinesisfirehose/README.md
@@ -9,8 +9,461 @@
 >
 > [CFN Resources]: https://docs.aws.amazon.com/cdk/latest/guide/constructs.html#constructs_lib
 
+![cdk-constructs: Experimental](https://img.shields.io/badge/cdk--constructs-experimental-important.svg?style=for-the-badge)
+
+> The APIs of higher level constructs in this module are experimental and under active development.
+> They are subject to non-backward compatible changes or removal in any future version. These are
+> not subject to the [Semantic Versioning](https://semver.org/) model and breaking changes will be
+> announced in the release notes. This means that while you may use them, you may need to update
+> your source code when upgrading to a newer version of this package.
+
 ---
 
-This module is part of the [AWS Cloud Development Kit](https://github.com/aws/aws-cdk) project.
+[Amazon Kinesis Data Firehose](https://docs.aws.amazon.com/firehose/latest/dev/what-is-this-service.html)
+is a service for fully-managed delivery of real-time streaming data to storage services
+such as Amazon S3, Amazon Redshift, Amazon Elasticsearch, and Splunk; to any custom HTTP
+endpoint; or to third-party services such as Datadog, Dynatrace, LogicMonitor, MongoDB,
+New Relic, and Sumo Logic.
+
+Kinesis Data Firehose delivery streams are distinguished from Kinesis data streams in
+their models of consumption. Whereas consumers read from a data stream by actively pulling
+data from the stream, a delivery stream pushes data to its destination on a regular
+cadence. This means that data streams are intended to have consumers that do on-demand
+processing, like AWS Lambda or Amazon EC2. On the other hand, delivery streams are
+intended to have destinations that are sources for offline processing and analytics, such
+as Amazon S3 and Amazon Redshift.
+
+This module is part of the [AWS Cloud Development Kit](https://github.com/aws/aws-cdk)
+project. It allows you to define Kinesis Data Firehose delivery streams.
+
+## Defining a Delivery Stream
+
+To define a Delivery Stream, you must specify a destination. An S3 bucket can be
+used as a destination. More supported destinations are covered [below](#destinations).
+
+```ts
+import * as destinations from '@aws-cdk/aws-kinesisfirehose-destinations';
+import * as s3 from '@aws-cdk/aws-s3';
+
+const bucket = new s3.Bucket(this, 'Bucket');
+new DeliveryStream(this, 'Delivery Stream', {
+  destinations: [new destinations.S3(bucket)],
+});
+```
+
+The above example defines the following resources:
+
+- An S3 bucket
+- A Kinesis Data Firehose delivery stream with Direct PUT as the source and CloudWatch
+  error logging turned on.
+- An IAM role which gives the delivery stream permission to write to the S3 bucket.
+
+## Sources
+
+There are two main methods of sourcing input data: from a Kinesis data stream, or via a
+"direct put". This construct library currently only supports "direct put". See [#15500](https://github.com/aws/aws-cdk/issues/15500) to track the status of adding support for Kinesis Data Streams.
+
+See: [Sending Data to a Delivery Stream](https://docs.aws.amazon.com/firehose/latest/dev/basic-write.html)
+in the *Kinesis Data Firehose Developer Guide*.
+
+### Direct Put
+
+Data must be provided via "direct put", i.e., by using a `PutRecord` or `PutRecordBatch` API call. There are a number of ways of doing
+so, such as:
+
+- Kinesis Agent: a standalone Java application that monitors and delivers files while
+  handling file rotation, checkpointing, and retries. See: [Writing to Kinesis Data Firehose Using Kinesis Agent](https://docs.aws.amazon.com/firehose/latest/dev/writing-with-agents.html)
+  in the *Kinesis Data Firehose Developer Guide*.
+- AWS SDK: a general-purpose solution that allows you to deliver data to a delivery stream
+  from anywhere using Java, .NET, Node.js, Python, or Ruby. See: [Writing to Kinesis Data Firehose Using the AWS SDK](https://docs.aws.amazon.com/firehose/latest/dev/writing-with-sdk.html)
+  in the *Kinesis Data Firehose Developer Guide*.
+- CloudWatch Logs: subscribe to a log group and receive filtered log events directly into
+  a delivery stream. See: [logs-destinations](https://docs.aws.amazon.com/cdk/api/latest/docs/aws-logs-destinations-readme.html).
+- EventBridge: add an event rule target to send events to a delivery stream based on the
+  rule filtering. See: [events-targets](https://docs.aws.amazon.com/cdk/api/latest/docs/aws-events-targets-readme.html).
+- SNS: add a subscription to send all notifications from the topic to a delivery
+  stream. See: [sns-subscriptions](https://docs.aws.amazon.com/cdk/api/latest/docs/aws-sns-subscriptions-readme.html).
+- IoT: add an action to an IoT rule to send various IoT information to a delivery stream.
+
+## Destinations
+
+The following destinations are supported. See [kinesisfirehose-destinations](https://docs.aws.amazon.com/cdk/api/latest/docs/aws-kinesisfirehose-destinations-readme.html)
+for the implementations of these destinations.
+
+### S3
+
+Defining a delivery stream with an S3 bucket destination:
+
+```ts
+import * as s3 from '@aws-cdk/aws-s3';
+import * as destinations from '@aws-cdk/aws-kinesisfirehose-destinations';
+
+const bucket = new s3.Bucket(this, 'Bucket');
+
+const s3Destination = new destinations.S3(bucket);
+
+new DeliveryStream(this, 'Delivery Stream', {
+  destinations: [s3Destination],
+});
+```
+
+The S3 destination also supports custom dynamic prefixes. `prefix` will be used for files
+successfully delivered to S3. `errorOutputPrefix` will be added to failed records before
+writing them to S3.
+
+```ts fixture=with-bucket
+const s3Destination = new destinations.S3(bucket, {
+  prefix: 'myFirehose/DeliveredYear=!{timestamp:yyyy}/anyMonth/rand=!{firehose:random-string}',
+  errorOutputPrefix: 'myFirehoseFailures/!{firehose:error-output-type}/!{timestamp:yyyy}/anyMonth/!{timestamp:dd}',
+});
+```
+
+See: [Custom S3 Prefixes](https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html) in the *Kinesis Data Firehose Developer Guide*.
+
+## Server-side Encryption
+
+Enabling server-side encryption (SSE) causes Kinesis Data Firehose to encrypt all data
+sent to the delivery stream when it is stored at rest. This means that data is encrypted
+before being written to the service's internal storage layer and decrypted after it is
+received from the internal storage layer. The service manages keys and cryptographic
+operations so that sources and destinations do not need to, as the data is encrypted and
+decrypted at the boundaries of the service (i.e., before the data is delivered to a
+destination). By default, delivery streams do not have SSE enabled.
+
+The AWS Key Management Service (KMS) customer master key (CMK) used for SSE can either be
+AWS-owned or customer-managed. AWS-owned CMKs are keys that an AWS service (in this case
+Kinesis Data Firehose) owns and manages for use in multiple AWS accounts. As a customer,
+you cannot view, use, track, or manage these keys, and you are not charged for their
+use. On the other hand, customer-managed CMKs are keys that are created and owned within
+your account and managed entirely by you. As a customer, you are responsible for managing
+access, rotation, aliases, and deletion for these keys, and you are charged for their
+use. See: [Customer master keys](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#master_keys)
+in the *KMS Developer Guide*.
+
+```ts fixture=with-destination
+import * as kms from '@aws-cdk/aws-kms';
+
+// SSE with an AWS-owned CMK
+new DeliveryStream(this, 'Delivery Stream AWS Owned', {
+  encryption: StreamEncryption.AWS_OWNED,
+  destinations: [destination],
+});
+
+// SSE with a customer-managed CMK that is created automatically by the CDK
+new DeliveryStream(this, 'Delivery Stream Implicit Customer Managed', {
+  encryption: StreamEncryption.CUSTOMER_MANAGED,
+  destinations: [destination],
+});
+
+// SSE with a customer-managed CMK that is explicitly specified
+const key = new kms.Key(this, 'Key');
+new DeliveryStream(this, 'Delivery Stream Explicit Customer Managed', {
+  encryptionKey: key,
+  destinations: [destination],
+});
+```
+
+See: [Data Protection](https://docs.aws.amazon.com/firehose/latest/dev/encryption.html) in
+the *Kinesis Data Firehose Developer Guide*.
+
+## Monitoring
+
+Kinesis Data Firehose is integrated with CloudWatch, so you can monitor the performance of
+your delivery streams via logs and metrics.
+
+### Logs
+
+Kinesis Data Firehose will send logs to CloudWatch when data transformation or data
+delivery fails. The CDK will enable logging by default and create a CloudWatch LogGroup
+and LogStream for your Delivery Stream.
+
+You can provide a specific log group in which the CDK will create the log streams where
+log events will be sent. Logging is configured on the destination:
+
+```ts fixture=with-bucket
+import * as logs from '@aws-cdk/aws-logs';
+
+const logGroup = new logs.LogGroup(this, 'Log Group');
+const s3Destination = new destinations.S3(bucket, {
+  logGroup: logGroup,
+});
+new DeliveryStream(this, 'Delivery Stream', {
+  destinations: [s3Destination],
+});
+```
+
+Logging can also be disabled:
+
+```ts fixture=with-bucket
+const s3Destination = new destinations.S3(bucket, {
+  logging: false,
+});
+new DeliveryStream(this, 'Delivery Stream', {
+  destinations: [s3Destination],
+});
+```
+
+See: [Monitoring using CloudWatch Logs](https://docs.aws.amazon.com/firehose/latest/dev/monitoring-with-cloudwatch-logs.html)
+in the *Kinesis Data Firehose Developer Guide*.
+
+### Metrics
+
+Kinesis Data Firehose sends metrics to CloudWatch so that you can collect and analyze the
+performance of the delivery stream, including data delivery, data ingestion, data
+transformation, format conversion, API usage, encryption, and resource usage. You can then
+use CloudWatch alarms to alert you, for example, when data freshness (the age of the
+oldest record in the delivery stream) exceeds the buffering limit (indicating that data is
+not being delivered to your destination), or when the rate of incoming records exceeds the
+limit of records per second (indicating data is flowing into your delivery stream faster
+than it is configured to process).
+
+CDK provides methods for accessing delivery stream metrics with default configuration,
+such as `metricIncomingBytes` and `metricIncomingRecords` (see [`IDeliveryStream`](https://docs.aws.amazon.com/cdk/api/latest/docs/@aws-cdk_aws-kinesisfirehose.IDeliveryStream.html)
+for a full list). CDK also provides a generic `metric` method that can be used to produce
+metric configurations for any metric provided by Kinesis Data Firehose; the configurations
+are pre-populated with the correct dimensions for the delivery stream.
+
+```ts fixture=with-delivery-stream
+import * as cloudwatch from '@aws-cdk/aws-cloudwatch';
+
+// Alarm that triggers when the per-second average of incoming bytes exceeds 90% of the current service limit
+const incomingBytesPercentOfLimit = new cloudwatch.MathExpression({
+  expression: 'incomingBytes / 300 / bytePerSecLimit',
+  usingMetrics: {
+    incomingBytes: deliveryStream.metricIncomingBytes({ statistic: cloudwatch.Statistic.SUM }),
+    bytePerSecLimit: deliveryStream.metric('BytesPerSecondLimit'),
+  },
+});
+new cloudwatch.Alarm(this, 'Alarm', {
+  metric: incomingBytesPercentOfLimit,
+  threshold: 0.9,
+  evaluationPeriods: 3,
+});
+```
+
+See: [Monitoring Using CloudWatch Metrics](https://docs.aws.amazon.com/firehose/latest/dev/monitoring-with-cloudwatch-metrics.html)
+in the *Kinesis Data Firehose Developer Guide*.
+
+## Compression
+
+Your data can automatically be compressed when it is delivered to S3 as either a final or
+an intermediary/backup destination. Supported compression formats are: gzip, Snappy,
+Hadoop-compatible Snappy, and ZIP, except for Redshift destinations, where Snappy
+(regardless of Hadoop-compatibility) and ZIP are not supported. By default, data is
+delivered to S3 without compression.
+
+```ts fixture=with-bucket
+// Compress data delivered to S3 using Snappy
+const s3Destination = new destinations.S3(bucket, {
+  compression: Compression.SNAPPY,
+});
+new DeliveryStream(this, 'Delivery Stream', {
+  destinations: [s3Destination],
+});
+```
+
+## Buffering
+
+Incoming data is buffered before it is delivered to the specified destination.
+The delivery stream will wait until the amount of incoming data has exceeded some threshold
+(the "buffer size") or until the time since the last data delivery occurred exceeds some
+threshold (the "buffer interval"), whichever happens first. You can configure these
+thresholds based on the capabilities of the destination and your use-case. For S3
+destinations, the default buffer size is 5 MiB and the default buffer interval is 5 minutes.
+
+```ts fixture=with-bucket
+import * as cdk from '@aws-cdk/core';
+
+// Increase the buffer interval and size to 5 minutes and 8 MiB, respectively
+const s3Destination = new destinations.S3(bucket, {
+  bufferingInterval: cdk.Duration.minutes(5),
+  bufferingSize: cdk.Size.mebibytes(8),
+});
+new DeliveryStream(this, 'Delivery Stream', {
+  destinations: [s3Destination],
+});
+```
+
+See: [Data Delivery Frequency](https://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html#frequency)
+in the *Kinesis Data Firehose Developer Guide*.
+
+## Backup
+
+A delivery stream can be configured to back up data to S3 that it attempted to deliver to
+the configured destination. Backed up data can be all the data that the delivery stream
+attempted to deliver or just data that it failed to deliver (Redshift and S3 destinations
+can only back up all data). CDK can create a new S3 bucket where it will back up data, or
+you can provide a bucket where data will be backed up. You can also provide a prefix under
+which your backed-up data will be placed within the bucket. By default, source data is not
+backed up to S3.
+
+```ts fixture=with-bucket
+import * as destinations from '@aws-cdk/aws-kinesisfirehose-destinations';
+import * as s3 from '@aws-cdk/aws-s3';
+
+// Enable backup of all source records (to an S3 bucket created by CDK)
+new DeliveryStream(this, 'Delivery Stream Backup All', {
+  destinations: [
+    new destinations.S3(bucket, {
+      backupConfiguration: {
+        backupMode: BackupMode.ALL,
+      },
+    }),
+  ],
+});
+
+// Explicitly provide an S3 bucket to which all source records will be backed up
+const backupBucket = new s3.Bucket(this, 'Bucket');
+new DeliveryStream(this, 'Delivery Stream Backup All Explicit Bucket', {
+  destinations: [
+    new destinations.S3(bucket, {
+      backupConfiguration: {
+        backupBucket: backupBucket,
+      },
+    }),
+  ],
+});
+
+// Explicitly provide an S3 prefix under which all source records will be backed up
+new DeliveryStream(this, 'Delivery Stream Backup All Explicit Prefix', {
+  destinations: [
+    new destinations.S3(bucket, {
+      backupConfiguration: {
+        backupMode: BackupMode.ALL,
+        prefix: 'mybackup',
+      },
+    }),
+  ],
+});
+```
+
+If any Data Processing or Transformation is configured on your Delivery Stream, the source
+records will be backed up in their original format.
+
+## Data Processing/Transformation
+
+Data can be transformed before being delivered to destinations. There are two types of
+data processing for delivery streams: record transformation with AWS Lambda, and record
+format conversion using a schema stored in an AWS Glue table. If both types of data
+processing are configured, then the Lambda transformation is performed first. By default,
+no data processing occurs. This construct library currently only supports data
+transformation with AWS Lambda. See [#15501](https://github.com/aws/aws-cdk/issues/15501)
+to track the status of adding support for record format conversion.
+
+### Data transformation with AWS Lambda
+
+To transform the data, Kinesis Data Firehose will call a Lambda function that you provide
+and deliver the data returned in lieu of the source record. The function must return a
+result that contains records in a specific format, including the following fields:
+
+- `recordId` -- the ID of the input record that corresponds to this result.
+- `result` -- the status of the transformation of the record: "Ok" (success), "Dropped"
+  (not processed intentionally), or "ProcessingFailed" (not processed due to an error).
+- `data` -- the transformed data, Base64-encoded.
+
+The data is buffered up to 1 minute and up to 3 MiB by default before being sent to the
+function, but can be configured using `bufferInterval` and `bufferSize` in the processor
+configuration (see: [Buffering](#buffering)). If the function invocation fails due to a
+network timeout or because of hitting an invocation limit, the invocation is retried 3
+times by default, but can be configured using `retries` in the processor configuration.
+
+```ts fixture=with-bucket
+import * as path from 'path';
+import * as cdk from '@aws-cdk/core';
+import * as lambda from '@aws-cdk/aws-lambda';
+
+// Provide a Lambda function that will transform records before delivery, with custom
+// buffering and retry configuration
+const lambdaFunction = new lambda.Function(this, 'Processor', {
+  runtime: lambda.Runtime.NODEJS_12_X,
+  handler: 'index.handler',
+  code: lambda.Code.fromAsset(path.join(__dirname, 'process-records')),
+});
+const lambdaProcessor = new LambdaFunctionProcessor(lambdaFunction, {
+  bufferInterval: cdk.Duration.minutes(5),
+  bufferSize: cdk.Size.mebibytes(5),
+  retries: 5,
+});
+const s3Destination = new destinations.S3(bucket, {
+  processors: [lambdaProcessor],
+});
+new DeliveryStream(this, 'Delivery Stream', {
+  destinations: [s3Destination],
+});
+```
+
+See: [Data Transformation](https://docs.aws.amazon.com/firehose/latest/dev/data-transformation.html)
+in the *Kinesis Data Firehose Developer Guide*.
+
+## Specifying an IAM role
+
+The DeliveryStream class automatically creates an IAM role with the minimum necessary
+permissions for Kinesis Data Firehose to access the resources referenced by your delivery
+stream. For example: an Elasticsearch domain, a Redshift cluster, a backup or destination
+S3 bucket, a Lambda data transformer, an AWS Glue table schema, etc. If you wish, you may
+specify your own IAM role. It must have the correct permissions, or delivery stream
+creation or data delivery may fail.
+
+```ts fixture=with-bucket
+import * as iam from '@aws-cdk/aws-iam';
+
+const role = new iam.Role(this, 'Role', {
+  assumedBy: new iam.ServicePrincipal('firehose.amazonaws.com'),
+});
+bucket.grantWrite(role);
+new DeliveryStream(this, 'Delivery Stream', {
+  destinations: [new destinations.S3(bucket)],
+  role: role,
+});
+```
+
+See [Controlling Access](https://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html)
+in the *Kinesis Data Firehose Developer Guide*.
+
+## Granting application access to a delivery stream
+
+IAM roles, users, or groups which need to be able to work with delivery streams should be
+granted IAM permissions.
+
+Any object that implements the `IGrantable` interface (i.e., has an associated principal)
+can be granted permissions to a delivery stream by calling:
+
+- `grantPutRecords(principal)` - grants the principal the ability to put records onto the
+  delivery stream
+- `grant(principal, ...actions)` - grants the principal permission to a custom set of
+  actions
+
+```ts fixture=with-delivery-stream
+import * as iam from '@aws-cdk/aws-iam';
+const lambdaRole = new iam.Role(this, 'Role', {
+  assumedBy: new iam.ServicePrincipal('lambda.amazonaws.com'),
+});
+
+// Give the role permissions to write data to the delivery stream
+deliveryStream.grantPutRecords(lambdaRole);
+```
+
+The following write permissions are granted to the principal by the `grantPutRecords()` method:
+
+- `firehose:PutRecord`
+- `firehose:PutRecordBatch`
+
+## Granting a delivery stream access to a resource
+
+Conversely, Kinesis Data Firehose requires permissions in order for delivery
+streams to interact with resources that you own. For example, if an S3 bucket is specified
+as a destination of a delivery stream, the delivery stream must be granted permissions to
+put and get objects from the bucket. When using the built-in AWS service destinations
+found in the `@aws-cdk/aws-kinesisfirehose-destinations` module, the CDK grants the
+permissions automatically. However, custom or third-party destinations may require custom
+permissions. In this case, use the delivery stream as an `IGrantable`, as follows:
+
+```ts fixture=with-delivery-stream
+/// !hide
+import * as iam from '@aws-cdk/aws-iam';
+const myDestinationResource = {
+  grantWrite(grantee: iam.IGrantable) {},
+};
+/// !show
+myDestinationResource.grantWrite(deliveryStream);
+```
+
+## Multiple destinations
+
+Though the delivery stream allows specifying an array of destinations, only one
+destination per delivery stream is currently allowed. This limitation is enforced at
+synthesis time: supplying zero or multiple destinations will cause the delivery stream
+construct to throw an error.
diff --git a/packages/@aws-cdk/aws-kinesisfirehose/lib/delivery-stream.ts b/packages/@aws-cdk/aws-kinesisfirehose/lib/delivery-stream.ts
new file mode 100644
index 0000000000000..c3252f54e408a
--- /dev/null
+++ b/packages/@aws-cdk/aws-kinesisfirehose/lib/delivery-stream.ts
@@ -0,0 +1,369 @@
+import * as cloudwatch from '@aws-cdk/aws-cloudwatch';
+import * as ec2 from '@aws-cdk/aws-ec2';
+import * as iam from '@aws-cdk/aws-iam';
+import * as kms from '@aws-cdk/aws-kms';
+import * as cdk from '@aws-cdk/core';
+import { RegionInfo } from '@aws-cdk/region-info';
+import { Construct } from 'constructs';
+import { IDestination } from './destination';
+import { FirehoseMetrics } from './kinesisfirehose-canned-metrics.generated';
+import { CfnDeliveryStream } from './kinesisfirehose.generated';
+
+const PUT_RECORD_ACTIONS = [
+  'firehose:PutRecord',
+  'firehose:PutRecordBatch',
+];
+
+/**
+ * Represents a Kinesis Data Firehose delivery stream.
+ */
+export interface IDeliveryStream extends cdk.IResource, iam.IGrantable, ec2.IConnectable, cdk.ITaggable {
+  /**
+   * The ARN of the delivery stream.
+   *
+   * @attribute
+   */
+  readonly deliveryStreamArn: string;
+
+  /**
+   * The name of the delivery stream.
+   *
+   * @attribute
+   */
+  readonly deliveryStreamName: string;
+
+  /**
+   * Grant the `grantee` identity permissions to perform `actions`.
+   */
+  grant(grantee: iam.IGrantable, ...actions: string[]): iam.Grant;
+
+  /**
+   * Grant the `grantee` identity permissions to perform `firehose:PutRecord` and `firehose:PutRecordBatch` actions on this delivery stream.
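+   *
+   * Equivalent to calling `grant` with the `firehose:PutRecord` and `firehose:PutRecordBatch` actions.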
+ */ + grantPutRecords(grantee: iam.IGrantable): iam.Grant; + + /** + * Return the given named metric for this delivery stream. + */ + metric(metricName: string, props?: cloudwatch.MetricOptions): cloudwatch.Metric; + + /** + * Metric for the number of bytes ingested successfully into the delivery stream over the specified time period after throttling. + * + * By default, this metric will be calculated as an average over a period of 5 minutes. + */ + metricIncomingBytes(props?: cloudwatch.MetricOptions): cloudwatch.Metric; + + /** + * Metric for the number of records ingested successfully into the delivery stream over the specified time period after throttling. + * + * By default, this metric will be calculated as an average over a period of 5 minutes. + */ + metricIncomingRecords(props?: cloudwatch.MetricOptions): cloudwatch.Metric; + + /** + * Metric for the number of bytes delivered to Amazon S3 for backup over the specified time period. + * + * By default, this metric will be calculated as an average over a period of 5 minutes. + */ + metricBackupToS3Bytes(props?: cloudwatch.MetricOptions): cloudwatch.Metric; + + /** + * Metric for the age (from getting into Kinesis Data Firehose to now) of the oldest record in Kinesis Data Firehose. + * + * Any record older than this age has been delivered to the Amazon S3 bucket for backup. + * + * By default, this metric will be calculated as an average over a period of 5 minutes. + */ + metricBackupToS3DataFreshness(props?: cloudwatch.MetricOptions): cloudwatch.Metric; + + /** + * Metric for the number of records delivered to Amazon S3 for backup over the specified time period. + * + * By default, this metric will be calculated as an average over a period of 5 minutes. + */ + metricBackupToS3Records(props?: cloudwatch.MetricOptions): cloudwatch.Metric; +} + +/** + * Base class for new and imported Kinesis Data Firehose delivery streams. + */ +export abstract class DeliveryStreamBase extends cdk.Resource implements IDeliveryStream { + + public abstract readonly deliveryStreamName: string; + + public abstract readonly deliveryStreamArn: string; + + public abstract readonly grantPrincipal: iam.IPrincipal; + + /** + * Network connections between Kinesis Data Firehose and other resources, i.e. Redshift cluster. 
+ */ + public readonly connections: ec2.Connections; + + public readonly tags = new cdk.TagManager(cdk.TagType.STANDARD, 'AWS::KinesisFirehose::DeliveryStream'); + + constructor(scope: Construct, id: string) { + super(scope, id); + + this.connections = setConnections(this); + } + + public grant(grantee: iam.IGrantable, ...actions: string[]): iam.Grant { + return iam.Grant.addToPrincipal({ + resourceArns: [this.deliveryStreamArn], + grantee: grantee, + actions: actions, + }); + } + + public grantPutRecords(grantee: iam.IGrantable): iam.Grant { + return this.grant(grantee, ...PUT_RECORD_ACTIONS); + } + + public metric(metricName: string, props?: cloudwatch.MetricOptions): cloudwatch.Metric { + return new cloudwatch.Metric({ + namespace: 'AWS/Firehose', + metricName: metricName, + dimensions: { + DeliveryStreamName: this.deliveryStreamName, + }, + ...props, + }).attachTo(this); + } + + public metricIncomingBytes(props?: cloudwatch.MetricOptions): cloudwatch.Metric { + return this.cannedMetric(FirehoseMetrics.incomingBytesAverage, props); + } + + public metricIncomingRecords(props?: cloudwatch.MetricOptions): cloudwatch.Metric { + return this.cannedMetric(FirehoseMetrics.incomingRecordsAverage, props); + } + + public metricBackupToS3Bytes(props?: cloudwatch.MetricOptions): cloudwatch.Metric { + return this.cannedMetric(FirehoseMetrics.backupToS3BytesAverage, props); + } + + public metricBackupToS3DataFreshness(props?: cloudwatch.MetricOptions): cloudwatch.Metric { + return this.cannedMetric(FirehoseMetrics.backupToS3DataFreshnessAverage, props); + } + + public metricBackupToS3Records(props?: cloudwatch.MetricOptions): cloudwatch.Metric { + return this.cannedMetric(FirehoseMetrics.backupToS3RecordsAverage, props); + } + + private cannedMetric(fn: (dims: { DeliveryStreamName: string }) => cloudwatch.MetricProps, props?: cloudwatch.MetricOptions): cloudwatch.Metric { + return new cloudwatch.Metric({ + ...fn({ DeliveryStreamName: this.deliveryStreamName }), + ...props, + }).attachTo(this); + } +} + +/** + * Options for server-side encryption of a delivery stream. + */ +export enum StreamEncryption { + /** + * Data in the stream is stored unencrypted. + */ + UNENCRYPTED, + + /** + * Data in the stream is stored encrypted by a KMS key managed by the customer. + */ + CUSTOMER_MANAGED, + + /** + * Data in the stream is stored encrypted by a KMS key owned by AWS and managed for use in multiple AWS accounts. + */ + AWS_OWNED, +} + +/** + * Properties for a new delivery stream. + */ +export interface DeliveryStreamProps { + /** + * The destinations that this delivery stream will deliver data to. + * + * Only a singleton array is supported at this time. + */ + readonly destinations: IDestination[]; + + /** + * A name for the delivery stream. + * + * @default - a name is generated by CloudFormation. + */ + readonly deliveryStreamName?: string; + + /** + * The IAM role assumed by Kinesis Data Firehose to read from sources, invoke processors, and write to destinations. + * + * @default - a role will be created with default permissions. + */ + readonly role?: iam.IRole; + + /** + * Indicates the type of customer master key (CMK) to use for server-side encryption, if any. + * + * If `encryptionKey` is provided, this will be implicitly set to `CUSTOMER_MANAGED`. + * + * @default StreamEncryption.UNENCRYPTED. + */ + readonly encryption?: StreamEncryption; + + /** + * Customer managed key to server-side encrypt data in the stream. 
+ * + * @default - if `encryption` is set to `CUSTOMER_MANAGED`, a KMS key will be created for you. + */ + readonly encryptionKey?: kms.IKey; +} + +/** + * A full specification of a delivery stream that can be used to import it fluently into the CDK application. + */ +export interface DeliveryStreamAttributes { + /** + * The ARN of the delivery stream. + * + * At least one of deliveryStreamArn and deliveryStreamName must be provided. + * + * @default - derived from `deliveryStreamName`. + */ + readonly deliveryStreamArn?: string; + + /** + * The name of the delivery stream + * + * At least one of deliveryStreamName and deliveryStreamArn must be provided. + * + * @default - derived from `deliveryStreamArn`. + */ + readonly deliveryStreamName?: string; + + + /** + * The IAM role associated with this delivery stream. + * + * Assumed by Kinesis Data Firehose to read from sources, invoke processors, and write to destinations. + * + * @default - the imported stream cannot be granted access to other resources as an `iam.IGrantable`. + */ + readonly role?: iam.IRole; +} + +/** + * Create a Kinesis Data Firehose delivery stream + * + * @resource AWS::KinesisFirehose::DeliveryStream + */ +export class DeliveryStream extends DeliveryStreamBase { + /** + * Import an existing delivery stream from its name. + */ + static fromDeliveryStreamName(scope: Construct, id: string, deliveryStreamName: string): IDeliveryStream { + return this.fromDeliveryStreamAttributes(scope, id, { deliveryStreamName }); + } + + /** + * Import an existing delivery stream from its ARN. + */ + static fromDeliveryStreamArn(scope: Construct, id: string, deliveryStreamArn: string): IDeliveryStream { + return this.fromDeliveryStreamAttributes(scope, id, { deliveryStreamArn }); + } + + /** + * Import an existing delivery stream from its attributes. + */ + static fromDeliveryStreamAttributes(scope: Construct, id: string, attrs: DeliveryStreamAttributes): IDeliveryStream { + if (!attrs.deliveryStreamName && !attrs.deliveryStreamArn) { + throw new Error('Either deliveryStreamName or deliveryStreamArn must be provided in DeliveryStreamAttributes'); + } + const deliveryStreamName = attrs.deliveryStreamName ?? cdk.Stack.of(scope).parseArn(attrs.deliveryStreamArn!).resourceName; + if (!deliveryStreamName) { + throw new Error(`Could not import delivery stream from malformatted ARN ${attrs.deliveryStreamArn}: could not determine resource name`); + } + const deliveryStreamArn = attrs.deliveryStreamArn ?? cdk.Stack.of(scope).formatArn({ + service: 'firehose', + resource: 'deliverystream', + resourceName: attrs.deliveryStreamName, + }); + class Import extends DeliveryStreamBase { + public readonly deliveryStreamName = deliveryStreamName!; + public readonly deliveryStreamArn = deliveryStreamArn; + public readonly grantPrincipal = attrs.role ?? new iam.UnknownPrincipal({ resource: this }); + } + return new Import(scope, id); + } + + readonly deliveryStreamName: string; + + readonly deliveryStreamArn: string; + + readonly grantPrincipal: iam.IPrincipal; + + constructor(scope: Construct, id: string, props: DeliveryStreamProps) { + super(scope, id); + + const role = props.role ?? 
new iam.Role(this, 'Service Role', {
+      assumedBy: new iam.ServicePrincipal('firehose.amazonaws.com'),
+    });
+    this.grantPrincipal = role;
+
+    if ((props.encryption === StreamEncryption.AWS_OWNED || props.encryption === StreamEncryption.UNENCRYPTED) && props.encryptionKey) {
+      throw new Error(`Specified stream encryption as ${StreamEncryption[props.encryption]} but provided a customer-managed key`);
+    }
+    const encryptionKey = props.encryptionKey ?? (props.encryption === StreamEncryption.CUSTOMER_MANAGED ? new kms.Key(this, 'Key') : undefined);
+    const encryptionConfig = (encryptionKey || (props.encryption === StreamEncryption.AWS_OWNED)) ? {
+      keyArn: encryptionKey?.keyArn,
+      keyType: encryptionKey ? 'CUSTOMER_MANAGED_CMK' : 'AWS_OWNED_CMK',
+    } : undefined;
+    encryptionKey?.grantEncryptDecrypt(role);
+
+    if (props.destinations.length !== 1) {
+      throw new Error(`Only one destination is allowed per delivery stream, given ${props.destinations.length}`);
+    }
+    const destinationConfig = props.destinations[0].bind(this, { deliveryStream: this });
+
+    const resource = new CfnDeliveryStream(this, 'Resource', {
+      deliveryStreamEncryptionConfigurationInput: encryptionConfig,
+      deliveryStreamName: props.deliveryStreamName,
+      deliveryStreamType: 'DirectPut',
+      ...destinationConfig.properties,
+    });
+    resource.node.addDependency(this.grantPrincipal);
+
+    this.deliveryStreamArn = this.getResourceArnAttribute(resource.attrArn, {
+      service: 'firehose',
+      resource: 'deliverystream',
+      resourceName: this.physicalName,
+    });
+    this.deliveryStreamName = this.getResourceNameAttribute(resource.ref);
+  }
+}
+
+function setConnections(scope: Construct) {
+  const region = cdk.Stack.of(scope).region;
+  let cidrBlock = RegionInfo.get(region).firehoseCidrBlock;
+  // If the region cannot be resolved at synthesis time, defer the lookup to deploy time
+  // with a CloudFormation mapping of all known Firehose CIDR blocks.
+  if (!cidrBlock) {
+    const mapping: {[region: string]: { FirehoseCidrBlock: string }} = {};
+    RegionInfo.regions.forEach((regionInfo) => {
+      if (regionInfo.firehoseCidrBlock) {
+        mapping[regionInfo.name] = {
+          FirehoseCidrBlock: regionInfo.firehoseCidrBlock,
+        };
+      }
+    });
+    const cfnMapping = new cdk.CfnMapping(scope, 'Firehose CIDR Mapping', {
+      mapping,
+    });
+    cidrBlock = cdk.Fn.findInMap(cfnMapping.logicalId, region, 'FirehoseCidrBlock');
+  }
+
+  return new ec2.Connections({
+    peer: ec2.Peer.ipv4(cidrBlock),
+  });
+}
diff --git a/packages/@aws-cdk/aws-kinesisfirehose/lib/destination.ts b/packages/@aws-cdk/aws-kinesisfirehose/lib/destination.ts
new file mode 100644
index 0000000000000..8e3cc1b5e66c8
--- /dev/null
+++ b/packages/@aws-cdk/aws-kinesisfirehose/lib/destination.ts
@@ -0,0 +1,365 @@
+import * as iam from '@aws-cdk/aws-iam';
+import * as kms from '@aws-cdk/aws-kms';
+import * as logs from '@aws-cdk/aws-logs';
+import * as s3 from '@aws-cdk/aws-s3';
+import { Duration, Size } from '@aws-cdk/core';
+import { Construct } from 'constructs';
+import { IDeliveryStream } from './delivery-stream';
+import { CfnDeliveryStream } from './kinesisfirehose.generated';
+import { IDataProcessor } from './processor';
+
+/**
+ * A Kinesis Data Firehose delivery stream destination configuration.
+ */
+export interface DestinationConfig {
+  /**
+   * Schema-less properties that will be injected directly into `CfnDeliveryStream`.
+   */
+  readonly properties: object;
+}
+
+/**
+ * Options when binding a destination to a delivery stream.
+ */
+export interface DestinationBindOptions {
+  /**
+   * The delivery stream.
+   */
+  readonly deliveryStream: IDeliveryStream;
+}
+
+/**
+ * A Kinesis Data Firehose delivery stream destination.
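+ *
+ * See the `@aws-cdk/aws-kinesisfirehose-destinations` module for implementations of this interface, such as the S3 destination.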
+ */ +export interface IDestination { + /** + * Binds this destination to the Kinesis Data Firehose delivery stream. + * + * Implementers should use this method to bind resources to the stack and initialize values using the provided stream. + */ + bind(scope: Construct, options: DestinationBindOptions): DestinationConfig; +} + +/** + * Options for S3 record backup of a delivery stream. + */ +export enum BackupMode { + /** + * All records are backed up. + */ + ALL, + + /** + * Only records that failed to deliver or transform are backed up. + */ + FAILED, + + /** + * No records are backed up. + */ + DISABLED, +} + +/** + * Possible compression options Kinesis Data Firehose can use to compress data on delivery. + */ +export class Compression { + /** + * gzip + */ + public static readonly GZIP = new Compression('GZIP'); + + /** + * Hadoop-compatible Snappy + */ + public static readonly HADOOP_SNAPPY = new Compression('HADOOP_SNAPPY'); + + /** + * Snappy + */ + public static readonly SNAPPY = new Compression('Snappy'); + + /** + * Uncompressed + */ + public static readonly UNCOMPRESSED = new Compression('UNCOMPRESSED'); + + /** + * ZIP + */ + public static readonly ZIP = new Compression('ZIP'); + + constructor( + /** + * String value of the Compression. + */ + public readonly value: string, + ) { } +} + +/** + * Logging related properties for a delivery stream destination. + */ +interface DestinationLoggingProps { + /** + * If true, log errors when data transformation or data delivery fails. + * + * If `logGroup` is provided, this will be implicitly set to `true`. + * + * @default true - errors are logged. + */ + readonly logging?: boolean; + + /** + * The CloudWatch log group where log streams will be created to hold error logs. + * + * @default - if `logging` is set to `true`, a log group will be created for you. + */ + readonly logGroup?: logs.ILogGroup; +} + +/** + * Common properties for defining a backup, intermediary, or final S3 destination for a Kinesis Data Firehose delivery stream. + */ +export interface CommonS3Props { + /** + * The length of time that Firehose buffers incoming data before delivering + * it to the S3 bucket. + * + * If bufferingInterval is specified, bufferingSize must also be specified. + * Minimum: Duration.seconds(60) + * Maximum: Duration.seconds(900) + * + * @default Duration.seconds(300) + */ + readonly bufferingInterval?: Duration; + + /** + * The size of the buffer that Kinesis Data Firehose uses for incoming data before + * delivering it to the S3 bucket. + * + * If bufferingSize is specified, bufferingInterval must also be specified. + * Minimum: Size.mebibytes(1) + * Maximum: Size.mebibytes(128) + * + * @default Size.mebibytes(5) + */ + readonly bufferingSize?: Size; + + /** + * The type of compression that Kinesis Data Firehose uses to compress the data + * that it delivers to the Amazon S3 bucket. + * + * The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift + * destinations because they are not supported by the Amazon Redshift COPY operation + * that reads from the S3 bucket. + * + * @default - UNCOMPRESSED + */ + readonly compression?: Compression; + + /** + * The AWS KMS key used to encrypt the data that it delivers to your Amazon S3 bucket. + * + * @default - Data is not encrypted. + */ + readonly encryptionKey?: kms.IKey; + + /** + * A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. + * + * This prefix appears immediately following the bucket name. 
   * @see https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html
+   *
+   * @default "YYYY/MM/DD/HH"
+   */
+  readonly errorOutputPrefix?: string;
+
+  /**
+   * A prefix that Kinesis Data Firehose evaluates and adds to records before writing them to S3.
+   *
+   * This prefix appears immediately following the bucket name.
+   * @see https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html
+   *
+   * @default "YYYY/MM/DD/HH"
+   */
+  readonly prefix?: string;
+}
+
+/**
+ * Properties for defining an S3 backup destination.
+ *
+ * S3 backup is available for all destinations, regardless of whether the final destination is S3 or not.
+ */
+export interface S3BackupDestinationProps extends DestinationLoggingProps, CommonS3Props {
+  /**
+   * The S3 bucket that will store data and failed records.
+   *
+   * @default - If `backupMode` is set to `BackupMode.ALL` or `BackupMode.FAILED`, a bucket will be created for you.
+   */
+  readonly backupBucket?: s3.IBucket;
+
+  /**
+   * Indicates the mode by which incoming records should be backed up to S3, if any.
+   *
+   * If `backupBucket` is provided, this will be implicitly set to `BackupMode.ALL`.
+   *
+   * @default - If `backupBucket` is provided, the default will be `BackupMode.ALL`. Otherwise, the default is
+   * `BackupMode.DISABLED` and source records are not backed up to S3.
+   */
+  readonly backupMode?: BackupMode;
+}
+
+/**
+ * Generic properties for defining a delivery stream destination.
+ */
+export interface DestinationProps extends DestinationLoggingProps {
+  /**
+   * The series of data transformations that should be performed on the data before writing to the destination.
+   *
+   * @default [] - no data transformation will occur.
+   */
+  readonly processors?: IDataProcessor[];
+
+  /**
+   * The configuration for backing up source records to S3.
+   *
+   * @default - source records will not be backed up to S3.
+   */
+  readonly backupConfiguration?: S3BackupDestinationProps;
+}
+
+/**
+ * Abstract base class that destination types can extend to benefit from methods that create generic configuration.
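+ *
+ * Subclasses can use the protected `create*` methods below to produce the logging, processing, backup, buffering, and encryption portions of their configuration.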
+ */ +export abstract class DestinationBase implements IDestination { + private logGroups: { [logGroupId: string]: logs.ILogGroup } = {}; + + constructor(protected readonly props: DestinationProps = {}) { } + + abstract bind(scope: Construct, options: DestinationBindOptions): DestinationConfig; + + protected createLoggingOptions( + scope: Construct, + deliveryStream: IDeliveryStream, + streamId: string, + ): CfnDeliveryStream.CloudWatchLoggingOptionsProperty | undefined { + return this._createLoggingOptions(scope, deliveryStream, streamId, 'LogGroup', this.props.logging, this.props.logGroup); + } + + protected createProcessingConfig(deliveryStream: IDeliveryStream): CfnDeliveryStream.ProcessingConfigurationProperty | undefined { + if (this.props.processors && this.props.processors.length > 1) { + throw new Error('Only one processor is allowed per delivery stream destination'); + } + if (this.props.processors && this.props.processors.length > 0) { + const processors = this.props.processors.map((processor) => { + const processorConfig = processor.bind(deliveryStream); + const parameters = [{ parameterName: 'RoleArn', parameterValue: (deliveryStream.grantPrincipal as iam.Role).roleArn }]; + parameters.push(processorConfig.processorIdentifier); + if (processorConfig.bufferInterval) { + parameters.push({ parameterName: 'BufferIntervalInSeconds', parameterValue: processorConfig.bufferInterval.toSeconds().toString() }); + } + if (processorConfig.bufferSize) { + parameters.push({ parameterName: 'BufferSizeInMBs', parameterValue: processorConfig.bufferSize.toMebibytes().toString() }); + } + if (processorConfig.retries) { + parameters.push({ parameterName: 'NumberOfRetries', parameterValue: processorConfig.retries.toString() }); + } + return { + type: processorConfig.processorType, + parameters: parameters, + }; + }); + return { + enabled: true, + processors: processors, + }; + } + return undefined; + } + + protected createBackupConfig(scope: Construct, deliveryStream: IDeliveryStream): CfnDeliveryStream.S3DestinationConfigurationProperty | undefined { + if (!this.props.backupConfiguration) { + return undefined; + } + if (this.props.backupConfiguration.backupMode === BackupMode.DISABLED && this.props.backupConfiguration.backupBucket) { + throw new Error('Destination backup cannot be set to DISABLED when backupBucket is provided'); + } + + if ((this.props.backupConfiguration.backupMode !== undefined && this.props.backupConfiguration.backupMode !== BackupMode.DISABLED) || + this.props.backupConfiguration.backupBucket + ) { + const bucket = this.props.backupConfiguration.backupBucket ?? 
new s3.Bucket(scope, 'BackupBucket'); + bucket.grantReadWrite(deliveryStream); + return { + bucketArn: bucket.bucketArn, + roleArn: (deliveryStream.grantPrincipal as iam.Role).roleArn, + prefix: this.props.backupConfiguration.prefix, + errorOutputPrefix: this.props.backupConfiguration.errorOutputPrefix, + bufferingHints: this.createBufferingHints(this.props.backupConfiguration.bufferingInterval, this.props.backupConfiguration.bufferingSize), + compressionFormat: this.props.backupConfiguration.compression?.value, + encryptionConfiguration: this.createEncryptionConfig(deliveryStream, this.props.backupConfiguration.encryptionKey), + cloudWatchLoggingOptions: this.createBackupLoggingOptions(scope, deliveryStream, 'S3Backup'), + }; + } + return undefined; + } + + protected createBufferingHints(bufferingInterval?: Duration, bufferingSize?: Size): CfnDeliveryStream.BufferingHintsProperty | undefined { + if (bufferingInterval && bufferingSize) { + if (bufferingInterval.toSeconds() < 60 || bufferingInterval.toSeconds() > 900) { + throw new Error('Buffering interval must be between 60 and 900 seconds'); + } + if (bufferingSize.toMebibytes() < 1 || bufferingSize.toMebibytes() > 128) { + throw new Error('Buffering size must be between 1 and 128 MBs'); + } + return { + intervalInSeconds: bufferingInterval.toSeconds(), + sizeInMBs: bufferingSize.toMebibytes(), + }; + } else if (!bufferingInterval && bufferingSize) { + throw new Error('If bufferingSize is specified, bufferingInterval must also be specified'); + } else if (bufferingInterval && !bufferingSize) { + throw new Error('If bufferingInterval is specified, bufferingSize must also be specified'); + } + return undefined; + } + + protected createEncryptionConfig(deliveryStream: IDeliveryStream, encryptionKey?: kms.IKey): CfnDeliveryStream.EncryptionConfigurationProperty { + encryptionKey?.grantEncryptDecrypt(deliveryStream); + return encryptionKey != null + ? { kmsEncryptionConfig: { awskmsKeyArn: encryptionKey.keyArn } } + : { noEncryptionConfig: 'NoEncryption' }; + } + + private createBackupLoggingOptions( + scope: Construct, + deliveryStream: IDeliveryStream, + streamId: string, + ): CfnDeliveryStream.CloudWatchLoggingOptionsProperty | undefined { + return this._createLoggingOptions(scope, deliveryStream, streamId, 'BackupLogGroup', this.props.backupConfiguration?.logging, this.props.backupConfiguration?.logGroup); + } + + private _createLoggingOptions( + scope: Construct, + deliveryStream: IDeliveryStream, + streamId: string, + logGroupId: string, + logging?: boolean, + propsLogGroup?: logs.ILogGroup, + ): CfnDeliveryStream.CloudWatchLoggingOptionsProperty | undefined { + if (logging === false && propsLogGroup) { + throw new Error('logging cannot be set to false when logGroup is provided'); + } + if (logging !== false || propsLogGroup) { + this.logGroups[logGroupId] = this.logGroups[logGroupId] ?? propsLogGroup ?? 
new logs.LogGroup(scope, logGroupId);
+      this.logGroups[logGroupId].grantWrite(deliveryStream);
+      return {
+        enabled: true,
+        logGroupName: this.logGroups[logGroupId].logGroupName,
+        logStreamName: this.logGroups[logGroupId].addStream(streamId).logStreamName,
+      };
+    }
+    return undefined;
+  }
+}
diff --git a/packages/@aws-cdk/aws-kinesisfirehose/lib/index.ts b/packages/@aws-cdk/aws-kinesisfirehose/lib/index.ts
index dd7beef14d159..c76605c5b9011 100644
--- a/packages/@aws-cdk/aws-kinesisfirehose/lib/index.ts
+++ b/packages/@aws-cdk/aws-kinesisfirehose/lib/index.ts
@@ -1,2 +1,6 @@
+export * from './delivery-stream';
+export * from './destination';
+export * from './processor';
+
 // AWS::KinesisFirehose CloudFormation Resources:
 export * from './kinesisfirehose.generated';
diff --git a/packages/@aws-cdk/aws-kinesisfirehose/lib/processor.ts b/packages/@aws-cdk/aws-kinesisfirehose/lib/processor.ts
new file mode 100644
index 0000000000000..0ea1dbda6a761
--- /dev/null
+++ b/packages/@aws-cdk/aws-kinesisfirehose/lib/processor.ts
@@ -0,0 +1,108 @@
+import * as lambda from '@aws-cdk/aws-lambda';
+import { Duration, Size } from '@aws-cdk/core';
+import { IDeliveryStream } from './delivery-stream';
+
+/**
+ * Configure the data processor.
+ */
+export interface DataProcessorProps {
+  /**
+   * The length of time Kinesis Data Firehose will buffer incoming data before calling the processor.
+   *
+   * @default Duration.minutes(1)
+   */
+  readonly bufferInterval?: Duration;
+
+  /**
+   * The amount of incoming data Kinesis Data Firehose will buffer before calling the processor.
+   *
+   * @default Size.mebibytes(3)
+   */
+  readonly bufferSize?: Size;
+
+  /**
+   * The number of times Kinesis Data Firehose will retry the processor invocation after a failure due to network timeout or invocation limits.
+   *
+   * @default 3
+   */
+  readonly retries?: number;
+}
+
+/**
+ * The key-value pair that identifies the underlying processor resource.
+ *
+ * @see http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-processorparameter.html
+ * @example {
+ *   parameterName: 'LambdaArn',
+ *   parameterValue: lambdaFunction.functionArn,
+ * }
+ */
+export interface DataProcessorIdentifier {
+  /**
+   * The parameter name that corresponds to the processor resource's identifier.
+   *
+   * Must be an accepted value in `CfnDeliveryStream.ProcessorParameterProperty.ParameterName`.
+   */
+  readonly parameterName: string;
+
+  /**
+   * The identifier of the underlying processor resource.
+   */
+  readonly parameterValue: string;
+}
+
+/**
+ * The full configuration of a data processor.
+ */
+export interface DataProcessorConfig extends DataProcessorProps {
+  /**
+   * The type of the underlying processor resource.
+   *
+   * Must be an accepted value in `CfnDeliveryStream.ProcessorProperty.Type`.
+   * @see http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-processor.html#cfn-kinesisfirehose-deliverystream-processor-type
+   * @example 'Lambda'
+   */
+  readonly processorType: string;
+
+  /**
+   * The key-value pair that identifies the underlying processor resource.
+   */
+  readonly processorIdentifier: DataProcessorIdentifier;
+}
+
+/**
+ * A data processor that Kinesis Data Firehose will call to transform records before delivering data.
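+ *
+ * See `LambdaFunctionProcessor` for an implementation that transforms records with an AWS Lambda function.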
+ */ +export interface IDataProcessor { + /** + * Binds this processor to the delivery stream of the destination + * + * Implementers should use this method to grant processor invocation permissions to the provided stream and return the + * necessary configuration to register as a processor. + */ + bind(deliveryStream: IDeliveryStream): DataProcessorConfig +} + +/** + * Use a Lambda function to transform records. + */ +export class LambdaFunctionProcessor implements IDataProcessor { + private readonly processorType = 'Lambda'; + private readonly processorIdentifier: DataProcessorIdentifier; + constructor(private readonly lambdaFunction: lambda.IFunction, private readonly props: DataProcessorProps = {}) { + this.processorIdentifier = { + parameterName: 'LambdaArn', + parameterValue: lambdaFunction.functionArn, + }; + } + + public bind(deliveryStream: IDeliveryStream): DataProcessorConfig { + this.lambdaFunction.grantInvoke(deliveryStream); + + return { + processorType: this.processorType, + processorIdentifier: this.processorIdentifier, + ...this.props, + }; + } +} diff --git a/packages/@aws-cdk/aws-kinesisfirehose/package.json b/packages/@aws-cdk/aws-kinesisfirehose/package.json index e3aaff7954822..02d64257f825b 100644 --- a/packages/@aws-cdk/aws-kinesisfirehose/package.json +++ b/packages/@aws-cdk/aws-kinesisfirehose/package.json @@ -77,26 +77,52 @@ "cdk-build-tools": "0.0.0", "cfn2ts": "0.0.0", "pkglint": "0.0.0", - "@aws-cdk/assert-internal": "0.0.0" + "@aws-cdk/assert-internal": "0.0.0", + "cdk-integ-tools": "0.0.0" }, "dependencies": { + "@aws-cdk/aws-cloudwatch": "0.0.0", + "@aws-cdk/aws-ec2": "0.0.0", + "@aws-cdk/aws-iam": "0.0.0", + "@aws-cdk/aws-kinesis": "0.0.0", + "@aws-cdk/aws-kms": "0.0.0", + "@aws-cdk/aws-lambda": "0.0.0", + "@aws-cdk/aws-logs": "0.0.0", + "@aws-cdk/aws-s3": "0.0.0", "@aws-cdk/core": "0.0.0", + "@aws-cdk/custom-resources": "0.0.0", + "@aws-cdk/region-info": "0.0.0", "constructs": "^3.3.69" }, "homepage": "https://github.com/aws/aws-cdk", "peerDependencies": { + "@aws-cdk/aws-cloudwatch": "0.0.0", + "@aws-cdk/aws-ec2": "0.0.0", + "@aws-cdk/aws-iam": "0.0.0", + "@aws-cdk/aws-kinesis": "0.0.0", + "@aws-cdk/aws-kms": "0.0.0", + "@aws-cdk/aws-lambda": "0.0.0", + "@aws-cdk/aws-logs": "0.0.0", + "@aws-cdk/aws-s3": "0.0.0", "@aws-cdk/core": "0.0.0", + "@aws-cdk/custom-resources": "0.0.0", + "@aws-cdk/region-info": "0.0.0", "constructs": "^3.3.69" }, "engines": { "node": ">= 10.13.0 <13 || >=13.7.0" }, "stability": "experimental", - "maturity": "cfn-only", + "maturity": "experimental", "awscdkio": { "announce": false }, "publishConfig": { "tag": "latest" + }, + "awslint": { + "exclude": [ + "no-unused-type:@aws-cdk/aws-kinesisfirehose.Compression" + ] } } diff --git a/packages/@aws-cdk/aws-kinesisfirehose/rosetta/default.ts-fixture b/packages/@aws-cdk/aws-kinesisfirehose/rosetta/default.ts-fixture new file mode 100644 index 0000000000000..8a68efc25aa8e --- /dev/null +++ b/packages/@aws-cdk/aws-kinesisfirehose/rosetta/default.ts-fixture @@ -0,0 +1,11 @@ +// Fixture with packages imported, but nothing else +import { Construct, Stack } from '@aws-cdk/core'; +import { DeliveryStream, DestinationBindOptions, DestinationConfig, IDestination } from '@aws-cdk/aws-kinesisfirehose'; + +class Fixture extends Stack { + constructor(scope: Construct, id: string) { + super(scope, id); + + /// here + } +} diff --git a/packages/@aws-cdk/aws-kinesisfirehose/test/delivery-stream.test.ts b/packages/@aws-cdk/aws-kinesisfirehose/test/delivery-stream.test.ts new file mode 100644 index 
0000000000000..c4c911075017d --- /dev/null +++ b/packages/@aws-cdk/aws-kinesisfirehose/test/delivery-stream.test.ts @@ -0,0 +1,530 @@ +import '@aws-cdk/assert-internal/jest'; +import { ABSENT, arrayWith } from '@aws-cdk/assert-internal'; +import * as cloudwatch from '@aws-cdk/aws-cloudwatch'; +import * as ec2 from '@aws-cdk/aws-ec2'; +import * as iam from '@aws-cdk/aws-iam'; +import * as kms from '@aws-cdk/aws-kms'; +import * as cdk from '@aws-cdk/core'; +import { Construct } from 'constructs'; +import * as firehose from '../lib'; + +describe('delivery stream', () => { + let stack: cdk.Stack; + + const bucketArn = 'arn:aws:s3:::my-bucket'; + const roleArn = 'arn:aws:iam::111122223333:role/my-role'; + const mockS3Destination: firehose.IDestination = { + bind(_scope: Construct, _options: firehose.DestinationBindOptions): firehose.DestinationConfig { + return { + properties: { + s3DestinationConfiguration: { + bucketArn: bucketArn, + roleArn: roleArn, + }, + }, + }; + }, + }; + + beforeEach(() => { + stack = new cdk.Stack(); + }); + + test('creates stream with default values', () => { + new firehose.DeliveryStream(stack, 'Delivery Stream', { + destinations: [mockS3Destination], + }); + + expect(stack).toHaveResource('AWS::KinesisFirehose::DeliveryStream', { + DeliveryStreamEncryptionConfigurationInput: ABSENT, + DeliveryStreamName: ABSENT, + DeliveryStreamType: 'DirectPut', + KinesisStreamSourceConfiguration: ABSENT, + S3DestinationConfiguration: { + BucketARN: bucketArn, + RoleARN: roleArn, + }, + }); + }); + + test('provided role is set as grant principal', () => { + const role = new iam.Role(stack, 'Role', { + assumedBy: new iam.ServicePrincipal('firehose.amazonaws.com'), + }); + + const deliveryStream = new firehose.DeliveryStream(stack, 'Delivery Stream', { + destinations: [mockS3Destination], + role: role, + }); + + expect(deliveryStream.grantPrincipal).toBe(role); + }); + + test('not providing role creates one', () => { + new firehose.DeliveryStream(stack, 'Delivery Stream', { + destinations: [mockS3Destination], + }); + + expect(stack).toHaveResourceLike('AWS::IAM::Role', { + AssumeRolePolicyDocument: { + Statement: [ + { + Principal: { + Service: 'firehose.amazonaws.com', + }, + }, + ], + }, + }); + }); + + test('requesting customer-owned encryption creates key and configuration', () => { + new firehose.DeliveryStream(stack, 'Delivery Stream', { + destinations: [mockS3Destination], + encryption: firehose.StreamEncryption.CUSTOMER_MANAGED, + }); + + expect(stack).toHaveResource('AWS::KMS::Key'); + expect(stack).toHaveResourceLike('AWS::IAM::Policy', { + PolicyDocument: { + Statement: [ + { + Action: arrayWith( + 'kms:Encrypt', + 'kms:Decrypt', + ), + Resource: { + 'Fn::GetAtt': [ + 'DeliveryStreamKey56A6407F', + 'Arn', + ], + }, + }, + ], + }, + Roles: [{ Ref: 'DeliveryStreamServiceRole964EEBCC' }], + }); + expect(stack).toHaveResource('AWS::KinesisFirehose::DeliveryStream', { + DeliveryStreamType: 'DirectPut', + DeliveryStreamEncryptionConfigurationInput: { + KeyARN: { + 'Fn::GetAtt': [ + 'DeliveryStreamKey56A6407F', + 'Arn', + ], + }, + KeyType: 'CUSTOMER_MANAGED_CMK', + }, + }); + }); + + test('providing encryption key creates configuration', () => { + const key = new kms.Key(stack, 'Key'); + + new firehose.DeliveryStream(stack, 'Delivery Stream', { + destinations: [mockS3Destination], + encryptionKey: key, + }); + + expect(stack).toHaveResource('AWS::KMS::Key'); + expect(stack).toHaveResourceLike('AWS::IAM::Policy', { + PolicyDocument: { + Statement: [ + { + Action: arrayWith( + 
'kms:Encrypt', + 'kms:Decrypt', + ), + Resource: { + 'Fn::GetAtt': [ + 'Key961B73FD', + 'Arn', + ], + }, + }, + ], + }, + Roles: [{ Ref: 'DeliveryStreamServiceRole964EEBCC' }], + }); + expect(stack).toHaveResource('AWS::KinesisFirehose::DeliveryStream', { + DeliveryStreamType: 'DirectPut', + DeliveryStreamEncryptionConfigurationInput: { + KeyARN: stack.resolve(key.keyArn), + KeyType: 'CUSTOMER_MANAGED_CMK', + }, + }); + }); + + test('requesting AWS-owned key does not create key and creates configuration', () => { + new firehose.DeliveryStream(stack, 'Delivery Stream', { + destinations: [mockS3Destination], + encryption: firehose.StreamEncryption.AWS_OWNED, + }); + + expect(stack).not.toHaveResource('AWS::KMS::Key'); + expect(stack).not.toHaveResourceLike('AWS::IAM::Policy', { + PolicyDocument: { + Statement: [ + { + Action: arrayWith( + 'kms:Encrypt', + 'kms:Decrypt', + ), + }, + ], + }, + Roles: [{ Ref: 'DeliveryStreamServiceRole964EEBCC' }], + }); + expect(stack).toHaveResourceLike('AWS::KinesisFirehose::DeliveryStream', { + DeliveryStreamType: 'DirectPut', + DeliveryStreamEncryptionConfigurationInput: { + KeyARN: ABSENT, + KeyType: 'AWS_OWNED_CMK', + }, + }); + }); + + test('requesting no encryption creates no configuration', () => { + new firehose.DeliveryStream(stack, 'Delivery Stream', { + destinations: [mockS3Destination], + encryption: firehose.StreamEncryption.UNENCRYPTED, + }); + + expect(stack).not.toHaveResource('AWS::KMS::Key'); + expect(stack).not.toHaveResourceLike('AWS::IAM::Policy', { + PolicyDocument: { + Statement: [ + { + Action: arrayWith( + 'kms:Encrypt', + 'kms:Decrypt', + ), + }, + ], + }, + Roles: [{ Ref: 'DeliveryStreamServiceRole964EEBCC' }], + }); + expect(stack).toHaveResourceLike('AWS::KinesisFirehose::DeliveryStream', { + DeliveryStreamType: 'DirectPut', + DeliveryStreamEncryptionConfigurationInput: ABSENT, + }); + }); + + test('requesting AWS-owned key and providing a key throws an error', () => { + const key = new kms.Key(stack, 'Key'); + + expect(() => new firehose.DeliveryStream(stack, 'Delivery Stream', { + destinations: [mockS3Destination], + encryption: firehose.StreamEncryption.AWS_OWNED, + encryptionKey: key, + })).toThrowError('Specified stream encryption as AWS_OWNED but provided a customer-managed key'); + }); + + test('requesting no encryption and providing a key throws an error', () => { + const key = new kms.Key(stack, 'Key'); + + expect(() => new firehose.DeliveryStream(stack, 'Delivery Stream', { + destinations: [mockS3Destination], + encryption: firehose.StreamEncryption.UNENCRYPTED, + encryptionKey: key, + })).toThrowError('Specified stream encryption as UNENCRYPTED but provided a customer-managed key'); + }); + + test('grant provides access to stream', () => { + const role = new iam.Role(stack, 'Role', { + assumedBy: new iam.ServicePrincipal('lambda.amazonaws.com'), + }); + const deliveryStream = new firehose.DeliveryStream(stack, 'Delivery Stream', { + destinations: [mockS3Destination], + }); + + deliveryStream.grant(role, 'firehose:PutRecord'); + + expect(stack).toHaveResourceLike('AWS::IAM::Policy', { + PolicyDocument: { + Statement: [ + { + Action: 'firehose:PutRecord', + Resource: stack.resolve(deliveryStream.deliveryStreamArn), + }, + ], + }, + Roles: [stack.resolve(role.roleName)], + }); + }); + + test('grantPutRecords provides PutRecord* access to stream', () => { + const role = new iam.Role(stack, 'Role', { + assumedBy: new iam.ServicePrincipal('lambda.amazonaws.com'), + }); + const deliveryStream = new 
firehose.DeliveryStream(stack, 'Delivery Stream', { + destinations: [mockS3Destination], + }); + + deliveryStream.grantPutRecords(role); + + expect(stack).toHaveResourceLike('AWS::IAM::Policy', { + PolicyDocument: { + Statement: [ + { + Action: [ + 'firehose:PutRecord', + 'firehose:PutRecordBatch', + ], + Resource: stack.resolve(deliveryStream.deliveryStreamArn), + }, + ], + }, + Roles: [stack.resolve(role.roleName)], + }); + }); + + test('supplying 0 or multiple destinations throws', () => { + expect(() => new firehose.DeliveryStream(stack, 'No Destinations', { + destinations: [], + })).toThrowError(/Only one destination is allowed per delivery stream/); + expect(() => new firehose.DeliveryStream(stack, 'Too Many Destinations', { + destinations: [mockS3Destination, mockS3Destination], + })).toThrowError(/Only one destination is allowed per delivery stream/); + }); + + describe('metric methods provide a Metric with configured and attached properties', () => { + beforeEach(() => { + stack = new cdk.Stack(undefined, undefined, { env: { account: '000000000000', region: 'us-west-1' } }); + }); + + test('metric', () => { + const deliveryStream = new firehose.DeliveryStream(stack, 'Delivery Stream', { + destinations: [mockS3Destination], + }); + + const metric = deliveryStream.metric('IncomingRecords'); + + expect(metric).toMatchObject({ + account: stack.account, + region: stack.region, + namespace: 'AWS/Firehose', + metricName: 'IncomingRecords', + dimensions: { + DeliveryStreamName: deliveryStream.deliveryStreamName, + }, + }); + }); + + test('metricIncomingBytes', () => { + const deliveryStream = new firehose.DeliveryStream(stack, 'Delivery Stream', { + destinations: [mockS3Destination], + }); + + const metric = deliveryStream.metricIncomingBytes(); + + expect(metric).toMatchObject({ + account: stack.account, + region: stack.region, + namespace: 'AWS/Firehose', + metricName: 'IncomingBytes', + statistic: cloudwatch.Statistic.AVERAGE, + dimensions: { + DeliveryStreamName: deliveryStream.deliveryStreamName, + }, + }); + }); + + test('metricIncomingRecords', () => { + const deliveryStream = new firehose.DeliveryStream(stack, 'Delivery Stream', { + destinations: [mockS3Destination], + }); + + const metric = deliveryStream.metricIncomingRecords(); + + expect(metric).toMatchObject({ + account: stack.account, + region: stack.region, + namespace: 'AWS/Firehose', + metricName: 'IncomingRecords', + statistic: cloudwatch.Statistic.AVERAGE, + dimensions: { + DeliveryStreamName: deliveryStream.deliveryStreamName, + }, + }); + }); + + test('metricBackupToS3Bytes', () => { + const deliveryStream = new firehose.DeliveryStream(stack, 'Delivery Stream', { + destinations: [mockS3Destination], + }); + + const metric = deliveryStream.metricBackupToS3Bytes(); + + expect(metric).toMatchObject({ + account: stack.account, + region: stack.region, + namespace: 'AWS/Firehose', + metricName: 'BackupToS3.Bytes', + statistic: cloudwatch.Statistic.AVERAGE, + dimensions: { + DeliveryStreamName: deliveryStream.deliveryStreamName, + }, + }); + }); + + test('metricBackupToS3DataFreshness', () => { + const deliveryStream = new firehose.DeliveryStream(stack, 'Delivery Stream', { + destinations: [mockS3Destination], + }); + + const metric = deliveryStream.metricBackupToS3DataFreshness(); + + expect(metric).toMatchObject({ + account: stack.account, + region: stack.region, + namespace: 'AWS/Firehose', + metricName: 'BackupToS3.DataFreshness', + statistic: cloudwatch.Statistic.AVERAGE, + dimensions: { + DeliveryStreamName: 
deliveryStream.deliveryStreamName, + }, + }); + }); + + test('metricBackupToS3Records', () => { + const deliveryStream = new firehose.DeliveryStream(stack, 'Delivery Stream', { + destinations: [mockS3Destination], + }); + + const metric = deliveryStream.metricBackupToS3Records(); + + expect(metric).toMatchObject({ + account: stack.account, + region: stack.region, + namespace: 'AWS/Firehose', + metricName: 'BackupToS3.Records', + statistic: cloudwatch.Statistic.AVERAGE, + dimensions: { + DeliveryStreamName: deliveryStream.deliveryStreamName, + }, + }); + }); + }); + + test('allows connections for Firehose IP addresses using map when region not specified', () => { + const vpc = new ec2.Vpc(stack, 'VPC'); + const securityGroup = new ec2.SecurityGroup(stack, 'Security Group', { vpc }); + const deliveryStream = new firehose.DeliveryStream(stack, 'Delivery Stream', { + destinations: [mockS3Destination], + }); + + securityGroup.connections.allowFrom(deliveryStream, ec2.Port.allTcp()); + + expect(stack).toHaveResourceLike('AWS::EC2::SecurityGroup', { + SecurityGroupIngress: [ + { + CidrIp: { + 'Fn::FindInMap': [ + 'DeliveryStreamFirehoseCIDRMappingE9233479', + { + Ref: 'AWS::Region', + }, + 'FirehoseCidrBlock', + ], + }, + }, + ], + }); + }); + + test('allows connections for Firehose IP addresses using literal when region specified', () => { + stack = new cdk.Stack(undefined, undefined, { env: { region: 'us-west-1' } }); + const vpc = new ec2.Vpc(stack, 'VPC'); + const securityGroup = new ec2.SecurityGroup(stack, 'Security Group', { vpc }); + const deliveryStream = new firehose.DeliveryStream(stack, 'Delivery Stream', { + destinations: [mockS3Destination], + }); + + securityGroup.connections.allowFrom(deliveryStream, ec2.Port.allTcp()); + + expect(stack).toHaveResourceLike('AWS::EC2::SecurityGroup', { + SecurityGroupIngress: [ + { + CidrIp: '13.57.135.192/27', + }, + ], + }); + }); + + test('can add tags', () => { + const deliveryStream = new firehose.DeliveryStream(stack, 'Delivery Stream', { + destinations: [mockS3Destination], + }); + + cdk.Tags.of(deliveryStream).add('tagKey', 'tagValue'); + + expect(stack).toHaveResource('AWS::KinesisFirehose::DeliveryStream', { + Tags: [ + { + Key: 'tagKey', + Value: 'tagValue', + }, + ], + }); + }); + + describe('importing', () => { + test('from name', () => { + const deliveryStream = firehose.DeliveryStream.fromDeliveryStreamName(stack, 'DeliveryStream', 'mydeliverystream'); + + expect(deliveryStream.deliveryStreamName).toBe('mydeliverystream'); + expect(stack.resolve(deliveryStream.deliveryStreamArn)).toStrictEqual({ + 'Fn::Join': ['', ['arn:', stack.resolve(stack.partition), ':firehose:', stack.resolve(stack.region), ':', stack.resolve(stack.account), ':deliverystream/mydeliverystream']], + }); + expect(deliveryStream.grantPrincipal).toBeInstanceOf(iam.UnknownPrincipal); + }); + + test('from ARN', () => { + const deliveryStream = firehose.DeliveryStream.fromDeliveryStreamArn(stack, 'DeliveryStream', 'arn:aws:firehose:xx-west-1:111122223333:deliverystream/mydeliverystream'); + + expect(deliveryStream.deliveryStreamName).toBe('mydeliverystream'); + expect(deliveryStream.deliveryStreamArn).toBe('arn:aws:firehose:xx-west-1:111122223333:deliverystream/mydeliverystream'); + expect(deliveryStream.grantPrincipal).toBeInstanceOf(iam.UnknownPrincipal); + }); + + test('from attributes (just name)', () => { + const deliveryStream = firehose.DeliveryStream.fromDeliveryStreamAttributes(stack, 'DeliveryStream', { deliveryStreamName: 'mydeliverystream' }); + + 
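+      // With only a name supplied, the imported stream appears to synthesize its ARN from the owning stack's partition, region, and account (hence the Fn::Join assertion below) and, with no role given, to fall back to an iam.UnknownPrincipal for grants.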
expect(deliveryStream.deliveryStreamName).toBe('mydeliverystream'); + expect(stack.resolve(deliveryStream.deliveryStreamArn)).toStrictEqual({ + 'Fn::Join': ['', ['arn:', stack.resolve(stack.partition), ':firehose:', stack.resolve(stack.region), ':', stack.resolve(stack.account), ':deliverystream/mydeliverystream']], + }); + expect(deliveryStream.grantPrincipal).toBeInstanceOf(iam.UnknownPrincipal); + }); + + test('from attributes (just ARN)', () => { + const deliveryStream = firehose.DeliveryStream.fromDeliveryStreamAttributes(stack, 'DeliveryStream', { deliveryStreamArn: 'arn:aws:firehose:xx-west-1:111122223333:deliverystream/mydeliverystream' }); + + expect(deliveryStream.deliveryStreamName).toBe('mydeliverystream'); + expect(deliveryStream.deliveryStreamArn).toBe('arn:aws:firehose:xx-west-1:111122223333:deliverystream/mydeliverystream'); + expect(deliveryStream.grantPrincipal).toBeInstanceOf(iam.UnknownPrincipal); + }); + + test('from attributes (with role)', () => { + const role = iam.Role.fromRoleArn(stack, 'Delivery Stream Role', 'arn:aws:iam::111122223333:role/DeliveryStreamRole'); + const deliveryStream = firehose.DeliveryStream.fromDeliveryStreamAttributes(stack, 'DeliveryStream', { deliveryStreamName: 'mydeliverystream', role }); + + expect(deliveryStream.deliveryStreamName).toBe('mydeliverystream'); + expect(stack.resolve(deliveryStream.deliveryStreamArn)).toStrictEqual({ + 'Fn::Join': ['', ['arn:', stack.resolve(stack.partition), ':firehose:', stack.resolve(stack.region), ':', stack.resolve(stack.account), ':deliverystream/mydeliverystream']], + }); + expect(deliveryStream.grantPrincipal).toBe(role); + }); + + test('throws when malformatted ARN', () => { + expect(() => firehose.DeliveryStream.fromDeliveryStreamAttributes(stack, 'DeliveryStream', { deliveryStreamArn: 'arn:aws:firehose:xx-west-1:111122223333:deliverystream/' })) + .toThrowError(/Could not import delivery stream from malformatted ARN/); + }); + + test('throws when without name or ARN', () => { + expect(() => firehose.DeliveryStream.fromDeliveryStreamAttributes(stack, 'DeliveryStream', {})) + .toThrowError('Either deliveryStreamName or deliveryStreamArn must be provided in DeliveryStreamAttributes'); + }); + }); +}); diff --git a/packages/@aws-cdk/aws-kinesisfirehose/test/destination.test.ts b/packages/@aws-cdk/aws-kinesisfirehose/test/destination.test.ts new file mode 100644 index 0000000000000..41cc3db17df32 --- /dev/null +++ b/packages/@aws-cdk/aws-kinesisfirehose/test/destination.test.ts @@ -0,0 +1,619 @@ +import '@aws-cdk/assert-internal/jest'; +import { arrayWith, objectLike } from '@aws-cdk/assert-internal'; +import * as iam from '@aws-cdk/aws-iam'; +import * as kms from '@aws-cdk/aws-kms'; +import * as lambda from '@aws-cdk/aws-lambda'; +import * as logs from '@aws-cdk/aws-logs'; +import * as s3 from '@aws-cdk/aws-s3'; +import * as cdk from '@aws-cdk/core'; +import { Construct } from 'constructs'; +import * as firehose from '../lib'; + +describe('destination', () => { + let stack: cdk.Stack; + let deliveryStreamRole: iam.IRole; + let deliveryStream: firehose.IDeliveryStream; + + const deliveryStreamRoleArn = 'arn:aws:iam::111122223333:role/DeliveryStreamRole'; + + beforeEach(() => { + stack = new cdk.Stack(); + deliveryStreamRole = iam.Role.fromRoleArn(stack, 'Delivery Stream Role', deliveryStreamRoleArn); + deliveryStream = firehose.DeliveryStream.fromDeliveryStreamAttributes(stack, 'Delivery Stream', { + deliveryStreamName: 'mydeliverystream', + role: deliveryStreamRole, + }); + }); + + 
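+  // The helper tests below all follow one pattern: declare a minimal concrete subclass of the
+  // abstract firehose.DestinationBase whose bind() surfaces a single protected helper
+  // (createLoggingOptions, createBackupConfig, createProcessingConfig, createBufferingHints)
+  // under `properties`, then assert on the resolved output. A minimal sketch of that pattern —
+  // the names `HelperDestination` and `helperConfig` are illustrative stand-ins, not part of
+  // this change:
+  //
+  //   class HelperDestination extends firehose.DestinationBase {
+  //     public bind(scope: Construct, options: firehose.DestinationBindOptions): firehose.DestinationConfig {
+  //       return {
+  //         properties: {
+  //           helperConfig: this.createLoggingOptions(scope, options.deliveryStream, 'streamId'),
+  //         },
+  //       };
+  //     }
+  //   }
+  //   const config = new HelperDestination().bind(stack, { deliveryStream });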
describe('createLoggingOptions', () => { + class LoggingDestination extends firehose.DestinationBase { + public bind(scope: Construct, options: firehose.DestinationBindOptions): firehose.DestinationConfig { + return { + properties: { + testDestinationConfig: { + loggingConfig: this.createLoggingOptions(scope, options.deliveryStream, 'streamId'), + }, + }, + }; + } + } + + test('creates resources and configuration by default', () => { + const testDestination = new LoggingDestination(); + + const testDestinationConfig = testDestination.bind(stack, { deliveryStream }); + + expect(stack).toHaveResource('AWS::Logs::LogGroup'); + expect(stack).toHaveResource('AWS::Logs::LogStream'); + expect(stack.resolve(testDestinationConfig)).toStrictEqual({ + properties: { + testDestinationConfig: { + loggingConfig: { + enabled: true, + logGroupName: { + Ref: 'LogGroupF5B46931', + }, + logStreamName: { + Ref: 'LogGroupstreamId3B940622', + }, + }, + }, + }, + }); + }); + test('does not create resources or configuration if disabled', () => { + const testDestination = new LoggingDestination({ logging: false }); + + const testDestinationConfig = testDestination.bind(stack, { deliveryStream }); + + expect(stack.resolve(testDestinationConfig)).toStrictEqual({ + properties: { + testDestinationConfig: {}, + }, + }); + }); + + test('creates configuration if log group provided', () => { + const testDestination = new LoggingDestination({ logGroup: new logs.LogGroup(stack, 'Log Group') }); + + const testDestinationConfig = testDestination.bind(stack, { deliveryStream }); + + expect(stack.resolve(testDestinationConfig)).toMatchObject({ + properties: { + testDestinationConfig: { + loggingConfig: { + enabled: true, + }, + }, + }, + }); + }); + + test('throws error if logging disabled but log group provided', () => { + const testDestination = new LoggingDestination({ logging: false, logGroup: new logs.LogGroup(stack, 'Log Group') }); + + expect(() => testDestination.bind(stack, { deliveryStream })).toThrowError('logging cannot be set to false when logGroup is provided'); + }); + + test('uses provided log group', () => { + const testDestination = new LoggingDestination({ logGroup: new logs.LogGroup(stack, 'Log Group') }); + + const testDestinationConfig = testDestination.bind(stack, { deliveryStream }); + + expect(stack).toCountResources('AWS::Logs::LogGroup', 1); + expect(stack.resolve(testDestinationConfig)).toMatchObject({ + properties: { + testDestinationConfig: { + loggingConfig: { + enabled: true, + logGroupName: { + Ref: 'LogGroupD9735569', + }, + logStreamName: { + Ref: 'LogGroupstreamIdA1293DC2', + }, + }, + }, + }, + }); + }); + + test('re-uses log group if called multiple times', () => { + const testDestination = new class extends firehose.DestinationBase { + public bind(scope: Construct, options: firehose.DestinationBindOptions): firehose.DestinationConfig { + return { + properties: { + testDestinationConfig: { + loggingConfig: this.createLoggingOptions(scope, options.deliveryStream, 'streamId'), + anotherLoggingConfig: this.createLoggingOptions(scope, options.deliveryStream, 'anotherStreamId'), + }, + }, + }; + } + }(); + + const testDestinationConfig = testDestination.bind(stack, { deliveryStream }); + + expect(stack).toCountResources('AWS::Logs::LogGroup', 1); + expect(stack.resolve(testDestinationConfig)).toMatchObject({ + properties: { + testDestinationConfig: { + loggingConfig: { + logGroupName: { + Ref: 'LogGroupF5B46931', + }, + logStreamName: { + Ref: 'LogGroupstreamId3B940622', + }, + }, + 
anotherLoggingConfig: { + logGroupName: { + Ref: 'LogGroupF5B46931', + }, + logStreamName: { + Ref: 'LogGroupanotherStreamIdF2754481', + }, + }, + }, + }, + }); + }); + }); + + describe('createBackupConfig', () => { + class BackupDestination extends firehose.DestinationBase { + public bind(scope: Construct, options: firehose.DestinationBindOptions): firehose.DestinationConfig { + return { + properties: { + testDestinationConfig: { + backupConfig: this.createBackupConfig(scope, options.deliveryStream), + }, + }, + }; + } + } + + test('does not create resources or configuration if no backupConfiguration provided', () => { + const testDestination = new BackupDestination(); + + const testDestinationConfig = testDestination.bind(stack, { deliveryStream }); + + expect(stack.resolve(testDestinationConfig)).toStrictEqual({ + properties: { + testDestinationConfig: {}, + }, + }); + }); + + test('does not create resources or configuration if backupConfiguration is empty', () => { + const testDestination = new BackupDestination({ backupConfiguration: {} }); + + const testDestinationConfig = testDestination.bind(stack, { deliveryStream }); + + expect(stack.resolve(testDestinationConfig)).toStrictEqual({ + properties: { + testDestinationConfig: {}, + }, + }); + }); + + test('create resources and configuration if explicitly enabled', () => { + const testDestination = new BackupDestination({ backupConfiguration: { backupMode: firehose.BackupMode.ALL } }); + + const testDestinationConfig = testDestination.bind(stack, { deliveryStream }); + + expect(stack.resolve(testDestinationConfig)).toStrictEqual({ + properties: { + testDestinationConfig: { + backupConfig: { + bucketArn: { 'Fn::GetAtt': ['BackupBucket26B8E51C', 'Arn'] }, + roleArn: deliveryStreamRoleArn, + encryptionConfiguration: { + noEncryptionConfig: 'NoEncryption', + }, + cloudWatchLoggingOptions: { + enabled: true, + logGroupName: { + Ref: 'BackupLogGroupB15A0768', + }, + logStreamName: { + Ref: 'BackupLogGroupS3BackupA7B3FB1E', + }, + }, + }, + }, + }, + }); + }); + + test('allows custom prefixes', () => { + const testDestination = new BackupDestination({ + backupConfiguration: { + prefix: 'custom-prefix', + errorOutputPrefix: 'custom-error-prefix', + backupMode: firehose.BackupMode.ALL, + }, + }); + + const testDestinationConfig = testDestination.bind(stack, { deliveryStream }); + + expect(stack.resolve(testDestinationConfig)).toStrictEqual({ + properties: { + testDestinationConfig: { + backupConfig: { + bucketArn: { 'Fn::GetAtt': ['BackupBucket26B8E51C', 'Arn'] }, + roleArn: deliveryStreamRoleArn, + encryptionConfiguration: { + noEncryptionConfig: 'NoEncryption', + }, + prefix: 'custom-prefix', + errorOutputPrefix: 'custom-error-prefix', + cloudWatchLoggingOptions: { + enabled: true, + logGroupName: { + Ref: 'BackupLogGroupB15A0768', + }, + logStreamName: { + Ref: 'BackupLogGroupS3BackupA7B3FB1E', + }, + }, + }, + }, + }, + }); + }); + + test('allows compression', () => { + const testDestination = new BackupDestination({ + backupConfiguration: { + compression: firehose.Compression.GZIP, + backupMode: firehose.BackupMode.ALL, + }, + }); + + const testDestinationConfig = testDestination.bind(stack, { deliveryStream }); + + expect(stack.resolve(testDestinationConfig)).toStrictEqual({ + properties: { + testDestinationConfig: { + backupConfig: { + bucketArn: { 'Fn::GetAtt': ['BackupBucket26B8E51C', 'Arn'] }, + roleArn: deliveryStreamRoleArn, + encryptionConfiguration: { + noEncryptionConfig: 'NoEncryption', + }, + compressionFormat: 'GZIP', + 
cloudWatchLoggingOptions: { + enabled: true, + logGroupName: { + Ref: 'BackupLogGroupB15A0768', + }, + logStreamName: { + Ref: 'BackupLogGroupS3BackupA7B3FB1E', + }, + }, + }, + }, + }, + }); + }); + + test('allows encryption', () => { + const encryptionKeyArn = 'arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab'; + const encryptionKey = kms.Key.fromKeyArn(stack, 'Backup Key', encryptionKeyArn); + const testDestination = new BackupDestination({ + backupConfiguration: { + encryptionKey, + backupMode: firehose.BackupMode.ALL, + }, + }); + + const testDestinationConfig = testDestination.bind(stack, { deliveryStream }); + + expect(stack.resolve(testDestinationConfig)).toStrictEqual({ + properties: { + testDestinationConfig: { + backupConfig: { + bucketArn: { 'Fn::GetAtt': ['BackupBucket26B8E51C', 'Arn'] }, + roleArn: deliveryStreamRoleArn, + encryptionConfiguration: { + kmsEncryptionConfig: { + awskmsKeyArn: encryptionKeyArn, + }, + }, + cloudWatchLoggingOptions: { + enabled: true, + logGroupName: { + Ref: 'BackupLogGroupB15A0768', + }, + logStreamName: { + Ref: 'BackupLogGroupS3BackupA7B3FB1E', + }, + }, + }, + }, + }, + }); + expect(stack).toHaveResourceLike('AWS::IAM::Policy', { + PolicyDocument: { + Statement: arrayWith(objectLike({ + Action: arrayWith( + 'kms:Encrypt', + 'kms:Decrypt', + ), + Resource: encryptionKeyArn, + })), + }, + Roles: ['DeliveryStreamRole'], + }); + }); + + test('creates configuration using bucket if provided', () => { + const testDestination = new BackupDestination({ backupConfiguration: { backupBucket: new s3.Bucket(stack, 'Bucket') } }); + + const testDestinationConfig = testDestination.bind(stack, { deliveryStream }); + + expect(stack).toCountResources('AWS::S3::Bucket', 1); + expect(stack.resolve(testDestinationConfig)).toStrictEqual({ + properties: { + testDestinationConfig: { + backupConfig: { + bucketArn: { 'Fn::GetAtt': ['Bucket83908E77', 'Arn'] }, + roleArn: deliveryStreamRoleArn, + encryptionConfiguration: { + noEncryptionConfig: 'NoEncryption', + }, + cloudWatchLoggingOptions: { + enabled: true, + logGroupName: { + Ref: 'BackupLogGroupB15A0768', + }, + logStreamName: { + Ref: 'BackupLogGroupS3BackupA7B3FB1E', + }, + }, + }, + }, + }, + }); + }); + + test('throws error if backup disabled and bucket provided', () => { + const testDestination = new BackupDestination({ backupConfiguration: { backupMode: firehose.BackupMode.DISABLED, backupBucket: new s3.Bucket(stack, 'Bucket') } }); + + expect(() => testDestination.bind(stack, { deliveryStream })).toThrowError('Destination backup cannot be set to DISABLED when backupBucket is provided'); + }); + + test('can configure backup prefix', () => { + const testDestination = new BackupDestination({ backupConfiguration: { backupMode: firehose.BackupMode.ALL, prefix: 'backupPrefix' } }); + + const testDestinationConfig = testDestination.bind(stack, { deliveryStream }); + + expect(stack.resolve(testDestinationConfig)).toMatchObject({ + properties: { + testDestinationConfig: { + backupConfig: { + prefix: 'backupPrefix', + }, + }, + }, + }); + }); + }); + + describe('createProcessingConfig', () => { + class ProcessingDestination extends firehose.DestinationBase { + public bind(_scope: Construct, options: firehose.DestinationBindOptions): firehose.DestinationConfig { + return { + properties: { + testDestinationConfig: { + processingConfig: this.createProcessingConfig(options.deliveryStream), + }, + }, + }; + } + } + + let lambdaFunction: lambda.IFunction; + const lambdaFunctionArn = 
'arn:aws:lambda:us-west-1:111122223333:function:my-function'; + beforeEach(() => { + lambdaFunction = lambda.Function.fromFunctionAttributes(stack, 'Processor', { + functionArn: lambdaFunctionArn, + sameEnvironment: true, + }); + }); + + test('does not create configuration by default', () => { + const testDestination = new ProcessingDestination(); + + const testDestinationConfig = testDestination.bind(stack, { deliveryStream }); + + expect(stack.resolve(testDestinationConfig)).toStrictEqual({ + properties: { + testDestinationConfig: {}, + }, + }); + }); + + test('does not create configuration if processors array is empty', () => { + const testDestination = new ProcessingDestination({ processors: [] }); + + const testDestinationConfig = testDestination.bind(stack, { deliveryStream }); + + expect(stack.resolve(testDestinationConfig)).toStrictEqual({ + properties: { + testDestinationConfig: {}, + }, + }); + }); + + test('creates configuration if a processor is specified with only required parameters', () => { + const testDestination = new ProcessingDestination({ processors: [new firehose.LambdaFunctionProcessor(lambdaFunction, {})] }); + + const testDestinationConfig = testDestination.bind(stack, { deliveryStream }); + + expect(stack.resolve(testDestinationConfig)).toStrictEqual({ + properties: { + testDestinationConfig: { + processingConfig: { + enabled: true, + processors: [ + { + parameters: [ + { + parameterName: 'RoleArn', + parameterValue: deliveryStreamRoleArn, + }, + { + parameterName: 'LambdaArn', + parameterValue: lambdaFunctionArn, + }, + ], + type: 'Lambda', + }, + ], + }, + }, + }, + }); + }); + + test('creates configuration if a processor is specified with optional parameters', () => { + const testDestination = new ProcessingDestination({ + processors: [ + new firehose.LambdaFunctionProcessor(lambdaFunction, { + bufferInterval: cdk.Duration.minutes(1), + bufferSize: cdk.Size.kibibytes(1024), + retries: 1, + }), + ], + }); + + const testDestinationConfig = testDestination.bind(stack, { deliveryStream }); + + expect(stack.resolve(testDestinationConfig)).toStrictEqual({ + properties: { + testDestinationConfig: { + processingConfig: { + enabled: true, + processors: [ + { + parameters: [ + { + parameterName: 'RoleArn', + parameterValue: deliveryStreamRoleArn, + }, + { + parameterName: 'LambdaArn', + parameterValue: lambdaFunctionArn, + }, + { + parameterName: 'BufferIntervalInSeconds', + parameterValue: '60', + }, + { + parameterName: 'BufferSizeInMBs', + parameterValue: '1', + }, + { + parameterName: 'NumberOfRetries', + parameterValue: '1', + }, + ], + type: 'Lambda', + }, + ], + }, + }, + }, + }); + }); + + test('throws an error if multiple processors are specified', () => { + const testDestination = new ProcessingDestination({ + processors: [new firehose.LambdaFunctionProcessor(lambdaFunction), new firehose.LambdaFunctionProcessor(lambdaFunction)], + }); + + expect(() => testDestination.bind(stack, { deliveryStream })).toThrowError('Only one processor is allowed per delivery stream destination'); + }); + }); + + describe('createBufferingHints', () => { + class BufferingDestination extends firehose.DestinationBase { + constructor(protected readonly props: firehose.DestinationProps = {}) { + super(props); + } + + public bind(_scope: Construct, _options: firehose.DestinationBindOptions): firehose.DestinationConfig { + return { + properties: { + testDestinationConfig: { + bufferingConfig: this.createBufferingHints(this.props.backupConfiguration?.bufferingInterval, + 
this.props.backupConfiguration?.bufferingSize), + }, + }, + }; + } + } + + test('does not create configuration by default', () => { + const testDestination = new BufferingDestination(); + + const testDestinationConfig = testDestination.bind(stack, { deliveryStream }); + + expect(stack.resolve(testDestinationConfig)).toStrictEqual({ + properties: { + testDestinationConfig: {}, + }, + }); + }); + + test('creates configuration when interval and size provided', () => { + const testDestination = new BufferingDestination({ + backupConfiguration: { bufferingInterval: cdk.Duration.minutes(1), bufferingSize: cdk.Size.kibibytes(1024) }, + }); + + const testDestinationConfig = testDestination.bind(stack, { deliveryStream }); + + expect(stack.resolve(testDestinationConfig)).toStrictEqual({ + properties: { + testDestinationConfig: { + bufferingConfig: { + intervalInSeconds: 60, + sizeInMBs: 1, + }, + }, + }, + }); + }); + + test('throws when only one of interval and size provided', () => { + expect(() => new BufferingDestination({ backupConfiguration: { bufferingInterval: cdk.Duration.minutes(1) } }).bind(stack, { deliveryStream })) + .toThrowError('If bufferingInterval is specified, bufferingSize must also be specified'); + expect(() => new BufferingDestination({ backupConfiguration: { bufferingSize: cdk.Size.kibibytes(1024) } }).bind(stack, { deliveryStream })) + .toThrowError('If bufferingSize is specified, bufferingInterval must also be specified'); + }); + + test('validates bufferingInterval', () => { + expect(() => new BufferingDestination({ + backupConfiguration: { bufferingInterval: cdk.Duration.seconds(30), bufferingSize: cdk.Size.mebibytes(1) }, + }).bind(stack, { deliveryStream })) + .toThrowError('Buffering interval must be between 60 and 900 seconds'); + expect(() => new BufferingDestination({ + backupConfiguration: { bufferingInterval: cdk.Duration.minutes(16), bufferingSize: cdk.Size.mebibytes(1) }, + }).bind(stack, { deliveryStream })) + .toThrowError('Buffering interval must be between 60 and 900 seconds'); + }); + + test('validates bufferingSize', () => { + expect(() => new BufferingDestination({ + backupConfiguration: { bufferingSize: cdk.Size.mebibytes(0), bufferingInterval: cdk.Duration.minutes(1) }, + }).bind(stack, { deliveryStream })) + .toThrowError('Buffering size must be between 1 and 128 MBs'); + expect(() => new BufferingDestination({ + backupConfiguration: { bufferingSize: cdk.Size.mebibytes(256), bufferingInterval: cdk.Duration.minutes(1) }, + }).bind(stack, { deliveryStream })) + .toThrowError('Buffering size must be between 1 and 128 MBs'); + }); + }); +}); diff --git a/packages/@aws-cdk/aws-kinesisfirehose/test/integ.delivery-stream.expected.json b/packages/@aws-cdk/aws-kinesisfirehose/test/integ.delivery-stream.expected.json new file mode 100644 index 0000000000000..2d783a749781a --- /dev/null +++ b/packages/@aws-cdk/aws-kinesisfirehose/test/integ.delivery-stream.expected.json @@ -0,0 +1,145 @@ +{ + "Resources": { + "Bucket83908E77": { + "Type": "AWS::S3::Bucket", + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "Role1ABCC5F0": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "firehose.amazonaws.com" + } + } + ], + "Version": "2012-10-17" + } + } + }, + "DeliveryStreamServiceRole964EEBCC": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": 
"sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "firehose.amazonaws.com" + } + } + ], + "Version": "2012-10-17" + } + } + }, + "DeliveryStreamF6D5572D": { + "Type": "AWS::KinesisFirehose::DeliveryStream", + "Properties": { + "DeliveryStreamType": "DirectPut", + "S3DestinationConfiguration": { + "BucketARN": { + "Fn::GetAtt": [ + "Bucket83908E77", + "Arn" + ] + }, + "RoleARN": { + "Fn::GetAtt": [ + "Role1ABCC5F0", + "Arn" + ] + } + } + }, + "DependsOn": [ + "DeliveryStreamServiceRole964EEBCC" + ] + } + }, + "Mappings": { + "DeliveryStreamFirehoseCIDRMappingE9233479": { + "af-south-1": { + "FirehoseCidrBlock": "13.244.121.224/27" + }, + "ap-east-1": { + "FirehoseCidrBlock": "18.162.221.32/27" + }, + "ap-northeast-1": { + "FirehoseCidrBlock": "13.113.196.224/27" + }, + "ap-northeast-2": { + "FirehoseCidrBlock": "13.209.1.64/27" + }, + "ap-northeast-3": { + "FirehoseCidrBlock": "13.208.177.192/27" + }, + "ap-south-1": { + "FirehoseCidrBlock": "13.232.67.32/27" + }, + "ap-southeast-1": { + "FirehoseCidrBlock": "13.228.64.192/27" + }, + "ap-southeast-2": { + "FirehoseCidrBlock": "13.210.67.224/27" + }, + "ca-central-1": { + "FirehoseCidrBlock": "35.183.92.128/27" + }, + "cn-north-1": { + "FirehoseCidrBlock": "52.81.151.32/27" + }, + "cn-northwest-1": { + "FirehoseCidrBlock": "161.189.23.64/27" + }, + "eu-central-1": { + "FirehoseCidrBlock": "35.158.127.160/27" + }, + "eu-north-1": { + "FirehoseCidrBlock": "13.53.63.224/27" + }, + "eu-south-1": { + "FirehoseCidrBlock": "15.161.135.128/27" + }, + "eu-west-1": { + "FirehoseCidrBlock": "52.19.239.192/27" + }, + "eu-west-2": { + "FirehoseCidrBlock": "18.130.1.96/27" + }, + "eu-west-3": { + "FirehoseCidrBlock": "35.180.1.96/27" + }, + "me-south-1": { + "FirehoseCidrBlock": "15.185.91.0/27" + }, + "sa-east-1": { + "FirehoseCidrBlock": "18.228.1.128/27" + }, + "us-east-1": { + "FirehoseCidrBlock": "52.70.63.192/27" + }, + "us-east-2": { + "FirehoseCidrBlock": "13.58.135.96/27" + }, + "us-gov-east-1": { + "FirehoseCidrBlock": "18.253.138.96/27" + }, + "us-gov-west-1": { + "FirehoseCidrBlock": "52.61.204.160/27" + }, + "us-west-1": { + "FirehoseCidrBlock": "13.57.135.192/27" + }, + "us-west-2": { + "FirehoseCidrBlock": "52.89.255.224/27" + } + } + } +} \ No newline at end of file diff --git a/packages/@aws-cdk/aws-kinesisfirehose/test/integ.delivery-stream.ts b/packages/@aws-cdk/aws-kinesisfirehose/test/integ.delivery-stream.ts new file mode 100644 index 0000000000000..7bcbb788399c8 --- /dev/null +++ b/packages/@aws-cdk/aws-kinesisfirehose/test/integ.delivery-stream.ts @@ -0,0 +1,37 @@ +#!/usr/bin/env node +import * as iam from '@aws-cdk/aws-iam'; +import * as s3 from '@aws-cdk/aws-s3'; +import * as cdk from '@aws-cdk/core'; +import * as constructs from 'constructs'; +import * as firehose from '../lib'; + +const app = new cdk.App(); + +const stack = new cdk.Stack(app, 'aws-cdk-firehose-delivery-stream'); + +const bucket = new s3.Bucket(stack, 'Bucket', { + removalPolicy: cdk.RemovalPolicy.DESTROY, +}); + +const role = new iam.Role(stack, 'Role', { + assumedBy: new iam.ServicePrincipal('firehose.amazonaws.com'), +}); + +const mockS3Destination: firehose.IDestination = { + bind(_scope: constructs.Construct, _options: firehose.DestinationBindOptions): firehose.DestinationConfig { + return { + properties: { + s3DestinationConfiguration: { + bucketArn: bucket.bucketArn, + roleArn: role.roleArn, + }, + }, + }; + }, +}; + +new firehose.DeliveryStream(stack, 'Delivery Stream', { + destinations: [mockS3Destination], +}); + +app.synth(); diff 
--git a/packages/@aws-cdk/aws-kinesisfirehose/test/kinesisfirehose.test.ts b/packages/@aws-cdk/aws-kinesisfirehose/test/kinesisfirehose.test.ts deleted file mode 100644 index c4505ad966984..0000000000000 --- a/packages/@aws-cdk/aws-kinesisfirehose/test/kinesisfirehose.test.ts +++ /dev/null @@ -1,6 +0,0 @@ -import '@aws-cdk/assert-internal/jest'; -import {} from '../lib'; - -test('No tests are specified for this package', () => { - expect(true).toBe(true); -}); diff --git a/packages/@aws-cdk/aws-kinesisfirehose/test/processor.test.ts b/packages/@aws-cdk/aws-kinesisfirehose/test/processor.test.ts new file mode 100644 index 0000000000000..c1c9628e795eb --- /dev/null +++ b/packages/@aws-cdk/aws-kinesisfirehose/test/processor.test.ts @@ -0,0 +1,60 @@ +import '@aws-cdk/assert-internal/jest'; +import * as iam from '@aws-cdk/aws-iam'; +import * as lambda from '@aws-cdk/aws-lambda'; +import * as cdk from '@aws-cdk/core'; +import * as firehose from '../lib'; +import { LambdaFunctionProcessor } from '../lib/processor'; + +describe('processor', () => { + let stack: cdk.Stack; + let deliveryStream: firehose.IDeliveryStream; + + beforeEach(() => { + stack = new cdk.Stack(); + deliveryStream = firehose.DeliveryStream.fromDeliveryStreamAttributes(stack, 'Delivery Stream', { + deliveryStreamName: 'mydeliverystream', + role: iam.Role.fromRoleArn(stack, 'Delivery Stream Role', 'arn:aws:iam::111122223333:role/DeliveryStreamRole'), + }); + }); + + describe('createProcessingConfig', () => { + let lambdaFunction: lambda.IFunction; + const functionArn = 'arn:aws:lambda:xx-west-1:111122223333:function:my-function'; + beforeEach(() => { + lambdaFunction = lambda.Function.fromFunctionAttributes(stack, 'Processor', { + functionArn: functionArn, + sameEnvironment: true, + }); + }); + + test('correctly sets processor type and identifier', () => { + const processor = new LambdaFunctionProcessor(lambdaFunction); + + const processorConfig = processor.bind(deliveryStream); + + expect(stack.resolve(processorConfig)).toStrictEqual({ + processorType: 'Lambda', + processorIdentifier: { + parameterName: 'LambdaArn', + parameterValue: functionArn, + }, + }); + }); + + test('passes configuration through', () => { + const processor = new LambdaFunctionProcessor(lambdaFunction, { + bufferInterval: cdk.Duration.minutes(10), + bufferSize: cdk.Size.mebibytes(64), + retries: 5, + }); + + const processorConfig = processor.bind(deliveryStream); + + expect(stack.resolve(processorConfig)).toMatchObject({ + bufferInterval: cdk.Duration.minutes(10), + bufferSize: cdk.Size.mebibytes(64), + retries: 5, + }); + }); + }); +}); diff --git a/packages/@aws-cdk/region-info/build-tools/fact-tables.ts b/packages/@aws-cdk/region-info/build-tools/fact-tables.ts index c2ce689f3aaf3..28ec007b00c84 100644 --- a/packages/@aws-cdk/region-info/build-tools/fact-tables.ts +++ b/packages/@aws-cdk/region-info/build-tools/fact-tables.ts @@ -158,3 +158,32 @@ export const APPMESH_ECR_ACCOUNTS: { [region: string]: string } = { 'us-west-1': '840364872350', 'us-west-2': '840364872350', }; + +// https://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-rs-vpc +export const FIREHOSE_CIDR_BLOCKS: { [region: string]: string } = { + 'af-south-1': '13.244.121.224', + 'ap-east-1': '18.162.221.32', + 'ap-northeast-1': '13.113.196.224', + 'ap-northeast-2': '13.209.1.64', + 'ap-northeast-3': '13.208.177.192', + 'ap-south-1': '13.232.67.32', + 'ap-southeast-1': '13.228.64.192', + 'ap-southeast-2': '13.210.67.224', + 'ca-central-1': '35.183.92.128', + 
'cn-north-1': '52.81.151.32', + 'cn-northwest-1': '161.189.23.64', + 'eu-central-1': '35.158.127.160', + 'eu-north-1': '13.53.63.224', + 'eu-south-1': '15.161.135.128', + 'eu-west-1': '52.19.239.192', + 'eu-west-2': '18.130.1.96', + 'eu-west-3': '35.180.1.96', + 'me-south-1': '15.185.91.0', + 'sa-east-1': '18.228.1.128', + 'us-east-1': '52.70.63.192', + 'us-east-2': '13.58.135.96', + 'us-gov-east-1': '18.253.138.96', + 'us-gov-west-1': '52.61.204.160', + 'us-west-1': '13.57.135.192', + 'us-west-2': '52.89.255.224', +}; diff --git a/packages/@aws-cdk/region-info/build-tools/generate-static-data.ts b/packages/@aws-cdk/region-info/build-tools/generate-static-data.ts index d23704b6d0062..63455b72ef665 100644 --- a/packages/@aws-cdk/region-info/build-tools/generate-static-data.ts +++ b/packages/@aws-cdk/region-info/build-tools/generate-static-data.ts @@ -3,14 +3,15 @@ import * as fs from 'fs-extra'; import { Default } from '../lib/default'; import { AWS_REGIONS, AWS_SERVICES } from './aws-entities'; import { - APPMESH_ECR_ACCOUNTS, AWS_CDK_METADATA, AWS_OLDER_REGIONS, DLC_REPOSITORY_ACCOUNTS, ELBV2_ACCOUNTS, PARTITION_MAP, - ROUTE_53_BUCKET_WEBSITE_ZONE_IDS, + APPMESH_ECR_ACCOUNTS, AWS_CDK_METADATA, AWS_OLDER_REGIONS, DLC_REPOSITORY_ACCOUNTS, ELBV2_ACCOUNTS, FIREHOSE_CIDR_BLOCKS, + PARTITION_MAP, ROUTE_53_BUCKET_WEBSITE_ZONE_IDS, } from './fact-tables'; async function main(): Promise { checkRegions(APPMESH_ECR_ACCOUNTS); checkRegions(DLC_REPOSITORY_ACCOUNTS); checkRegions(ELBV2_ACCOUNTS); + checkRegions(FIREHOSE_CIDR_BLOCKS); checkRegions(ROUTE_53_BUCKET_WEBSITE_ZONE_IDS); const lines = [ @@ -61,6 +62,11 @@ async function main(): Promise { registerFact(region, 'APPMESH_ECR_ACCOUNT', APPMESH_ECR_ACCOUNTS[region]); + const firehoseCidrBlock = FIREHOSE_CIDR_BLOCKS[region]; + if (firehoseCidrBlock) { + registerFact(region, 'FIREHOSE_CIDR_BLOCK', `${FIREHOSE_CIDR_BLOCKS[region]}/27`); + } + const vpcEndpointServiceNamePrefix = `${domainSuffix.split('.').reverse().join('.')}.vpce`; registerFact(region, 'VPC_ENDPOINT_SERVICE_NAME_PREFIX', vpcEndpointServiceNamePrefix); diff --git a/packages/@aws-cdk/region-info/lib/fact.ts b/packages/@aws-cdk/region-info/lib/fact.ts index 3b5e57835cc7e..6ccef0e8b794f 100644 --- a/packages/@aws-cdk/region-info/lib/fact.ts +++ b/packages/@aws-cdk/region-info/lib/fact.ts @@ -152,6 +152,11 @@ export class FactName { */ public static readonly APPMESH_ECR_ACCOUNT = 'appMeshRepositoryAccount'; + /** + * The CIDR block used by Kinesis Data Firehose servers. + */ + public static readonly FIREHOSE_CIDR_BLOCK = 'firehoseCidrBlock'; + /** * The name of the regional service principal for a given service. * diff --git a/packages/@aws-cdk/region-info/lib/region-info.ts b/packages/@aws-cdk/region-info/lib/region-info.ts index 042b3cec9c177..9e28120a8da62 100644 --- a/packages/@aws-cdk/region-info/lib/region-info.ts +++ b/packages/@aws-cdk/region-info/lib/region-info.ts @@ -117,4 +117,11 @@ export class RegionInfo { public get appMeshRepositoryAccount(): string | undefined { return Fact.find(this.name, FactName.APPMESH_ECR_ACCOUNT); } + + /** + * The CIDR block used by Kinesis Data Firehose servers. 
+ */ + public get firehoseCidrBlock(): string | undefined { + return Fact.find(this.name, FactName.FIREHOSE_CIDR_BLOCK); + } } diff --git a/packages/aws-cdk-lib/package.json b/packages/aws-cdk-lib/package.json index 8761fbbf8e44e..8451b4e93d3dc 100644 --- a/packages/aws-cdk-lib/package.json +++ b/packages/aws-cdk-lib/package.json @@ -239,6 +239,7 @@ "@aws-cdk/aws-kinesisanalytics": "0.0.0", "@aws-cdk/aws-kinesisanalytics-flink": "0.0.0", "@aws-cdk/aws-kinesisfirehose": "0.0.0", + "@aws-cdk/aws-kinesisfirehose-destinations": "0.0.0", "@aws-cdk/aws-kms": "0.0.0", "@aws-cdk/aws-lakeformation": "0.0.0", "@aws-cdk/aws-lambda": "0.0.0", diff --git a/packages/decdk/package.json b/packages/decdk/package.json index d3db39216e69b..3f80405b71cba 100644 --- a/packages/decdk/package.json +++ b/packages/decdk/package.json @@ -146,6 +146,7 @@ "@aws-cdk/aws-kinesisanalytics": "0.0.0", "@aws-cdk/aws-kinesisanalytics-flink": "0.0.0", "@aws-cdk/aws-kinesisfirehose": "0.0.0", + "@aws-cdk/aws-kinesisfirehose-destinations": "0.0.0", "@aws-cdk/aws-kms": "0.0.0", "@aws-cdk/aws-lakeformation": "0.0.0", "@aws-cdk/aws-lambda": "0.0.0", diff --git a/packages/monocdk/package.json b/packages/monocdk/package.json index a075a584e92bd..159d3d3e44d47 100644 --- a/packages/monocdk/package.json +++ b/packages/monocdk/package.json @@ -240,6 +240,7 @@ "@aws-cdk/aws-kinesisanalytics": "0.0.0", "@aws-cdk/aws-kinesisanalytics-flink": "0.0.0", "@aws-cdk/aws-kinesisfirehose": "0.0.0", + "@aws-cdk/aws-kinesisfirehose-destinations": "0.0.0", "@aws-cdk/aws-kms": "0.0.0", "@aws-cdk/aws-lakeformation": "0.0.0", "@aws-cdk/aws-lambda": "0.0.0", diff --git a/tools/pkglint/lib/rules.ts b/tools/pkglint/lib/rules.ts index a696b43bceabe..add3c58fee4b2 100644 --- a/tools/pkglint/lib/rules.ts +++ b/tools/pkglint/lib/rules.ts @@ -1634,6 +1634,7 @@ export class NoExperimentalDependents extends ValidationRule { ['@aws-cdk/aws-apigatewayv2-integrations', ['@aws-cdk/aws-apigatewayv2']], ['@aws-cdk/aws-apigatewayv2-authorizers', ['@aws-cdk/aws-apigatewayv2']], ['@aws-cdk/aws-events-targets', ['@aws-cdk/aws-kinesisfirehose']], + ['@aws-cdk/aws-kinesisfirehose-destinations', ['@aws-cdk/aws-kinesisfirehose']], ]); private readonly excludedModules = ['@aws-cdk/cloudformation-include'];
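Taken together, these changes introduce the `DeliveryStream` L2 construct with its destination, processor, encryption, grant, metric, and connections surface. The following is a minimal end-to-end usage sketch inferred from the unit tests and the integ app above — the inline `IDestination` literal mirrors the tests' mock destination, and a real application would instead take a destination from the new `@aws-cdk/aws-kinesisfirehose-destinations` package:

import * as iam from '@aws-cdk/aws-iam';
import * as s3 from '@aws-cdk/aws-s3';
import * as cdk from '@aws-cdk/core';
import { Construct } from 'constructs';
import * as firehose from '@aws-cdk/aws-kinesisfirehose';

const app = new cdk.App();
const stack = new cdk.Stack(app, 'FirehoseUsageSketch');

const bucket = new s3.Bucket(stack, 'Bucket', { removalPolicy: cdk.RemovalPolicy.DESTROY });
const role = new iam.Role(stack, 'Role', {
  assumedBy: new iam.ServicePrincipal('firehose.amazonaws.com'),
});

// Same ad-hoc destination shape the tests use: bind() hands raw S3 destination
// properties back to the delivery stream.
const s3Destination: firehose.IDestination = {
  bind(_scope: Construct, _options: firehose.DestinationBindOptions): firehose.DestinationConfig {
    return {
      properties: {
        s3DestinationConfiguration: {
          bucketArn: bucket.bucketArn,
          roleArn: role.roleArn,
        },
      },
    };
  },
};

// Exactly one destination is allowed per stream; CUSTOMER_MANAGED encryption
// provisions a dedicated KMS key and grants the service role kms:Encrypt/Decrypt.
const deliveryStream = new firehose.DeliveryStream(stack, 'Delivery Stream', {
  destinations: [s3Destination],
  encryption: firehose.StreamEncryption.CUSTOMER_MANAGED,
});

// Producers receive firehose:PutRecord and firehose:PutRecordBatch on the stream ARN.
const producer = new iam.Role(stack, 'Producer', {
  assumedBy: new iam.ServicePrincipal('lambda.amazonaws.com'),
});
deliveryStream.grantPutRecords(producer);

// Metrics are emitted under the AWS/Firehose namespace with the stream name as dimension.
const incomingRecords = deliveryStream.metricIncomingRecords();

app.synth();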