diff --git a/.mergify.yml b/.mergify.yml index 96cc7bbb7c21f..9be52c560afde 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -17,7 +17,6 @@ pull_request_rules: method: squash strict_method: merge commit_message: title+body - delete_head_branch: {} conditions: - base!=release - -title~=(WIP|wip) @@ -41,7 +40,6 @@ pull_request_rules: method: squash strict_method: merge commit_message: title+body - delete_head_branch: {} conditions: - base!=release - -title~=(WIP|wip) @@ -67,7 +65,6 @@ pull_request_rules: method: merge strict_method: merge commit_message: title+body - delete_head_branch: {} conditions: - -title~=(WIP|wip) - -label~=(blocked|do-not-merge) @@ -115,7 +112,6 @@ pull_request_rules: # It's not dangerous: GitHub branch protection settings prevent merging stale branches. strict: false method: squash - delete_head_branch: {} conditions: - -title~=(WIP|wip) - -label~=(blocked|do-not-merge) diff --git a/CHANGELOG.md b/CHANGELOG.md index 77f99f927709b..ca071fd1212bf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,66 @@ All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines. +## [1.63.0](https://github.com/aws/aws-cdk/compare/v1.62.0...v1.63.0) (2020-09-12) + + +### ⚠ BREAKING CHANGES TO EXPERIMENTAL FEATURES + +* **appsync:** force `apiKeyConfig` require a Expiration class instead of string +- **appsync**: Parameter `apiKeyConfig` takes `Expiration` class instead of `string` +* **core:** custom implementations of `IStackSynthesizer` +must now implement `synthesize()` instead of +`synthesizeStackArtifacts()`. 
+* **aws-batch:** Changed type of `ComputeResources.computeResourcesTags` from `Tag` to map + +### Features + +* **appsync:** add authorization config to the HttpDataSource ([#10171](https://github.com/aws/aws-cdk/issues/10171)) ([b2cc277](https://github.com/aws/aws-cdk/commit/b2cc277971aed36aa03e720b8fea093ef14bd9be)), closes [#9971](https://github.com/aws/aws-cdk/issues/9971) [#9934](https://github.com/aws/aws-cdk/issues/9934) +* **appsync:** add support for subscriptions for code-first schema generation ([#10078](https://github.com/aws/aws-cdk/issues/10078)) ([65db131](https://github.com/aws/aws-cdk/commit/65db1312d6b395700c203edeb50248b5e8a0c036)), closes [#9345](https://github.com/aws/aws-cdk/issues/9345) +* **appsync:** implement directives for code-first approach ([#9973](https://github.com/aws/aws-cdk/issues/9973)) ([088cd48](https://github.com/aws/aws-cdk/commit/088cd4857f2f3f29873bdf5f7d1c4b25b3d49372)), closes [#9879](https://github.com/aws/aws-cdk/issues/9879) +* **appsync:** support enumeration types for code-first approach ([#10023](https://github.com/aws/aws-cdk/issues/10023)) ([30a5b80](https://github.com/aws/aws-cdk/commit/30a5b806f265d88e88d2faea623e1ce024c019c3)), closes [#10023](https://github.com/aws/aws-cdk/pull/10023#issuecomment-690773916) +* **appsync:** support union types for code-first approach ([#10025](https://github.com/aws/aws-cdk/issues/10025)) ([28a9834](https://github.com/aws/aws-cdk/commit/28a9834fb6cbacbd3e0ef97441fa0fb6e45120b1)) +* **cfn-include:** add support for Hooks ([#10143](https://github.com/aws/aws-cdk/issues/10143)) ([4de68c0](https://github.com/aws/aws-cdk/commit/4de68c0a5b0e7568a5e222e8a29703186b961ec7)), closes [#9713](https://github.com/aws/aws-cdk/issues/9713) +* **cfn-include:** allow renaming the template elements logical IDs ([#10169](https://github.com/aws/aws-cdk/issues/10169)) ([cf746a0](https://github.com/aws/aws-cdk/commit/cf746a07be171b35bb5e5514287c628642436df3)), closes 
[#9714](https://github.com/aws/aws-cdk/issues/9714) +* **chatbot:** log retention support and metrics utility methods ([#10137](https://github.com/aws/aws-cdk/issues/10137)) ([0f0d1e7](https://github.com/aws/aws-cdk/commit/0f0d1e74fb71a7b415aa9a5d02258b7c5933536b)), closes [#10135](https://github.com/aws/aws-cdk/issues/10135) +* **cli:** support credential_source in aws shared config file ([#10272](https://github.com/aws/aws-cdk/issues/10272)) ([940a443](https://github.com/aws/aws-cdk/commit/940a443ba457ddaebd85b44fa63cd9b15201c18a)) +* **codebuild:** add git submodule options of codebuild ([#10283](https://github.com/aws/aws-cdk/issues/10283)) ([698e5ef](https://github.com/aws/aws-cdk/commit/698e5ef3568880474adf73a38c21dad919b62b7e)), closes [#10271](https://github.com/aws/aws-cdk/issues/10271) +* **eks:** arm64 support ([#9875](https://github.com/aws/aws-cdk/issues/9875)) ([ffb84c6](https://github.com/aws/aws-cdk/commit/ffb84c62d03e40daa0e07c451c31bdea2dd0816c)), closes [#9915](https://github.com/aws/aws-cdk/issues/9915) +* **eks:** bump aws-node-termination-handler to 0.9.5 ([#10278](https://github.com/aws/aws-cdk/issues/10278)) ([8cfc190](https://github.com/aws/aws-cdk/commit/8cfc190daaa112de448c19716df2c10eeb29695c)), closes [aws/aws-cdk#10277](https://github.com/aws/aws-cdk/issues/10277) +* **eks:** managed nodegroup with custom AMI and launch template support ([#9881](https://github.com/aws/aws-cdk/issues/9881)) ([5c294fb](https://github.com/aws/aws-cdk/commit/5c294fbc1149167a70f35c7870ff1995042839b7)), closes [#9873](https://github.com/aws/aws-cdk/issues/9873) +* **elasticloadbalancingv2:** more health check validations to NLB target group ([#3703](https://github.com/aws/aws-cdk/issues/3703)) ([#10205](https://github.com/aws/aws-cdk/issues/10205)) ([e3f3332](https://github.com/aws/aws-cdk/commit/e3f333212b7c9f7deb836be8ecd71c7a7a06c394)) +* **elasticloadbalancingv2:** multiple security groups for ALBs ([#10244](https://github.com/aws/aws-cdk/issues/10244)) 
([1ebf362](https://github.com/aws/aws-cdk/commit/1ebf36206b1e6a98a9a708efbe3ba3bfb1d3f05e)), closes [#5138](https://github.com/aws/aws-cdk/issues/5138) +* **lambda-nodejs:** improved project root detection ([#10182](https://github.com/aws/aws-cdk/issues/10182)) ([cce83dc](https://github.com/aws/aws-cdk/commit/cce83dcc163da8f71b8c2f95818d7d8f25db95f4)), closes [#10174](https://github.com/aws/aws-cdk/issues/10174) +* **pipelines:** adding IAM permissions to ShellScriptAction ([#10149](https://github.com/aws/aws-cdk/issues/10149)) ([ec15485](https://github.com/aws/aws-cdk/commit/ec154850b6bc2f485241aa1c0ea41400785d8a41)), closes [#9600](https://github.com/aws/aws-cdk/issues/9600) +* **rds:** database clusters from snapshots ([#10130](https://github.com/aws/aws-cdk/issues/10130)) ([915eb4b](https://github.com/aws/aws-cdk/commit/915eb4be3946652a00b7496b9e8610169852f27b)), closes [#4379](https://github.com/aws/aws-cdk/issues/4379) +* **rds:** deprecate OracleSE and OracleSE1 engine versions ([#10241](https://github.com/aws/aws-cdk/issues/10241)) ([562f891](https://github.com/aws/aws-cdk/commit/562f8913dae7b77a1516a60cc1ff277ac42fb9e0)), closes [#9249](https://github.com/aws/aws-cdk/issues/9249) +* **rds:** metrics for clusters ([#10162](https://github.com/aws/aws-cdk/issues/10162)) ([49f6034](https://github.com/aws/aws-cdk/commit/49f6034ee2b514f870819b7b48880246750b6fd2)), closes [#5212](https://github.com/aws/aws-cdk/issues/5212) +* **route53-patterns:** support IPv6 in HttpsRedirect ([#10203](https://github.com/aws/aws-cdk/issues/10203)) ([a1f6e1b](https://github.com/aws/aws-cdk/commit/a1f6e1be8a21625b1723b51277457f598a3be23e)) +* **secrets-manager:** exclude characters for password rotation applications ([#10110](https://github.com/aws/aws-cdk/issues/10110)) ([1260d52](https://github.com/aws/aws-cdk/commit/1260d5215d474d6edc2460ffe9658552d17ab239)), closes [#4144](https://github.com/aws/aws-cdk/issues/4144) + + +### Bug Fixes + +* **appsync:** strongly type `expires` 
prop in apiKeyConfig ([#9122](https://github.com/aws/aws-cdk/issues/9122)) ([287f808](https://github.com/aws/aws-cdk/commit/287f808cf34870295e4032995fc083ac204b64c3)), closes [#8698](https://github.com/aws/aws-cdk/issues/8698) +* **aws-batch:** `computeResources` tags are not configured properly ([#10209](https://github.com/aws/aws-cdk/issues/10209)) ([40222ef](https://github.com/aws/aws-cdk/commit/40222ef398fd1fb63b3b886624d5bb40562142c6)), closes [#7350](https://github.com/aws/aws-cdk/issues/7350) +* **cfn-include:** correctly parse YAML strings in short-form GetAtt ([#10197](https://github.com/aws/aws-cdk/issues/10197)) ([a388d70](https://github.com/aws/aws-cdk/commit/a388d70f38a84195bbe5e580220b5cd21ebde624)), closes [#10177](https://github.com/aws/aws-cdk/issues/10177) +* **cfn-include:** correctly substitute falsy parameter values ([#10195](https://github.com/aws/aws-cdk/issues/10195)) ([8791f88](https://github.com/aws/aws-cdk/commit/8791f8877165c87f6ef4967376aa53ffeb696013)), closes [#10107](https://github.com/aws/aws-cdk/issues/10107) +* **cli:** metadata not recorded for templates >50k ([#10184](https://github.com/aws/aws-cdk/issues/10184)) ([dfd2baf](https://github.com/aws/aws-cdk/commit/dfd2baf8b9d4cc930a1c5dc88c178ffe7d1121b0)) +* **cli:** simplify lib template ([#10175](https://github.com/aws/aws-cdk/issues/10175)) ([fc3ec9b](https://github.com/aws/aws-cdk/commit/fc3ec9b20edb2442bab456acf55c5cff7efb11b9)) +* **cli:** unable to set termination protection for pipeline stacks ([#9938](https://github.com/aws/aws-cdk/issues/9938)) ([a00a4ee](https://github.com/aws/aws-cdk/commit/a00a4ee162f287b5db45e73051ecdf0e32009def)) +* **cloudfront:** comment for origin access identity is too long ([#10266](https://github.com/aws/aws-cdk/issues/10266)) ([495aeb9](https://github.com/aws/aws-cdk/commit/495aeb96fd530ba0f73eeeab5a5b75b4a7390c99)), closes [#10211](https://github.com/aws/aws-cdk/issues/10211) +* **codepipeline:** cross-region support stack requires 
bootstrapping ([#10217](https://github.com/aws/aws-cdk/issues/10217)) ([b5ff4d6](https://github.com/aws/aws-cdk/commit/b5ff4d6673629ebabceb72fc83464001267c328e)), closes [#10215](https://github.com/aws/aws-cdk/issues/10215) +* **core:** DefaultSynthesizer breaks this.node.setContext() on Stack ([#10246](https://github.com/aws/aws-cdk/issues/10246)) ([61865aa](https://github.com/aws/aws-cdk/commit/61865aaef682be6727d7768213260c7a95d799f8)) +* **core:** Stacks render CloudFormation elements in nested Stages ([#10156](https://github.com/aws/aws-cdk/issues/10156)) ([5f36f6b](https://github.com/aws/aws-cdk/commit/5f36f6b5b2126de763b2e156fa2a9b6604f40f00)), closes [#9792](https://github.com/aws/aws-cdk/issues/9792) [#9669](https://github.com/aws/aws-cdk/issues/9669) +* **custom-resources:** deleting custom resource fails when using two or more ([#10012](https://github.com/aws/aws-cdk/issues/10012)) ([8d23f24](https://github.com/aws/aws-cdk/commit/8d23f248c7496d56c352a3db4b9d4bcccbdfe1c4)) +* **ec2:** cfn-init user data hash not updated if file asset changes ([#10216](https://github.com/aws/aws-cdk/issues/10216)) ([0d7ca63](https://github.com/aws/aws-cdk/commit/0d7ca63edb11a80440732d6327d0fe3ed685a993)), closes [#10206](https://github.com/aws/aws-cdk/issues/10206) +* **eks:** restricted public access breaks cluster functionality ([#10103](https://github.com/aws/aws-cdk/issues/10103)) ([a1b5bf6](https://github.com/aws/aws-cdk/commit/a1b5bf6f5a77e236169f95159cf62a2ecfb25bc4)) +* **kms:** do not change the principal to root for imported resources in dependent Stacks ([#10299](https://github.com/aws/aws-cdk/issues/10299)) ([54dfe83](https://github.com/aws/aws-cdk/commit/54dfe8374afeaff400e85f43185462b9fd9c8ac2)), closes [#10166](https://github.com/aws/aws-cdk/issues/10166) +* **lambda-nodejs:** permission denied, mkdir '/.parcel-cache' ([#10181](https://github.com/aws/aws-cdk/issues/10181)) 
([20f5535](https://github.com/aws/aws-cdk/commit/20f5535bee8d0e1e7aa5b0c3bad780666d64bc87)) +* **pipelines:** changing synth action doesn't restart pipeline ([#10176](https://github.com/aws/aws-cdk/issues/10176)) ([14c8a98](https://github.com/aws/aws-cdk/commit/14c8a9878d092aac857655c2e5c8684015c84b29)), closes [#9458](https://github.com/aws/aws-cdk/issues/9458) +* **pipelines:** check for an empty Stage object ([#10153](https://github.com/aws/aws-cdk/issues/10153)) ([cec20c8](https://github.com/aws/aws-cdk/commit/cec20c8f68676c2bf3c8c3246334c82f50261358)), closes [#9559](https://github.com/aws/aws-cdk/issues/9559) +* **rds:** Make most DatabaseClusterAttributes properties optional ([#10291](https://github.com/aws/aws-cdk/issues/10291)) ([0653e6b](https://github.com/aws/aws-cdk/commit/0653e6bead37ed92e47295010645009e3b97e246)), closes [#3587](https://github.com/aws/aws-cdk/issues/3587) + ## [1.62.0](https://github.com/aws/aws-cdk/compare/v1.61.1...v1.62.0) (2020-09-03) @@ -10,6 +70,7 @@ All notable changes to this project will be documented in this file. See [standa * **eks:** when importing EKS clusters using `eks.Cluster.fromClusterAttributes`, the `clusterArn` attribute is not supported anymore, and will always be derived from `clusterName`. * **eks**: Only a single `eks.Cluster` is allowed per CloudFormation stack. * **eks**: The `securityGroups` attribute of `ClusterAttributes` is now `securityGroupIds`. +* **cli**: `--qualifier` must be alphanumeric and not longer than 10 characters when bootstrapping using `newStyleStackSynthesis`. 
### Features diff --git a/allowed-breaking-changes.txt b/allowed-breaking-changes.txt index 56ecdf3636d0f..dc8b4b8f8c9cd 100644 --- a/allowed-breaking-changes.txt +++ b/allowed-breaking-changes.txt @@ -28,3 +28,10 @@ changed-type:@aws-cdk/aws-codedeploy.ServerDeploymentGroup.autoScalingGroups change-return-type:@aws-cdk/aws-ecs.ContainerDefinition.renderContainerDefinition change-return-type:@aws-cdk/aws-ecs.FirelensLogRouter.renderContainerDefinition change-return-type:@aws-cdk/aws-ecs.LinuxParameters.renderLinuxParameters + +# These were accidentally not marked @experimental +removed:@aws-cdk/core.BootstraplessSynthesizer.synthesizeStackArtifacts +removed:@aws-cdk/core.DefaultStackSynthesizer.synthesizeStackArtifacts +removed:@aws-cdk/core.LegacyStackSynthesizer.synthesizeStackArtifacts +removed:@aws-cdk/core.NestedStackSynthesizer.synthesizeStackArtifacts +removed:@aws-cdk/core.IStackSynthesizer.synthesizeStackArtifacts diff --git a/lerna.json b/lerna.json index 55e11b15eb0cf..41116c625ce66 100644 --- a/lerna.json +++ b/lerna.json @@ -10,5 +10,5 @@ "tools/*" ], "rejectCycles": "true", - "version": "1.62.0" + "version": "1.63.0" } diff --git a/package.json b/package.json index dffd3026d0f12..1367e774bff77 100644 --- a/package.json +++ b/package.json @@ -69,6 +69,18 @@ "@aws-cdk/core/minimatch/**", "@aws-cdk/cx-api/semver", "@aws-cdk/cx-api/semver/**", + "aws-cdk-lib/case", + "aws-cdk-lib/case/**", + "aws-cdk-lib/fs-extra", + "aws-cdk-lib/fs-extra/**", + "aws-cdk-lib/jsonschema", + "aws-cdk-lib/jsonschema/**", + "aws-cdk-lib/minimatch", + "aws-cdk-lib/minimatch/**", + "aws-cdk-lib/semver", + "aws-cdk-lib/semver/**", + "aws-cdk-lib/yaml", + "aws-cdk-lib/yaml/**", "monocdk-experiment/case", "monocdk-experiment/case/**", "monocdk-experiment/fs-extra", diff --git a/packages/@aws-cdk/assert/README.md b/packages/@aws-cdk/assert/README.md index cb953b02a7710..1c8eb3bd30c05 100644 --- a/packages/@aws-cdk/assert/README.md +++ b/packages/@aws-cdk/assert/README.md @@ -114,6 
+114,65 @@ expect(stack).to(haveResourceLike('AWS::IAM::Policy', { })); ``` +### Capturing values from a match + +Special `Capture` matchers exist to capture values encountered during a match. These can be +used for two typical purposes: + +* Apply additional assertions to the values found during a matching operation. +* Use the value found during a matching operation in a new matching operation. + +`Capture` matchers take an inner matcher as an argument, and will only capture the value +if the inner matcher succeeds in matching the given value. + +Here's an example which asserts that a policy for `RoleA` contains two statements +with *different* ARNs (without caring what those ARNs might be), and that +a policy for `RoleB` *also* has a statement for one of those ARNs (again, without +caring what the ARN might be): + +```ts +const arn1 = Capture.aString(); +const arn2 = Capture.aString(); + +expect(stack).to(haveResourceLike('AWS::IAM::Policy', { + Roles: ['RoleA'], + PolicyDocument: { + Statement: [ + objectLike({ + Resource: [arn1.capture()], + }), + objectLike({ + Resource: [arn2.capture()], + }), + ], + }, +})); + +// Don't care about the values as long as they are not the same +expect(arn1.capturedValue).not.toEqual(arn2.capturedValue); + +expect(stack).to(haveResourceLike('AWS::IAM::Policy', { + Roles: ['RoleB'], + PolicyDocument: { + Statement: [ + objectLike({ + // This ARN must be the same as ARN1 above. + Resource: [arn1.capturedValue] + }), + ], + }, +})); +``` + +NOTE: `Capture` look somewhat like *bindings* in other pattern matching +libraries you might be used to, but they are far simpler and very +deterministic. 
In particular, they don't do unification: if the same Capture +is either used multiple times in the same structure expression or matches +multiple times, no restarting of the match is done to make them all match the +same value: the last value encountered by the `Capture` (as determined by the +behavior of the matchers around it) is stored into it and will be the one +available after the match has completed. + ### Check number of resources If you want to assert that `n` number of resources of a particular type exist, with or without specific properties: diff --git a/packages/@aws-cdk/assert/lib/assertions/have-resource-matchers.ts b/packages/@aws-cdk/assert/lib/assertions/have-resource-matchers.ts index 13157b595751f..f5edc878f95fb 100644 --- a/packages/@aws-cdk/assert/lib/assertions/have-resource-matchers.ts +++ b/packages/@aws-cdk/assert/lib/assertions/have-resource-matchers.ts @@ -281,6 +281,104 @@ export function notMatching(matcher: any): PropertyMatcher { }); } +export type TypeValidator = (x: any) => x is T; + +/** + * Captures a value onto an object if it matches a given inner matcher + * + * @example + * + * const someValue = Capture.aString(); + * expect(stack).toHaveResource({ + * // ... 
+ * Value: someValue.capture(stringMatching('*a*')), + * }); + * console.log(someValue.capturedValue); + */ +export class Capture { + /** + * A Capture object that captures any type + */ + public static anyType(): Capture { + return new Capture(); + } + + /** + * A Capture object that captures a string type + */ + public static aString(): Capture { + return new Capture((x: any): x is string => { + if (typeof x !== 'string') { + throw new Error(`Expected to capture a string, got '${x}'`); + } + return true; + }); + } + + /** + * A Capture object that captures a custom type + */ + public static a(validator: TypeValidator): Capture { + return new Capture(validator); + } + + private _value?: T; + private _didCapture = false; + private _wasInvoked = false; + + protected constructor(private readonly typeValidator?: TypeValidator) { + } + + /** + * Capture the value if the inner matcher successfully matches it + * + * If no matcher is given, `anything()` is assumed. + * + * And exception will be thrown if the inner matcher returns `true` and + * the value turns out to be of a different type than the `Capture` object + * is expecting. 
+ */ + public capture(matcher?: any): PropertyMatcher { + if (matcher === undefined) { + matcher = anything(); + } + + return annotateMatcher({ $capture: matcher }, (value: any, failure: InspectionFailure) => { + this._wasInvoked = true; + const result = matcherFrom(matcher)(value, failure); + if (result) { + if (this.typeValidator && !this.typeValidator(value)) { + throw new Error(`Value not of the expected type: ${value}`); + } + this._didCapture = true; + this._value = value; + } + return result; + }); + } + + /** + * Whether a value was successfully captured + */ + public get didCapture() { + return this._didCapture; + } + + /** + * Return the value that was captured + * + * Throws an exception if now value was captured + */ + public get capturedValue(): T { + // When this module is ported to jsii, the type parameter will obviously + // have to be dropped and this will have to turn into an `any`. + if (!this.didCapture) { + throw new Error(`Did not capture a value: ${this._wasInvoked ? 
'inner matcher failed' : 'never invoked'}`); + } + return this._value!; + } +} + /** * Match on the innards of a JSON string, instead of the complete string */ diff --git a/packages/@aws-cdk/assert/test/have-resource.test.ts b/packages/@aws-cdk/assert/test/have-resource.test.ts index ad114a485dead..7772e5f7d193f 100644 --- a/packages/@aws-cdk/assert/test/have-resource.test.ts +++ b/packages/@aws-cdk/assert/test/have-resource.test.ts @@ -2,7 +2,7 @@ import { writeFileSync } from 'fs'; import { join } from 'path'; import * as cxschema from '@aws-cdk/cloud-assembly-schema'; import * as cxapi from '@aws-cdk/cx-api'; -import { ABSENT, arrayWith, exactValue, expect as cdkExpect, haveResource, haveResourceLike } from '../lib/index'; +import { ABSENT, arrayWith, exactValue, expect as cdkExpect, haveResource, haveResourceLike, Capture, anything } from '../lib/index'; test('support resource with no properties', () => { const synthStack = mkStack({ @@ -238,6 +238,19 @@ describe('property absence', () => { })); }).not.toThrowError(); }); + + test('test capturing', () => { + const synthStack = mkSomeResource({ + Prop: 'somevalue', + }); + + const propValue = Capture.aString(); + cdkExpect(synthStack).to(haveResourceLike('Some::Resource', { + Prop: propValue.capture(anything()), + })); + + expect(propValue.capturedValue).toEqual('somevalue'); + }); }); function mkStack(template: any): cxapi.CloudFormationStackArtifact { diff --git a/packages/@aws-cdk/aws-appsync/README.md b/packages/@aws-cdk/aws-appsync/README.md index 5ab39d8ec6375..0c2896a6cd2e1 100644 --- a/packages/@aws-cdk/aws-appsync/README.md +++ b/packages/@aws-cdk/aws-appsync/README.md @@ -18,8 +18,10 @@ APIs that use GraphQL. ### Example +### DynamoDB + Example of a GraphQL API with `AWS_IAM` authorization resolving into a DynamoDb -backend data source. +backend data source. 
GraphQL schema file `schema.graphql`: @@ -82,6 +84,83 @@ demoDS.createResolver({ }); ``` +#### HTTP Endpoints +GraphQL schema file `schema.graphql`: + +```gql +type job { + id: String! + version: String! +} + +input DemoInput { + version: String! +} + +type Mutation { + callStepFunction(input: DemoInput!): job +} +``` + +GraphQL request mapping template `request.vtl`: + +``` +{ + "version": "2018-05-29", + "method": "POST", + "resourcePath": "/", + "params": { + "headers": { + "content-type": "application/x-amz-json-1.0", + "x-amz-target":"AWSStepFunctions.StartExecution" + }, + "body": { + "stateMachineArn": "", + "input": "{ \"id\": \"$context.arguments.id\" }" + } + } +} +``` + +GraphQL response mapping template `response.vtl`: + +``` +{ + "id": "${context.result.id}" +} +``` + +CDK stack file `app-stack.ts`: + +```ts +import * as appsync from '@aws-cdk/aws-appsync'; + +const api = new appsync.GraphqlApi(scope, 'api', { + name: 'api', + schema: appsync.Schema.fromFile(join(__dirname, 'schema.graphql')), +}); + +const httpDs = api.addHttpDataSource( + 'ds', + 'https://states.amazonaws.com', + { + name: 'httpDsWithStepF', + description: 'from appsync to StepFunctions Workflow', + authorizationConfig: { + signingRegion: 'us-east-1', + signingServiceName: 'states' + } + } +); + +httpDs.createResolver({ + typeName: 'Mutation', + fieldName: 'callStepFunction', + requestMappingTemplate: MappingTemplate.fromFile('request.vtl'), + responseMappingTemplate: MappingTemplate.fromFile('response.vtl') +}); +``` + ### Schema Every GraphQL Api needs a schema to define the Api. CDK offers `appsync.Schema` @@ -129,7 +208,6 @@ const api = appsync.GraphqlApi(stack, 'api', { ``` ### Imports - Any GraphQL Api that has been created outside the stack can be imported from another stack into your CDK app. Utilizing the `fromXxx` function, you have the ability to add data sources and resolvers through a `IGraphqlApi` interface. 
@@ -367,6 +445,21 @@ More concretely, GraphQL Types are simply the types appended to variables. Referencing the object type `Demo` in the previous example, the GraphQL Types is `String!` and is applied to both the names `id` and `version`. +#### Directives + +`Directives` are attached to a field or type and affect the execution of queries, +mutations, and types. With AppSync, we use `Directives` to configure authorization. +CDK provides static functions to add directives to your Schema. + +- `Directive.iam()` sets a type or field's authorization to be validated through `Iam` +- `Directive.apiKey()` sets a type or field's authorization to be validated through a `Api Key` +- `Directive.oidc()` sets a type or field's authorization to be validated through `OpenID Connect` +- `Directive.cognito(...groups: string[])` sets a type or field's authorization to be validated +through `Cognito User Pools` + - `groups` the name of the cognito groups to give access + +To learn more about authorization and directives, read these docs [here](https://docs.aws.amazon.com/appsync/latest/devguide/security.html). + #### Field and Resolvable Fields While `GraphqlType` is a base implementation for GraphQL fields, we have abstractions @@ -470,7 +563,9 @@ Types will be the meat of your GraphQL Schema as they are the types defined by y Intermediate Types include: - [**Interface Types**](#Interface-Types) - [**Object Types**](#Object-Types) +- [**Enum Types**](#Enum-Types) - [**Input Types**](#Input-Types) +- [**Union Types**](#Union-Types) ##### Interface Types @@ -487,6 +582,8 @@ const node = new appsync.InterfaceType('Node', { }); ``` +To learn more about **Interface Types**, read the docs [here](https://graphql.org/learn/schema/#interfaces). + ##### Object Types **Object Types** are types that you declare. 
For example, in the [code-first example](#code-first-example) @@ -548,21 +645,40 @@ You can create Object Types in three ways: }, }); ``` - > This method allows for reusability and modularity, ideal for reducing code duplication. + > This method allows for reusability and modularity, ideal for reducing code duplication. -3. Object Types can be created ***internally*** within the GraphQL API. - ```ts - const api = new appsync.GraphqlApi(stack, 'Api', { - name: 'demo', - }); - api.addType('Demo', { - defintion: { - id: appsync.GraphqlType.string({ isRequired: true }), - version: appsync.GraphqlType.string({ isRequired: true }), - }, - }); - ``` - > This method provides easy use and is ideal for smaller projects. +To learn more about **Object Types**, read the docs [here](https://graphql.org/learn/schema/#object-types-and-fields). + +### Enum Types + +**Enum Types** are a special type of Intermediate Type. They restrict a particular +set of allowed values for other Intermediate Types. + +```gql +enum Episode { + NEWHOPE + EMPIRE + JEDI +} +``` + +> This means that wherever we use the type Episode in our schema, we expect it to +> be exactly one of NEWHOPE, EMPIRE, or JEDI. + +The above GraphQL Enumeration Type can be expressed in CDK as the following: + +```ts +const episode = new appsync.EnumType('Episode', { + definition: [ + 'NEWHOPE', + 'EMPIRE', + 'JEDI', + ], +}); +api.addType(episode); +``` + +To learn more about **Enum Types**, read the docs [here](https://graphql.org/learn/schema/#enumeration-types). ##### Input Types @@ -590,6 +706,34 @@ api.addType(review); To learn more about **Input Types**, read the docs [here](https://graphql.org/learn/schema/#input-types). +### Union Types + +**Union Types** are a special type of Intermediate Type. They are similar to +Interface Types, but they cannot specify any common fields between types. + +**Note:** the fields of a union type need to be `Object Types`. 
In other words, you +can't create a union type out of interfaces, other unions, or inputs. + +```gql +union Search = Human | Droid | Starship +``` + +The above GraphQL Union Type encompasses the Object Types of Human, Droid and Starship. It +can be expressed in CDK as the following: + +```ts +const string = appsync.GraphqlType.string(); +const human = new appsync.ObjectType('Human', { definition: { name: string } }); +const droid = new appsync.ObjectType('Droid', { definition: { name: string } }); +const starship = new appsync.ObjectType('Starship', { definition: { name: string } });); +const search = new appsync.UnionType('Search', { + definition: [ human, droid, starship ], +}); +api.addType(search); +``` + +To learn more about **Union Types**, read the docs [here](https://graphql.org/learn/schema/#union-types). + #### Query Every schema requires a top level Query type. By default, the schema will look @@ -635,3 +779,25 @@ api.addMutation('addFilm', new appsync.ResolvableField({ ``` To learn more about top level operations, check out the docs [here](https://docs.aws.amazon.com/appsync/latest/devguide/graphql-overview.html). + +#### Subscription + +Every schema **can** have a top level Subscription type. The top level `Subscription` Type +is the only exposed type that users can access to invoke a response to a mutation. `Subscriptions` +notify users when a mutation specific mutation is called. This means you can make any data source +real time by specify a GraphQL Schema directive on a mutation. + +**Note**: The AWS AppSync client SDK automatically handles subscription connection management. + +To add fields for these subscriptions, we can simply run the `addSubscription` function to add +to the schema's `Subscription` type. 
+ +```ts +api.addSubscription('addedFilm', new appsync.ResolvableField({ + returnType: film.attribute(), + args: { id: appsync.GraphqlType.id({ isRequired: true }) }, + directive: [appsync.Directive.subscribe('addFilm')], +})); +``` + +To learn more about top level operations, check out the docs [here](https://docs.aws.amazon.com/appsync/latest/devguide/real-time-data.html). diff --git a/packages/@aws-cdk/aws-appsync/lib/data-source.ts b/packages/@aws-cdk/aws-appsync/lib/data-source.ts index 04c617b12e5d0..c67d96562b126 100644 --- a/packages/@aws-cdk/aws-appsync/lib/data-source.ts +++ b/packages/@aws-cdk/aws-appsync/lib/data-source.ts @@ -203,6 +203,21 @@ export class DynamoDbDataSource extends BackedDataSource { } } +/** + * The authorization config in case the HTTP endpoint requires authorization + */ +export interface AwsIamConfig { + /** + * The signing region for AWS IAM authorization + */ + readonly signingRegion: string; + + /** + * The signing service name for AWS IAM authorization + */ + readonly signingServiceName: string; +} + /** * Properties for an AppSync http datasource */ @@ -211,6 +226,14 @@ export interface HttpDataSourceProps extends BaseDataSourceProps { * The http endpoint */ readonly endpoint: string; + + /** + * The authorization config in case the HTTP endpoint requires authorization + * + * @default - none + * + */ + readonly authorizationConfig?: AwsIamConfig; } /** @@ -218,11 +241,16 @@ export interface HttpDataSourceProps extends BaseDataSourceProps { */ export class HttpDataSource extends BaseDataSource { constructor(scope: Construct, id: string, props: HttpDataSourceProps) { + const authorizationConfig = props.authorizationConfig ? 
{ + authorizationType: 'AWS_IAM', + awsIamConfig: props.authorizationConfig, + } : undefined; super(scope, id, props, { + type: 'HTTP', httpConfig: { endpoint: props.endpoint, + authorizationConfig, }, - type: 'HTTP', }); } } diff --git a/packages/@aws-cdk/aws-appsync/lib/graphqlapi-base.ts b/packages/@aws-cdk/aws-appsync/lib/graphqlapi-base.ts index 2f357c142db91..0525b51340fcd 100644 --- a/packages/@aws-cdk/aws-appsync/lib/graphqlapi-base.ts +++ b/packages/@aws-cdk/aws-appsync/lib/graphqlapi-base.ts @@ -1,7 +1,7 @@ import { ITable } from '@aws-cdk/aws-dynamodb'; import { IFunction } from '@aws-cdk/aws-lambda'; import { CfnResource, IResource, Resource } from '@aws-cdk/core'; -import { DynamoDbDataSource, HttpDataSource, LambdaDataSource, NoneDataSource } from './data-source'; +import { DynamoDbDataSource, HttpDataSource, LambdaDataSource, NoneDataSource, AwsIamConfig } from './data-source'; /** * Optional configuration for data sources @@ -22,6 +22,18 @@ export interface DataSourceOptions { readonly description?: string; } +/** + * Optional configuration for Http data sources + */ +export interface HttpDataSourceOptions extends DataSourceOptions { + /** + * The authorization config in case the HTTP endpoint requires authorization + * + * @default - none + */ + readonly authorizationConfig?: AwsIamConfig; +} + /** * Interface for GraphQL */ @@ -67,7 +79,7 @@ export interface IGraphqlApi extends IResource { * @param endpoint The http endpoint * @param options The optional configuration for this data source */ - addHttpDataSource(id: string, endpoint: string, options?: DataSourceOptions): HttpDataSource; + addHttpDataSource(id: string, endpoint: string, options?: HttpDataSourceOptions): HttpDataSource; /** * add a new Lambda data source to this API @@ -140,12 +152,13 @@ export abstract class GraphqlApiBase extends Resource implements IGraphqlApi { * @param endpoint The http endpoint * @param options The optional configuration for this data source */ - public 
addHttpDataSource(id: string, endpoint: string, options?: DataSourceOptions): HttpDataSource { + public addHttpDataSource(id: string, endpoint: string, options?: HttpDataSourceOptions): HttpDataSource { return new HttpDataSource(this, id, { api: this, endpoint, name: options?.name, description: options?.description, + authorizationConfig: options?.authorizationConfig, }); } diff --git a/packages/@aws-cdk/aws-appsync/lib/graphqlapi.ts b/packages/@aws-cdk/aws-appsync/lib/graphqlapi.ts index 052e3ecd53392..235c5abe369f3 100644 --- a/packages/@aws-cdk/aws-appsync/lib/graphqlapi.ts +++ b/packages/@aws-cdk/aws-appsync/lib/graphqlapi.ts @@ -1,6 +1,6 @@ import { IUserPool } from '@aws-cdk/aws-cognito'; import { ManagedPolicy, Role, ServicePrincipal, Grant, IGrantable } from '@aws-cdk/aws-iam'; -import { CfnResource, Construct, Duration, IResolvable, Stack } from '@aws-cdk/core'; +import { CfnResource, Construct, Duration, Expiration, IResolvable, Stack } from '@aws-cdk/core'; import { CfnApiKey, CfnGraphQLApi, CfnGraphQLSchema } from './appsync.generated'; import { IGraphqlApi, GraphqlApiBase } from './graphqlapi-base'; import { Schema } from './schema'; @@ -111,12 +111,13 @@ export interface ApiKeyConfig { readonly description?: string; /** - * The time from creation time after which the API key expires, using RFC3339 representation. + * The time from creation time after which the API key expires. * It must be a minimum of 1 day and a maximum of 365 days from date of creation. * Rounded down to the nearest hour. 
- * @default - 7 days from creation time + * + * @default - 7 days rounded down to nearest hour */ - readonly expires?: string; + readonly expires?: Expiration; } /** @@ -376,6 +377,11 @@ export class GraphqlApi extends GraphqlApiBase { */ public readonly schema: Schema; + /** + * The Authorization Types for this GraphQL Api + */ + public readonly modes: AuthorizationType[]; + /** * the configured API key, if present * @@ -395,6 +401,8 @@ export class GraphqlApi extends GraphqlApiBase { const additionalModes = props.authorizationConfig?.additionalAuthorizationModes ?? []; const modes = [defaultMode, ...additionalModes]; + this.modes = modes.map((mode) => mode.authorizationType ); + this.validateAuthorizationProps(modes); this.api = new CfnGraphQLApi(this, 'Resource', { @@ -549,16 +557,10 @@ export class GraphqlApi extends GraphqlApiBase { } private createAPIKey(config?: ApiKeyConfig) { - let expires: number | undefined; - if (config?.expires) { - expires = new Date(config.expires).valueOf(); - const days = (d: number) => - Date.now() + Duration.days(d).toMilliseconds(); - if (expires < days(1) || expires > days(365)) { - throw Error('API key expiration must be between 1 and 365 days.'); - } - expires = Math.round(expires / 1000); + if (config?.expires?.isBefore(Duration.days(1)) || config?.expires?.isAfter(Duration.days(365))) { + throw Error('API key expiration must be between 1 and 365 days.'); } + const expires = config?.expires ? config?.expires.toEpoch() : undefined; return new CfnApiKey(this, `${config?.name || 'Default'}ApiKey`, { expires, description: config?.description, @@ -592,8 +594,8 @@ export class GraphqlApi extends GraphqlApiBase { } /** - * Add a query field to the schema's Query. If one isn't set by - * the user, CDK will create an Object Type called 'Query'. For example, + * Add a query field to the schema's Query. CDK will create an + * Object Type called 'Query'. 
For example, * * type Query { * fieldName: Field.returnType @@ -607,8 +609,8 @@ export class GraphqlApi extends GraphqlApiBase { } /** - * Add a mutation field to the schema's Mutation. If one isn't set by - * the user, CDK will create an Object Type called 'Mutation'. For example, + * Add a mutation field to the schema's Mutation. CDK will create an + * Object Type called 'Mutation'. For example, * * type Mutation { * fieldName: Field.returnType @@ -620,4 +622,19 @@ export class GraphqlApi extends GraphqlApiBase { public addMutation(fieldName: string, field: ResolvableField): ObjectType { return this.schema.addMutation(fieldName, field); } + + /** + * Add a subscription field to the schema's Subscription. CDK will create an + * Object Type called 'Subscription'. For example, + * + * type Subscription { + * fieldName: Field.returnType + * } + * + * @param fieldName the name of the Subscription + * @param field the resolvable field to for this Subscription + */ + public addSubscription(fieldName: string, field: ResolvableField): ObjectType { + return this.schema.addSubscription(fieldName, field); + } } diff --git a/packages/@aws-cdk/aws-appsync/lib/private.ts b/packages/@aws-cdk/aws-appsync/lib/private.ts index 25e99fa93b753..f31ab439bccc8 100644 --- a/packages/@aws-cdk/aws-appsync/lib/private.ts +++ b/packages/@aws-cdk/aws-appsync/lib/private.ts @@ -1,3 +1,4 @@ +import { AuthorizationType } from './graphqlapi'; import { Directive } from './schema-base'; import { InterfaceType } from './schema-intermediate'; @@ -47,6 +48,10 @@ export interface SchemaAdditionOptions { * the fields to reduce onto the addition */ readonly fields: string[]; + /** + * the authorization modes for this graphql type + */ + readonly modes?: AuthorizationType[]; } /** @@ -67,7 +72,10 @@ export interface SchemaAdditionOptions { export function shapeAddition(options: SchemaAdditionOptions): string { const typeName = (): string => { return options.name ? 
` ${options.name}` : ''; }; const interfaces = generateInterfaces(options.interfaceTypes); - const directives = generateDirectives(options.directives); + const directives = generateDirectives({ + directives: options.directives, + modes: options.modes, + }); return options.fields.reduce((acc, field) => `${acc} ${field}\n`, `${options.prefix}${typeName()}${interfaces}${directives} {\n`) + '}'; } @@ -197,14 +205,33 @@ function generateInterfaces(interfaceTypes?: InterfaceType[]): string { `${acc} ${interfaceType.name},`, ' implements').slice(0, -1); } +/** + * options to generate directives + */ +interface generateDirectivesOptions { + /** + * the directives of a given type + */ + readonly directives?: Directive[]; + /** + * thee separator betweeen directives + * + * @default - a space + */ + readonly delimiter?: string; + /** + * the authorization modes + */ + readonly modes?: AuthorizationType[]; +} + /** * Utility function to generate directives - * - * @param directives the directives of a given type - * @param delimiter the separator betweeen directives (by default we will add a space) */ -function generateDirectives(directives?: Directive[], delimiter?: string): string { - if (!directives || directives.length === 0) return ''; - return directives.reduce((acc, directive) => - `${acc}${directive.statement}${delimiter ?? ' '}`, ' ').slice(0, -1); +function generateDirectives(options: generateDirectivesOptions): string { + if (!options.directives || options.directives.length === 0) return ''; + // reduce over all directives and get string version of the directive + // pass in the auth modes for checks to happen on compile time + return options.directives.reduce((acc, directive) => + `${acc}${directive._bindToAuthModes(options.modes).toString()}${options.delimiter ?? 
' '}`, ' ').slice(0, -1); } \ No newline at end of file diff --git a/packages/@aws-cdk/aws-appsync/lib/schema-base.ts b/packages/@aws-cdk/aws-appsync/lib/schema-base.ts index 940f21bc9be7e..a639c747ca208 100644 --- a/packages/@aws-cdk/aws-appsync/lib/schema-base.ts +++ b/packages/@aws-cdk/aws-appsync/lib/schema-base.ts @@ -1,3 +1,4 @@ +import { AuthorizationType, GraphqlApi } from './graphqlapi'; import { Resolver } from './resolver'; import { ResolvableFieldOptions, BaseTypeOptions, GraphqlType } from './schema-field'; import { InterfaceType } from './schema-intermediate'; @@ -61,6 +62,15 @@ export interface IField { * Generate the arguments for this field */ argsToString(): string; + + /** + * Generate the directives for this field + * + * @param modes the authorization modes of the graphql api + * + * @default - no authorization modes + */ + directivesToString(modes?: AuthorizationType[]): string } /** @@ -127,7 +137,16 @@ export interface IIntermediateType { * * @default - no intermediate type */ - readonly intermediateType?: InterfaceType; + readonly intermediateType?: IIntermediateType; + + /** + * Method called when the stringifying Intermediate Types for schema generation + * + * @param api The binding GraphQL Api [disable-awslint:ref-via-interface] + * + * @internal + */ + _bindToGraphqlApi(api: GraphqlApi): IIntermediateType; /** * Create an GraphQL Type representing this Intermediate Type @@ -146,12 +165,22 @@ export interface IIntermediateType { /** * Add a field to this Intermediate Type - * - * @param options - the options to add a field */ addField(options: AddFieldOptions): void; } +interface DirectiveOptions { + /** + * The authorization type of this directive + */ + readonly mode?: AuthorizationType; + + /** + * Mutation fields for a subscription directive + */ + readonly mutationFields?: string[]; +} + /** * Directives for types * @@ -164,7 +193,53 @@ export class Directive { * Add the @aws_iam directive */ public static iam(): Directive { - 
return new Directive('@aws_iam'); + return new Directive('@aws_iam', { mode: AuthorizationType.IAM }); + } + + /** + * Add the @aws_oidc directive + */ + public static oidc(): Directive { + return new Directive('@aws_oidc', { mode: AuthorizationType.OIDC }); + } + + /** + * Add the @aws_api_key directive + */ + public static apiKey(): Directive { + return new Directive('@aws_api_key', { mode: AuthorizationType.API_KEY }); + } + + /** + * Add the @aws_auth or @aws_cognito_user_pools directive + * + * @param groups the groups to allow access to + */ + public static cognito(...groups: string[]): Directive { + if (groups.length === 0) { + throw new Error(`Cognito authorization requires at least one Cognito group to be supplied. Received: ${groups.length}`); + } + // this function creates the cognito groups as a string (i.e. ["group1", "group2", "group3"]) + const stringify = (array: string[]): string => { + return array.reduce((acc, element) => `${acc}"${element}", `, '').slice(0, -2); + }; + return new Directive(`@aws_auth(cognito_groups: [${stringify(groups)}])`, { mode: AuthorizationType.USER_POOL }); + } + + /** + * Add the @aws_subscribe directive. Only use for top level Subscription type. + * + * @param mutations the mutation fields to link to + */ + public static subscribe(...mutations: string[]): Directive { + if (mutations.length === 0) { + throw new Error(`Subscribe directive requires at least one mutation field to be supplied. Received: ${mutations.length}`); + } + // this function creates the subscribe directive as a string (i.e. 
["mutation_field_1", "mutation_field_2"]) + const stringify = (array: string[]): string => { + return array.reduce((acc, mutation) => `${acc}"${mutation}", `, '').slice(0, -2); + }; + return new Directive(`@aws_subscribe(mutations: [${stringify(mutations)}])`, { mutationFields: mutations }); } /** @@ -176,12 +251,60 @@ export class Directive { return new Directive(statement); } + /** + * The authorization type of this directive + * + * @default - not an authorization directive + */ + public readonly mode?: AuthorizationType; + + /** + * Mutation fields for a subscription directive + * + * @default - not a subscription directive + */ + public readonly mutationFields?: string[]; + /** * the directive statement */ - public readonly statement: string; + private statement: string; - private constructor(statement: string) { this.statement = statement; } + /** + * the authorization modes for this intermediate type + */ + protected modes?: AuthorizationType[]; + + private constructor(statement: string, options?: DirectiveOptions) { + this.statement = statement; + this.mode = options?.mode; + this.mutationFields = options?.mutationFields; + } + + /** + * Method called when the stringifying Directive for schema generation + * + * @param modes the authorization modes + * + * @internal + */ + public _bindToAuthModes(modes?: AuthorizationType[]): Directive { + this.modes = modes; + return this; + } + + /** + * Generate the directive statement + */ + public toString(): string { + if (this.modes && this.mode && !this.modes.some((mode) => mode === this.mode)) { + throw new Error(`No Authorization Type ${this.mode} declared in GraphQL Api.`); + } + if (this.mode === AuthorizationType.USER_POOL && this.modes && this.modes.length > 1) { + this.statement = this.statement.replace('@aws_auth', '@aws_cognito_user_pools'); + } + return this.statement; + } } /** diff --git a/packages/@aws-cdk/aws-appsync/lib/schema-field.ts b/packages/@aws-cdk/aws-appsync/lib/schema-field.ts index 
ba22dd085fe56..ed7c8790818db 100644 --- a/packages/@aws-cdk/aws-appsync/lib/schema-field.ts +++ b/packages/@aws-cdk/aws-appsync/lib/schema-field.ts @@ -1,6 +1,7 @@ import { BaseDataSource } from './data-source'; +import { AuthorizationType } from './graphqlapi'; import { MappingTemplate } from './mapping-template'; -import { Type, IField, IIntermediateType } from './schema-base'; +import { Type, IField, IIntermediateType, Directive } from './schema-base'; /** * Base options for GraphQL Types @@ -321,6 +322,13 @@ export class GraphqlType implements IField { public argsToString(): string { return ''; } + + /** + * Generate the directives for this field + */ + public directivesToString(_modes?: AuthorizationType[]): string { + return ''; + } } /** @@ -345,6 +353,12 @@ export interface FieldOptions { * @default - no arguments */ readonly args?: { [key: string]: GraphqlType }; + /** + * the directives for this field + * + * @default - no directives + */ + readonly directives?: Directive[]; } /** @@ -375,13 +389,17 @@ export class Field extends GraphqlType implements IField { */ public argsToString(): string { if (!this.fieldOptions || !this.fieldOptions.args) { return ''; } - let args = '('; - Object.keys(this.fieldOptions?.args ?? 
{}).forEach((key) => { - const type = this.fieldOptions?.args?.[key].toString(); - args = `${args}${key}: ${type} `; - }); - args = args.slice(0, -1); - return `${args})`; + return Object.keys(this.fieldOptions.args).reduce((acc, key) => + `${acc}${key}: ${this.fieldOptions?.args?.[key].toString()} `, '(').slice(0, -1) + ')'; + } + + /** + * Generate the directives for this field + */ + public directivesToString(modes?: AuthorizationType[]): string { + if (!this.fieldOptions || !this.fieldOptions.directives) { return ''; } + return this.fieldOptions.directives.reduce((acc, directive) => + `${acc}${directive._bindToAuthModes(modes).toString()} `, '\n ').slice(0, -1); } } diff --git a/packages/@aws-cdk/aws-appsync/lib/schema-intermediate.ts b/packages/@aws-cdk/aws-appsync/lib/schema-intermediate.ts index 0b243160cc8c1..cc778aee51a50 100644 --- a/packages/@aws-cdk/aws-appsync/lib/schema-intermediate.ts +++ b/packages/@aws-cdk/aws-appsync/lib/schema-intermediate.ts @@ -1,3 +1,4 @@ +import { AuthorizationType, GraphqlApi } from './graphqlapi'; import { shapeAddition } from './private'; import { Resolver } from './resolver'; import { Directive, IField, IIntermediateType, AddFieldOptions } from './schema-base'; @@ -8,14 +9,21 @@ import { BaseTypeOptions, GraphqlType, ResolvableFieldOptions } from './schema-f * * @param definition - the variables and types that define this type * i.e. 
{ string: GraphqlType, string: GraphqlType } + * @param directives - the directives for this object type * * @experimental */ -export interface IntermediateTypeProps { +export interface IntermediateTypeOptions { /** * the attributes of this type */ readonly definition: { [key: string]: IField }; + /** + * the directives for this object type + * + * @default - no directives + */ + readonly directives?: Directive[]; } /** @@ -33,19 +41,27 @@ export class InterfaceType implements IIntermediateType { * the attributes of this type */ public readonly definition: { [key: string]: IField }; + /** + * the directives for this object type + * + * @default - no directives + */ + public readonly directives?: Directive[]; + /** + * the authorization modes for this intermediate type + */ + protected modes?: AuthorizationType[]; - public constructor(name: string, props: IntermediateTypeProps) { + public constructor(name: string, props: IntermediateTypeOptions) { this.name = name; this.definition = props.definition; + this.directives = props.directives; } /** - * Create an GraphQL Type representing this Intermediate Type + * Create a GraphQL Type representing this Intermediate Type * * @param options the options to configure this attribute - * - isList - * - isRequired - * - isRequiredList */ public attribute(options?: BaseTypeOptions): GraphqlType { return GraphqlType.intermediate({ @@ -63,8 +79,12 @@ export class InterfaceType implements IIntermediateType { return shapeAddition({ prefix: 'interface', name: this.name, - fields: Object.keys(this.definition).map((key) => - `${key}${this.definition[key].argsToString()}: ${this.definition[key].toString()}`), + directives: this.directives, + fields: Object.keys(this.definition).map((key) => { + const field = this.definition[key]; + return `${key}${field.argsToString()}: ${field.toString()}${field.directivesToString(this.modes)}`; + }), + modes: this.modes, }); } @@ -81,6 +101,16 @@ export class InterfaceType implements 
IIntermediateType { } this.definition[options.fieldName] = options.field; } + + /** + * Method called when the stringifying Intermediate Types for schema generation + * + * @internal + */ + public _bindToGraphqlApi(api: GraphqlApi): IIntermediateType { + this.modes = api.modes; + return this; + } } /** @@ -93,19 +123,13 @@ export class InterfaceType implements IIntermediateType { * * @experimental */ -export interface ObjectTypeProps extends IntermediateTypeProps { +export interface ObjectTypeOptions extends IntermediateTypeOptions { /** * The Interface Types this Object Type implements * * @default - no interface types */ readonly interfaceTypes?: InterfaceType[]; - /** - * the directives for this object type - * - * @default - no directives - */ - readonly directives?: Directive[]; } /** @@ -120,26 +144,20 @@ export class ObjectType extends InterfaceType implements IIntermediateType { * @default - no interface types */ public readonly interfaceTypes?: InterfaceType[]; - /** - * the directives for this object type - * - * @default - no directives - */ - public readonly directives?: Directive[]; /** * The resolvers linked to this data source */ public resolvers?: Resolver[]; - public constructor(name: string, props: ObjectTypeProps) { + public constructor(name: string, props: ObjectTypeOptions) { const options = { definition: props.interfaceTypes?.reduce((def, interfaceType) => { return Object.assign({}, def, interfaceType.definition); }, props.definition) ?? props.definition, + directives: props.directives, }; super(name, options); this.interfaceTypes = props.interfaceTypes; - this.directives = props.directives; this.resolvers = []; Object.keys(this.definition).forEach((fieldName) => { @@ -148,7 +166,6 @@ export class ObjectType extends InterfaceType implements IIntermediateType { }); } - /** * Add a field to this Object Type. 
* @@ -173,8 +190,11 @@ export class ObjectType extends InterfaceType implements IIntermediateType { name: this.name, interfaceTypes: this.interfaceTypes, directives: this.directives, - fields: Object.keys(this.definition).map((key) => - `${key}${this.definition[key].argsToString()}: ${this.definition[key].toString()}`), + fields: Object.keys(this.definition).map((key) => { + const field = this.definition[key]; + return `${key}${field.argsToString()}: ${field.toString()}${field.directivesToString(this.modes)}`; + }), + modes: this.modes, }); } @@ -182,16 +202,15 @@ export class ObjectType extends InterfaceType implements IIntermediateType { * Generate the resolvers linked to this Object Type */ protected generateResolver(fieldName: string, options?: ResolvableFieldOptions): void { - if (options?.dataSource) { - if (!this.resolvers) { this.resolvers = []; } - this.resolvers.push(options.dataSource.createResolver({ - typeName: this.name, - fieldName: fieldName, - pipelineConfig: options.pipelineConfig, - requestMappingTemplate: options.requestMappingTemplate, - responseMappingTemplate: options.responseMappingTemplate, - })); - } + if (!options?.dataSource) return; + if (!this.resolvers) { this.resolvers = []; } + this.resolvers.push(options.dataSource.createResolver({ + typeName: this.name, + fieldName: fieldName, + pipelineConfig: options.pipelineConfig, + requestMappingTemplate: options.requestMappingTemplate, + responseMappingTemplate: options.responseMappingTemplate, + })); } } @@ -210,19 +229,20 @@ export class InputType implements IIntermediateType { * the attributes of this type */ public readonly definition: { [key: string]: IField }; + /** + * the authorization modes for this intermediate type + */ + protected modes?: AuthorizationType[]; - public constructor(name: string, props: IntermediateTypeProps) { + public constructor(name: string, props: IntermediateTypeOptions) { this.name = name; this.definition = props.definition; } /** - * Create an GraphQL Type 
representing this Input Type + * Create a GraphQL Type representing this Input Type * * @param options the options to configure this attribute - * - isList - * - isRequired - * - isRequiredList */ public attribute(options?: BaseTypeOptions): GraphqlType { return GraphqlType.intermediate({ @@ -242,6 +262,7 @@ export class InputType implements IIntermediateType { name: this.name, fields: Object.keys(this.definition).map((key) => `${key}${this.definition[key].argsToString()}: ${this.definition[key].toString()}`), + modes: this.modes, }); } @@ -258,4 +279,204 @@ export class InputType implements IIntermediateType { } this.definition[options.fieldName] = options.field; } + + /** + * Method called when the stringifying Intermediate Types for schema generation + * + * @internal + */ + public _bindToGraphqlApi(api: GraphqlApi): IIntermediateType { + this.modes = api.modes; + return this; + } +} + +/** + * Properties for configuring an Union Type + * + * @experimental + */ +export interface UnionTypeOptions { + /** + * the object types for this union type + */ + readonly definition: IIntermediateType[]; +} + +/** + * Union Types are abstract types that are similar to Interface Types, + * but they cannot to specify any common fields between types. + * + * Note that fields of a union type need to be object types. In other words, + * you can't create a union type out of interfaces, other unions, or inputs. 
+ * + * @experimental + */ +export class UnionType implements IIntermediateType { + /** + * the name of this type + */ + public readonly name: string; + /** + * the attributes of this type + */ + public readonly definition: { [key: string]: IField }; + /** + * the authorization modes supported by this intermediate type + */ + protected modes?: AuthorizationType[]; + + public constructor(name: string, options: UnionTypeOptions) { + this.name = name; + this.definition = {}; + options.definition.map((def) => this.addField({ field: def.attribute() })); + } + + /** + * Create a GraphQL Type representing this Union Type + * + * @param options the options to configure this attribute + */ + public attribute(options?: BaseTypeOptions): GraphqlType { + return GraphqlType.intermediate({ + isList: options?.isList, + isRequired: options?.isRequired, + isRequiredList: options?.isRequiredList, + intermediateType: this, + }); + } + + /** + * Generate the string of this Union type + */ + public toString(): string { + // Return a string that appends all Object Types for this Union Type + // i.e. 'union Example = example1 | example2' + return Object.values(this.definition).reduce((acc, field) => + `${acc} ${field.toString()} |`, `union ${this.name} =`).slice(0, -2); + } + + /** + * Add a field to this Union Type + * + * Input Types must have field options and the IField must be an Object Type. + * + * @param options the options to add a field + */ + public addField(options: AddFieldOptions): void { + if (options.fieldName) { + throw new Error('Union Types cannot be configured with the fieldName option. 
Use the field option instead.'); + } + if (!options.field) { + throw new Error('Union Types must be configured with the field option.'); + } + if (options.field && !(options.field.intermediateType instanceof ObjectType)) { + throw new Error('Fields for Union Types must be Object Types.'); + } + this.definition[options.field?.toString() + 'id'] = options.field; + } + + /** + * Method called when the stringifying Intermediate Types for schema generation + * + * @internal + */ + public _bindToGraphqlApi(api: GraphqlApi): IIntermediateType { + this.modes = api.modes; + return this; + } +} + +/** + * Properties for configuring an Enum Type + * + * @experimental + */ +export interface EnumTypeOptions { + /** + * the attributes of this type + */ + readonly definition: string[]; +} + +/** + * Enum Types are abstract types that includes a set of fields + * that represent the strings this type can create. + * + * @experimental + */ +export class EnumType implements IIntermediateType { + /** + * the name of this type + */ + public readonly name: string; + /** + * the attributes of this type + */ + public readonly definition: { [key: string]: IField }; + /** + * the authorization modes for this intermediate type + */ + protected modes?: AuthorizationType[]; + + public constructor(name: string, options: EnumTypeOptions) { + this.name = name; + this.definition = {}; + options.definition.map((fieldName: string) => this.addField({ fieldName })); + } + + /** + * Create an GraphQL Type representing this Enum Type + */ + public attribute(options?: BaseTypeOptions): GraphqlType { + return GraphqlType.intermediate({ + isList: options?.isList, + isRequired: options?.isRequired, + isRequiredList: options?.isRequiredList, + intermediateType: this, + }); + } + + /** + * Generate the string of this enum type + */ + public toString(): string { + return shapeAddition({ + prefix: 'enum', + name: this.name, + fields: Object.keys(this.definition), + modes: this.modes, + }); + } + + /** + * Add a 
field to this Enum Type + * + * To add a field to this Enum Type, you must only configure + * addField with the fieldName options. + * + * @param options the options to add a field + */ + public addField(options: AddFieldOptions): void { + if (options.field) { + throw new Error('Enum Type fields consist of strings. Use the fieldName option instead of the field option.'); + } + if (!options.fieldName) { + throw new Error('When adding a field to an Enum Type, you must configure the fieldName option.'); + } + if (options.fieldName.indexOf(' ') > -1) { + throw new Error(`Enum Type values cannot have whitespace. Received: ${options.fieldName}`); + } + this.definition[options.fieldName] = GraphqlType.string(); + } + + /** + * Method called when the stringifying Intermediate Types for schema generation + * + * @internal + */ + public _bindToGraphqlApi(api: GraphqlApi): IIntermediateType { + this.modes = api.modes; + return this; + } } diff --git a/packages/@aws-cdk/aws-appsync/lib/schema.ts b/packages/@aws-cdk/aws-appsync/lib/schema.ts index 9f1a2e9d2988a..ec5bbfa14241d 100644 --- a/packages/@aws-cdk/aws-appsync/lib/schema.ts +++ b/packages/@aws-cdk/aws-appsync/lib/schema.ts @@ -56,6 +56,8 @@ export class Schema { private mode: SchemaMode; + private types: IIntermediateType[]; + public constructor(options?: SchemaOptions) { if (options?.filePath) { this.mode = SchemaMode.FILE; @@ -64,6 +66,7 @@ export class Schema { this.mode = SchemaMode.CODE; this.definition = ''; } + this.types = []; } /** @@ -76,7 +79,12 @@ export class Schema { if (!this.schema) { this.schema = new CfnGraphQLSchema(api, 'Schema', { apiId: api.apiId, - definition: Lazy.stringValue({ produce: () => `${this.declareSchema()}${this.definition}` }), + definition: this.mode === SchemaMode.CODE ? 
+ Lazy.stringValue({ + produce: () => this.types.reduce((acc, type) => { return `${acc}${type._bindToGraphqlApi(api).toString()}\n`; }, + `${this.declareSchema()}${this.definition}`), + }) + : this.definition, }); } return this.schema; @@ -101,8 +109,8 @@ export class Schema { } /** - * Add a query field to the schema's Query. If one isn't set by - * the user, CDK will create an Object Type called 'Query'. For example, + * Add a query field to the schema's Query. CDK will create an + * Object Type called 'Query'. For example, * * type Query { * fieldName: Field.returnType @@ -113,7 +121,7 @@ export class Schema { */ public addQuery(fieldName: string, field: ResolvableField): ObjectType { if (this.mode !== SchemaMode.CODE) { - throw new Error(`Unable to add query. Schema definition mode must be ${SchemaMode.CODE} Received: ${this.mode}`); + throw new Error(`Unable to add query. Schema definition mode must be ${SchemaMode.CODE}. Received: ${this.mode}`); } if (!this.query) { this.query = new ObjectType('Query', { definition: {} }); @@ -124,8 +132,8 @@ export class Schema { } /** - * Add a mutation field to the schema's Mutation. If one isn't set by - * the user, CDK will create an Object Type called 'Mutation'. For example, + * Add a mutation field to the schema's Mutation. CDK will create an + * Object Type called 'Mutation'. For example, * * type Mutation { * fieldName: Field.returnType @@ -136,7 +144,7 @@ export class Schema { */ public addMutation(fieldName: string, field: ResolvableField): ObjectType { if (this.mode !== SchemaMode.CODE) { - throw new Error(`Unable to add mutation. Schema definition mode must be ${SchemaMode.CODE} Received: ${this.mode}`); + throw new Error(`Unable to add mutation. Schema definition mode must be ${SchemaMode.CODE}. 
Received: ${this.mode}`); } if (!this.mutation) { this.mutation = new ObjectType('Mutation', { definition: {} }); @@ -146,6 +154,33 @@ export class Schema { return this.mutation; } + /** + * Add a subscription field to the schema's Subscription. CDK will create an + * Object Type called 'Subscription'. For example, + * + * type Subscription { + * fieldName: Field.returnType + * } + * + * @param fieldName the name of the Subscription + * @param field the resolvable field to for this Subscription + */ + public addSubscription(fieldName: string, field: ResolvableField): ObjectType { + if (this.mode !== SchemaMode.CODE) { + throw new Error(`Unable to add subscription. Schema definition mode must be ${SchemaMode.CODE}. Received: ${this.mode}`); + } + if (!this.subscription) { + this.subscription = new ObjectType('Subscription', { definition: {} }); + this.addType(this.subscription); + } + const directives = field.fieldOptions?.directives?.filter((directive) => directive.mutationFields); + if (directives && directives.length > 1) { + throw new Error(`Subscription fields must not have more than one @aws_subscribe directives. 
Received: ${directives.length}`); + } + this.subscription.addField({ fieldName, field }); + return this.subscription; + } + /** * Add type to the schema * @@ -157,7 +192,7 @@ export class Schema { if (this.mode !== SchemaMode.CODE) { throw new Error('API cannot add type because schema definition mode is not configured as CODE.'); } - this.addToSchema(Lazy.stringValue({ produce: () => type.toString() })); + this.types.push(type); return type; } diff --git a/packages/@aws-cdk/aws-appsync/test/appsync-auth.test.ts b/packages/@aws-cdk/aws-appsync/test/appsync-auth.test.ts index 5815908198feb..fbac8fcd07350 100644 --- a/packages/@aws-cdk/aws-appsync/test/appsync-auth.test.ts +++ b/packages/@aws-cdk/aws-appsync/test/appsync-auth.test.ts @@ -88,6 +88,71 @@ describe('AppSync API Key Authorization', () => { }); }); + test('apiKeyConfig creates default with valid expiration date', () => { + const expirationDate: number = cdk.Expiration.after(cdk.Duration.days(10)).toEpoch(); + + // WHEN + new appsync.GraphqlApi(stack, 'API', { + name: 'apiKeyUnitTest', + schema: appsync.Schema.fromAsset(path.join(__dirname, 'appsync.auth.graphql')), + authorizationConfig: { + defaultAuthorization: { + authorizationType: appsync.AuthorizationType.API_KEY, + apiKeyConfig: { + expires: cdk.Expiration.after(cdk.Duration.days(10)), + }, + }, + }, + }); + // THEN + expect(stack).toHaveResourceLike('AWS::AppSync::ApiKey', { + ApiId: { 'Fn::GetAtt': ['API62EA1CFF', 'ApiId'] }, + Expires: expirationDate, + }); + }); + + test('apiKeyConfig fails if expire argument less than a day', () => { + // WHEN + const when = () => { + new appsync.GraphqlApi(stack, 'API', { + name: 'apiKeyUnitTest', + schema: appsync.Schema.fromAsset(path.join(__dirname, 'appsync.auth.graphql')), + authorizationConfig: { + defaultAuthorization: { + authorizationType: appsync.AuthorizationType.API_KEY, + apiKeyConfig: { + expires: cdk.Expiration.after(cdk.Duration.hours(1)), + }, + }, + }, + }); + }; + + // THEN + 
expect(when).toThrowError('API key expiration must be between 1 and 365 days.'); + }); + + test('apiKeyConfig fails if expire argument greater than 365 day', () => { + // WHEN + const when = () => { + new appsync.GraphqlApi(stack, 'API', { + name: 'apiKeyUnitTest', + schema: appsync.Schema.fromAsset(path.join(__dirname, 'appsync.auth.graphql')), + authorizationConfig: { + defaultAuthorization: { + authorizationType: appsync.AuthorizationType.API_KEY, + apiKeyConfig: { + expires: cdk.Expiration.after(cdk.Duration.days(366)), + }, + }, + }, + }); + }; + + // THEN + expect(when).toThrowError('API key expiration must be between 1 and 365 days.'); + }); + test('appsync creates configured api key with additionalAuthorizationModes (not as first element)', () => { // WHEN new appsync.GraphqlApi(stack, 'api', { diff --git a/packages/@aws-cdk/aws-appsync/test/appsync-directives.test.ts b/packages/@aws-cdk/aws-appsync/test/appsync-directives.test.ts new file mode 100644 index 0000000000000..3c47e0b63cf6d --- /dev/null +++ b/packages/@aws-cdk/aws-appsync/test/appsync-directives.test.ts @@ -0,0 +1,140 @@ +import '@aws-cdk/assert/jest'; +import * as cdk from '@aws-cdk/core'; +import * as cognito from '@aws-cdk/aws-cognito'; +import * as appsync from '../lib'; +import * as t from './scalar-type-defintions'; + +const iam = [appsync.Directive.iam()]; +const apiKey = [appsync.Directive.apiKey()]; +const oidc = [appsync.Directive.oidc()]; +const cognito_default = [appsync.Directive.cognito('test', 'test2')]; +const cognito_additional = [appsync.Directive.cognito('test', 'test2')]; +const custom = [appsync.Directive.custom('custom')]; + +const generateField = (directives: appsync.Directive[]): appsync.Field => { + return new appsync.Field({ + returnType: t.string, + directives, + }); +}; + +const generateRField = (directives: appsync.Directive[]): appsync.ResolvableField => { + return new appsync.ResolvableField({ + returnType: t.string, + directives, + }); +}; + +let stack: 
cdk.Stack; + +let api_apiKey: appsync.GraphqlApi, api_iam: appsync.GraphqlApi, api_oidc: appsync.GraphqlApi, + api_auth: appsync.GraphqlApi, api_cognito: appsync.GraphqlApi; +beforeEach(() => { + // GIVEN + stack = new cdk.Stack(); + const userPool = new cognito.UserPool(stack, 'userpool'); + api_apiKey = new appsync.GraphqlApi(stack, 'api_apiKey', { + name: 'api', + }); + api_iam = new appsync.GraphqlApi(stack, 'api_iam', { + name: 'api', + authorizationConfig: { + defaultAuthorization: { + authorizationType: appsync.AuthorizationType.IAM, + }, + }, + }); + api_oidc = new appsync.GraphqlApi(stack, 'api_oidc', { + name: 'api', + authorizationConfig: { + defaultAuthorization: { + authorizationType: appsync.AuthorizationType.OIDC, + openIdConnectConfig: { oidcProvider: 'test' }, + }, + }, + }); + api_auth = new appsync.GraphqlApi(stack, 'api_cognito_default', { + name: 'api', + authorizationConfig: { + defaultAuthorization: { + authorizationType: appsync.AuthorizationType.USER_POOL, + userPoolConfig: { userPool }, + }, + }, + }); + api_cognito = new appsync.GraphqlApi(stack, 'api_cognito_additional', { + name: 'api', + authorizationConfig: { + additionalAuthorizationModes: [ + { + authorizationType: appsync.AuthorizationType.USER_POOL, + userPoolConfig: { userPool }, + }, + ], + }, + }); +}); + +const testObjectType = (IApi: appsync.GraphqlApi, directives: appsync.Directive[], tag: string): any => { + // WHEN + IApi.addType(new appsync.ObjectType('Test', { + definition: { + field: generateField(directives), + rfield: generateRField(directives), + }, + directives: directives, + })); + // THEN + expect(stack).toHaveResourceLike('AWS::AppSync::GraphQLSchema', { + Definition: `type Test ${tag} {\n field: String\n ${tag}\n rfield: String\n ${tag}\n}\n`, + }); +}; + +const testInterfaceType = (IApi: appsync.GraphqlApi, directives: appsync.Directive[], tag: string): any => { + // WHEN + IApi.addType(new appsync.InterfaceType('Test', { + definition: { + field: 
generateField(directives), + rfield: generateRField(directives), + }, + directives: directives, + })); + // THEN + expect(stack).toHaveResourceLike('AWS::AppSync::GraphQLSchema', { + Definition: `interface Test ${tag} {\n field: String\n ${tag}\n rfield: String\n ${tag}\n}\n`, + }); +}; + +describe('Basic Testing of Directives for Code-First', () => { + test('Iam directive configures in Object Type', () => { testObjectType(api_iam, iam, '@aws_iam'); }); + + test('Iam directive configures in Interface Type', () => { testInterfaceType(api_iam, iam, '@aws_iam'); }); + + test('Api Key directive configures in Object Type', () => { testObjectType(api_apiKey, apiKey, '@aws_api_key'); }); + + test('Api Key directive configures in Interface Type', () => { testInterfaceType(api_apiKey, apiKey, '@aws_api_key'); }); + + test('OIDC directive configures in Object Type', () => { testObjectType(api_oidc, oidc, '@aws_oidc'); }); + + test('OIDC directive configures in Interface Type', () => { testInterfaceType(api_oidc, oidc, '@aws_oidc'); }); + + test('Cognito as default directive configures in Object Type', () => { + testObjectType(api_auth, cognito_default, '@aws_auth(cognito_groups: ["test", "test2"])'); + }); + + test('Cognito as default directive configures in Interface Type', () => { + testInterfaceType(api_auth, cognito_default, '@aws_auth(cognito_groups: ["test", "test2"])'); + }); + + test('Cognito as additional directive configures in Object Type', () => { + testObjectType(api_cognito, cognito_additional, '@aws_cognito_user_pools(cognito_groups: ["test", "test2"])'); + }); + + test('Custom directive configures in Object Type', () => { + testObjectType(api_cognito, custom, 'custom'); + }); + + test('Custom directive configures in Interface Type', () => { + testInterfaceType(api_cognito, custom, 'custom'); + }); +}); \ No newline at end of file diff --git a/packages/@aws-cdk/aws-appsync/test/appsync-enum-type.test.ts 
b/packages/@aws-cdk/aws-appsync/test/appsync-enum-type.test.ts new file mode 100644 index 0000000000000..b937a0e12217d --- /dev/null +++ b/packages/@aws-cdk/aws-appsync/test/appsync-enum-type.test.ts @@ -0,0 +1,106 @@ +import '@aws-cdk/assert/jest'; +import * as cdk from '@aws-cdk/core'; +import * as appsync from '../lib'; +import * as t from './scalar-type-defintions'; + +const out = 'enum Test {\n test1\n test2\n test3\n}\n'; +let stack: cdk.Stack; +let api: appsync.GraphqlApi; +beforeEach(() => { + // GIVEN + stack = new cdk.Stack(); + api = new appsync.GraphqlApi(stack, 'api', { + name: 'api', + }); +}); + +describe('testing Enum Type properties', () => { + test('EnumType configures properly', () => { + // WHEN + const test = new appsync.EnumType('Test', { + definition: ['test1', 'test2', 'test3'], + }); + api.addType(test); + + // THEN + expect(stack).toHaveResourceLike('AWS::AppSync::GraphQLSchema', { + Definition: `${out}`, + }); + expect(stack).not.toHaveResource('AWS::AppSync::Resolver'); + }); + + test('EnumType can addField', () => { + // WHEN + const test = new appsync.EnumType('Test', { + definition: ['test1', 'test2'], + }); + api.addType(test); + test.addField({ fieldName: 'test3' }); + + // THEN + expect(stack).toHaveResourceLike('AWS::AppSync::GraphQLSchema', { + Definition: `${out}`, + }); + }); + + test('EnumType can be a GraphqlType', () => { + // WHEN + const test = new appsync.EnumType('Test', { + definition: ['test1', 'test2', 'test3'], + }); + api.addType(test); + + api.addType(new appsync.ObjectType('Test2', { + definition: { enum: test.attribute() }, + })); + + const obj = 'type Test2 {\n enum: Test\n}\n'; + + // THEN + expect(stack).toHaveResourceLike('AWS::AppSync::GraphQLSchema', { + Definition: `${out}${obj}`, + }); + }); + + test('errors when enum type is configured with white space', () => { + // THEN + expect(() => { + new appsync.EnumType('Test', { + definition: ['test 1', 'test2', 'test3'], + }); + }).toThrowError('Enum Type 
values cannot have whitespace. Received: test 1'); + }); + + test('errors when the fieldName in addField has white space', () => { + // WHEN + const test = new appsync.EnumType('Test', { + definition: [], + }); + // THEN + expect(() => { + test.addField({ fieldName: ' ' }); + }).toThrowError('Enum Type values cannot have whitespace. Received: '); + }); + + test('errors when enum type is configured with field options', () => { + // WHEN + const test = new appsync.EnumType('Test', { + definition: [], + }); + // THEN + expect(() => { + test.addField({ fieldName: 'test', field: t.string }); + }).toThrowError('Enum Type fields consist of strings. Use the fieldName option instead of the field option.'); + }); + + test('errors when enum type is missing fieldName option', () => { + // WHEN + const test = new appsync.EnumType('Test', { + definition: [], + }); + // THEN + expect(() => { + test.addField({}); + }).toThrowError('When adding a field to an Enum Type, you must configure the fieldName option.'); + }); +}); \ No newline at end of file diff --git a/packages/@aws-cdk/aws-appsync/test/appsync-http.test.ts b/packages/@aws-cdk/aws-appsync/test/appsync-http.test.ts index 6bc237e0f5c71..fbc9b7bc85c33 100644 --- a/packages/@aws-cdk/aws-appsync/test/appsync-http.test.ts +++ b/packages/@aws-cdk/aws-appsync/test/appsync-http.test.ts @@ -57,6 +57,35 @@ describe('Http Data Source configuration', () => { }); }); + test('appsync configures name, authorizationConfig correctly', () => { + // WHEN + api.addHttpDataSource('ds', endpoint, { + name: 'custom', + description: 'custom description', + authorizationConfig: { + signingRegion: 'us-east-1', + signingServiceName: 'states', + }, + }); + + // THEN + expect(stack).toHaveResourceLike('AWS::AppSync::DataSource', { + Type: 'HTTP', + Name: 'custom', + Description: 'custom description', + HttpConfig: { + Endpoint: endpoint, + AuthorizationConfig: { + AuthorizationType: 'AWS_IAM', + AwsIamConfig: { + SigningRegion: 'us-east-1', + 
SigningServiceName: 'states', + }, + }, + }, + }); + }); + test('appsync errors when creating multiple http data sources with no configuration', () => { // THEN expect(() => { @@ -97,4 +126,3 @@ describe('adding http data source from imported api', () => { }); }); - diff --git a/packages/@aws-cdk/aws-appsync/test/appsync-interface-type.test.ts b/packages/@aws-cdk/aws-appsync/test/appsync-interface-type.test.ts index 15525bdafe098..89a578fc0bd0b 100644 --- a/packages/@aws-cdk/aws-appsync/test/appsync-interface-type.test.ts +++ b/packages/@aws-cdk/aws-appsync/test/appsync-interface-type.test.ts @@ -89,6 +89,56 @@ describe('testing InterfaceType properties', () => { }); }); + test('Interface Type can generate Fields with Directives', () => { + // WHEN + const test = new appsync.InterfaceType('Test', { + definition: { + test: t.string, + }, + }); + test.addField({ + fieldName: 'resolve', + field: new appsync.Field({ + returnType: t.string, + directives: [appsync.Directive.apiKey()], + }), + }); + + api.addType(test); + const out = 'interface Test {\n test: String\n resolve: String\n @aws_api_key\n}\n'; + + // THEN + expect(stack).toHaveResourceLike('AWS::AppSync::GraphQLSchema', { + Definition: `${out}`, + }); + }); + + test('Interface Type can generate ResolvableFields with Directives, but not the resolver', () => { + // WHEN + const test = new appsync.InterfaceType('Test', { + definition: { + test: t.string, + }, + }); + test.addField({ + fieldName: 'resolve', + field: new appsync.ResolvableField({ + returnType: t.string, + directives: [appsync.Directive.apiKey()], + dataSource: api.addNoneDataSource('none'), + }), + }); + + api.addType(test); + const out = 'interface Test {\n test: String\n resolve: String\n @aws_api_key\n}\n'; + + // THEN + expect(stack).toHaveResourceLike('AWS::AppSync::GraphQLSchema', { + Definition: `${out}`, + }); + expect(stack).not.toHaveResource('AWS::AppSync::Resolver'); + }); + test('appsync fails addField with InterfaceType missing 
fieldName', () => { // WHEN const test = new appsync.InterfaceType('Test', { definition: {} }); diff --git a/packages/@aws-cdk/aws-appsync/test/appsync-object-type.test.ts b/packages/@aws-cdk/aws-appsync/test/appsync-object-type.test.ts index 9f18d68e4753d..77da20a58ca4c 100644 --- a/packages/@aws-cdk/aws-appsync/test/appsync-object-type.test.ts +++ b/packages/@aws-cdk/aws-appsync/test/appsync-object-type.test.ts @@ -29,8 +29,8 @@ describe('testing Object Type properties', () => { directives: [appsync.Directive.custom('@test')], }); - api.addToSchema(baseTest.toString()); - api.addToSchema(objectTest.toString()); + api.addType(baseTest); + api.addType(objectTest); const gql_interface = 'interface baseTest {\n id: ID\n}\n'; const gql_object = 'type objectTest implements baseTest @test {\n id2: ID\n id: ID\n}\n'; const out = `${gql_interface}${gql_object}`; @@ -56,9 +56,9 @@ describe('testing Object Type properties', () => { }, }); - api.addToSchema(baseTest.toString()); - api.addToSchema(anotherTest.toString()); - api.addToSchema(objectTest.toString()); + api.addType(baseTest); + api.addType(anotherTest); + api.addType(objectTest); const gql_interface = 'interface baseTest {\n id: ID\n}\ninterface anotherTest {\n id2: ID\n}\n'; const gql_object = 'type objectTest implements anotherTest, baseTest {\n id3: ID\n id2: ID\n id: ID\n}\n'; @@ -83,7 +83,7 @@ describe('testing Object Type properties', () => { test: graphqlType, }, }); - api.addToSchema(test.toString()); + api.addType(test); const out = 'type Test {\n test: baseTest\n}\n'; // THEN @@ -107,7 +107,7 @@ describe('testing Object Type properties', () => { resolve: field, }, }); - api.addToSchema(test.toString()); + api.addType(test); const out = 'type Test {\n test: String\n resolve(arg: Int): String\n}\n'; // THEN @@ -131,7 +131,7 @@ describe('testing Object Type properties', () => { resolve: field, }, }); - api.addToSchema(test.toString()); + api.addType(test); const out = 'type Test {\n test: String\n 
resolve(arg: Int): String\n}\n'; // THEN @@ -154,7 +154,7 @@ describe('testing Object Type properties', () => { }), }, }); - api.addToSchema(test.toString()); + api.addType(test); // THEN expect(stack).toHaveResourceLike('AWS::AppSync::Resolver', { @@ -168,9 +168,7 @@ describe('testing Object Type properties', () => { const field = new appsync.ResolvableField({ returnType: t.string, dataSource: api.addNoneDataSource('none'), - args: { - arg: t.int, - }, + args: { arg: t.int }, }); const test = new appsync.ObjectType('Test', { definition: { @@ -181,7 +179,7 @@ describe('testing Object Type properties', () => { // test.addField('resolve', field); test.addField({ fieldName: 'dynamic', field: t.string }); - api.addToSchema(test.toString()); + api.addType(test); const out = 'type Test {\n test: String\n resolve(arg: Int): String\n dynamic: String\n}\n'; // THEN @@ -191,31 +189,49 @@ describe('testing Object Type properties', () => { expect(stack).toHaveResource('AWS::AppSync::Resolver'); }); - test('Object Type can dynamically add Fields', () => { + test('Object Type can generate Fields with Directives', () => { // WHEN - const garbage = new appsync.InterfaceType('Garbage', { + const test = new appsync.ObjectType('Test', { definition: { - garbage: t.string, + test: t.string, }, }); + test.addField({ + fieldName: 'resolve', + field: new appsync.Field({ + returnType: t.string, + directives: [appsync.Directive.apiKey()], + }), + }); + + api.addType(test); + const out = 'type Test {\n test: String\n resolve: String\n @aws_api_key\n}\n'; + + // THEN + expect(stack).toHaveResourceLike('AWS::AppSync::GraphQLSchema', { + Definition: `${out}`, + }); + }); + + test('Object Type can generate ResolvableFields with Directives', () => { + // WHEN const test = new appsync.ObjectType('Test', { definition: { test: t.string, }, }); const field = new appsync.ResolvableField({ - returnType: garbage.attribute(), + returnType: t.string, + directives: [appsync.Directive.apiKey()], dataSource: 
api.addNoneDataSource('none'), args: { - arg: garbage.attribute(), + arg: t.string, }, }); test.addField({ fieldName: 'resolve', field }); - // test.addField('resolve', field); - test.addField({ fieldName: 'dynamic', field: garbage.attribute() }); - api.addToSchema(test.toString()); - const out = 'type Test {\n test: String\n resolve(arg: Garbage): Garbage\n dynamic: Garbage\n}\n'; + api.addType(test); + const out = 'type Test {\n test: String\n resolve(arg: String): String\n @aws_api_key\n}\n'; // THEN expect(stack).toHaveResourceLike('AWS::AppSync::GraphQLSchema', { diff --git a/packages/@aws-cdk/aws-appsync/test/appsync-schema.test.ts b/packages/@aws-cdk/aws-appsync/test/appsync-schema.test.ts index 95fe1ac7c500a..fafc4e20c2e8a 100644 --- a/packages/@aws-cdk/aws-appsync/test/appsync-schema.test.ts +++ b/packages/@aws-cdk/aws-appsync/test/appsync-schema.test.ts @@ -124,6 +124,56 @@ describe('basic testing schema definition mode `code`', () => { Definition: 'schema {\n mutation: Mutation\n}\ntype Mutation {\n test: String\n}\n', }); }); + + test('definition mode `code` allows for api to addSubscription', () => { + // WHEN + const api = new appsync.GraphqlApi(stack, 'API', { + name: 'demo', + }); + api.addSubscription('test', new appsync.ResolvableField({ + returnType: t.string, + })); + + // THEN + expect(stack).toHaveResourceLike('AWS::AppSync::GraphQLSchema', { + Definition: 'schema {\n subscription: Subscription\n}\ntype Subscription {\n test: String\n}\n', + }); + }); + + test('definition mode `code` allows for schema to addSubscription', () => { + // WHEN + const schema = new appsync.Schema(); + new appsync.GraphqlApi(stack, 'API', { + name: 'demo', + schema, + }); + schema.addSubscription('test', new appsync.ResolvableField({ + returnType: t.string, + })); + + // THEN + expect(stack).toHaveResourceLike('AWS::AppSync::GraphQLSchema', { + Definition: 'schema {\n subscription: Subscription\n}\ntype Subscription {\n test: String\n}\n', + }); + }); + + 
test('definition mode `code` addSubscription w/ @aws_subscribe', () => { + // WHE + const api = new appsync.GraphqlApi(stack, 'API', { + name: 'demo', + }); + api.addSubscription('test', new appsync.ResolvableField({ + returnType: t.string, + directives: [appsync.Directive.subscribe('test1')], + })); + + const out = 'schema {\n subscription: Subscription\n}\ntype Subscription {\n test: String\n @aws_subscribe(mutations: ["test1"])\n}\n'; + + // THEN + expect(stack).toHaveResourceLike('AWS::AppSync::GraphQLSchema', { + Definition: out, + }); + }); }); describe('testing schema definition mode `file`', () => { @@ -194,7 +244,7 @@ describe('testing schema definition mode `file`', () => { // THEN expect(() => { api.addQuery('blah', new appsync.ResolvableField({ returnType: t.string })); - }).toThrowError('Unable to add query. Schema definition mode must be CODE Received: FILE'); + }).toThrowError('Unable to add query. Schema definition mode must be CODE. Received: FILE'); }); test('definition mode `file` errors when addMutation is called', () => { @@ -207,6 +257,19 @@ describe('testing schema definition mode `file`', () => { // THEN expect(() => { api.addMutation('blah', new appsync.ResolvableField({ returnType: t.string })); - }).toThrowError('Unable to add mutation. Schema definition mode must be CODE Received: FILE'); + }).toThrowError('Unable to add mutation. Schema definition mode must be CODE. Received: FILE'); + }); + + test('definition mode `file` errors when addSubscription is called', () => { + // WHEN + const api = new appsync.GraphqlApi(stack, 'API', { + name: 'demo', + schema: appsync.Schema.fromAsset(join(__dirname, 'appsync.test.graphql')), + }); + + // THEN + expect(() => { + api.addSubscription('blah', new appsync.ResolvableField({ returnType: t.string })); + }).toThrowError('Unable to add subscription. Schema definition mode must be CODE. 
Received: FILE'); }); }); \ No newline at end of file diff --git a/packages/@aws-cdk/aws-appsync/test/appsync-union-types.test.ts b/packages/@aws-cdk/aws-appsync/test/appsync-union-types.test.ts new file mode 100644 index 0000000000000..7cf14b9c2f870 --- /dev/null +++ b/packages/@aws-cdk/aws-appsync/test/appsync-union-types.test.ts @@ -0,0 +1,152 @@ +import '@aws-cdk/assert/jest'; +import * as cdk from '@aws-cdk/core'; +import * as appsync from '../lib'; +import * as t from './scalar-type-defintions'; + +const out = 'type Test1 {\n test1: String\n}\ntype Test2 {\n test2: String\n}\nunion UnionTest = Test1 | Test2\n'; +const test1 = new appsync.ObjectType('Test1', { + definition: { test1: t.string }, +}); +const test2 = new appsync.ObjectType('Test2', { + definition: { test2: t.string }, +}); +let stack: cdk.Stack; +let api: appsync.GraphqlApi; +beforeEach(() => { + // GIVEN + stack = new cdk.Stack(); + api = new appsync.GraphqlApi(stack, 'api', { + name: 'api', + }); + api.addType(test1); + api.addType(test2); +}); + +describe('testing Union Type properties', () => { + test('UnionType configures properly', () => { + // WHEN + const union = new appsync.UnionType('UnionTest', { + definition: [test1, test2], + }); + api.addType(union); + // THEN + expect(stack).toHaveResourceLike('AWS::AppSync::GraphQLSchema', { + Definition: `${out}`, + }); + expect(stack).not.toHaveResource('AWS::AppSync::Resolver'); + }); + + test('UnionType can addField', () => { + // WHEN + const union = new appsync.UnionType('UnionTest', { + definition: [test1], + }); + api.addType(union); + union.addField({ field: test2.attribute() }); + + // THEN + expect(stack).toHaveResourceLike('AWS::AppSync::GraphQLSchema', { + Definition: `${out}`, + }); + }); + + test('UnionType errors when addField is configured with fieldName option', () => { + // WHEN + const union = new appsync.UnionType('UnionTest', { + definition: [test1], + }); + api.addType(union); + + // THEN + expect(() => { + union.addField({ 
fieldName: 'fail', field: test2.attribute() }); + }).toThrowError('Union Types cannot be configured with the fieldName option. Use the field option instead.'); + }); + + test('UnionType errors when addField is not configured with field option', () => { + // WHEN + const union = new appsync.UnionType('UnionTest', { + definition: [test1], + }); + api.addType(union); + + // THEN + expect(() => { + union.addField({}); + }).toThrowError('Union Types must be configured with the field option.'); + }); + + test('UnionType can be a GraphqlType', () => { + // WHEN + const union = new appsync.UnionType('UnionTest', { + definition: [test1, test2], + }); + api.addType(union); + + api.addType(new appsync.ObjectType('Test2', { + definition: { union: union.attribute() }, + })); + + const obj = 'type Test2 {\n union: UnionTest\n}\n'; + + // THEN + expect(stack).toHaveResourceLike('AWS::AppSync::GraphQLSchema', { + Definition: `${out}${obj}`, + }); + }); + + test('appsync errors when addField with Graphql Types', () => { + // WHEN + const test = new appsync.UnionType('Test', { + definition: [], + }); + // THEN + expect(() => { + test.addField({ field: t.string }); + }).toThrowError('Fields for Union Types must be Object Types.'); + }); + + test('appsync errors when addField with Field', () => { + // WHEN + const test = new appsync.UnionType('Test', { + definition: [], + }); + // THEN + expect(() => { + test.addField({ field: new appsync.Field({ returnType: t.string }) }); + }).toThrowError('Fields for Union Types must be Object Types.'); + }); + + test('appsync errors when addField with ResolvableField', () => { + // WHEN + const test = new appsync.UnionType('Test', { + definition: [], + }); + // THEN + expect(() => { + test.addField({ field: new appsync.ResolvableField({ returnType: t.string }) }); + }).toThrowError('Fields for Union Types must be Object Types.'); + }); + + test('appsync errors when addField with Interface Types', () => { + // WHEN + const test = new 
appsync.UnionType('Test', { + definition: [], + }); + // THEN + expect(() => { + test.addField({ field: new appsync.InterfaceType('break', { definition: {} }).attribute() }); + }).toThrowError('Fields for Union Types must be Object Types.'); + }); + + test('appsync errors when addField with Union Types', () => { + // WHEN + const test = new appsync.UnionType('Test', { + definition: [], + }); + // THEN + expect(() => { + test.addField({ field: test.attribute() }); + }).toThrowError('Fields for Union Types must be Object Types.'); + }); +}); \ No newline at end of file diff --git a/packages/@aws-cdk/aws-appsync/test/appsync.auth.graphql b/packages/@aws-cdk/aws-appsync/test/appsync.auth.graphql new file mode 100644 index 0000000000000..e59469dedfd10 --- /dev/null +++ b/packages/@aws-cdk/aws-appsync/test/appsync.auth.graphql @@ -0,0 +1,12 @@ +type test { + id: Int! + version: String! +} + +type Query { + getTests: [ test! ] +} + +type Mutation { + addTest(version: String!): test! +} \ No newline at end of file diff --git a/packages/@aws-cdk/aws-appsync/test/integ.auth-apikey.expected.json b/packages/@aws-cdk/aws-appsync/test/integ.auth-apikey.expected.json new file mode 100644 index 0000000000000..17fec6fbed787 --- /dev/null +++ b/packages/@aws-cdk/aws-appsync/test/integ.auth-apikey.expected.json @@ -0,0 +1,186 @@ +{ + "Resources": { + "ApiF70053CD": { + "Type": "AWS::AppSync::GraphQLApi", + "Properties": { + "AuthenticationType": "API_KEY", + "Name": "Integ_Test_APIKey" + } + }, + "ApiSchema510EECD7": { + "Type": "AWS::AppSync::GraphQLSchema", + "Properties": { + "ApiId": { + "Fn::GetAtt": [ + "ApiF70053CD", + "ApiId" + ] + }, + "Definition": "type test {\n id: Int!\n version: String!\n}\n\ntype Query {\n getTests: [ test! 
]\n}\n\ntype Mutation {\n addTest(version: String!): test!\n}" + } + }, + "ApiDefaultApiKeyF991C37B": { + "Type": "AWS::AppSync::ApiKey", + "Properties": { + "ApiId": { + "Fn::GetAtt": [ + "ApiF70053CD", + "ApiId" + ] + }, + "Expires": 1626566400 + }, + "DependsOn": [ + "ApiSchema510EECD7" + ] + }, + "ApitestDataSourceServiceRoleACBC3F3D": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "appsync.amazonaws.com" + } + } + ], + "Version": "2012-10-17" + } + } + }, + "ApitestDataSourceServiceRoleDefaultPolicy897CD912": { + "Type": "AWS::IAM::Policy", + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "dynamodb:BatchGetItem", + "dynamodb:GetRecords", + "dynamodb:GetShardIterator", + "dynamodb:Query", + "dynamodb:GetItem", + "dynamodb:Scan", + "dynamodb:BatchWriteItem", + "dynamodb:PutItem", + "dynamodb:UpdateItem", + "dynamodb:DeleteItem" + ], + "Effect": "Allow", + "Resource": [ + { + "Fn::GetAtt": [ + "TestTable5769773A", + "Arn" + ] + }, + { + "Ref": "AWS::NoValue" + } + ] + } + ], + "Version": "2012-10-17" + }, + "PolicyName": "ApitestDataSourceServiceRoleDefaultPolicy897CD912", + "Roles": [ + { + "Ref": "ApitestDataSourceServiceRoleACBC3F3D" + } + ] + } + }, + "ApitestDataSource96AE54D5": { + "Type": "AWS::AppSync::DataSource", + "Properties": { + "ApiId": { + "Fn::GetAtt": [ + "ApiF70053CD", + "ApiId" + ] + }, + "Name": "testDataSource", + "Type": "AMAZON_DYNAMODB", + "DynamoDBConfig": { + "AwsRegion": { + "Ref": "AWS::Region" + }, + "TableName": { + "Ref": "TestTable5769773A" + } + }, + "ServiceRoleArn": { + "Fn::GetAtt": [ + "ApitestDataSourceServiceRoleACBC3F3D", + "Arn" + ] + } + } + }, + "ApitestDataSourceQuerygetTestsResolverA3BBB672": { + "Type": "AWS::AppSync::Resolver", + "Properties": { + "ApiId": { + "Fn::GetAtt": [ + "ApiF70053CD", + "ApiId" + ] + }, + "FieldName": "getTests", + "TypeName": 
"Query", + "DataSourceName": "testDataSource", + "Kind": "UNIT", + "RequestMappingTemplate": "{\"version\" : \"2017-02-28\", \"operation\" : \"Scan\"}", + "ResponseMappingTemplate": "$util.toJson($ctx.result.items)" + }, + "DependsOn": [ + "ApiSchema510EECD7", + "ApitestDataSource96AE54D5" + ] + }, + "ApitestDataSourceMutationaddTestResolver36203D6B": { + "Type": "AWS::AppSync::Resolver", + "Properties": { + "ApiId": { + "Fn::GetAtt": [ + "ApiF70053CD", + "ApiId" + ] + }, + "FieldName": "addTest", + "TypeName": "Mutation", + "DataSourceName": "testDataSource", + "Kind": "UNIT", + "RequestMappingTemplate": "\n #set($input = $ctx.args.test)\n \n {\n \"version\": \"2017-02-28\",\n \"operation\": \"PutItem\",\n \"key\" : {\n \"id\" : $util.dynamodb.toDynamoDBJson($util.autoId())\n },\n \"attributeValues\": $util.dynamodb.toMapValuesJson($input)\n }", + "ResponseMappingTemplate": "$util.toJson($ctx.result)" + }, + "DependsOn": [ + "ApiSchema510EECD7", + "ApitestDataSource96AE54D5" + ] + }, + "TestTable5769773A": { + "Type": "AWS::DynamoDB::Table", + "Properties": { + "KeySchema": [ + { + "AttributeName": "id", + "KeyType": "HASH" + } + ], + "AttributeDefinitions": [ + { + "AttributeName": "id", + "AttributeType": "S" + } + ], + "BillingMode": "PAY_PER_REQUEST" + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + } + } +} \ No newline at end of file diff --git a/packages/@aws-cdk/aws-appsync/test/integ.auth-apikey.ts b/packages/@aws-cdk/aws-appsync/test/integ.auth-apikey.ts new file mode 100644 index 0000000000000..5ddcb9abd1dbb --- /dev/null +++ b/packages/@aws-cdk/aws-appsync/test/integ.auth-apikey.ts @@ -0,0 +1,63 @@ +import { join } from 'path'; +import { AttributeType, BillingMode, Table } from '@aws-cdk/aws-dynamodb'; +import { App, RemovalPolicy, Stack, Expiration } from '@aws-cdk/core'; +import { AuthorizationType, GraphqlApi, MappingTemplate, PrimaryKey, Schema, Values } from '../lib'; + +/* + * Creates an Appsync GraphQL API with API_KEY 
authorization. + * Testing for API_KEY Authorization. + * + * Stack verification steps: + * Deploy stack, get api-key and endpoint. + * Check if authorization occurs with empty get. + * + * -- bash verify.integ.auth-apikey.sh --start -- deploy stack -- + * -- aws appsync list-graphql-apis -- obtain api id && endpoint -- + * -- aws appsync list-api-keys --api-id [API ID] -- obtain api key -- + * -- bash verify.integ.auth-apikey.sh --check [APIKEY] [ENDPOINT] -- check if fails/success -- + * -- bash verify.integ.auth-apikey.sh --clean -- clean dependencies/stack -- + */ + +const app = new App(); +const stack = new Stack(app, 'aws-appsync-integ'); + +const api = new GraphqlApi(stack, 'Api', { + name: 'Integ_Test_APIKey', + schema: Schema.fromAsset(join(__dirname, 'appsync.auth.graphql')), + authorizationConfig: { + defaultAuthorization: { + authorizationType: AuthorizationType.API_KEY, + apiKeyConfig: { + // Generate a timestamp that's 365 days ahead, use atTimestamp so integ test doesn't fail + expires: Expiration.atTimestamp(1626566400000), + }, + }, + }, +}); + +const testTable = new Table(stack, 'TestTable', { + billingMode: BillingMode.PAY_PER_REQUEST, + partitionKey: { + name: 'id', + type: AttributeType.STRING, + }, + removalPolicy: RemovalPolicy.DESTROY, +}); + +const testDS = api.addDynamoDbDataSource('testDataSource', testTable); + +testDS.createResolver({ + typeName: 'Query', + fieldName: 'getTests', + requestMappingTemplate: MappingTemplate.dynamoDbScanTable(), + responseMappingTemplate: MappingTemplate.dynamoDbResultList(), +}); + +testDS.createResolver({ + typeName: 'Mutation', + fieldName: 'addTest', + requestMappingTemplate: MappingTemplate.dynamoDbPutItem(PrimaryKey.partition('id').auto(), Values.projecting('test')), + responseMappingTemplate: MappingTemplate.dynamoDbResultItem(), +}); + +app.synth(); \ No newline at end of file diff --git a/packages/@aws-cdk/aws-appsync/test/integ.graphql-schema.expected.json 
b/packages/@aws-cdk/aws-appsync/test/integ.graphql-schema.expected.json index a1902fb2ad368..1e49940a8f92a 100644 --- a/packages/@aws-cdk/aws-appsync/test/integ.graphql-schema.expected.json +++ b/packages/@aws-cdk/aws-appsync/test/integ.graphql-schema.expected.json @@ -16,7 +16,7 @@ "ApiId" ] }, - "Definition": "schema {\n query: Query\n mutation: Mutation\n}\ninterface Node {\n created: String\n edited: String\n id: ID!\n}\ntype Planet {\n name: String\n diameter: Int\n rotationPeriod: Int\n orbitalPeriod: Int\n gravity: String\n population: [String]\n climates: [String]\n terrains: [String]\n surfaceWater: Float\n created: String\n edited: String\n id: ID!\n}\ntype Species implements Node {\n name: String\n classification: String\n designation: String\n averageHeight: Float\n averageLifespan: Int\n eyeColors: [String]\n hairColors: [String]\n skinColors: [String]\n language: String\n homeworld: Planet\n created: String\n edited: String\n id: ID!\n}\ntype Query {\n getPlanets: [Planet]\n}\ntype Mutation {\n addPlanet(name: String diameter: Int rotationPeriod: Int orbitalPeriod: Int gravity: String population: [String] climates: [String] terrains: [String] surfaceWater: Float): Planet\n}\ninput input {\n awesomeInput: String\n}\n" + "Definition": "schema {\n query: Query\n mutation: Mutation\n subscription: Subscription\n}\ninterface Node {\n created: String\n edited: String\n id: ID!\n}\ntype Planet {\n name: String\n diameter: Int\n rotationPeriod: Int\n orbitalPeriod: Int\n gravity: String\n population: [String]\n climates: [String]\n terrains: [String]\n surfaceWater: Float\n created: String\n edited: String\n id: ID!\n}\ntype Species implements Node {\n name: String\n classification: String\n designation: String\n averageHeight: Float\n averageLifespan: Int\n eyeColors: [String]\n hairColors: [String]\n skinColors: [String]\n language: String\n homeworld: Planet\n created: String\n edited: String\n id: ID!\n}\ntype Query {\n getPlanets: [Planet]\n}\ntype 
Mutation {\n addPlanet(name: String diameter: Int rotationPeriod: Int orbitalPeriod: Int gravity: String population: [String] climates: [String] terrains: [String] surfaceWater: Float): Planet\n}\ntype Subscription {\n addedPlanets(id: ID!): Planet\n @aws_subscribe(mutations: [\"addPlanet\"])\n}\ninput AwesomeInput {\n awesomeInput: String\n}\nenum Episodes {\n The_Phantom_Menace\n Attack_of_the_Clones\n Revenge_of_the_Sith\n A_New_Hope\n The_Empire_Strikes_Back\n Return_of_the_Jedi\n The_Force_Awakens\n The_Last_Jedi\n The_Rise_of_Skywalker\n}\nunion Union = Species | Planet\n" } }, "codefirstapiDefaultApiKey89863A80": { diff --git a/packages/@aws-cdk/aws-appsync/test/integ.graphql-schema.ts b/packages/@aws-cdk/aws-appsync/test/integ.graphql-schema.ts index 870af80a51e06..4c02dd07f4a5a 100644 --- a/packages/@aws-cdk/aws-appsync/test/integ.graphql-schema.ts +++ b/packages/@aws-cdk/aws-appsync/test/integ.graphql-schema.ts @@ -21,13 +21,15 @@ const stack = new cdk.Stack(app, 'code-first-schema'); const schema = new appsync.Schema(); -const node = schema.addType(new appsync.InterfaceType('Node', { +const node = new appsync.InterfaceType('Node', { definition: { created: ScalarType.string, edited: ScalarType.string, id: ScalarType.required_id, }, -})); +}); + +schema.addType(node); const api = new appsync.GraphqlApi(stack, 'code-first-api', { name: 'api', @@ -44,9 +46,9 @@ const table = new db.Table(stack, 'table', { const tableDS = api.addDynamoDbDataSource('planets', table); const planet = ObjectType.planet; -schema.addToSchema(planet.toString()); +schema.addType(planet); -api.addType(new appsync.ObjectType('Species', { +const species = api.addType(new appsync.ObjectType('Species', { interfaceTypes: [node], definition: { name: ScalarType.string, @@ -101,8 +103,31 @@ api.addMutation('addPlanet', new appsync.ResolvableField({ responseMappingTemplate: appsync.MappingTemplate.dynamoDbResultItem(), })); -api.addType(new appsync.InputType('input', { 
+api.addSubscription('addedPlanets', new appsync.ResolvableField({ + returnType: planet.attribute(), + args: { id: ScalarType.required_id }, + directives: [appsync.Directive.subscribe('addPlanet')], +})); +api.addType(new appsync.InputType('AwesomeInput', { definition: { awesomeInput: ScalarType.string }, })); +api.addType(new appsync.EnumType('Episodes', { + definition: [ + 'The_Phantom_Menace', + 'Attack_of_the_Clones', + 'Revenge_of_the_Sith', + 'A_New_Hope', + 'The_Empire_Strikes_Back', + 'Return_of_the_Jedi', + 'The_Force_Awakens', + 'The_Last_Jedi', + 'The_Rise_of_Skywalker', + ], +})); + +api.addType(new appsync.UnionType('Union', { + definition: [species, planet], +})); + app.synth(); \ No newline at end of file diff --git a/packages/@aws-cdk/aws-appsync/test/verify.integ.auth-apikey.sh b/packages/@aws-cdk/aws-appsync/test/verify.integ.auth-apikey.sh new file mode 100644 index 0000000000000..2102f10d627e4 --- /dev/null +++ b/packages/@aws-cdk/aws-appsync/test/verify.integ.auth-apikey.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +function error { + printf "\e[91;5;81m$@\e[0m\n" +} + +function usage { + echo "###############################################################################" + echo "# run 'verify.integ.auth-apikey.sh --start' to deploy #" + echo "# run 'verify.integ.auth-apikey.sh --check [APIKEY] [ENDPOINT]' to run check #" + echo "# run 'verify.integ.auth-apikey.sh --clean' to clean up stack #" + echo "###############################################################################" +} + +if [[ "$1" == "--start" ]]; then + cdk deploy --app "node integ.auth-apikey.js" +elif [[ "$1" == "--check" ]]; then + if [[ -z $2 || -z $3 ]]; then + error "Error: --check flag requires [APIKEY] [ENDPOINT]" + usage + exit 1 + fi + echo THIS TEST SHOULD FAIL + curl -XPOST -H "Content-Type:application/graphql" -H "x-api-key:garbage" -d '{ "query": "query { getTests { id version } }" }" }' $3 + echo "" + echo "" + echo THIS TEST SHOULD SUCCEED + curl -XPOST -H 
"Content-Type:application/graphql" -H "x-api-key:$2" -d '{ "query": "query { getTests { id version } }" }' $3 + echo "" +elif [[ "$1" == "--clean" ]];then + cdk destroy --app "node integ.auth-apikey.js" +else + error "Error: use flags --start, --check, --clean" + usage + exit 1 +fi + diff --git a/packages/@aws-cdk/aws-autoscaling/lib/auto-scaling-group.ts b/packages/@aws-cdk/aws-autoscaling/lib/auto-scaling-group.ts index d3056d32d418a..a030cead38957 100644 --- a/packages/@aws-cdk/aws-autoscaling/lib/auto-scaling-group.ts +++ b/packages/@aws-cdk/aws-autoscaling/lib/auto-scaling-group.ts @@ -6,7 +6,7 @@ import * as iam from '@aws-cdk/aws-iam'; import * as sns from '@aws-cdk/aws-sns'; import { - CfnAutoScalingRollingUpdate, Construct, Duration, Fn, IResource, Lazy, PhysicalName, Resource, Stack, + Annotations, CfnAutoScalingRollingUpdate, Construct, Duration, Fn, IResource, Lazy, PhysicalName, Resource, Stack, Tokenization, withResolved, Tags, } from '@aws-cdk/core'; import { CfnAutoScalingGroup, CfnAutoScalingGroupProps, CfnLaunchConfiguration } from './autoscaling.generated'; @@ -659,7 +659,7 @@ export class AutoScalingGroup extends AutoScalingGroupBase implements }); if (desiredCapacity !== undefined) { - this.node.addWarning('desiredCapacity has been configured. Be aware this will reset the size of your AutoScalingGroup on every deployment. See https://github.com/aws/aws-cdk/issues/5215'); + Annotations.of(this).addWarning('desiredCapacity has been configured. Be aware this will reset the size of your AutoScalingGroup on every deployment. 
See https://github.com/aws/aws-cdk/issues/5215'); } this.maxInstanceLifetime = props.maxInstanceLifetime; @@ -1259,7 +1259,7 @@ function synthesizeBlockDeviceMappings(construct: Construct, blockDevices: Block throw new Error('iops property is required with volumeType: EbsDeviceVolumeType.IO1'); } } else if (volumeType !== EbsDeviceVolumeType.IO1) { - construct.node.addWarning('iops will be ignored without volumeType: EbsDeviceVolumeType.IO1'); + Annotations.of(construct).addWarning('iops will be ignored without volumeType: EbsDeviceVolumeType.IO1'); } } diff --git a/packages/@aws-cdk/aws-batch/lib/compute-environment.ts b/packages/@aws-cdk/aws-batch/lib/compute-environment.ts index 35e56093afd94..5e20606a0e762 100644 --- a/packages/@aws-cdk/aws-batch/lib/compute-environment.ts +++ b/packages/@aws-cdk/aws-batch/lib/compute-environment.ts @@ -1,6 +1,6 @@ import * as ec2 from '@aws-cdk/aws-ec2'; import * as iam from '@aws-cdk/aws-iam'; -import { Construct, IResource, Resource, Stack, Tag } from '@aws-cdk/core'; +import { Construct, IResource, Resource, Stack } from '@aws-cdk/core'; import { CfnComputeEnvironment } from './batch.generated'; /** @@ -210,7 +210,9 @@ export interface ComputeResources { * * @default - no tags will be assigned on compute resources. 
*/ - readonly computeResourcesTags?: Tag; + readonly computeResourcesTags?: { + [key: string]: string + }; } /** diff --git a/packages/@aws-cdk/aws-batch/test/compute-environment.test.ts b/packages/@aws-cdk/aws-batch/test/compute-environment.test.ts index 4013233c32e64..42c958473f1c3 100644 --- a/packages/@aws-cdk/aws-batch/test/compute-environment.test.ts +++ b/packages/@aws-cdk/aws-batch/test/compute-environment.test.ts @@ -161,7 +161,10 @@ describe('Batch Compute Evironment', () => { computeResources: { allocationStrategy: batch.AllocationStrategy.BEST_FIT, vpc, - computeResourcesTags: new cdk.Tag('foo', 'bar'), + computeResourcesTags: { + 'Name': 'AWS Batch Instance - C4OnDemand', + 'Tag Other': 'Has other value', + }, desiredvCpus: 1, ec2KeyPair: 'my-key-pair', image: new ecs.EcsOptimizedAmi({ @@ -244,10 +247,8 @@ describe('Batch Compute Evironment', () => { }, ], Tags: { - key: 'foo', - props: {}, - defaultPriority: 100, - value: 'bar', + 'Name': 'AWS Batch Instance - C4OnDemand', + 'Tag Other': 'Has other value', }, Type: 'EC2', }, diff --git a/packages/@aws-cdk/aws-batch/test/integ.batch.expected.json b/packages/@aws-cdk/aws-batch/test/integ.batch.expected.json index 80d101f22547c..09d021e49bd9d 100644 --- a/packages/@aws-cdk/aws-batch/test/integ.batch.expected.json +++ b/packages/@aws-cdk/aws-batch/test/integ.batch.expected.json @@ -859,6 +859,9 @@ "Ref": "vpcPrivateSubnet3Subnet985AC459" } ], + "Tags": { + "compute-env-tag": "123XYZ" + }, "Type": "EC2" }, "State": "ENABLED" @@ -1351,4 +1354,4 @@ } } } -} +} \ No newline at end of file diff --git a/packages/@aws-cdk/aws-batch/test/integ.batch.ts b/packages/@aws-cdk/aws-batch/test/integ.batch.ts index c92ba719f9d96..4e19da37ca897 100644 --- a/packages/@aws-cdk/aws-batch/test/integ.batch.ts +++ b/packages/@aws-cdk/aws-batch/test/integ.batch.ts @@ -43,6 +43,9 @@ new batch.JobQueue(stack, 'batch-job-queue', { launchTemplate: { launchTemplateName: launchTemplate.launchTemplateName as string, }, + 
computeResourcesTags: { + 'compute-env-tag': '123XYZ', + }, }, }), order: 2, diff --git a/packages/@aws-cdk/aws-chatbot/README.md b/packages/@aws-cdk/aws-chatbot/README.md index 83d6afcef7773..b45609ebc066f 100644 --- a/packages/@aws-cdk/aws-chatbot/README.md +++ b/packages/@aws-cdk/aws-chatbot/README.md @@ -35,3 +35,19 @@ slackChannel.addToPrincipalPolicy(new iam.PolicyStatement({ resources: ['arn:aws:s3:::abc/xyz/123.txt'], })); ``` + +### Log Group + +Slack channel configuration automatically creates a log group with the name `/aws/chatbot/` in `us-east-1` upon first execution with +log data set to never expire. + +The `logRetention` property can be used to set a different expiration period. A log group will be created if it does not already exist. +If the log group already exists, its expiration will be configured to the value specified in this construct (never expire, by default). + +By default, CDK uses the AWS SDK retry options when interacting with the log group. The `logRetentionRetryOptions` property +allows you to customize the maximum number of retries and base backoff duration. + +*Note* that, if `logRetention` is set, a [CloudFormation custom +resource](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cfn-customresource.html) is added +to the stack that pre-creates the log group as part of the stack deployment, if it doesn't already exist, and sets the +correct log retention period (never expire, by default). 
diff --git a/packages/@aws-cdk/aws-chatbot/lib/slack-channel-configuration.ts b/packages/@aws-cdk/aws-chatbot/lib/slack-channel-configuration.ts index d7746c6b8d679..e686d5b8b3209 100644 --- a/packages/@aws-cdk/aws-chatbot/lib/slack-channel-configuration.ts +++ b/packages/@aws-cdk/aws-chatbot/lib/slack-channel-configuration.ts @@ -1,4 +1,6 @@ +import * as cloudwatch from '@aws-cdk/aws-cloudwatch'; import * as iam from '@aws-cdk/aws-iam'; +import * as logs from '@aws-cdk/aws-logs'; import * as sns from '@aws-cdk/aws-sns'; import * as cdk from '@aws-cdk/core'; import { CfnSlackChannelConfiguration } from './chatbot.generated'; @@ -52,6 +54,31 @@ export interface SlackChannelConfigurationProps { * @default LoggingLevel.NONE */ readonly loggingLevel?: LoggingLevel; + + /** + * The number of days log events are kept in CloudWatch Logs. When updating + * this property, unsetting it doesn't remove the log retention policy. To + * remove the retention policy, set the value to `INFINITE`. + * + * @default logs.RetentionDays.INFINITE + */ + readonly logRetention?: logs.RetentionDays; + + /** + * The IAM role for the Lambda function associated with the custom resource + * that sets the retention policy. + * + * @default - A new role is created. + */ + readonly logRetentionRole?: iam.IRole; + + /** + * When log retention is specified, a custom resource attempts to create the CloudWatch log group. + * These options control the retry policy when interacting with CloudWatch APIs. + * + * @default - Default AWS SDK retry options. + */ + readonly logRetentionRetryOptions?: logs.LogRetentionRetryOptions; } /** @@ -104,6 +131,11 @@ export interface ISlackChannelConfiguration extends cdk.IResource, iam.IGrantabl * Adds a statement to the IAM role. 
*/ addToRolePolicy(statement: iam.PolicyStatement): void; + + /** + * Return the given named metric for this SlackChannelConfiguration + */ + metric(metricName: string, props?: cloudwatch.MetricOptions): cloudwatch.Metric; } /** @@ -129,6 +161,23 @@ abstract class SlackChannelConfigurationBase extends cdk.Resource implements ISl this.role.addToPrincipalPolicy(statement); } + + /** + * Return the given named metric for this SlackChannelConfiguration + */ + public metric(metricName: string, props?: cloudwatch.MetricOptions): cloudwatch.Metric { + // AWS Chatbot publishes metrics to us-east-1 regardless of stack region + // https://docs.aws.amazon.com/chatbot/latest/adminguide/monitoring-cloudwatch.html + return new cloudwatch.Metric({ + namespace: 'AWS/Chatbot', + region: 'us-east-1', + dimensions: { + ConfigurationName: this.slackChannelConfigurationName, + }, + metricName, + ...props, + }); + } } /** @@ -180,6 +229,20 @@ export class SlackChannelConfiguration extends SlackChannelConfigurationBase { return new Import(scope, id); } + /** + * Return the given named metric for All SlackChannelConfigurations + */ + public static metricAll(metricName: string, props?: cloudwatch.MetricOptions): cloudwatch.Metric { + // AWS Chatbot publishes metrics to us-east-1 regardless of stack region + // https://docs.aws.amazon.com/chatbot/latest/adminguide/monitoring-cloudwatch.html + return new cloudwatch.Metric({ + namespace: 'AWS/Chatbot', + region: 'us-east-1', + metricName, + ...props, + }); + } + readonly slackChannelConfigurationArn: string; readonly slackChannelConfigurationName: string; @@ -208,6 +271,18 @@ export class SlackChannelConfiguration extends SlackChannelConfigurationBase { loggingLevel: props.loggingLevel?.toString(), }); + // Log retention + // AWS Chatbot publishes logs to us-east-1 regardless of stack region https://docs.aws.amazon.com/chatbot/latest/adminguide/cloudwatch-logs.html + if (props.logRetention) { + new logs.LogRetention(this, 'LogRetention', { + 
logGroupName: `/aws/chatbot/${props.slackChannelConfigurationName}`, + retention: props.logRetention, + role: props.logRetentionRole, + logGroupRegion: 'us-east-1', + logRetentionRetryOptions: props.logRetentionRetryOptions, + }); + } + this.slackChannelConfigurationArn = configuration.ref; this.slackChannelConfigurationName = props.slackChannelConfigurationName; } diff --git a/packages/@aws-cdk/aws-chatbot/package.json b/packages/@aws-cdk/aws-chatbot/package.json index 2a2acb7013526..59ddd0cd77453 100644 --- a/packages/@aws-cdk/aws-chatbot/package.json +++ b/packages/@aws-cdk/aws-chatbot/package.json @@ -73,13 +73,17 @@ "pkglint": "0.0.0" }, "dependencies": { + "@aws-cdk/aws-cloudwatch": "0.0.0", "@aws-cdk/aws-iam": "0.0.0", + "@aws-cdk/aws-logs": "0.0.0", "@aws-cdk/aws-sns": "0.0.0", "@aws-cdk/core": "0.0.0", "constructs": "^3.0.4" }, "peerDependencies": { + "@aws-cdk/aws-cloudwatch": "0.0.0", "@aws-cdk/aws-iam": "0.0.0", + "@aws-cdk/aws-logs": "0.0.0", "@aws-cdk/aws-sns": "0.0.0", "@aws-cdk/core": "0.0.0", "constructs": "^3.0.4" diff --git a/packages/@aws-cdk/aws-chatbot/test/integ.chatbot-logretention.expected.json b/packages/@aws-cdk/aws-chatbot/test/integ.chatbot-logretention.expected.json new file mode 100644 index 0000000000000..f2e0c5c3edda9 --- /dev/null +++ b/packages/@aws-cdk/aws-chatbot/test/integ.chatbot-logretention.expected.json @@ -0,0 +1,195 @@ +{ + "Resources": { + "MySlackChannelConfigurationRole1D3F23AE": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "chatbot.amazonaws.com" + } + } + ], + "Version": "2012-10-17" + } + } + }, + "MySlackChannelConfigurationRoleDefaultPolicyE4C1FA62": { + "Type": "AWS::IAM::Policy", + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": "s3:GetObject", + "Effect": "Allow", + "Resource": "arn:aws:s3:::abc/xyz/123.txt" + } + ], + "Version": "2012-10-17" + }, + 
"PolicyName": "MySlackChannelConfigurationRoleDefaultPolicyE4C1FA62", + "Roles": [ + { + "Ref": "MySlackChannelConfigurationRole1D3F23AE" + } + ] + } + }, + "MySlackChannelA8E0B56C": { + "Type": "AWS::Chatbot::SlackChannelConfiguration", + "Properties": { + "ConfigurationName": "test-channel", + "IamRoleArn": { + "Fn::GetAtt": [ + "MySlackChannelConfigurationRole1D3F23AE", + "Arn" + ] + }, + "SlackChannelId": "C0187JABUE9", + "SlackWorkspaceId": "T49239U4W", + "LoggingLevel": "NONE" + } + }, + "MySlackChannelLogRetention84AA443F": { + "Type": "Custom::LogRetention", + "Properties": { + "ServiceToken": { + "Fn::GetAtt": [ + "LogRetentionaae0aa3c5b4d4f87b02d85b201efdd8aFD4BFC8A", + "Arn" + ] + }, + "LogGroupName": "/aws/chatbot/test-channel", + "RetentionInDays": 30, + "LogGroupRegion": "us-east-1" + } + }, + "LogRetentionaae0aa3c5b4d4f87b02d85b201efdd8aServiceRole9741ECFB": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "lambda.amazonaws.com" + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + ] + ] + } + ] + } + }, + "LogRetentionaae0aa3c5b4d4f87b02d85b201efdd8aServiceRoleDefaultPolicyADDA7DEB": { + "Type": "AWS::IAM::Policy", + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "logs:PutRetentionPolicy", + "logs:DeleteRetentionPolicy" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "PolicyName": "LogRetentionaae0aa3c5b4d4f87b02d85b201efdd8aServiceRoleDefaultPolicyADDA7DEB", + "Roles": [ + { + "Ref": "LogRetentionaae0aa3c5b4d4f87b02d85b201efdd8aServiceRole9741ECFB" + } + ] + } + }, + "LogRetentionaae0aa3c5b4d4f87b02d85b201efdd8aFD4BFC8A": { + "Type": "AWS::Lambda::Function", + "Properties": { + "Code": { + "S3Bucket": 
{ + "Ref": "AssetParameters74a1cab76f5603c5e27101cb3809d8745c50f708b0f4b497ed0910eb533d437bS3Bucket48EF98C9" + }, + "S3Key": { + "Fn::Join": [ + "", + [ + { + "Fn::Select": [ + 0, + { + "Fn::Split": [ + "||", + { + "Ref": "AssetParameters74a1cab76f5603c5e27101cb3809d8745c50f708b0f4b497ed0910eb533d437bS3VersionKeyF33C73AF" + } + ] + } + ] + }, + { + "Fn::Select": [ + 1, + { + "Fn::Split": [ + "||", + { + "Ref": "AssetParameters74a1cab76f5603c5e27101cb3809d8745c50f708b0f4b497ed0910eb533d437bS3VersionKeyF33C73AF" + } + ] + } + ] + } + ] + ] + } + }, + "Handler": "index.handler", + "Role": { + "Fn::GetAtt": [ + "LogRetentionaae0aa3c5b4d4f87b02d85b201efdd8aServiceRole9741ECFB", + "Arn" + ] + }, + "Runtime": "nodejs10.x" + }, + "DependsOn": [ + "LogRetentionaae0aa3c5b4d4f87b02d85b201efdd8aServiceRoleDefaultPolicyADDA7DEB", + "LogRetentionaae0aa3c5b4d4f87b02d85b201efdd8aServiceRole9741ECFB" + ] + } + }, + "Parameters": { + "AssetParameters74a1cab76f5603c5e27101cb3809d8745c50f708b0f4b497ed0910eb533d437bS3Bucket48EF98C9": { + "Type": "String", + "Description": "S3 bucket for asset \"74a1cab76f5603c5e27101cb3809d8745c50f708b0f4b497ed0910eb533d437b\"" + }, + "AssetParameters74a1cab76f5603c5e27101cb3809d8745c50f708b0f4b497ed0910eb533d437bS3VersionKeyF33C73AF": { + "Type": "String", + "Description": "S3 key for asset version \"74a1cab76f5603c5e27101cb3809d8745c50f708b0f4b497ed0910eb533d437b\"" + }, + "AssetParameters74a1cab76f5603c5e27101cb3809d8745c50f708b0f4b497ed0910eb533d437bArtifactHash976CF1BD": { + "Type": "String", + "Description": "Artifact hash for asset \"74a1cab76f5603c5e27101cb3809d8745c50f708b0f4b497ed0910eb533d437b\"" + } + } +} \ No newline at end of file diff --git a/packages/@aws-cdk/aws-chatbot/test/integ.chatbot-logretention.ts b/packages/@aws-cdk/aws-chatbot/test/integ.chatbot-logretention.ts new file mode 100644 index 0000000000000..cbb8de485b295 --- /dev/null +++ b/packages/@aws-cdk/aws-chatbot/test/integ.chatbot-logretention.ts @@ -0,0 +1,33 @@ +import * 
as iam from '@aws-cdk/aws-iam'; +import * as logs from '@aws-cdk/aws-logs'; +import * as cdk from '@aws-cdk/core'; +import * as chatbot from '../lib'; + +class ChatbotLogRetentionInteg extends cdk.Stack { + constructor(scope: cdk.App, id: string, props?: cdk.StackProps) { + super(scope, id, props); + + const slackChannel = new chatbot.SlackChannelConfiguration(this, 'MySlackChannel', { + slackChannelConfigurationName: 'test-channel', + slackWorkspaceId: 'T49239U4W', // modify to your slack workspace id + slackChannelId: 'C0187JABUE9', // modify to your slack channel id + loggingLevel: chatbot.LoggingLevel.NONE, + logRetention: logs.RetentionDays.ONE_MONTH, + }); + + slackChannel.addToRolePolicy(new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: [ + 's3:GetObject', + ], + resources: ['arn:aws:s3:::abc/xyz/123.txt'], + })); + } +} + +const app = new cdk.App(); + +new ChatbotLogRetentionInteg(app, 'ChatbotLogRetentionInteg'); + +app.synth(); + diff --git a/packages/@aws-cdk/aws-chatbot/test/slack-channel-configuration.test.ts b/packages/@aws-cdk/aws-chatbot/test/slack-channel-configuration.test.ts index 3cf1189d9fee2..de5a5da9a63c1 100644 --- a/packages/@aws-cdk/aws-chatbot/test/slack-channel-configuration.test.ts +++ b/packages/@aws-cdk/aws-chatbot/test/slack-channel-configuration.test.ts @@ -1,5 +1,8 @@ import '@aws-cdk/assert/jest'; +import { ABSENT } from '@aws-cdk/assert'; +import * as cloudwatch from '@aws-cdk/aws-cloudwatch'; import * as iam from '@aws-cdk/aws-iam'; +import * as logs from '@aws-cdk/aws-logs'; import * as sns from '@aws-cdk/aws-sns'; import * as cdk from '@aws-cdk/core'; import * as chatbot from '../lib'; @@ -138,6 +141,83 @@ describe('SlackChannelConfiguration', () => { }); }); + test('specifying log retention', () => { + new chatbot.SlackChannelConfiguration(stack, 'MySlackChannel', { + slackWorkspaceId: 'ABC123', + slackChannelId: 'DEF456', + slackChannelConfigurationName: 'ConfigurationName', + logRetention: 
logs.RetentionDays.ONE_MONTH, + }); + + expect(stack).toHaveResourceLike('Custom::LogRetention', { + LogGroupName: '/aws/chatbot/ConfigurationName', + RetentionInDays: 30, + LogGroupRegion: 'us-east-1', + }); + }); + + test('getting configuration metric', () => { + const slackChannel = new chatbot.SlackChannelConfiguration(stack, 'MySlackChannel', { + slackWorkspaceId: 'ABC123', + slackChannelId: 'DEF456', + slackChannelConfigurationName: 'ConfigurationName', + logRetention: logs.RetentionDays.ONE_MONTH, + }); + const metric = slackChannel.metric('MetricName'); + new cloudwatch.Alarm(stack, 'Alarm', { + evaluationPeriods: 1, + threshold: 0, + comparisonOperator: cloudwatch.ComparisonOperator.GREATER_THAN_THRESHOLD, + metric: metric, + }); + + expect(metric).toEqual(new cloudwatch.Metric({ + namespace: 'AWS/Chatbot', + region: 'us-east-1', + dimensions: { + ConfigurationName: 'ConfigurationName', + }, + metricName: 'MetricName', + })); + expect(stack).toHaveResourceLike('AWS::CloudWatch::Alarm', { + Namespace: 'AWS/Chatbot', + MetricName: 'MetricName', + Dimensions: [ + { + Name: 'ConfigurationName', + Value: 'ConfigurationName', + }, + ], + ComparisonOperator: 'GreaterThanThreshold', + EvaluationPeriods: 1, + Threshold: 0, + }); + }); + + test('getting all configurations metric', () => { + const metric = chatbot.SlackChannelConfiguration.metricAll('MetricName'); + new cloudwatch.Alarm(stack, 'Alarm', { + evaluationPeriods: 1, + threshold: 0, + comparisonOperator: cloudwatch.ComparisonOperator.GREATER_THAN_THRESHOLD, + metric: metric, + }); + + expect(metric).toEqual(new cloudwatch.Metric({ + namespace: 'AWS/Chatbot', + region: 'us-east-1', + metricName: 'MetricName', + })); + expect(stack).toHaveResourceLike('AWS::CloudWatch::Alarm', { + Namespace: 'AWS/Chatbot', + MetricName: 'MetricName', + Dimensions: ABSENT, + ComparisonOperator: 'GreaterThanThreshold', + EvaluationPeriods: 1, + Threshold: 0, + }); + }); + test('added a iam policy to a from slack channel 
configuration ARN will nothing to do', () => { const imported = chatbot.SlackChannelConfiguration.fromSlackChannelConfigurationArn(stack, 'MySlackChannel', 'arn:aws:chatbot::1234567890:chat-configuration/slack-channel/my-slack'); diff --git a/packages/@aws-cdk/aws-cloudfront-origins/lib/s3-origin.ts b/packages/@aws-cdk/aws-cloudfront-origins/lib/s3-origin.ts index 43fdf3bc4ef9d..04c3d5175cace 100644 --- a/packages/@aws-cdk/aws-cloudfront-origins/lib/s3-origin.ts +++ b/packages/@aws-cdk/aws-cloudfront-origins/lib/s3-origin.ts @@ -59,7 +59,7 @@ class S3BucketOrigin extends cloudfront.OriginBase { public bind(scope: cdk.Construct, options: cloudfront.OriginBindOptions): cloudfront.OriginBindConfig { if (!this.originAccessIdentity) { this.originAccessIdentity = new cloudfront.OriginAccessIdentity(scope, 'S3Origin', { - comment: `Access identity for ${options.originId}`, + comment: `Identity for ${options.originId}`, }); this.bucket.grantRead(this.originAccessIdentity); } diff --git a/packages/@aws-cdk/aws-cloudfront-origins/test/integ.origin-group.expected.json b/packages/@aws-cdk/aws-cloudfront-origins/test/integ.origin-group.expected.json index ed571931fa0d2..6efc530e7f5f6 100644 --- a/packages/@aws-cdk/aws-cloudfront-origins/test/integ.origin-group.expected.json +++ b/packages/@aws-cdk/aws-cloudfront-origins/test/integ.origin-group.expected.json @@ -60,7 +60,7 @@ "Type": "AWS::CloudFront::CloudFrontOriginAccessIdentity", "Properties": { "CloudFrontOriginAccessIdentityConfig": { - "Comment": "Access identity for cloudfrontorigingroupDistributionOrigin137659A54" + "Comment": "Identity for cloudfrontorigingroupDistributionOrigin137659A54" } } }, @@ -153,4 +153,4 @@ } } } -} \ No newline at end of file +} diff --git a/packages/@aws-cdk/aws-cloudfront-origins/test/integ.s3-origin.expected.json b/packages/@aws-cdk/aws-cloudfront-origins/test/integ.s3-origin.expected.json index 446ccaae28d4f..cf4a342fc6583 100644 --- 
a/packages/@aws-cdk/aws-cloudfront-origins/test/integ.s3-origin.expected.json +++ b/packages/@aws-cdk/aws-cloudfront-origins/test/integ.s3-origin.expected.json @@ -60,7 +60,7 @@ "Type": "AWS::CloudFront::CloudFrontOriginAccessIdentity", "Properties": { "CloudFrontOriginAccessIdentityConfig": { - "Comment": "Access identity for cloudfronts3originDistributionOrigin1741C4E95" + "Comment": "Identity for cloudfronts3originDistributionOrigin1741C4E95" } } }, @@ -106,4 +106,4 @@ } } } -} \ No newline at end of file +} diff --git a/packages/@aws-cdk/aws-cloudfront-origins/test/s3-origin.test.ts b/packages/@aws-cdk/aws-cloudfront-origins/test/s3-origin.test.ts index 1dcc17de37249..1d99534652d23 100644 --- a/packages/@aws-cdk/aws-cloudfront-origins/test/s3-origin.test.ts +++ b/packages/@aws-cdk/aws-cloudfront-origins/test/s3-origin.test.ts @@ -54,7 +54,7 @@ describe('With bucket', () => { expect(stack).toHaveResourceLike('AWS::CloudFront::CloudFrontOriginAccessIdentity', { CloudFrontOriginAccessIdentityConfig: { - Comment: 'Access identity for StackDistOrigin15754CE84', + Comment: 'Identity for StackDistOrigin15754CE84', }, }); expect(stack).toHaveResourceLike('AWS::S3::BucketPolicy', { diff --git a/packages/@aws-cdk/aws-cloudfront/lib/origin_access_identity.ts b/packages/@aws-cdk/aws-cloudfront/lib/origin_access_identity.ts index 88bb5365d6ef1..f62c5ad4d0228 100644 --- a/packages/@aws-cdk/aws-cloudfront/lib/origin_access_identity.ts +++ b/packages/@aws-cdk/aws-cloudfront/lib/origin_access_identity.ts @@ -106,10 +106,10 @@ export class OriginAccessIdentity extends OriginAccessIdentityBase implements IO constructor(scope: cdk.Construct, id: string, props?: OriginAccessIdentityProps) { super(scope, id); + // Comment has a max length of 128. + const comment = (props?.comment ?? 
'Allows CloudFront to reach the bucket').substr(0, 128); this.resource = new CfnCloudFrontOriginAccessIdentity(this, 'Resource', { - cloudFrontOriginAccessIdentityConfig: { - comment: (props && props.comment) || 'Allows CloudFront to reach the bucket', - }, + cloudFrontOriginAccessIdentityConfig: { comment }, }); // physical id - OAI name this.originAccessIdentityName = this.getResourceNameAttribute(this.resource.ref); diff --git a/packages/@aws-cdk/aws-cloudfront/test/oai.test.ts b/packages/@aws-cdk/aws-cloudfront/test/oai.test.ts index 3f3a95327a9f1..ed93bad727eb0 100644 --- a/packages/@aws-cdk/aws-cloudfront/test/oai.test.ts +++ b/packages/@aws-cdk/aws-cloudfront/test/oai.test.ts @@ -1,69 +1,71 @@ -import { expect } from '@aws-cdk/assert'; +import '@aws-cdk/assert/jest'; import * as cdk from '@aws-cdk/core'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import { OriginAccessIdentity } from '../lib'; -/* eslint-disable quote-props */ - -nodeunitShim({ - 'Origin Access Identity with automatic comment'(test: Test) { +describe('Origin Access Identity', () => { + test('With automatic comment', () => { const stack = new cdk.Stack(); new OriginAccessIdentity(stack, 'OAI'); - expect(stack).toMatch( + expect(stack).toMatchTemplate( { - 'Resources': { - 'OAIE1EFC67F': { - 'Type': 'AWS::CloudFront::CloudFrontOriginAccessIdentity', - 'Properties': { - 'CloudFrontOriginAccessIdentityConfig': { - 'Comment': 'Allows CloudFront to reach the bucket', + Resources: { + OAIE1EFC67F: { + Type: 'AWS::CloudFront::CloudFrontOriginAccessIdentity', + Properties: { + CloudFrontOriginAccessIdentityConfig: { + Comment: 'Allows CloudFront to reach the bucket', }, }, }, }, }, ); + }); - test.done(); - }, - 'Origin Access Identity with comment'(test: Test) { + test('With provided comment', () => { const stack = new cdk.Stack(); new OriginAccessIdentity(stack, 'OAI', { comment: 'test comment', }); - expect(stack).toMatch( + expect(stack).toMatchTemplate( { - 'Resources': { - 
'OAIE1EFC67F': { - 'Type': 'AWS::CloudFront::CloudFrontOriginAccessIdentity', - 'Properties': { - 'CloudFrontOriginAccessIdentityConfig': { - 'Comment': 'test comment', + Resources: { + OAIE1EFC67F: { + Type: 'AWS::CloudFront::CloudFrontOriginAccessIdentity', + Properties: { + CloudFrontOriginAccessIdentityConfig: { + Comment: 'test comment', }, }, }, }, }, ); + }); + + test('Truncates long comments', () => { + const stack = new cdk.Stack(); + + new OriginAccessIdentity(stack, 'OAI', { + comment: 'This is a really long comment. Auto-generated comments based on ids of origins might sometimes be this long or even longer and that will break', + }); - test.done(); - }, + expect(stack).toHaveResourceLike('AWS::CloudFront::CloudFrontOriginAccessIdentity', { + CloudFrontOriginAccessIdentityConfig: { + Comment: 'This is a really long comment. Auto-generated comments based on ids of origins might sometimes be this long or even longer and t', + }, + }); + }); - 'Builds ARN of CloudFront user'(test: Test) { + test('Builds ARN of CloudFront user', () => { const stack = new cdk.Stack(); const oai = OriginAccessIdentity.fromOriginAccessIdentityName(stack, 'OAI', 'OAITest'); - test.ok( - oai.grantPrincipal.policyFragment.principalJson.AWS[0].endsWith( - ':iam::cloudfront:user/CloudFront Origin Access Identity OAITest', - ), - ); - - test.done(); - }, + expect(oai.grantPrincipal.policyFragment.principalJson.AWS[0]).toMatch(/:iam::cloudfront:user\/CloudFront Origin Access Identity OAITest$/); + }); }); diff --git a/packages/@aws-cdk/aws-codebuild/README.md b/packages/@aws-cdk/aws-codebuild/README.md index e6a312d4b048b..208912c6657e9 100644 --- a/packages/@aws-cdk/aws-codebuild/README.md +++ b/packages/@aws-cdk/aws-codebuild/README.md @@ -115,6 +115,18 @@ const bbSource = codebuild.Source.bitBucket({ }); ``` +### For all Git sources + +For all Git sources, you can fetch submodules while cloning the git repo. 
+ +```typescript +const gitHubSource = codebuild.Source.gitHub({ + owner: 'awslabs', + repo: 'aws-cdk', + fetchSubmodules: true, +}); +``` + ## Artifacts CodeBuild Projects can produce Artifacts and upload them to S3. For example: diff --git a/packages/@aws-cdk/aws-codebuild/lib/source.ts b/packages/@aws-cdk/aws-codebuild/lib/source.ts index 52161c3f90050..1d85e525b7d55 100644 --- a/packages/@aws-cdk/aws-codebuild/lib/source.ts +++ b/packages/@aws-cdk/aws-codebuild/lib/source.ts @@ -119,6 +119,13 @@ interface GitSourceProps extends SourceProps { * @default the default branch's HEAD commit ID is used */ readonly branchOrRef?: string; + + /** + * Whether to fetch submodules while cloning git repo. + * + * @default false + */ + readonly fetchSubmodules?: boolean; } /** @@ -127,12 +134,14 @@ interface GitSourceProps extends SourceProps { abstract class GitSource extends Source { private readonly cloneDepth?: number; private readonly branchOrRef?: string; + private readonly fetchSubmodules?: boolean; protected constructor(props: GitSourceProps) { super(props); this.cloneDepth = props.cloneDepth; this.branchOrRef = props.branchOrRef; + this.fetchSubmodules = props.fetchSubmodules; } public bind(_scope: Construct, _project: IProject): SourceConfig { @@ -142,6 +151,9 @@ abstract class GitSource extends Source { sourceProperty: { ...superConfig.sourceProperty, gitCloneDepth: this.cloneDepth, + gitSubmodulesConfig: this.fetchSubmodules ? 
{ + fetchSubmodules: this.fetchSubmodules, + } : undefined, }, }; } diff --git a/packages/@aws-cdk/aws-codebuild/test/test.codebuild.ts b/packages/@aws-cdk/aws-codebuild/test/test.codebuild.ts index bf52b1d7c86aa..a3450446606d2 100644 --- a/packages/@aws-cdk/aws-codebuild/test/test.codebuild.ts +++ b/packages/@aws-cdk/aws-codebuild/test/test.codebuild.ts @@ -546,6 +546,7 @@ export = { owner: 'testowner', repo: 'testrepo', cloneDepth: 3, + fetchSubmodules: true, webhook: true, reportBuildStatus: false, webhookFilters: [ @@ -561,6 +562,9 @@ export = { Location: 'https://github.com/testowner/testrepo.git', ReportBuildStatus: false, GitCloneDepth: 3, + GitSubmodulesConfig: { + FetchSubmodules: true, + }, }, })); diff --git a/packages/@aws-cdk/aws-cognito/test/integ.user-pool-domain-cfdist.expected.json b/packages/@aws-cdk/aws-cognito/test/integ.user-pool-domain-cfdist.expected.json index 13f653712c7cd..5f3a61539ea8a 100644 --- a/packages/@aws-cdk/aws-cognito/test/integ.user-pool-domain-cfdist.expected.json +++ b/packages/@aws-cdk/aws-cognito/test/integ.user-pool-domain-cfdist.expected.json @@ -78,7 +78,10 @@ "InstallLatestAwsSdk": true }, "UpdateReplacePolicy": "Delete", - "DeletionPolicy": "Delete" + "DeletionPolicy": "Delete", + "DependsOn": [ + "UserPoolDomainCloudFrontDomainNameCustomResourcePolicy7DE54188" + ] }, "AWS679f53fac002430cb0da5b7982bd2287ServiceRoleC1EA0FF2": { "Type": "AWS::IAM::Role", @@ -111,25 +114,21 @@ ] } }, - "AWS679f53fac002430cb0da5b7982bd2287ServiceRoleDefaultPolicyD28E1A5E": { + "UserPoolDomainCloudFrontDomainNameCustomResourcePolicy7DE54188": { "Type": "AWS::IAM::Policy", "Properties": { "PolicyDocument": { "Statement": [ { - "Action": "cognito-idp:DescribeUserPoolDomain", - "Effect": "Allow", + "Action":"cognito-idp:DescribeUserPoolDomain", + "Effect":"Allow", "Resource": "*" } ], "Version": "2012-10-17" }, - "PolicyName": "AWS679f53fac002430cb0da5b7982bd2287ServiceRoleDefaultPolicyD28E1A5E", - "Roles": [ - { - "Ref": 
"AWS679f53fac002430cb0da5b7982bd2287ServiceRoleC1EA0FF2" - } - ] + "PolicyName": "UserPoolDomainCloudFrontDomainNameCustomResourcePolicy7DE54188", + "Roles": [{"Ref":"AWS679f53fac002430cb0da5b7982bd2287ServiceRoleC1EA0FF2"}] } }, "AWS679f53fac002430cb0da5b7982bd22872D164C4C": { @@ -184,7 +183,6 @@ "Timeout": 120 }, "DependsOn": [ - "AWS679f53fac002430cb0da5b7982bd2287ServiceRoleDefaultPolicyD28E1A5E", "AWS679f53fac002430cb0da5b7982bd2287ServiceRoleC1EA0FF2" ] } diff --git a/packages/@aws-cdk/aws-docdb/test/cluster.test.ts b/packages/@aws-cdk/aws-docdb/test/cluster.test.ts index fb26905d1a027..f227293310bf3 100644 --- a/packages/@aws-cdk/aws-docdb/test/cluster.test.ts +++ b/packages/@aws-cdk/aws-docdb/test/cluster.test.ts @@ -580,7 +580,7 @@ describe('DatabaseCluster', () => { expectCDK(stack).to(haveResource('AWS::Serverless::Application', { Location: { ApplicationId: 'arn:aws:serverlessrepo:us-east-1:297356227824:applications/SecretsManagerMongoDBRotationSingleUser', - SemanticVersion: '1.1.3', + SemanticVersion: '1.1.60', }, Parameters: { endpoint: { @@ -698,7 +698,7 @@ describe('DatabaseCluster', () => { expectCDK(stack).to(haveResource('AWS::Serverless::Application', { Location: { ApplicationId: 'arn:aws:serverlessrepo:us-east-1:297356227824:applications/SecretsManagerMongoDBRotationMultiUser', - SemanticVersion: '1.1.3', + SemanticVersion: '1.1.60', }, Parameters: { endpoint: { diff --git a/packages/@aws-cdk/aws-dynamodb-global/lib/aws-dynamodb-global.ts b/packages/@aws-cdk/aws-dynamodb-global/lib/aws-dynamodb-global.ts index d72ff1f42c176..a76e2b8940abc 100644 --- a/packages/@aws-cdk/aws-dynamodb-global/lib/aws-dynamodb-global.ts +++ b/packages/@aws-cdk/aws-dynamodb-global/lib/aws-dynamodb-global.ts @@ -40,7 +40,7 @@ export class GlobalTable extends cdk.Construct { constructor(scope: cdk.Construct, id: string, props: GlobalTableProps) { super(scope, id); - this.node.addWarning('The @aws-cdk/aws-dynamodb-global module has been deprecated in favor of 
@aws-cdk/aws-dynamodb.Table.replicationRegions'); + cdk.Annotations.of(this).addWarning('The @aws-cdk/aws-dynamodb-global module has been deprecated in favor of @aws-cdk/aws-dynamodb.Table.replicationRegions'); this._regionalTables = []; diff --git a/packages/@aws-cdk/aws-ec2/lib/cfn-init-elements.ts b/packages/@aws-cdk/aws-ec2/lib/cfn-init-elements.ts index 0cebf01904b1d..73153cd1025a3 100644 --- a/packages/@aws-cdk/aws-ec2/lib/cfn-init-elements.ts +++ b/packages/@aws-cdk/aws-ec2/lib/cfn-init-elements.ts @@ -432,6 +432,7 @@ export abstract class InitFile extends InitElement { source: asset.httpUrl, }), authentication: standardS3Auth(bindOptions.instanceRole, asset.s3BucketName), + assetHash: asset.assetHash, }; } }(targetFileName, options); @@ -449,6 +450,7 @@ export abstract class InitFile extends InitElement { source: asset.httpUrl, }), authentication: standardS3Auth(bindOptions.instanceRole, asset.s3BucketName), + assetHash: asset.assetHash, }; } }(targetFileName, options); @@ -899,6 +901,7 @@ export abstract class InitSource extends InitElement { return { config: { [this.targetDirectory]: asset.httpUrl }, authentication: standardS3Auth(bindOptions.instanceRole, asset.s3BucketName), + assetHash: asset.assetHash, }; } }(targetDirectory, options.serviceRestartHandles); @@ -915,6 +918,7 @@ export abstract class InitSource extends InitElement { return { config: { [this.targetDirectory]: asset.httpUrl }, authentication: standardS3Auth(bindOptions.instanceRole, asset.s3BucketName), + assetHash: asset.assetHash, }; } }(targetDirectory, options.serviceRestartHandles); diff --git a/packages/@aws-cdk/aws-ec2/lib/cfn-init.ts b/packages/@aws-cdk/aws-ec2/lib/cfn-init.ts index f6cd804f9c698..e3930b1be9f22 100644 --- a/packages/@aws-cdk/aws-ec2/lib/cfn-init.ts +++ b/packages/@aws-cdk/aws-ec2/lib/cfn-init.ts @@ -97,7 +97,12 @@ export class CloudFormationInit { // Note: This will not reflect mutations made after attaching. 
const bindResult = this.bind(attachedResource.stack, attachOptions); attachedResource.addMetadata('AWS::CloudFormation::Init', bindResult.configData); - const fingerprint = contentHash(JSON.stringify(bindResult.configData)).substr(0, 16); + + // Need to resolve the various tokens from assets in the config, + // as well as include any asset hashes provided so the fingerprint is accurate. + const resolvedConfig = attachedResource.stack.resolve(bindResult.configData); + const fingerprintInput = { config: resolvedConfig, assetHash: bindResult.assetHash }; + const fingerprint = contentHash(JSON.stringify(fingerprintInput)).substr(0, 16); attachOptions.instanceRole.addToPolicy(new iam.PolicyStatement({ actions: ['cloudformation:DescribeStackResource', 'cloudformation:SignalResource'], @@ -140,7 +145,7 @@ export class CloudFormationInit { } } - private bind(scope: Construct, options: AttachInitOptions): { configData: any, authData: any } { + private bind(scope: Construct, options: AttachInitOptions): { configData: any, authData: any, assetHash?: any } { const nonEmptyConfigs = mapValues(this._configs, c => c.isEmpty() ? undefined : c); const configNameToBindResult = mapValues(nonEmptyConfigs, c => c._bind(scope, options)); @@ -151,6 +156,7 @@ export class CloudFormationInit { ...mapValues(configNameToBindResult, c => c.config), }, authData: Object.values(configNameToBindResult).map(c => c.authentication).reduce(deepMerge, undefined), + assetHash: combineAssetHashesOrUndefined(Object.values(configNameToBindResult).map(c => c.assetHash)), }; } @@ -201,9 +207,9 @@ export class InitConfig { // Must be last! 
const servicesConfig = this.bindForType(InitElementType.SERVICE, bindOptions); - const authentication = [packageConfig, groupsConfig, usersConfig, sourcesConfig, filesConfig, commandsConfig, servicesConfig] - .map(c => c?.authentication) - .reduce(deepMerge, undefined); + const allConfig = [packageConfig, groupsConfig, usersConfig, sourcesConfig, filesConfig, commandsConfig, servicesConfig]; + const authentication = allConfig.map(c => c?.authentication).reduce(deepMerge, undefined); + const assetHash = combineAssetHashesOrUndefined(allConfig.map(c => c?.assetHash)); return { config: { @@ -216,6 +222,7 @@ export class InitConfig { services: servicesConfig?.config, }, authentication, + assetHash, }; } @@ -228,6 +235,7 @@ export class InitConfig { return { config: bindResults.map(r => r.config).reduce(deepMerge, undefined) ?? {}, authentication: bindResults.map(r => r.authentication).reduce(deepMerge, undefined), + assetHash: combineAssetHashesOrUndefined(bindResults.map(r => r.assetHash)), }; } @@ -310,6 +318,12 @@ function mapValues(xs: Record, fn: (x: A) => B | undefined): Re return ret; } +// Combines all input asset hashes into one, or if no hashes are present, returns undefined. +function combineAssetHashesOrUndefined(hashes: (string | undefined)[]): string | undefined { + const hashArray = hashes.filter((x): x is string => x !== undefined); + return hashArray.length > 0 ? 
hashArray.join('') : undefined; +} + function contentHash(content: string) { return crypto.createHash('sha256').update(content).digest('hex'); } diff --git a/packages/@aws-cdk/aws-ec2/lib/instance.ts b/packages/@aws-cdk/aws-ec2/lib/instance.ts index a9ef8112dc379..7a4ef214101b3 100644 --- a/packages/@aws-cdk/aws-ec2/lib/instance.ts +++ b/packages/@aws-cdk/aws-ec2/lib/instance.ts @@ -1,7 +1,7 @@ import * as crypto from 'crypto'; import * as iam from '@aws-cdk/aws-iam'; -import { Construct, Duration, Fn, IResource, Lazy, Resource, Stack, Tags } from '@aws-cdk/core'; +import { Annotations, Construct, Duration, Fn, IResource, Lazy, Resource, Stack, Tags } from '@aws-cdk/core'; import { CloudFormationInit } from './cfn-init'; import { Connections, IConnectable } from './connections'; import { CfnInstance } from './ec2.generated'; @@ -333,13 +333,13 @@ export class Instance extends Resource implements IInstance { if (selected.length === 1) { subnet = selected[0]; } else { - this.node.addError(`Need exactly 1 subnet to match AZ '${props.availabilityZone}', found ${selected.length}. Use a different availabilityZone.`); + Annotations.of(this).addError(`Need exactly 1 subnet to match AZ '${props.availabilityZone}', found ${selected.length}. 
Use a different availabilityZone.`); } } else { if (subnets.length > 0) { subnet = subnets[0]; } else { - this.node.addError(`Did not find any subnets matching '${JSON.stringify(props.vpcSubnets)}', please use a different selection.`); + Annotations.of(this).addError(`Did not find any subnets matching '${JSON.stringify(props.vpcSubnets)}', please use a different selection.`); } } if (!subnet) { diff --git a/packages/@aws-cdk/aws-ec2/lib/private/cfn-init-internal.ts b/packages/@aws-cdk/aws-ec2/lib/private/cfn-init-internal.ts index bab8c68e0038f..bdfdc8fdd6c2a 100644 --- a/packages/@aws-cdk/aws-ec2/lib/private/cfn-init-internal.ts +++ b/packages/@aws-cdk/aws-ec2/lib/private/cfn-init-internal.ts @@ -72,6 +72,13 @@ export interface InitElementConfig { * @default - No authentication associated with the config */ readonly authentication?: Record; + + /** + * Optional string representing a hash of the asset associated with this element (if any). + * + * @default - No hash is provided + */ + readonly assetHash?: string; } /** diff --git a/packages/@aws-cdk/aws-ec2/lib/security-group.ts b/packages/@aws-cdk/aws-ec2/lib/security-group.ts index 1736d17df8a86..868db46dd462a 100644 --- a/packages/@aws-cdk/aws-ec2/lib/security-group.ts +++ b/packages/@aws-cdk/aws-ec2/lib/security-group.ts @@ -1,4 +1,4 @@ -import { Construct, IResource, Lazy, Resource, ResourceProps, Stack, Token } from '@aws-cdk/core'; +import { Annotations, Construct, IResource, Lazy, Resource, ResourceProps, Stack, Token } from '@aws-cdk/core'; import { Connections } from './connections'; import { CfnSecurityGroup, CfnSecurityGroupEgress, CfnSecurityGroupIngress } from './ec2.generated'; import { IPeer } from './peer'; @@ -404,7 +404,7 @@ export class SecurityGroup extends SecurityGroupBase { // In the case of "allowAllOutbound", we don't add any more rules. There // is only one rule which allows all traffic and that subsumes any other // rule. 
- this.node.addWarning('Ignoring Egress rule since \'allowAllOutbound\' is set to true; To add customize rules, set allowAllOutbound=false on the SecurityGroup'); + Annotations.of(this).addWarning('Ignoring Egress rule since \'allowAllOutbound\' is set to true; To add customize rules, set allowAllOutbound=false on the SecurityGroup'); return; } else { // Otherwise, if the bogus rule exists we can now remove it because the diff --git a/packages/@aws-cdk/aws-ec2/lib/volume.ts b/packages/@aws-cdk/aws-ec2/lib/volume.ts index 72c9af36fd079..e7ce7d52b2977 100644 --- a/packages/@aws-cdk/aws-ec2/lib/volume.ts +++ b/packages/@aws-cdk/aws-ec2/lib/volume.ts @@ -2,7 +2,7 @@ import * as crypto from 'crypto'; import { AccountRootPrincipal, Grant, IGrantable } from '@aws-cdk/aws-iam'; import { IKey, ViaServicePrincipal } from '@aws-cdk/aws-kms'; -import { Construct, IResource, Resource, Size, SizeRoundingBehavior, Stack, Token, Tags } from '@aws-cdk/core'; +import { Annotations, Construct, IResource, Resource, Size, SizeRoundingBehavior, Stack, Token, Tags } from '@aws-cdk/core'; import { CfnInstance, CfnVolume } from './ec2.generated'; import { IInstance } from './instance'; @@ -176,7 +176,7 @@ export function synthesizeBlockDeviceMappings(construct: Construct, blockDevices throw new Error('iops property is required with volumeType: EbsDeviceVolumeType.IO1'); } } else if (volumeType !== EbsDeviceVolumeType.IO1) { - construct.node.addWarning('iops will be ignored without volumeType: EbsDeviceVolumeType.IO1'); + Annotations.of(construct).addWarning('iops will be ignored without volumeType: EbsDeviceVolumeType.IO1'); } } diff --git a/packages/@aws-cdk/aws-ec2/lib/vpc.ts b/packages/@aws-cdk/aws-ec2/lib/vpc.ts index c0b65abee39df..63118badb5977 100644 --- a/packages/@aws-cdk/aws-ec2/lib/vpc.ts +++ b/packages/@aws-cdk/aws-ec2/lib/vpc.ts @@ -1,6 +1,6 @@ import * as cxschema from '@aws-cdk/cloud-assembly-schema'; import { - ConcreteDependable, Construct, ContextProvider, 
DependableTrait, IConstruct, + Annotations, ConcreteDependable, Construct, ContextProvider, DependableTrait, IConstruct, IDependable, IResource, Lazy, Resource, Stack, Token, Tags, } from '@aws-cdk/core'; import * as cxapi from '@aws-cdk/cx-api'; @@ -380,7 +380,7 @@ abstract class VpcBase extends Resource implements IVpc { const routeTableIds = allRouteTableIds(flatten(vpnRoutePropagation.map(s => this.selectSubnets(s).subnets))); if (routeTableIds.length === 0) { - this.node.addError(`enableVpnGateway: no subnets matching selection: '${JSON.stringify(vpnRoutePropagation)}'. Select other subnets to add routes to.`); + Annotations.of(this).addError(`enableVpnGateway: no subnets matching selection: '${JSON.stringify(vpnRoutePropagation)}'. Select other subnets to add routes to.`); } const routePropagation = new CfnVPNGatewayRoutePropagation(this, 'RoutePropagation', { @@ -1899,7 +1899,7 @@ class ImportedSubnet extends Resource implements ISubnet, IPublicSubnet, IPrivat ? `at '${scope.node.path}/${id}'` : `'${attrs.subnetId}'`; // eslint-disable-next-line max-len - scope.node.addWarning(`No routeTableId was provided to the subnet ${ref}. Attempting to read its .routeTable.routeTableId will return null/undefined. (More info: https://github.com/aws/aws-cdk/pull/3171)`); + Annotations.of(this).addWarning(`No routeTableId was provided to the subnet ${ref}. Attempting to read its .routeTable.routeTableId will return null/undefined. 
(More info: https://github.com/aws/aws-cdk/pull/3171)`); } this._availabilityZone = attrs.availabilityZone; diff --git a/packages/@aws-cdk/aws-ec2/test/cfn-init.test.ts b/packages/@aws-cdk/aws-ec2/test/cfn-init.test.ts index d082183bf0081..38270500d4a9e 100644 --- a/packages/@aws-cdk/aws-ec2/test/cfn-init.test.ts +++ b/packages/@aws-cdk/aws-ec2/test/cfn-init.test.ts @@ -1,10 +1,12 @@ +import * as fs from 'fs'; +import * as os from 'os'; import * as path from 'path'; import { arrayWith, ResourcePart, stringLike } from '@aws-cdk/assert'; import '@aws-cdk/assert/jest'; import * as iam from '@aws-cdk/aws-iam'; import * as s3 from '@aws-cdk/aws-s3'; -import * as s3_assets from '@aws-cdk/aws-s3-assets'; -import { App, Aws, CfnResource, Stack } from '@aws-cdk/core'; +import { Asset } from '@aws-cdk/aws-s3-assets'; +import { App, Aws, CfnResource, Stack, DefaultStackSynthesizer, IStackSynthesizer, FileAssetSource, FileAssetLocation } from '@aws-cdk/core'; import * as ec2 from '../lib'; let app: App; @@ -13,10 +15,15 @@ let instanceRole: iam.Role; let resource: CfnResource; let linuxUserData: ec2.UserData; -beforeEach(() => { +function resetState() { + resetStateWithSynthesizer(); +} + +function resetStateWithSynthesizer(customSynthesizer?: IStackSynthesizer) { app = new App(); stack = new Stack(app, 'Stack', { env: { account: '1234', region: 'testregion' }, + synthesizer: customSynthesizer, }); instanceRole = new iam.Role(stack, 'InstanceRole', { assumedBy: new iam.ServicePrincipal('ec2.amazonaws.com'), @@ -25,7 +32,9 @@ beforeEach(() => { type: 'CDK::Test::Resource', }); linuxUserData = ec2.UserData.forLinux(); -}); +}; + +beforeEach(resetState); test('whole config with restart handles', () => { // WHEN @@ -238,7 +247,7 @@ describe('assets n buckets', () => { [''], ])('InitFile.from%sAsset', (existing: string) => { // GIVEN - const asset = new s3_assets.Asset(stack, 'Asset', { path: __filename }); + const asset = new Asset(stack, 'Asset', { path: __filename }); const init 
= ec2.CloudFormationInit.fromElements( existing ? ec2.InitFile.fromExistingAsset('/etc/fun.js', asset) @@ -292,7 +301,7 @@ describe('assets n buckets', () => { [''], ])('InitSource.from%sAsset', (existing: string) => { // GIVEN - const asset = new s3_assets.Asset(stack, 'Asset', { path: path.join(__dirname, 'asset-fixture') }); + const asset = new Asset(stack, 'Asset', { path: path.join(__dirname, 'asset-fixture') }); const init = ec2.CloudFormationInit.fromElements( existing ? ec2.InitSource.fromExistingAsset('/etc/fun', asset) @@ -472,6 +481,64 @@ describe('assets n buckets', () => { }, }); }); + + test('fingerprint data changes on asset hash update', () => { + function calculateFingerprint(assetFilePath: string): string | undefined { + resetState(); // Needed so the same resources/assets/filenames can be used. + const init = ec2.CloudFormationInit.fromElements( + ec2.InitFile.fromAsset('/etc/myFile', assetFilePath), + ); + init._attach(resource, linuxOptions()); + + return linuxUserData.render().split('\n').find(line => line.match(/# fingerprint:/)); + } + + // Setup initial asset file + const assetFileDir = fs.mkdtempSync(path.join(os.tmpdir(), 'cfn-init-test')); + const assetFilePath = path.join(assetFileDir, 'fingerprint-test'); + fs.writeFileSync(assetFilePath, 'hello'); + + const fingerprintOne = calculateFingerprint(assetFilePath); + const fingerprintOneAgain = calculateFingerprint(assetFilePath); + // Consistent without changes. 
+ expect(fingerprintOneAgain).toEqual(fingerprintOne); + + // Change asset file content/hash + fs.writeFileSync(assetFilePath, ' world'); + + const fingerprintTwo = calculateFingerprint(assetFilePath); + + expect(fingerprintTwo).not.toEqual(fingerprintOne); + }); + + test('fingerprint data changes on existing asset update, even for assets with unchanging URLs', () => { + function calculateFingerprint(assetFilePath: string): string | undefined { + resetStateWithSynthesizer(new SingletonLocationSythesizer()); + const init = ec2.CloudFormationInit.fromElements( + ec2.InitFile.fromExistingAsset('/etc/myFile', new Asset(stack, 'FileAsset', { path: assetFilePath })), + ); + init._attach(resource, linuxOptions()); + + return linuxUserData.render().split('\n').find(line => line.match(/# fingerprint:/)); + } + + // Setup initial asset file + const assetFileDir = fs.mkdtempSync(path.join(os.tmpdir(), 'cfn-init-test')); + const assetFilePath = path.join(assetFileDir, 'fingerprint-test'); + fs.writeFileSync(assetFilePath, 'hello'); + + const fingerprintOne = calculateFingerprint(assetFilePath); + const fingerprintOneAgain = calculateFingerprint(assetFilePath); + // Consistent without changes. 
+ expect(fingerprintOneAgain).toEqual(fingerprintOne); + + // Change asset file content/hash + fs.writeFileSync(assetFilePath, ' world'); + + const fingerprintTwo = calculateFingerprint(assetFilePath); + + expect(fingerprintTwo).not.toEqual(fingerprintOne); + }); }); function linuxOptions() { @@ -512,3 +579,17 @@ function cmdArg(command: string, argument: string) { function escapeRegex(s: string) { return s.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string } + +/** Creates file assets that have a hard-coded asset url, rather than the default based on asset hash */ +class SingletonLocationSythesizer extends DefaultStackSynthesizer { + public addFileAsset(_asset: FileAssetSource): FileAssetLocation { + const httpUrl = 'https://MyBucket.s3.amazonaws.com/MyAsset'; + return { + bucketName: 'MyAssetBucket', + objectKey: 'MyAssetFile', + httpUrl, + s3ObjectUrl: httpUrl, + s3Url: httpUrl, + }; + } +} diff --git a/packages/@aws-cdk/aws-ec2/test/integ.instance-init.expected.json b/packages/@aws-cdk/aws-ec2/test/integ.instance-init.expected.json index 0127cd6df4208..24467fddd0e96 100644 --- a/packages/@aws-cdk/aws-ec2/test/integ.instance-init.expected.json +++ b/packages/@aws-cdk/aws-ec2/test/integ.instance-init.expected.json @@ -130,7 +130,7 @@ ] } }, - "Instance255F3526589c13387332ee3de": { + "Instance255F35265813bd3c1f652ed5b": { "Type": "AWS::EC2::Instance", "Properties": { "AvailabilityZone": "us-east-1a", @@ -161,7 +161,7 @@ "Fn::Join": [ "", [ - "#!/bin/bash\n# fingerprint: 061ec8b06d437783\n(\n set +e\n /opt/aws/bin/cfn-init -v --region ", + "#!/bin/bash\n# fingerprint: 336ad3625c000098\n(\n set +e\n /opt/aws/bin/cfn-init -v --region ", { "Ref": "AWS::Region" }, @@ -169,7 +169,7 @@ { "Ref": "AWS::StackName" }, - " --resource Instance255F3526589c13387332ee3de -c default\n /opt/aws/bin/cfn-signal -e $? --region ", + " --resource Instance255F35265813bd3c1f652ed5b -c default\n /opt/aws/bin/cfn-signal -e $? 
--region ", { "Ref": "AWS::Region" }, @@ -177,7 +177,7 @@ { "Ref": "AWS::StackName" }, - " --resource Instance255F3526589c13387332ee3de\n cat /var/log/cfn-init.log >&2\n)" + " --resource Instance255F35265813bd3c1f652ed5b\n cat /var/log/cfn-init.log >&2\n)" ] ] } @@ -319,4 +319,4 @@ "Description": "Artifact hash for asset \"f8a1af398dac2fad92eeea4fb7620be1c4f504e23e3bfcd859fbb5744187930b\"" } } -} +} \ No newline at end of file diff --git a/packages/@aws-cdk/aws-ecr-assets/lib/image-asset.ts b/packages/@aws-cdk/aws-ecr-assets/lib/image-asset.ts index 6e74297edfcaa..0db041f88d6c8 100644 --- a/packages/@aws-cdk/aws-ecr-assets/lib/image-asset.ts +++ b/packages/@aws-cdk/aws-ecr-assets/lib/image-asset.ts @@ -2,7 +2,7 @@ import * as fs from 'fs'; import * as path from 'path'; import * as assets from '@aws-cdk/assets'; import * as ecr from '@aws-cdk/aws-ecr'; -import { Construct, Stack, Token } from '@aws-cdk/core'; +import { Annotations, Construct, Stack, Token } from '@aws-cdk/core'; import * as minimatch from 'minimatch'; /** @@ -112,7 +112,7 @@ export class DockerImageAsset extends Construct implements assets.IAsset { }); if (props.repositoryName) { - this.node.addWarning('DockerImageAsset.repositoryName is deprecated. Override "core.Stack.addDockerImageAsset" to control asset locations'); + Annotations.of(this).addWarning('DockerImageAsset.repositoryName is deprecated. 
Override "core.Stack.addDockerImageAsset" to control asset locations'); } // include build context in "extra" so it will impact the hash diff --git a/packages/@aws-cdk/aws-ecr/test/integ.imagescan.expected.json b/packages/@aws-cdk/aws-ecr/test/integ.imagescan.expected.json index 2c976e6327438..76cd5933128c1 100644 --- a/packages/@aws-cdk/aws-ecr/test/integ.imagescan.expected.json +++ b/packages/@aws-cdk/aws-ecr/test/integ.imagescan.expected.json @@ -77,7 +77,32 @@ "InstallLatestAwsSdk": true }, "UpdateReplacePolicy": "Delete", - "DeletionPolicy": "Delete" + "DeletionPolicy": "Delete", + "DependsOn": [ + "RepoImageScanOnPushCustomResourcePolicy556E941E" + ] + }, + "RepoImageScanOnPushCustomResourcePolicy556E941E": { + "Type": "AWS::IAM::Policy", + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action":"ecr:PutImageScanningConfiguration", + "Effect":"Allow", + "Resource": { + "Fn::GetAtt": [ + "Repo02AC86CF", + "Arn" + ] + } + } + ], + "Version": "2012-10-17" + }, + "PolicyName": "RepoImageScanOnPushCustomResourcePolicy556E941E", + "Roles": [{"Ref":"AWS679f53fac002430cb0da5b7982bd2287ServiceRoleC1EA0FF2"}] + } }, "RepoImageScanComplete7BC71935": { "Type": "AWS::Events::Rule", @@ -134,32 +159,6 @@ ] } }, - "AWS679f53fac002430cb0da5b7982bd2287ServiceRoleDefaultPolicyD28E1A5E": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyDocument": { - "Statement": [ - { - "Action": "ecr:PutImageScanningConfiguration", - "Effect": "Allow", - "Resource": { - "Fn::GetAtt": [ - "Repo02AC86CF", - "Arn" - ] - } - } - ], - "Version": "2012-10-17" - }, - "PolicyName": "AWS679f53fac002430cb0da5b7982bd2287ServiceRoleDefaultPolicyD28E1A5E", - "Roles": [ - { - "Ref": "AWS679f53fac002430cb0da5b7982bd2287ServiceRoleC1EA0FF2" - } - ] - } - }, "AWS679f53fac002430cb0da5b7982bd22872D164C4C": { "Type": "AWS::Lambda::Function", "Properties": { @@ -212,7 +211,6 @@ "Timeout": 120 }, "DependsOn": [ - "AWS679f53fac002430cb0da5b7982bd2287ServiceRoleDefaultPolicyD28E1A5E", 
"AWS679f53fac002430cb0da5b7982bd2287ServiceRoleC1EA0FF2" ] } diff --git a/packages/@aws-cdk/aws-ecs/lib/base/base-service.ts b/packages/@aws-cdk/aws-ecs/lib/base/base-service.ts index c616698453a17..145a0c36a54bc 100644 --- a/packages/@aws-cdk/aws-ecs/lib/base/base-service.ts +++ b/packages/@aws-cdk/aws-ecs/lib/base/base-service.ts @@ -5,7 +5,7 @@ import * as elb from '@aws-cdk/aws-elasticloadbalancing'; import * as elbv2 from '@aws-cdk/aws-elasticloadbalancingv2'; import * as iam from '@aws-cdk/aws-iam'; import * as cloudmap from '@aws-cdk/aws-servicediscovery'; -import { Construct, Duration, IResolvable, IResource, Lazy, Resource, Stack } from '@aws-cdk/core'; +import { Annotations, Construct, Duration, IResolvable, IResource, Lazy, Resource, Stack } from '@aws-cdk/core'; import { LoadBalancerTargetOptions, NetworkMode, TaskDefinition } from '../base/task-definition'; import { ICluster } from '../cluster'; import { Protocol } from '../container-definition'; @@ -356,7 +356,7 @@ export abstract class BaseService extends Resource }); if (props.deploymentController?.type === DeploymentControllerType.EXTERNAL) { - this.node.addWarning('taskDefinition and launchType are blanked out when using external deployment controller.'); + Annotations.of(this).addWarning('taskDefinition and launchType are blanked out when using external deployment controller.'); } this.serviceArn = this.getResourceArnAttribute(this.resource.ref, { diff --git a/packages/@aws-cdk/aws-ecs/lib/images/repository.ts b/packages/@aws-cdk/aws-ecs/lib/images/repository.ts index 95d3675d63e11..3224a5571dd37 100644 --- a/packages/@aws-cdk/aws-ecs/lib/images/repository.ts +++ b/packages/@aws-cdk/aws-ecs/lib/images/repository.ts @@ -1,5 +1,5 @@ import * as secretsmanager from '@aws-cdk/aws-secretsmanager'; -import { Construct, Token } from '@aws-cdk/core'; +import { Annotations, Construct, Token } from '@aws-cdk/core'; import { ContainerDefinition } from '../container-definition'; import { ContainerImage, 
ContainerImageConfig } from '../container-image'; @@ -37,7 +37,7 @@ export class RepositoryImage extends ContainerImage { public bind(scope: Construct, containerDefinition: ContainerDefinition): ContainerImageConfig { // name could be a Token - in that case, skip validation altogether if (!Token.isUnresolved(this.imageName) && ECR_IMAGE_REGEX.test(this.imageName)) { - scope.node.addWarning("Proper policies need to be attached before pulling from ECR repository, or use 'fromEcrRepository'."); + Annotations.of(scope).addWarning("Proper policies need to be attached before pulling from ECR repository, or use 'fromEcrRepository'."); } if (this.props.credentials) { diff --git a/packages/@aws-cdk/aws-eks-legacy/lib/cluster.ts b/packages/@aws-cdk/aws-eks-legacy/lib/cluster.ts index c7597df495297..61f3ccc01fe3d 100644 --- a/packages/@aws-cdk/aws-eks-legacy/lib/cluster.ts +++ b/packages/@aws-cdk/aws-eks-legacy/lib/cluster.ts @@ -4,7 +4,7 @@ import * as ec2 from '@aws-cdk/aws-ec2'; import * as iam from '@aws-cdk/aws-iam'; import * as lambda from '@aws-cdk/aws-lambda'; import * as ssm from '@aws-cdk/aws-ssm'; -import { CfnOutput, Construct, Duration, IResource, Resource, Stack, Token, Tags } from '@aws-cdk/core'; +import { Annotations, CfnOutput, Construct, Duration, IResource, Resource, Stack, Token, Tags } from '@aws-cdk/core'; import { AwsAuth } from './aws-auth'; import { ClusterResource } from './cluster-resource'; import { CfnCluster, CfnClusterProps } from './eks.generated'; @@ -336,7 +336,7 @@ export class Cluster extends Resource implements ICluster { physicalName: props.clusterName, }); - this.node.addWarning('The @aws-cdk/aws-eks-legacy module will no longer be released as part of the AWS CDK starting March 1st, 2020. Please refer to https://github.com/aws/aws-cdk/issues/5544 for upgrade instructions'); + Annotations.of(this).addWarning('The @aws-cdk/aws-eks-legacy module will no longer be released as part of the AWS CDK starting March 1st, 2020. 
Please refer to https://github.com/aws/aws-cdk/issues/5544 for upgrade instructions'); const stack = Stack.of(this); @@ -636,7 +636,7 @@ export class Cluster extends Resource implements ICluster { // message (if token): "could not auto-tag public/private subnet with tag..." // message (if not token): "count not auto-tag public/private subnet xxxxx with tag..." const subnetID = Token.isUnresolved(subnet.subnetId) ? '' : ` ${subnet.subnetId}`; - this.node.addWarning(`Could not auto-tag ${type} subnet${subnetID} with "${tag}=1", please remember to do this manually`); + Annotations.of(this).addWarning(`Could not auto-tag ${type} subnet${subnetID} with "${tag}=1", please remember to do this manually`); continue; } diff --git a/packages/@aws-cdk/aws-eks/README.md b/packages/@aws-cdk/aws-eks/README.md index 59844cf4c179e..c96c98a0b1294 100644 --- a/packages/@aws-cdk/aws-eks/README.md +++ b/packages/@aws-cdk/aws-eks/README.md @@ -176,6 +176,59 @@ cluster.addNodegroup('nodegroup', { }); ``` +#### Custom AMI and Launch Template support + +Specify the launch template for the nodegroup with your custom AMI. When using a custom AMI, +Amazon EKS doesn't merge any user data. Rather, You are responsible for supplying the required +bootstrap commands for nodes to join the cluster. In the following sample, `/ect/eks/bootstrap.sh` from the AMI will be used to bootstrap the node. See [Using a custom AMI](https://docs.aws.amazon.com/en_ca/eks/latest/userguide/launch-templates.html) for more details. 
+ +```ts +const userData = ec2.UserData.forLinux(); +userData.addCommands( + 'set -o xtrace', + `/etc/eks/bootstrap.sh ${this.cluster.clusterName}`, +); +const lt = new ec2.CfnLaunchTemplate(this, 'LaunchTemplate', { + launchTemplateData: { + // specify your custom AMI below + imageId, + instanceType: new ec2.InstanceType('t3.small').toString(), + userData: Fn.base64(userData.render()), + }, +}); +this.cluster.addNodegroup('extra-ng', { + launchTemplate: { + id: lt.ref, + version: lt.attrDefaultVersionNumber, + }, +}); +``` + +### ARM64 Support + +Instance types with `ARM64` architecture are supported in both managed nodegroup and self-managed capacity. Simply specify an ARM64 `instanceType` (such as `m6g.medium`), and the latest +Amazon Linux 2 AMI for ARM64 will be automatically selected. + +```ts +// create a cluster with a default managed nodegroup +cluster = new eks.Cluster(this, 'Cluster', { + vpc, + mastersRole, + version: eks.KubernetesVersion.V1_17, +}); + +// add a managed ARM64 nodegroup +cluster.addNodegroup('extra-ng-arm', { + instanceType: new ec2.InstanceType('m6g.medium'), + minSize: 2, +}); + +// add a self-managed ARM64 nodegroup +cluster.addCapacity('self-ng-arm', { + instanceType: new ec2.InstanceType('m6g.medium'), + minCapacity: 2, +}) +``` ### Fargate @@ -239,9 +292,19 @@ Spot instance nodes will be labeled with `lifecycle=Ec2Spot` and tainted with `P The [AWS Node Termination Handler](https://github.com/aws/aws-node-termination-handler) DaemonSet will be installed from [ Amazon EKS Helm chart repository -](https://github.com/aws/eks-charts/tree/master/stable/aws-node-termination-handler) on these nodes. 
The termination handler ensures that the Kubernetes control plane responds appropriately to events that can cause your EC2 instance to become unavailable, such as [EC2 maintenance events](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-instances-status-check_sched.html) and [EC2 Spot interruptions](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-interruptions.html) and helps gracefully stop all pods running on spot nodes that are about to be +](https://github.com/aws/eks-charts/tree/master/stable/aws-node-termination-handler) on these nodes. +The termination handler ensures that the Kubernetes control plane responds appropriately to events that +can cause your EC2 instance to become unavailable, such as [EC2 maintenance events](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-instances-status-check_sched.html) +and [EC2 Spot interruptions](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-interruptions.html) and helps gracefully stop all pods running on spot nodes that are about to be terminated. 
+Current version: + +| name | version | +|------------|---------| +| Helm Chart | 0.9.5 | +| App | 1.7.0 | + ### Bootstrapping When adding capacity, you can specify options for diff --git a/packages/@aws-cdk/aws-eks/lib/cluster.ts b/packages/@aws-cdk/aws-eks/lib/cluster.ts index 3889ea41d6d32..4087b6e4931a3 100644 --- a/packages/@aws-cdk/aws-eks/lib/cluster.ts +++ b/packages/@aws-cdk/aws-eks/lib/cluster.ts @@ -6,12 +6,13 @@ import * as iam from '@aws-cdk/aws-iam'; import * as kms from '@aws-cdk/aws-kms'; import * as lambda from '@aws-cdk/aws-lambda'; import * as ssm from '@aws-cdk/aws-ssm'; -import { CfnOutput, CfnResource, Construct, IResource, Resource, Stack, Tags, Token, Duration } from '@aws-cdk/core'; +import { Annotations, CfnOutput, CfnResource, Construct, IResource, Resource, Stack, Tags, Token, Duration } from '@aws-cdk/core'; import * as YAML from 'yaml'; import { AwsAuth } from './aws-auth'; import { ClusterResource, clusterArnComponents } from './cluster-resource'; import { FargateProfile, FargateProfileOptions } from './fargate-profile'; import { HelmChart, HelmChartOptions } from './helm-chart'; +import { INSTANCE_TYPES } from './instance-types'; import { KubernetesManifest } from './k8s-manifest'; import { KubernetesObjectValue } from './k8s-object-value'; import { KubernetesPatch } from './k8s-patch'; @@ -460,6 +461,11 @@ export class EndpointAccess { * @param cidr CIDR blocks. */ public onlyFrom(...cidr: string[]) { + if (!this._config.privateAccess) { + // when private access is disabled, we can't restric public + // access since it will render the kubectl provider unusable. + throw new Error('Cannot restric public access to endpoint when private access is disabled. 
Use PUBLIC_AND_PRIVATE.onlyFrom() instead.'); + } return new EndpointAccess({ ...this._config, // override CIDR @@ -856,20 +862,23 @@ export class Cluster extends ClusterBase { this.kubectlEnvironment = props.kubectlEnvironment; this.kubectlLayer = props.kubectlLayer; - if (this.endpointAccess._config.privateAccess && this.vpc instanceof ec2.Vpc) { - // validate VPC properties according to: https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html - if (!this.vpc.dnsHostnamesEnabled || !this.vpc.dnsSupportEnabled) { - throw new Error('Private endpoint access requires the VPC to have DNS support and DNS hostnames enabled. Use `enableDnsHostnames: true` and `enableDnsSupport: true` when creating the VPC.'); - } - } + const privateSubents = this.selectPrivateSubnets().slice(0, 16); + const publicAccessDisabled = !this.endpointAccess._config.publicAccess; + const publicAccessRestricted = !publicAccessDisabled + && this.endpointAccess._config.publicCidrs + && this.endpointAccess._config.publicCidrs.length !== 0; - this.kubectlSecurityGroup = new ec2.SecurityGroup(this, 'KubectlProviderSecurityGroup', { - vpc: this.vpc, - description: 'Comminication between KubectlProvider and EKS Control Plane', - }); + // validate endpoint access configuration - // grant the kubectl provider access to the cluster control plane. - this.connections.allowFrom(this.kubectlSecurityGroup, this.connections.defaultPort!); + if (privateSubents.length === 0 && publicAccessDisabled) { + // no private subnets and no public access at all, no good. + throw new Error('Vpc must contain private subnets when public endpoint access is disabled'); + } + + if (privateSubents.length === 0 && publicAccessRestricted) { + // no private subents and public access is restricted, no good. 
+ throw new Error('Vpc must contain private subnets when public endpoint access is restricted'); + } const resource = this._clusterResource = new ClusterResource(this, 'Resource', { name: this.physicalName, @@ -894,11 +903,32 @@ export class Cluster extends ClusterBase { vpc: this.vpc, }); - this.adminRole = resource.adminRole; + if (this.endpointAccess._config.privateAccess && privateSubents.length !== 0) { - // the security group and vpc must exist in order to properly delete the cluster (since we run `kubectl delete`). - // this ensures that. - this._clusterResource.node.addDependency(this.kubectlSecurityGroup, this.vpc); + // when private access is enabled and the vpc has private subnets, lets connect + // the provider to the vpc so that it will work even when restricting public access. + + // validate VPC properties according to: https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html + if (this.vpc instanceof ec2.Vpc && !(this.vpc.dnsHostnamesEnabled && this.vpc.dnsSupportEnabled)) { + throw new Error('Private endpoint access requires the VPC to have DNS support and DNS hostnames enabled. Use `enableDnsHostnames: true` and `enableDnsSupport: true` when creating the VPC.'); + } + + this.kubectlPrivateSubnets = privateSubents; + + this.kubectlSecurityGroup = new ec2.SecurityGroup(this, 'KubectlProviderSecurityGroup', { + vpc: this.vpc, + description: 'Comminication between KubectlProvider and EKS Control Plane', + }); + + // grant the kubectl provider access to the cluster control plane. + this.connections.allowFrom(this.kubectlSecurityGroup, this.connections.defaultPort!); + + // the security group and vpc must exist in order to properly delete the cluster (since we run `kubectl delete`). + // this ensures that. + this._clusterResource.node.addDependency(this.kubectlSecurityGroup, this.vpc); + } + + this.adminRole = resource.adminRole; // we use an SSM parameter as a barrier because it's free and fast. 
this._kubectlReadyBarrier = new CfnResource(this, 'KubectlReadyBarrier', { @@ -924,14 +954,6 @@ export class Cluster extends ClusterBase { // cluster is first created, that's the only role that has "system:masters" permissions this.kubectlRole = this.adminRole; - // specify private subnets for kubectl only if we don't have public k8s endpoint access - if (!this.endpointAccess._config.publicAccess) { - this.kubectlPrivateSubnets = this.selectPrivateSubnets().slice(0, 16); - if (this.kubectlPrivateSubnets.length === 0) { - throw new Error('Vpc must contain private subnets to configure private endpoint access'); - } - } - this._kubectlResourceProvider = this.defineKubectlProvider(); const updateConfigCommandPrefix = `aws eks update-kubeconfig --name ${this.clusterName}`; @@ -1026,6 +1048,7 @@ export class Cluster extends ClusterBase { new BottleRocketImage() : new EksOptimizedImage({ nodeType: nodeTypeForInstanceType(options.instanceType), + cpuArch: cpuArchForInstanceType(options.instanceType), kubernetesVersion: this.version.version, }), updateType: options.updateType, @@ -1311,7 +1334,7 @@ export class Cluster extends ClusterBase { if (!this._spotInterruptHandler) { this._spotInterruptHandler = this.addChart('spot-interrupt-handler', { chart: 'aws-node-termination-handler', - version: '0.7.3', + version: '0.9.5', repository: 'https://aws.github.io/eks-charts', namespace: 'kube-system', values: { @@ -1352,7 +1375,7 @@ export class Cluster extends ClusterBase { // message (if token): "could not auto-tag public/private subnet with tag..." // message (if not token): "count not auto-tag public/private subnet xxxxx with tag..." const subnetID = Token.isUnresolved(subnet.subnetId) ? 
'' : ` ${subnet.subnetId}`; - this.node.addWarning(`Could not auto-tag ${type} subnet${subnetID} with "${tag}=1", please remember to do this manually`); + Annotations.of(this).addWarning(`Could not auto-tag ${type} subnet${subnetID} with "${tag}=1", please remember to do this manually`); continue; } @@ -1618,6 +1641,13 @@ export interface EksOptimizedImageProps { */ readonly nodeType?: NodeType; + /** + * What cpu architecture to retrieve the image for (arm64 or x86_64) + * + * @default CpuArch.X86_64 + */ + readonly cpuArch?: CpuArch; + /** * The Kubernetes version to use * @@ -1631,8 +1661,8 @@ export interface EksOptimizedImageProps { */ export class EksOptimizedImage implements ec2.IMachineImage { private readonly nodeType?: NodeType; + private readonly cpuArch?: CpuArch; private readonly kubernetesVersion?: string; - private readonly amiParameterName: string; /** @@ -1640,11 +1670,13 @@ export class EksOptimizedImage implements ec2.IMachineImage { */ public constructor(props: EksOptimizedImageProps = {}) { this.nodeType = props.nodeType ?? NodeType.STANDARD; + this.cpuArch = props.cpuArch ?? CpuArch.X86_64; this.kubernetesVersion = props.kubernetesVersion ?? LATEST_KUBERNETES_VERSION; // set the SSM parameter name this.amiParameterName = `/aws/service/eks/optimized-ami/${this.kubernetesVersion}/` - + ( this.nodeType === NodeType.STANDARD ? 'amazon-linux-2/' : '' ) + + ( this.nodeType === NodeType.STANDARD ? this.cpuArch === CpuArch.X86_64 ? + 'amazon-linux-2/' : 'amazon-linux-2-arm64/' :'' ) + ( this.nodeType === NodeType.GPU ? 'amazon-linux-2-gpu/' : '' ) + (this.nodeType === NodeType.INFERENTIA ? 'amazon-linux-2-gpu/' : '') + 'recommended/image_id'; @@ -1718,6 +1750,21 @@ export enum NodeType { INFERENTIA = 'INFERENTIA', } +/** + * CPU architecture + */ +export enum CpuArch { + /** + * arm64 CPU type + */ + ARM_64 = 'arm64', + + /** + * x86_64 CPU type + */ + X86_64 = 'x86_64', +} + /** * The type of compute resources to use for CoreDNS. 
*/ @@ -1761,12 +1808,15 @@ export enum MachineImageType { BOTTLEROCKET } -const GPU_INSTANCETYPES = ['p2', 'p3', 'g4']; -const INFERENTIA_INSTANCETYPES = ['inf1']; - function nodeTypeForInstanceType(instanceType: ec2.InstanceType) { - return GPU_INSTANCETYPES.includes(instanceType.toString().substring(0, 2)) ? NodeType.GPU : - INFERENTIA_INSTANCETYPES.includes(instanceType.toString().substring(0, 4)) ? NodeType.INFERENTIA : + return INSTANCE_TYPES.gpu.includes(instanceType.toString().substring(0, 2)) ? NodeType.GPU : + INSTANCE_TYPES.inferentia.includes(instanceType.toString().substring(0, 4)) ? NodeType.INFERENTIA : NodeType.STANDARD; } +function cpuArchForInstanceType(instanceType: ec2.InstanceType) { + return INSTANCE_TYPES.graviton2.includes(instanceType.toString().substring(0, 3)) ? CpuArch.ARM_64 : + INSTANCE_TYPES.graviton.includes(instanceType.toString().substring(0, 2)) ? CpuArch.ARM_64 : + CpuArch.X86_64; +} + diff --git a/packages/@aws-cdk/aws-eks/lib/instance-types.ts b/packages/@aws-cdk/aws-eks/lib/instance-types.ts new file mode 100644 index 0000000000000..0656757bd0120 --- /dev/null +++ b/packages/@aws-cdk/aws-eks/lib/instance-types.ts @@ -0,0 +1,6 @@ +export const INSTANCE_TYPES = { + gpu: ['p2', 'p3', 'g2', 'g3', 'g4'], + inferentia: ['inf1'], + graviton: ['a1'], + graviton2: ['c6g', 'm6g', 'r6g'], +}; diff --git a/packages/@aws-cdk/aws-eks/lib/legacy-cluster.ts b/packages/@aws-cdk/aws-eks/lib/legacy-cluster.ts index f7da66c79c2e0..348adff45f6df 100644 --- a/packages/@aws-cdk/aws-eks/lib/legacy-cluster.ts +++ b/packages/@aws-cdk/aws-eks/lib/legacy-cluster.ts @@ -3,7 +3,7 @@ import * as ec2 from '@aws-cdk/aws-ec2'; import * as iam from '@aws-cdk/aws-iam'; import * as kms from '@aws-cdk/aws-kms'; import * as ssm from '@aws-cdk/aws-ssm'; -import { CfnOutput, Construct, Resource, Stack, Token, Tags } from '@aws-cdk/core'; +import { Annotations, CfnOutput, Construct, Resource, Stack, Token, Tags } from '@aws-cdk/core'; import { ICluster, 
ClusterAttributes, KubernetesVersion, NodeType, DefaultCapacityType, EksOptimizedImage, CapacityOptions, MachineImageType, AutoScalingGroupOptions, CommonClusterOptions } from './cluster'; import { clusterArnComponents } from './cluster-resource'; import { CfnCluster, CfnClusterProps } from './eks.generated'; @@ -385,7 +385,7 @@ export class LegacyCluster extends Resource implements ICluster { // message (if token): "could not auto-tag public/private subnet with tag..." // message (if not token): "count not auto-tag public/private subnet xxxxx with tag..." const subnetID = Token.isUnresolved(subnet.subnetId) ? '' : ` ${subnet.subnetId}`; - this.node.addWarning(`Could not auto-tag ${type} subnet${subnetID} with "${tag}=1", please remember to do this manually`); + Annotations.of(this).addWarning(`Could not auto-tag ${type} subnet${subnetID} with "${tag}=1", please remember to do this manually`); continue; } diff --git a/packages/@aws-cdk/aws-eks/lib/managed-nodegroup.ts b/packages/@aws-cdk/aws-eks/lib/managed-nodegroup.ts index db2130e01879d..f65f4f844c2c8 100644 --- a/packages/@aws-cdk/aws-eks/lib/managed-nodegroup.ts +++ b/packages/@aws-cdk/aws-eks/lib/managed-nodegroup.ts @@ -3,6 +3,7 @@ import { IRole, ManagedPolicy, Role, ServicePrincipal } from '@aws-cdk/aws-iam'; import { Construct, IResource, Resource } from '@aws-cdk/core'; import { Cluster, ICluster } from './cluster'; import { CfnNodegroup } from './eks.generated'; +import { INSTANCE_TYPES } from './instance-types'; /** * NodeGroup interface @@ -22,13 +23,17 @@ export interface INodegroup extends IResource { */ export enum NodegroupAmiType { /** - * Amazon Linux 2 + * Amazon Linux 2 (x86-64) */ AL2_X86_64 = 'AL2_x86_64', /** * Amazon Linux 2 with GPU support */ AL2_X86_64_GPU = 'AL2_x86_64_GPU', + /** + * Amazon Linux 2 (ARM-64) + */ + AL2_ARM_64 = 'AL2_ARM_64' } /** @@ -51,6 +56,22 @@ export interface NodegroupRemoteAccess { readonly sourceSecurityGroups?: ISecurityGroup[]; } +/** + * Launch template 
property specification + */ +export interface LaunchTemplate { + /** + * The Launch template ID + */ + readonly id: string; + /** + * The launch template version to be used (optional). + * + * @default - the default version of the launch template + */ + readonly version?: string; +} + /** * The Nodegroup Options for addNodeGroup() method */ @@ -73,7 +94,7 @@ export interface NodegroupOptions { /** * The AMI type for your node group. * - * @default AL2_x86_64 + * @default - auto-determined from the instanceType property. */ readonly amiType?: NodegroupAmiType; /** @@ -155,6 +176,12 @@ export interface NodegroupOptions { * @default - None */ readonly tags?: { [name: string]: string }; + /** + * Launch template used for the nodegroup + * @see - https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html + * @default - no launch template + */ + readonly launchTemplate?: LaunchTemplate; } /** @@ -243,7 +270,8 @@ export class Nodegroup extends Resource implements INodegroup { nodegroupName: props.nodegroupName, nodeRole: this.role.roleArn, subnets: this.cluster.vpc.selectSubnets(props.subnets).subnetIds, - amiType: props.amiType, + amiType: props.amiType ?? (props.instanceType ? getAmiTypeForInstanceType(props.instanceType).toString() : + undefined), diskSize: props.diskSize, forceUpdateEnabled: props.forceUpdate ?? true, instanceTypes: props.instanceType ? 
[props.instanceType.toString()] : undefined, @@ -262,6 +290,25 @@ export class Nodegroup extends Resource implements INodegroup { tags: props.tags, }); + if (props.launchTemplate) { + if (props.diskSize) { + // see - https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html + // and https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-eks-nodegroup.html#cfn-eks-nodegroup-disksize + throw new Error('diskSize must be specified within the launch template'); + } + if (props.instanceType) { + // see - https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html + // and https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-eks-nodegroup.html#cfn-eks-nodegroup-disksize + throw new Error('Instance types must be specified within the launch template'); + } + // TODO: update this when the L1 resource spec is updated. + resource.addPropertyOverride('LaunchTemplate', { + Id: props.launchTemplate.id, + Version: props.launchTemplate.version, + }); + } + + // managed nodegroups update the `aws-auth` on creation, but we still need to track // its state for consistency. if (this.cluster instanceof Cluster) { @@ -282,5 +329,13 @@ export class Nodegroup extends Resource implements INodegroup { }); this.nodegroupName = this.getResourceNameAttribute(resource.ref); } +} +function getAmiTypeForInstanceType(instanceType: InstanceType) { + return INSTANCE_TYPES.graviton2.includes(instanceType.toString().substring(0, 3)) ? NodegroupAmiType.AL2_ARM_64 : + INSTANCE_TYPES.graviton.includes(instanceType.toString().substring(0, 2)) ? NodegroupAmiType.AL2_ARM_64 : + INSTANCE_TYPES.gpu.includes(instanceType.toString().substring(0, 2)) ? NodegroupAmiType.AL2_X86_64_GPU : + INSTANCE_TYPES.inferentia.includes(instanceType.toString().substring(0, 4)) ? 
NodegroupAmiType.AL2_X86_64_GPU : + NodegroupAmiType.AL2_X86_64; } + diff --git a/packages/@aws-cdk/aws-eks/test/integ.eks-cluster-private-endpoint.expected.json b/packages/@aws-cdk/aws-eks/test/integ.eks-cluster-private-endpoint.expected.json index e4ebed6a16b4f..6f10ead0fe30d 100644 --- a/packages/@aws-cdk/aws-eks/test/integ.eks-cluster-private-endpoint.expected.json +++ b/packages/@aws-cdk/aws-eks/test/integ.eks-cluster-private-endpoint.expected.json @@ -602,22 +602,6 @@ "ToPort": 443 } }, - "ClusterKubectlProviderSecurityGroup2D90691C": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupDescription": "Comminication between KubectlProvider and EKS Control Plane", - "SecurityGroupEgress": [ - { - "CidrIp": "0.0.0.0/0", - "Description": "Allow all outbound traffic by default", - "IpProtocol": "-1" - } - ], - "VpcId": { - "Ref": "Vpc8378EB38" - } - } - }, "ClusterCreationRole360249B6": { "Type": "AWS::IAM::Role", "Properties": { @@ -896,6 +880,22 @@ "UpdateReplacePolicy": "Delete", "DeletionPolicy": "Delete" }, + "ClusterKubectlProviderSecurityGroup2D90691C": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "Comminication between KubectlProvider and EKS Control Plane", + "SecurityGroupEgress": [ + { + "CidrIp": "0.0.0.0/0", + "Description": "Allow all outbound traffic by default", + "IpProtocol": "-1" + } + ], + "VpcId": { + "Ref": "Vpc8378EB38" + } + } + }, "ClusterKubectlReadyBarrier200052AF": { "Type": "AWS::SSM::Parameter", "Properties": { @@ -1050,6 +1050,7 @@ "Ref": "VpcPrivateSubnet3SubnetF258B56E" } ], + "AmiType": "AL2_x86_64", "ForceUpdateEnabled": true, "InstanceTypes": [ "m5.large" @@ -1167,7 +1168,7 @@ }, "/", { - "Ref": "AssetParameters239f3911452e16bf26eaf985b77bc9f361a22cb4adbc3e1d4fe5301abd724ccbS3Bucket475C950C" + "Ref": "AssetParametersdaac37af2b50452c854a73ef7e2c57d5229667e390db39773ffb9dfb497bbd20S3Bucket12418C8C" }, "/", { @@ -1177,7 +1178,7 @@ "Fn::Split": [ "||", { - "Ref": 
"AssetParameters239f3911452e16bf26eaf985b77bc9f361a22cb4adbc3e1d4fe5301abd724ccbS3VersionKeyA1911337" + "Ref": "AssetParametersdaac37af2b50452c854a73ef7e2c57d5229667e390db39773ffb9dfb497bbd20S3VersionKey8C9B24CA" } ] } @@ -1190,7 +1191,7 @@ "Fn::Split": [ "||", { - "Ref": "AssetParameters239f3911452e16bf26eaf985b77bc9f361a22cb4adbc3e1d4fe5301abd724ccbS3VersionKeyA1911337" + "Ref": "AssetParametersdaac37af2b50452c854a73ef7e2c57d5229667e390db39773ffb9dfb497bbd20S3VersionKey8C9B24CA" } ] } @@ -1334,17 +1335,17 @@ "Type": "String", "Description": "Artifact hash for asset \"570f91ed45d0c45e8ff145969f7499419312e806c83f009b76539ce989960e51\"" }, - "AssetParameters239f3911452e16bf26eaf985b77bc9f361a22cb4adbc3e1d4fe5301abd724ccbS3Bucket475C950C": { + "AssetParametersdaac37af2b50452c854a73ef7e2c57d5229667e390db39773ffb9dfb497bbd20S3Bucket12418C8C": { "Type": "String", - "Description": "S3 bucket for asset \"239f3911452e16bf26eaf985b77bc9f361a22cb4adbc3e1d4fe5301abd724ccb\"" + "Description": "S3 bucket for asset \"daac37af2b50452c854a73ef7e2c57d5229667e390db39773ffb9dfb497bbd20\"" }, - "AssetParameters239f3911452e16bf26eaf985b77bc9f361a22cb4adbc3e1d4fe5301abd724ccbS3VersionKeyA1911337": { + "AssetParametersdaac37af2b50452c854a73ef7e2c57d5229667e390db39773ffb9dfb497bbd20S3VersionKey8C9B24CA": { "Type": "String", - "Description": "S3 key for asset version \"239f3911452e16bf26eaf985b77bc9f361a22cb4adbc3e1d4fe5301abd724ccb\"" + "Description": "S3 key for asset version \"daac37af2b50452c854a73ef7e2c57d5229667e390db39773ffb9dfb497bbd20\"" }, - "AssetParameters239f3911452e16bf26eaf985b77bc9f361a22cb4adbc3e1d4fe5301abd724ccbArtifactHash3D850561": { + "AssetParametersdaac37af2b50452c854a73ef7e2c57d5229667e390db39773ffb9dfb497bbd20ArtifactHash90BA6C4A": { "Type": "String", - "Description": "Artifact hash for asset \"239f3911452e16bf26eaf985b77bc9f361a22cb4adbc3e1d4fe5301abd724ccb\"" + "Description": "Artifact hash for asset 
\"daac37af2b50452c854a73ef7e2c57d5229667e390db39773ffb9dfb497bbd20\"" } } } \ No newline at end of file diff --git a/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.expected.json b/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.expected.json index 99e86fcf55825..b8e24cf58b4a0 100644 --- a/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.expected.json +++ b/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.expected.json @@ -670,6 +670,27 @@ "ToPort": 443 } }, + "ClusterControlPlaneSecurityGroupfromawscdkeksclustertestClusterNodesArmInstanceSecurityGroup52C45858443B84847DA": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "IpProtocol": "tcp", + "Description": "from awscdkeksclustertestClusterNodesArmInstanceSecurityGroup52C45858:443", + "FromPort": 443, + "GroupId": { + "Fn::GetAtt": [ + "ClusterControlPlaneSecurityGroupD274242C", + "GroupId" + ] + }, + "SourceSecurityGroupId": { + "Fn::GetAtt": [ + "ClusterNodesArmInstanceSecurityGroup599F388B", + "GroupId" + ] + }, + "ToPort": 443 + } + }, "ClusterControlPlaneSecurityGroupfromawscdkeksclustertestClusterBottlerocketNodesInstanceSecurityGroup83FE7914443ECEF3F30": { "Type": "AWS::EC2::SecurityGroupIngress", "Properties": { @@ -733,22 +754,6 @@ "ToPort": 443 } }, - "ClusterKubectlProviderSecurityGroup2D90691C": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupDescription": "Comminication between KubectlProvider and EKS Control Plane", - "SecurityGroupEgress": [ - { - "CidrIp": "0.0.0.0/0", - "Description": "Allow all outbound traffic by default", - "IpProtocol": "-1" - } - ], - "VpcId": { - "Ref": "Vpc8378EB38" - } - } - }, "ClusterCreationRole360249B6": { "Type": "AWS::IAM::Role", "Properties": { @@ -967,7 +972,7 @@ ] }, "Config": { - "version": "1.16", + "version": "1.17", "roleArn": { "Fn::GetAtt": [ "ClusterRoleFA261979", @@ -1067,6 +1072,22 @@ "UpdateReplacePolicy": "Delete", "DeletionPolicy": "Delete" }, + "ClusterKubectlProviderSecurityGroup2D90691C": { + "Type": 
"AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "Comminication between KubectlProvider and EKS Control Plane", + "SecurityGroupEgress": [ + { + "CidrIp": "0.0.0.0/0", + "Description": "Allow all outbound traffic by default", + "IpProtocol": "-1" + } + ], + "VpcId": { + "Ref": "Vpc8378EB38" + } + } + }, "ClusterKubectlReadyBarrier200052AF": { "Type": "AWS::SSM::Parameter", "Properties": { @@ -1130,6 +1151,13 @@ ] }, "\\\",\\\"username\\\":\\\"system:node:{{EC2PrivateDNSName}}\\\",\\\"groups\\\":[\\\"system:bootstrappers\\\",\\\"system:nodes\\\"]},{\\\"rolearn\\\":\\\"", + { + "Fn::GetAtt": [ + "ClusterNodesArmInstanceRoleB93D3298", + "Arn" + ] + }, + "\\\",\\\"username\\\":\\\"system:node:{{EC2PrivateDNSName}}\\\",\\\"groups\\\":[\\\"system:bootstrappers\\\",\\\"system:nodes\\\"]},{\\\"rolearn\\\":\\\"", { "Fn::GetAtt": [ "ClusterBottlerocketNodesInstanceRole68E4BCFB", @@ -1157,6 +1185,20 @@ "Arn" ] }, + "\\\",\\\"username\\\":\\\"system:node:{{EC2PrivateDNSName}}\\\",\\\"groups\\\":[\\\"system:bootstrappers\\\",\\\"system:nodes\\\"]},{\\\"rolearn\\\":\\\"", + { + "Fn::GetAtt": [ + "ClusterNodegroupextrangarmNodeGroupRoleADF5749F", + "Arn" + ] + }, + "\\\",\\\"username\\\":\\\"system:node:{{EC2PrivateDNSName}}\\\",\\\"groups\\\":[\\\"system:bootstrappers\\\",\\\"system:nodes\\\"]},{\\\"rolearn\\\":\\\"", + { + "Fn::GetAtt": [ + "ClusterNodegroupDefaultCapacityNodeGroupRole55953B04", + "Arn" + ] + }, "\\\",\\\"username\\\":\\\"system:node:{{EC2PrivateDNSName}}\\\",\\\"groups\\\":[\\\"system:bootstrappers\\\",\\\"system:nodes\\\"]}]\",\"mapUsers\":\"[]\",\"mapAccounts\":\"[]\"}}]" ] ] @@ -1265,6 +1307,7 @@ "Ref": "VpcPrivateSubnet3SubnetF258B56E" } ], + "AmiType": "AL2_x86_64", "ForceUpdateEnabled": true, "InstanceTypes": [ "m5.large" @@ -1538,7 +1581,7 @@ "Type": "AWS::AutoScaling::LaunchConfiguration", "Properties": { "ImageId": { - "Ref": 
"SsmParameterValueawsserviceeksoptimizedami116amazonlinux2recommendedimageidC96584B6F00A464EAD1953AFF4B05118Parameter" + "Ref": "SsmParameterValueawsserviceeksoptimizedami117amazonlinux2recommendedimageidC96584B6F00A464EAD1953AFF4B05118Parameter" }, "InstanceType": "t2.medium", "IamInstanceProfile": { @@ -1619,6 +1662,283 @@ } } }, + "ClusterNodesArmInstanceSecurityGroup599F388B": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "aws-cdk-eks-cluster-test/Cluster/NodesArm/InstanceSecurityGroup", + "SecurityGroupEgress": [ + { + "CidrIp": "0.0.0.0/0", + "Description": "Allow all outbound traffic by default", + "IpProtocol": "-1" + } + ], + "Tags": [ + { + "Key": { + "Fn::Join": [ + "", + [ + "kubernetes.io/cluster/", + { + "Ref": "Cluster9EE0221C" + } + ] + ] + }, + "Value": "owned" + }, + { + "Key": "Name", + "Value": "aws-cdk-eks-cluster-test/Cluster/NodesArm" + } + ], + "VpcId": { + "Ref": "Vpc8378EB38" + } + } + }, + "ClusterNodesArmInstanceSecurityGroupfromawscdkeksclustertestClusterNodesArmInstanceSecurityGroup52C45858ALLTRAFFIC83BB7106": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "IpProtocol": "-1", + "Description": "from awscdkeksclustertestClusterNodesArmInstanceSecurityGroup52C45858:ALL TRAFFIC", + "GroupId": { + "Fn::GetAtt": [ + "ClusterNodesArmInstanceSecurityGroup599F388B", + "GroupId" + ] + }, + "SourceSecurityGroupId": { + "Fn::GetAtt": [ + "ClusterNodesArmInstanceSecurityGroup599F388B", + "GroupId" + ] + } + } + }, + "ClusterNodesArmInstanceSecurityGroupfromawscdkeksclustertestClusterControlPlaneSecurityGroup2F13013444328ED4211": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "IpProtocol": "tcp", + "Description": "from awscdkeksclustertestClusterControlPlaneSecurityGroup2F130134:443", + "FromPort": 443, + "GroupId": { + "Fn::GetAtt": [ + "ClusterNodesArmInstanceSecurityGroup599F388B", + "GroupId" + ] + }, + "SourceSecurityGroupId": { + "Fn::GetAtt": [ + 
"ClusterControlPlaneSecurityGroupD274242C", + "GroupId" + ] + }, + "ToPort": 443 + } + }, + "ClusterNodesArmInstanceSecurityGroupfromawscdkeksclustertestClusterControlPlaneSecurityGroup2F13013410256553586052D07": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "IpProtocol": "tcp", + "Description": "from awscdkeksclustertestClusterControlPlaneSecurityGroup2F130134:1025-65535", + "FromPort": 1025, + "GroupId": { + "Fn::GetAtt": [ + "ClusterNodesArmInstanceSecurityGroup599F388B", + "GroupId" + ] + }, + "SourceSecurityGroupId": { + "Fn::GetAtt": [ + "ClusterControlPlaneSecurityGroupD274242C", + "GroupId" + ] + }, + "ToPort": 65535 + } + }, + "ClusterNodesArmInstanceRoleB93D3298": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": { + "Fn::Join": [ + "", + [ + "ec2.", + { + "Ref": "AWS::URLSuffix" + } + ] + ] + } + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/AmazonEKSWorkerNodePolicy" + ] + ] + }, + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/AmazonEKS_CNI_Policy" + ] + ] + }, + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" + ] + ] + } + ], + "Tags": [ + { + "Key": { + "Fn::Join": [ + "", + [ + "kubernetes.io/cluster/", + { + "Ref": "Cluster9EE0221C" + } + ] + ] + }, + "Value": "owned" + }, + { + "Key": "Name", + "Value": "aws-cdk-eks-cluster-test/Cluster/NodesArm" + } + ] + } + }, + "ClusterNodesArmInstanceProfile158C5C9F": { + "Type": "AWS::IAM::InstanceProfile", + "Properties": { + "Roles": [ + { + "Ref": "ClusterNodesArmInstanceRoleB93D3298" + } + ] + } + }, + "ClusterNodesArmLaunchConfigAAF61344": { + "Type": "AWS::AutoScaling::LaunchConfiguration", + "Properties": { + 
"ImageId": { + "Ref": "SsmParameterValueawsserviceeksoptimizedami117amazonlinux2arm64recommendedimageidC96584B6F00A464EAD1953AFF4B05118Parameter" + }, + "InstanceType": "m6g.medium", + "IamInstanceProfile": { + "Ref": "ClusterNodesArmInstanceProfile158C5C9F" + }, + "SecurityGroups": [ + { + "Fn::GetAtt": [ + "ClusterNodesArmInstanceSecurityGroup599F388B", + "GroupId" + ] + } + ], + "UserData": { + "Fn::Base64": { + "Fn::Join": [ + "", + [ + "#!/bin/bash\nset -o xtrace\n/etc/eks/bootstrap.sh ", + { + "Ref": "Cluster9EE0221C" + }, + " --kubelet-extra-args \"--node-labels lifecycle=OnDemand\" --use-max-pods true\n/opt/aws/bin/cfn-signal --exit-code $? --stack aws-cdk-eks-cluster-test --resource ClusterNodesArmASG40A593D0 --region test-region" + ] + ] + } + } + }, + "DependsOn": [ + "ClusterNodesArmInstanceRoleB93D3298" + ] + }, + "ClusterNodesArmASG40A593D0": { + "Type": "AWS::AutoScaling::AutoScalingGroup", + "Properties": { + "MaxSize": "1", + "MinSize": "1", + "LaunchConfigurationName": { + "Ref": "ClusterNodesArmLaunchConfigAAF61344" + }, + "Tags": [ + { + "Key": { + "Fn::Join": [ + "", + [ + "kubernetes.io/cluster/", + { + "Ref": "Cluster9EE0221C" + } + ] + ] + }, + "PropagateAtLaunch": true, + "Value": "owned" + }, + { + "Key": "Name", + "PropagateAtLaunch": true, + "Value": "aws-cdk-eks-cluster-test/Cluster/NodesArm" + } + ], + "VPCZoneIdentifier": [ + { + "Ref": "VpcPrivateSubnet1Subnet536B997A" + }, + { + "Ref": "VpcPrivateSubnet2Subnet3788AAA1" + }, + { + "Ref": "VpcPrivateSubnet3SubnetF258B56E" + } + ] + }, + "UpdatePolicy": { + "AutoScalingScheduledAction": { + "IgnoreUnmodifiedGroupSizeProperties": true + } + } + }, "ClusterBottlerocketNodesInstanceSecurityGroup3794A94B": { "Type": "AWS::EC2::SecurityGroup", "Properties": { @@ -2106,7 +2426,7 @@ "Type": "AWS::AutoScaling::LaunchConfiguration", "Properties": { "ImageId": { - "Ref": "SsmParameterValueawsserviceeksoptimizedami116amazonlinux2recommendedimageidC96584B6F00A464EAD1953AFF4B05118Parameter" + 
"Ref": "SsmParameterValueawsserviceeksoptimizedami117amazonlinux2recommendedimageidC96584B6F00A464EAD1953AFF4B05118Parameter" }, "InstanceType": "t3.large", "IamInstanceProfile": { @@ -2208,7 +2528,7 @@ }, "Release": "ksclustertestclusterchartspotinterrupthandlerf41ba997", "Chart": "aws-node-termination-handler", - "Version": "0.7.3", + "Version": "0.9.5", "Values": "{\"nodeSelector.lifecycle\":\"Ec2Spot\"}", "Namespace": "kube-system", "Repository": "https://aws.github.io/eks-charts", @@ -2416,7 +2736,7 @@ "Type": "AWS::AutoScaling::LaunchConfiguration", "Properties": { "ImageId": { - "Ref": "SsmParameterValueawsserviceeksoptimizedami116amazonlinux2gpurecommendedimageidC96584B6F00A464EAD1953AFF4B05118Parameter" + "Ref": "SsmParameterValueawsserviceeksoptimizedami117amazonlinux2gpurecommendedimageidC96584B6F00A464EAD1953AFF4B05118Parameter" }, "InstanceType": "inf1.2xlarge", "IamInstanceProfile": { @@ -2611,6 +2931,7 @@ "Ref": "VpcPrivateSubnet3SubnetF258B56E" } ], + "AmiType": "AL2_x86_64", "ForceUpdateEnabled": true, "InstanceTypes": [ "t3.small" @@ -2622,6 +2943,148 @@ } } }, + "ClusterNodegroupextrangarmNodeGroupRoleADF5749F": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": { + "Fn::Join": [ + "", + [ + "ec2.", + { + "Ref": "AWS::URLSuffix" + } + ] + ] + } + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/AmazonEKSWorkerNodePolicy" + ] + ] + }, + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/AmazonEKS_CNI_Policy" + ] + ] + }, + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" + ] + ] + } + ] + } + }, + "ClusterNodegroupextrangarm7773987A": { + "Type": "AWS::EKS::Nodegroup", + "Properties": { + 
"ClusterName": { + "Ref": "Cluster9EE0221C" + }, + "NodeRole": { + "Fn::GetAtt": [ + "ClusterNodegroupextrangarmNodeGroupRoleADF5749F", + "Arn" + ] + }, + "Subnets": [ + { + "Ref": "VpcPrivateSubnet1Subnet536B997A" + }, + { + "Ref": "VpcPrivateSubnet2Subnet3788AAA1" + }, + { + "Ref": "VpcPrivateSubnet3SubnetF258B56E" + } + ], + "AmiType": "AL2_ARM_64", + "ForceUpdateEnabled": true, + "InstanceTypes": [ + "m6g.medium" + ], + "ScalingConfig": { + "DesiredSize": 1, + "MaxSize": 1, + "MinSize": 1 + } + } + }, + "ClusterNodegroupextrang2F1FB0D40": { + "Type": "AWS::EKS::Nodegroup", + "Properties": { + "ClusterName": { + "Ref": "Cluster9EE0221C" + }, + "NodeRole": { + "Fn::GetAtt": [ + "ClusterNodegroupDefaultCapacityNodeGroupRole55953B04", + "Arn" + ] + }, + "Subnets": [ + { + "Ref": "VpcPrivateSubnet1Subnet536B997A" + }, + { + "Ref": "VpcPrivateSubnet2Subnet3788AAA1" + }, + { + "Ref": "VpcPrivateSubnet3SubnetF258B56E" + } + ], + "ForceUpdateEnabled": true, + "ScalingConfig": { + "DesiredSize": 1, + "MaxSize": 1, + "MinSize": 1 + }, + "LaunchTemplate": { + "Id": { + "Ref": "LaunchTemplate" + }, + "Version": { + "Fn::GetAtt": [ + "LaunchTemplate", + "DefaultVersionNumber" + ] + } + } + } + }, "ClustermanifestHelloApp078A45D8": { "Type": "Custom::AWSCDK-EKS-KubernetesResource", "Properties": { @@ -3039,7 +3502,7 @@ }, "/", { - "Ref": "AssetParameters2944b93098bcfadbe7d696c05bc208c5956fbd7bf8bd0bc43a58410cdcee569aS3BucketFEA73057" + "Ref": "AssetParametersa298dd278c9ef814ebac4c9d8b2dc8e1b8374a14c5b7d0e79f041a296668f5dcS3BucketCA7ADF01" }, "/", { @@ -3049,7 +3512,7 @@ "Fn::Split": [ "||", { - "Ref": "AssetParameters2944b93098bcfadbe7d696c05bc208c5956fbd7bf8bd0bc43a58410cdcee569aS3VersionKey00C85273" + "Ref": "AssetParametersa298dd278c9ef814ebac4c9d8b2dc8e1b8374a14c5b7d0e79f041a296668f5dcS3VersionKey822F0346" } ] } @@ -3062,7 +3525,7 @@ "Fn::Split": [ "||", { - "Ref": "AssetParameters2944b93098bcfadbe7d696c05bc208c5956fbd7bf8bd0bc43a58410cdcee569aS3VersionKey00C85273" + 
"Ref": "AssetParametersa298dd278c9ef814ebac4c9d8b2dc8e1b8374a14c5b7d0e79f041a296668f5dcS3VersionKey822F0346" } ] } @@ -3090,6 +3553,21 @@ "referencetoawscdkeksclustertestAssetParametersb7d8a9750f8bfded8ac76be100e3bee1c3d4824df006766110d023f42952f5c2S3VersionKey901D947ARef": { "Ref": "AssetParametersb7d8a9750f8bfded8ac76be100e3bee1c3d4824df006766110d023f42952f5c2S3VersionKey40FF2C4A" }, + "referencetoawscdkeksclustertestVpcPrivateSubnet1Subnet32A4EC2ARef": { + "Ref": "VpcPrivateSubnet1Subnet536B997A" + }, + "referencetoawscdkeksclustertestVpcPrivateSubnet2Subnet5CC53627Ref": { + "Ref": "VpcPrivateSubnet2Subnet3788AAA1" + }, + "referencetoawscdkeksclustertestVpcPrivateSubnet3Subnet7F5D6918Ref": { + "Ref": "VpcPrivateSubnet3SubnetF258B56E" + }, + "referencetoawscdkeksclustertestClusterKubectlProviderSecurityGroupD167EE6BGroupId": { + "Fn::GetAtt": [ + "ClusterKubectlProviderSecurityGroup2D90691C", + "GroupId" + ] + }, "referencetoawscdkeksclustertestAssetParameters34131c2e554ab57ad3a47fc0a13173a5c2a4b65a7582fe9622277b3d04c8e1e1S3Bucket85526CA7Ref": { "Ref": "AssetParameters34131c2e554ab57ad3a47fc0a13173a5c2a4b65a7582fe9622277b3d04c8e1e1S3BucketD25BCC90" }, @@ -3099,6 +3577,30 @@ } } }, + "LaunchTemplate": { + "Type": "AWS::EC2::LaunchTemplate", + "Properties": { + "LaunchTemplateData": { + "ImageId": { + "Ref": "SsmParameterValueawsserviceeksoptimizedami114amazonlinux2recommendedimageidC96584B6F00A464EAD1953AFF4B05118Parameter" + }, + "InstanceType": "t3.small", + "UserData": { + "Fn::Base64": { + "Fn::Join": [ + "", + [ + "#!/bin/bash\nset -o xtrace\n/etc/eks/bootstrap.sh ", + { + "Ref": "Cluster9EE0221C" + } + ] + ] + } + } + } + } + }, "AWSCDKCfnUtilsProviderCustomResourceProviderRoleFE0EE867": { "Type": "AWS::IAM::Role", "Properties": { @@ -3765,29 +4267,37 @@ "Type": "String", "Description": "Artifact hash for asset \"04fa2d485a51abd8261468eb6fa053d3a72242fc068fa75683232a52960b30cf\"" }, - 
"AssetParameters2944b93098bcfadbe7d696c05bc208c5956fbd7bf8bd0bc43a58410cdcee569aS3BucketFEA73057": { + "AssetParametersa298dd278c9ef814ebac4c9d8b2dc8e1b8374a14c5b7d0e79f041a296668f5dcS3BucketCA7ADF01": { "Type": "String", - "Description": "S3 bucket for asset \"2944b93098bcfadbe7d696c05bc208c5956fbd7bf8bd0bc43a58410cdcee569a\"" + "Description": "S3 bucket for asset \"a298dd278c9ef814ebac4c9d8b2dc8e1b8374a14c5b7d0e79f041a296668f5dc\"" }, - "AssetParameters2944b93098bcfadbe7d696c05bc208c5956fbd7bf8bd0bc43a58410cdcee569aS3VersionKey00C85273": { + "AssetParametersa298dd278c9ef814ebac4c9d8b2dc8e1b8374a14c5b7d0e79f041a296668f5dcS3VersionKey822F0346": { "Type": "String", - "Description": "S3 key for asset version \"2944b93098bcfadbe7d696c05bc208c5956fbd7bf8bd0bc43a58410cdcee569a\"" + "Description": "S3 key for asset version \"a298dd278c9ef814ebac4c9d8b2dc8e1b8374a14c5b7d0e79f041a296668f5dc\"" }, - "AssetParameters2944b93098bcfadbe7d696c05bc208c5956fbd7bf8bd0bc43a58410cdcee569aArtifactHashF20FF2C0": { + "AssetParametersa298dd278c9ef814ebac4c9d8b2dc8e1b8374a14c5b7d0e79f041a296668f5dcArtifactHashA688F4F0": { "Type": "String", - "Description": "Artifact hash for asset \"2944b93098bcfadbe7d696c05bc208c5956fbd7bf8bd0bc43a58410cdcee569a\"" + "Description": "Artifact hash for asset \"a298dd278c9ef814ebac4c9d8b2dc8e1b8374a14c5b7d0e79f041a296668f5dc\"" }, - "SsmParameterValueawsserviceeksoptimizedami116amazonlinux2recommendedimageidC96584B6F00A464EAD1953AFF4B05118Parameter": { + "SsmParameterValueawsserviceeksoptimizedami117amazonlinux2recommendedimageidC96584B6F00A464EAD1953AFF4B05118Parameter": { "Type": "AWS::SSM::Parameter::Value", - "Default": "/aws/service/eks/optimized-ami/1.16/amazon-linux-2/recommended/image_id" + "Default": "/aws/service/eks/optimized-ami/1.17/amazon-linux-2/recommended/image_id" + }, + "SsmParameterValueawsserviceeksoptimizedami117amazonlinux2arm64recommendedimageidC96584B6F00A464EAD1953AFF4B05118Parameter": { + "Type": "AWS::SSM::Parameter::Value", + 
"Default": "/aws/service/eks/optimized-ami/1.17/amazon-linux-2-arm64/recommended/image_id" }, "SsmParameterValueawsservicebottlerocketawsk8s115x8664latestimageidC96584B6F00A464EAD1953AFF4B05118Parameter": { "Type": "AWS::SSM::Parameter::Value", "Default": "/aws/service/bottlerocket/aws-k8s-1.15/x86_64/latest/image_id" }, - "SsmParameterValueawsserviceeksoptimizedami116amazonlinux2gpurecommendedimageidC96584B6F00A464EAD1953AFF4B05118Parameter": { + "SsmParameterValueawsserviceeksoptimizedami117amazonlinux2gpurecommendedimageidC96584B6F00A464EAD1953AFF4B05118Parameter": { + "Type": "AWS::SSM::Parameter::Value", + "Default": "/aws/service/eks/optimized-ami/1.17/amazon-linux-2-gpu/recommended/image_id" + }, + "SsmParameterValueawsserviceeksoptimizedami114amazonlinux2recommendedimageidC96584B6F00A464EAD1953AFF4B05118Parameter": { "Type": "AWS::SSM::Parameter::Value", - "Default": "/aws/service/eks/optimized-ami/1.16/amazon-linux-2-gpu/recommended/image_id" + "Default": "/aws/service/eks/optimized-ami/1.14/amazon-linux-2/recommended/image_id" } } } \ No newline at end of file diff --git a/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.ts b/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.ts index f4405b8f6913a..7f399939344f9 100644 --- a/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.ts +++ b/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.ts @@ -2,16 +2,17 @@ import * as ec2 from '@aws-cdk/aws-ec2'; import * as iam from '@aws-cdk/aws-iam'; import * as kms from '@aws-cdk/aws-kms'; -import { App, CfnOutput, Duration, Token } from '@aws-cdk/core'; +import { App, CfnOutput, Duration, Token, Fn } from '@aws-cdk/core'; import * as eks from '../lib'; import * as hello from './hello-k8s'; import { Pinger } from './pinger/pinger'; import { TestStack } from './util'; + class EksClusterStack extends TestStack { private cluster: eks.Cluster; - private vpc: ec2.Vpc; + private vpc: ec2.IVpc; constructor(scope: App, id: string) { super(scope, id); @@ -31,13 +32,15 @@ class 
EksClusterStack extends TestStack { vpc: this.vpc, mastersRole, defaultCapacity: 2, - version: eks.KubernetesVersion.V1_16, + version: eks.KubernetesVersion.V1_17, secretsEncryptionKey, }); this.assertFargateProfile(); - this.assertCapacity(); + this.assertCapacityX86(); + + this.assertCapacityArm(); this.assertBottlerocket(); @@ -45,7 +48,11 @@ class EksClusterStack extends TestStack { this.assertInferenceInstances(); - this.assertNodeGroup(); + this.assertNodeGroupX86(); + + this.assertNodeGroupArm(); + + this.assertNodeGroupCustomAmi(); this.assertSimpleManifest(); @@ -105,7 +112,7 @@ class EksClusterStack extends TestStack { // apply a kubernetes manifest this.cluster.addManifest('HelloApp', ...hello.resources); } - private assertNodeGroup() { + private assertNodeGroupX86() { // add a extra nodegroup this.cluster.addNodegroup('extra-ng', { instanceType: new ec2.InstanceType('t3.small'), @@ -114,6 +121,39 @@ class EksClusterStack extends TestStack { nodeRole: this.cluster.defaultCapacity ? 
this.cluster.defaultCapacity.role : undefined, }); } + private assertNodeGroupCustomAmi() { + // add a extra nodegroup + const userData = ec2.UserData.forLinux(); + userData.addCommands( + 'set -o xtrace', + `/etc/eks/bootstrap.sh ${this.cluster.clusterName}`, + ); + const lt = new ec2.CfnLaunchTemplate(this, 'LaunchTemplate', { + launchTemplateData: { + imageId: new eks.EksOptimizedImage().getImage(this).imageId, + instanceType: new ec2.InstanceType('t3.small').toString(), + userData: Fn.base64(userData.render()), + }, + }); + this.cluster.addNodegroup('extra-ng2', { + minSize: 1, + // reusing the default capacity nodegroup instance role when available + nodeRole: this.cluster.defaultNodegroup?.role || this.cluster.defaultCapacity?.role, + launchTemplate: { + id: lt.ref, + version: lt.attrDefaultVersionNumber, + }, + }); + } + private assertNodeGroupArm() { + // add a extra nodegroup + this.cluster.addNodegroup('extra-ng-arm', { + instanceType: new ec2.InstanceType('m6g.medium'), + minSize: 1, + // reusing the default capacity nodegroup instance role when available + nodeRole: this.cluster.defaultCapacity ? this.cluster.defaultCapacity.role : undefined, + }); + } private assertInferenceInstances() { // inference instances this.cluster.addCapacity('InferenceInstances', { @@ -142,14 +182,24 @@ class EksClusterStack extends TestStack { }); } - private assertCapacity() { - // add some capacity to the cluster. The IAM instance role will + private assertCapacityX86() { + // add some x86_64 capacity to the cluster. The IAM instance role will // automatically be mapped via aws-auth to allow nodes to join the cluster. this.cluster.addCapacity('Nodes', { instanceType: new ec2.InstanceType('t2.medium'), minCapacity: 3, }); } + + private assertCapacityArm() { + // add some arm64 capacity to the cluster. The IAM instance role will + // automatically be mapped via aws-auth to allow nodes to join the cluster. 
+ this.cluster.addCapacity('NodesArm', { + instanceType: new ec2.InstanceType('m6g.medium'), + minCapacity: 1, + }); + } + private assertFargateProfile() { // fargate profile for resources in the "default" namespace this.cluster.addFargateProfile('default', { diff --git a/packages/@aws-cdk/aws-eks/test/pinger/pinger.ts b/packages/@aws-cdk/aws-eks/test/pinger/pinger.ts index c2931c1de3478..42ad368667a66 100644 --- a/packages/@aws-cdk/aws-eks/test/pinger/pinger.ts +++ b/packages/@aws-cdk/aws-eks/test/pinger/pinger.ts @@ -6,7 +6,7 @@ import * as cr from '@aws-cdk/custom-resources'; export interface PingerProps { readonly url: string; readonly securityGroup?: ec2.SecurityGroup; - readonly vpc?: ec2.Vpc; + readonly vpc?: ec2.IVpc; } export class Pinger extends Construct { @@ -40,4 +40,4 @@ export class Pinger extends Construct { return Token.asString(this._resource.getAtt('Value')); } -} \ No newline at end of file +} diff --git a/packages/@aws-cdk/aws-eks/test/test.cluster.ts b/packages/@aws-cdk/aws-eks/test/test.cluster.ts index 7ecd86cb269d1..5d19d16203c00 100644 --- a/packages/@aws-cdk/aws-eks/test/test.cluster.ts +++ b/packages/@aws-cdk/aws-eks/test/test.cluster.ts @@ -1068,6 +1068,44 @@ export = { test.done(); }, + 'default cluster capacity with ARM64 instance type comes with nodegroup with correct AmiType'(test: Test) { + // GIVEN + const { stack } = testFixtureNoVpc(); + + // WHEN + new eks.Cluster(stack, 'cluster', { + defaultCapacity: 1, + version: CLUSTER_VERSION, + defaultCapacityInstance: new ec2.InstanceType('m6g.medium'), + }); + + // THEN + expect(stack).to(haveResourceLike('AWS::EKS::Nodegroup', { + AmiType: 'AL2_ARM_64', + })); + test.done(); + }, + + 'addNodegroup with ARM64 instance type comes with nodegroup with correct AmiType'(test: Test) { + // GIVEN + const { stack } = testFixtureNoVpc(); + + // WHEN + new eks.Cluster(stack, 'cluster', { + defaultCapacity: 0, + version: CLUSTER_VERSION, + defaultCapacityInstance: new 
ec2.InstanceType('m6g.medium'), + }).addNodegroup('ng', { + instanceType: new ec2.InstanceType('m6g.medium'), + }); + + // THEN + expect(stack).to(haveResourceLike('AWS::EKS::Nodegroup', { + AmiType: 'AL2_ARM_64', + })); + test.done(); + }, + 'EKS-Optimized AMI with GPU support when addCapacity'(test: Test) { // GIVEN const { app, stack } = testFixtureNoVpc(); @@ -1089,6 +1127,27 @@ export = { test.done(); }, + 'EKS-Optimized AMI with ARM64 when addCapacity'(test: Test) { + // GIVEN + const { app, stack } = testFixtureNoVpc(); + + // WHEN + new eks.Cluster(stack, 'cluster', { + defaultCapacity: 0, + version: CLUSTER_VERSION, + }).addCapacity('ARMCapacity', { + instanceType: new ec2.InstanceType('m6g.medium'), + }); + + // THEN + const assembly = app.synth(); + const parameters = assembly.getStackByName(stack.stackName).template.Parameters; + test.ok(Object.entries(parameters).some( + ([k, v]) => k.startsWith('SsmParameterValueawsserviceeksoptimizedami') && (v as any).Default.includes('/amazon-linux-2-arm64/'), + ), 'EKS AMI with GPU should be in ssm parameters'); + test.done(); + }, + 'when using custom resource a creation role & policy is defined'(test: Test) { // GIVEN const { stack } = testFixture(); @@ -1616,18 +1675,153 @@ export = { 'endpoint access': { - 'private endpoint access fails if selected subnets are empty'(test: Test) { + 'public restricted'(test: Test) { + + test.throws(() => { + eks.EndpointAccess.PUBLIC.onlyFrom('1.2.3.4/32'); + }, /Cannot restric public access to endpoint when private access is disabled. 
Use PUBLIC_AND_PRIVATE.onlyFrom\(\) instead./); + test.done(); + }, + + 'public non restricted without private subnets'(test: Test) { + const { stack } = testFixture(); + + new eks.Cluster(stack, 'Cluster', { + version: CLUSTER_VERSION, + endpointAccess: eks.EndpointAccess.PUBLIC, + vpcSubnets: [{ subnetType: ec2.SubnetType.PUBLIC }], + }); + + const nested = stack.node.tryFindChild('@aws-cdk/aws-eks.KubectlProvider') as cdk.NestedStack; + const template = expect(nested).value; + + // we don't attach vpc config in case endpoint is public only, regardless of whether + // the vpc has private subnets or not. + test.equal(template.Resources.Handler886CB40B.Properties.VpcConfig, undefined); + + test.done(); + }, + + 'public non restricted with private subnets'(test: Test) { + + const { stack } = testFixture(); + + new eks.Cluster(stack, 'Cluster', { + version: CLUSTER_VERSION, + endpointAccess: eks.EndpointAccess.PUBLIC, + }); + + const nested = stack.node.tryFindChild('@aws-cdk/aws-eks.KubectlProvider') as cdk.NestedStack; + const template = expect(nested).value; + + // we don't attach vpc config in case endpoint is public only, regardless of whether + // the vpc has private subnets or not. 
+ test.equal(template.Resources.Handler886CB40B.Properties.VpcConfig, undefined); + + test.done(); + + }, + + 'private without private subnets'(test: Test) { const { stack } = testFixture(); test.throws(() => { new eks.Cluster(stack, 'Cluster', { - vpc: new ec2.Vpc(stack, 'Vpc'), version: CLUSTER_VERSION, endpointAccess: eks.EndpointAccess.PRIVATE, vpcSubnets: [{ subnetType: ec2.SubnetType.PUBLIC }], }); - }, /Vpc must contain private subnets to configure private endpoint access/); + }, /Vpc must contain private subnets when public endpoint access is disabled/); + + test.done(); + }, + + 'private with private subnets'(test: Test) { + + const { stack } = testFixture(); + + new eks.Cluster(stack, 'Cluster', { + version: CLUSTER_VERSION, + endpointAccess: eks.EndpointAccess.PRIVATE, + }); + + const nested = stack.node.tryFindChild('@aws-cdk/aws-eks.KubectlProvider') as cdk.NestedStack; + const template = expect(nested).value; + + // handler should have vpc config + test.ok(template.Resources.Handler886CB40B.Properties.VpcConfig.SubnetIds.length !== 0); + test.ok(template.Resources.Handler886CB40B.Properties.VpcConfig.SecurityGroupIds.length !== 0); + + test.done(); + + }, + + 'private and non restricted public without private subnets'(test: Test) { + const { stack } = testFixture(); + + new eks.Cluster(stack, 'Cluster', { + version: CLUSTER_VERSION, + endpointAccess: eks.EndpointAccess.PUBLIC_AND_PRIVATE, + vpcSubnets: [{ subnetType: ec2.SubnetType.PUBLIC }], + }); + + const nested = stack.node.tryFindChild('@aws-cdk/aws-eks.KubectlProvider') as cdk.NestedStack; + const template = expect(nested).value; + + // we don't have private subnets, but we don't need them since public access + // is not restricted. 
+ test.equal(template.Resources.Handler886CB40B.Properties.VpcConfig, undefined); + + test.done(); + }, + + 'private and non restricted public with private subnets'(test: Test) { + const { stack } = testFixture(); + + new eks.Cluster(stack, 'Cluster', { + version: CLUSTER_VERSION, + endpointAccess: eks.EndpointAccess.PUBLIC_AND_PRIVATE, + }); + + const nested = stack.node.tryFindChild('@aws-cdk/aws-eks.KubectlProvider') as cdk.NestedStack; + const template = expect(nested).value; + + // we have private subnets so we should use them. + test.ok(template.Resources.Handler886CB40B.Properties.VpcConfig.SubnetIds.length !== 0); + test.ok(template.Resources.Handler886CB40B.Properties.VpcConfig.SecurityGroupIds.length !== 0); + + test.done(); + }, + + 'private and restricted public without private subnets'(test: Test) { + const { stack } = testFixture(); + + test.throws(() => { + new eks.Cluster(stack, 'Cluster', { + version: CLUSTER_VERSION, + endpointAccess: eks.EndpointAccess.PUBLIC_AND_PRIVATE.onlyFrom('1.2.3.4/32'), + vpcSubnets: [{ subnetType: ec2.SubnetType.PUBLIC }], + }); + }, /Vpc must contain private subnets when public endpoint access is restricted/); + + test.done(); + }, + + 'private and restricted public with private subnets'(test: Test) { + const { stack } = testFixture(); + + new eks.Cluster(stack, 'Cluster', { + version: CLUSTER_VERSION, + endpointAccess: eks.EndpointAccess.PUBLIC_AND_PRIVATE.onlyFrom('1.2.3.4/32'), + }); + + const nested = stack.node.tryFindChild('@aws-cdk/aws-eks.KubectlProvider') as cdk.NestedStack; + const template = expect(nested).value; + + // we have private subnets so we should use them. 
+ test.ok(template.Resources.Handler886CB40B.Properties.VpcConfig.SubnetIds.length !== 0); + test.ok(template.Resources.Handler886CB40B.Properties.VpcConfig.SecurityGroupIds.length !== 0); test.done(); }, @@ -1729,15 +1923,6 @@ export = { test.done(); }, - 'can configure cidr blocks in public endpoint access'(test: Test) { - // GIVEN - const { stack } = testFixture(); - new eks.Cluster(stack, 'Cluster1', { version: CLUSTER_VERSION, endpointAccess: eks.EndpointAccess.PUBLIC.onlyFrom('1.2.3.4/5') }); - - test.deepEqual(expect(stack).value.Resources.Cluster1B02DD5A2.Properties.Config.resourcesVpcConfig.publicAccessCidrs, ['1.2.3.4/5']); - test.done(); - }, - 'kubectl provider chooses only private subnets'(test: Test) { const { stack } = testFixture(); diff --git a/packages/@aws-cdk/aws-eks/test/test.nodegroup.ts b/packages/@aws-cdk/aws-eks/test/test.nodegroup.ts index 8848220c6de08..35f1d7e6ce3fd 100644 --- a/packages/@aws-cdk/aws-eks/test/test.nodegroup.ts +++ b/packages/@aws-cdk/aws-eks/test/test.nodegroup.ts @@ -328,4 +328,119 @@ export = { test.throws(() => cluster.addNodegroup('ng', { desiredSize: 2, minSize: 3 }), /Minimum capacity 3 can't be greater than desired size 2/); test.done(); }, + 'create nodegroup correctly with launch template'(test: Test) { + // GIVEN + const { stack, vpc } = testFixture(); + + // WHEN + const cluster = new eks.Cluster(stack, 'Cluster', { + vpc, + kubectlEnabled: true, + defaultCapacity: 0, + version: CLUSTER_VERSION, + }); + const userData = ec2.UserData.forLinux(); + userData.addCommands( + 'set -o xtrace', + `/etc/eks/bootstrap.sh ${cluster.clusterName}`, + ); + const lt = new ec2.CfnLaunchTemplate(stack, 'LaunchTemplate', { + launchTemplateData: { + imageId: new eks.EksOptimizedImage().getImage(stack).imageId, + instanceType: new ec2.InstanceType('t3.small').toString(), + userData: cdk.Fn.base64(userData.render()), + }, + }); + cluster.addNodegroup('ng-lt', { + launchTemplate: { + id: lt.ref, + version: 
lt.attrDefaultVersionNumber, + }, + }); + + // THEN + expect(stack).to(haveResourceLike('AWS::EKS::Nodegroup', { + LaunchTemplate: { + Id: { + Ref: 'LaunchTemplate', + }, + Version: { + 'Fn::GetAtt': [ + 'LaunchTemplate', + 'DefaultVersionNumber', + ], + }, + }, + }, + )); + test.done(); + }, + 'throws when both diskSize and launch template specified'(test: Test) { + // GIVEN + const { stack, vpc } = testFixture(); + + // WHEN + const cluster = new eks.Cluster(stack, 'Cluster', { + vpc, + kubectlEnabled: true, + defaultCapacity: 0, + version: CLUSTER_VERSION, + }); + const userData = ec2.UserData.forLinux(); + userData.addCommands( + 'set -o xtrace', + `/etc/eks/bootstrap.sh ${cluster.clusterName}`, + ); + const lt = new ec2.CfnLaunchTemplate(stack, 'LaunchTemplate', { + launchTemplateData: { + imageId: new eks.EksOptimizedImage().getImage(stack).imageId, + instanceType: new ec2.InstanceType('t3.small').toString(), + userData: cdk.Fn.base64(userData.render()), + }, + }); + // THEN + test.throws(() => + cluster.addNodegroup('ng-lt', { + diskSize: 100, + launchTemplate: { + id: lt.ref, + version: lt.attrDefaultVersionNumber, + }, + }), /diskSize must be specified within the launch template/); + test.done(); + }, + 'throws when both instanceType and launch template specified'(test: Test) { + // GIVEN + const { stack, vpc } = testFixture(); + + // WHEN + const cluster = new eks.Cluster(stack, 'Cluster', { + vpc, + kubectlEnabled: true, + defaultCapacity: 0, + version: CLUSTER_VERSION, + }); + const userData = ec2.UserData.forLinux(); + userData.addCommands( + 'set -o xtrace', + `/etc/eks/bootstrap.sh ${cluster.clusterName}`, + ); + const lt = new ec2.CfnLaunchTemplate(stack, 'LaunchTemplate', { + launchTemplateData: { + imageId: new eks.EksOptimizedImage().getImage(stack).imageId, + instanceType: new ec2.InstanceType('t3.small').toString(), + userData: cdk.Fn.base64(userData.render()), + }, + }); + // THEN + test.throws(() => + cluster.addNodegroup('ng-lt', { + 
instanceType: new ec2.InstanceType('c5.large'), + launchTemplate: { + id: lt.ref, + version: lt.attrDefaultVersionNumber, + }, + }), /Instance types must be specified within the launch template/); + test.done(); + }, }; diff --git a/packages/@aws-cdk/aws-elasticloadbalancingv2/README.md b/packages/@aws-cdk/aws-elasticloadbalancingv2/README.md index 5aa4968ca33db..93cbb9948b449 100644 --- a/packages/@aws-cdk/aws-elasticloadbalancingv2/README.md +++ b/packages/@aws-cdk/aws-elasticloadbalancingv2/README.md @@ -1,4 +1,5 @@ ## Amazon Elastic Load Balancing V2 Construct Library + --- @@ -61,6 +62,21 @@ listener.addTargets('ApplicationFleet', { The security groups of the load balancer and the target are automatically updated to allow the network traffic. +One (or more) security groups can be associated with the load balancer; +if a security group isn't provided, one will be automatically created. + +```ts +const securityGroup1 = new ec2.SecurityGroup(stack, 'SecurityGroup1', { vpc }); +const lb = new elbv2.ApplicationLoadBalancer(this, 'LB', { + vpc, + internetFacing: true, + securityGroup: securityGroup1, // Optional - will be automatically created otherwise +}); + +const securityGroup2 = new ec2.SecurityGroup(stack, 'SecurityGroup2', { vpc }); +lb.addSecurityGroup(securityGroup2); +``` + #### Conditions It's possible to route traffic to targets based on conditions in the incoming @@ -320,6 +336,7 @@ public attachToApplicationTargetGroup(targetGroup: ApplicationTargetGroup): Load }; } ``` + `targetType` should be one of `Instance` or `Ip`. 
If the target can be directly added to the target group, `targetJson` should contain the `id` of the target (either instance ID or IP address depending on the type) and diff --git a/packages/@aws-cdk/aws-elasticloadbalancingv2/lib/alb/application-listener-rule.ts b/packages/@aws-cdk/aws-elasticloadbalancingv2/lib/alb/application-listener-rule.ts index d34846aa303b3..ca80758306777 100644 --- a/packages/@aws-cdk/aws-elasticloadbalancingv2/lib/alb/application-listener-rule.ts +++ b/packages/@aws-cdk/aws-elasticloadbalancingv2/lib/alb/application-listener-rule.ts @@ -298,7 +298,7 @@ export class ApplicationListenerRule extends cdk.Construct { // Instead, signal this through a warning. // @deprecate: upon the next major version bump, replace this with a `throw` if (this.action) { - this.node.addWarning('An Action already existed on this ListenerRule and was replaced. Configure exactly one default Action.'); + cdk.Annotations.of(this).addWarning('An Action already existed on this ListenerRule and was replaced. 
Configure exactly one default Action.'); } action.bind(this, this.listener, this); diff --git a/packages/@aws-cdk/aws-elasticloadbalancingv2/lib/alb/application-load-balancer.ts b/packages/@aws-cdk/aws-elasticloadbalancingv2/lib/alb/application-load-balancer.ts index 77d5f80033cac..a7678a51dfdf5 100644 --- a/packages/@aws-cdk/aws-elasticloadbalancingv2/lib/alb/application-load-balancer.ts +++ b/packages/@aws-cdk/aws-elasticloadbalancingv2/lib/alb/application-load-balancer.ts @@ -58,22 +58,21 @@ export class ApplicationLoadBalancer extends BaseLoadBalancer implements IApplic public readonly connections: ec2.Connections; public readonly ipAddressType?: IpAddressType; - private readonly securityGroup: ec2.ISecurityGroup; constructor(scope: Construct, id: string, props: ApplicationLoadBalancerProps) { super(scope, id, props, { type: 'application', - securityGroups: Lazy.listValue({ produce: () => [this.securityGroup.securityGroupId] }), + securityGroups: Lazy.listValue({ produce: () => this.connections.securityGroups.map(sg => sg.securityGroupId) }), ipAddressType: props.ipAddressType, }); this.ipAddressType = props.ipAddressType ?? 
IpAddressType.IPV4; - this.securityGroup = props.securityGroup || new ec2.SecurityGroup(this, 'SecurityGroup', { + const securityGroups = [props.securityGroup || new ec2.SecurityGroup(this, 'SecurityGroup', { vpc: props.vpc, description: `Automatically created Security Group for ELB ${this.node.uniqueId}`, allowAllOutbound: false, - }); - this.connections = new ec2.Connections({ securityGroups: [this.securityGroup] }); + })]; + this.connections = new ec2.Connections({ securityGroups }); if (props.http2Enabled === false) { this.setAttribute('routing.http2.enabled', 'false'); } if (props.idleTimeout !== undefined) { this.setAttribute('idle_timeout.timeout_seconds', props.idleTimeout.toSeconds().toString()); } @@ -107,6 +106,13 @@ export class ApplicationLoadBalancer extends BaseLoadBalancer implements IApplic }); } + /** + * Add a security group to this load balancer + */ + public addSecurityGroup(securityGroup: ec2.ISecurityGroup) { + this.connections.addSecurityGroup(securityGroup); + } + /** * Return the given named metric for this Application Load Balancer * diff --git a/packages/@aws-cdk/aws-elasticloadbalancingv2/lib/alb/application-target-group.ts b/packages/@aws-cdk/aws-elasticloadbalancingv2/lib/alb/application-target-group.ts index d5539789cc491..2e2cfec3089f6 100644 --- a/packages/@aws-cdk/aws-elasticloadbalancingv2/lib/alb/application-target-group.ts +++ b/packages/@aws-cdk/aws-elasticloadbalancingv2/lib/alb/application-target-group.ts @@ -1,6 +1,6 @@ import * as cloudwatch from '@aws-cdk/aws-cloudwatch'; import * as ec2 from '@aws-cdk/aws-ec2'; -import { Construct, Duration, IConstruct } from '@aws-cdk/core'; +import { Annotations, Construct, Duration, IConstruct } from '@aws-cdk/core'; import { BaseTargetGroupProps, ITargetGroup, loadBalancerNameFromListenerArn, LoadBalancerTargetProps, TargetGroupAttributes, TargetGroupBase, TargetGroupImportProps, @@ -374,11 +374,11 @@ export interface IApplicationTargetGroup extends ITargetGroup { class 
ImportedApplicationTargetGroup extends ImportedTargetGroupBase implements IApplicationTargetGroup { public registerListener(_listener: IApplicationListener, _associatingConstruct?: IConstruct) { // Nothing to do, we know nothing of our members - this.node.addWarning('Cannot register listener on imported target group -- security groups might need to be updated manually'); + Annotations.of(this).addWarning('Cannot register listener on imported target group -- security groups might need to be updated manually'); } public registerConnectable(_connectable: ec2.IConnectable, _portRange?: ec2.Port | undefined): void { - this.node.addWarning('Cannot register connectable on imported target group -- security groups might need to be updated manually'); + Annotations.of(this).addWarning('Cannot register connectable on imported target group -- security groups might need to be updated manually'); } public addTarget(...targets: IApplicationLoadBalancerTarget[]) { diff --git a/packages/@aws-cdk/aws-elasticloadbalancingv2/lib/alb/conditions.ts b/packages/@aws-cdk/aws-elasticloadbalancingv2/lib/alb/conditions.ts index ba5ebce6f70e4..b4bca4cf2b900 100644 --- a/packages/@aws-cdk/aws-elasticloadbalancingv2/lib/alb/conditions.ts +++ b/packages/@aws-cdk/aws-elasticloadbalancingv2/lib/alb/conditions.ts @@ -141,6 +141,9 @@ class HttpRequestMethodListenerCondition extends ListenerCondition { class PathPatternListenerCondition extends ListenerCondition { constructor(public readonly values: string[]) { super(); + if (values && values.length > 5) { + throw new Error("A rule can only have '5' condition values"); + } } public renderRawCondition(): any { diff --git a/packages/@aws-cdk/aws-elasticloadbalancingv2/lib/nlb/network-target-group.ts b/packages/@aws-cdk/aws-elasticloadbalancingv2/lib/nlb/network-target-group.ts index 4700bf3a6a374..eff0a6efc0c97 100644 --- a/packages/@aws-cdk/aws-elasticloadbalancingv2/lib/nlb/network-target-group.ts +++ 
b/packages/@aws-cdk/aws-elasticloadbalancingv2/lib/nlb/network-target-group.ts @@ -126,6 +126,28 @@ export class NetworkTargetGroup extends TargetGroupBase implements INetworkTarge } } + if (healthCheck.healthyThresholdCount) { + const thresholdCount = healthCheck.healthyThresholdCount; + if (thresholdCount < 2 || thresholdCount > 10) { + ret.push(`Healthy Threshold Count '${thresholdCount}' not supported. Must be a number between 2 and 10.`); + } + } + + if (healthCheck.unhealthyThresholdCount) { + const thresholdCount = healthCheck.unhealthyThresholdCount; + if (thresholdCount < 2 || thresholdCount > 10) { + ret.push(`Unhealthy Threshold Count '${thresholdCount}' not supported. Must be a number between 2 and 10.`); + } + } + + if (healthCheck.healthyThresholdCount && healthCheck.unhealthyThresholdCount && + healthCheck.healthyThresholdCount !== healthCheck.unhealthyThresholdCount) { + ret.push([ + `Healthy and Unhealthy Threshold Counts must be the same: ${healthCheck.healthyThresholdCount}`, + `is not equal to ${healthCheck.unhealthyThresholdCount}.`, + ].join(' ')); + } + if (!healthCheck.protocol) { return ret; } diff --git a/packages/@aws-cdk/aws-elasticloadbalancingv2/lib/shared/base-listener.ts b/packages/@aws-cdk/aws-elasticloadbalancingv2/lib/shared/base-listener.ts index d9405bff1f729..8f92a086b1d6b 100644 --- a/packages/@aws-cdk/aws-elasticloadbalancingv2/lib/shared/base-listener.ts +++ b/packages/@aws-cdk/aws-elasticloadbalancingv2/lib/shared/base-listener.ts @@ -1,4 +1,4 @@ -import { Construct, Lazy, Resource } from '@aws-cdk/core'; +import { Annotations, Construct, Lazy, Resource } from '@aws-cdk/core'; import { CfnListener } from '../elasticloadbalancingv2.generated'; import { IListenerAction } from './listener-action'; @@ -50,7 +50,7 @@ export abstract class BaseListener extends Resource { // Instead, signal this through a warning. 
// @deprecate: upon the next major version bump, replace this with a `throw` if (this.defaultAction) { - this.node.addWarning('A default Action already existed on this Listener and was replaced. Configure exactly one default Action.'); + Annotations.of(this).addWarning('A default Action already existed on this Listener and was replaced. Configure exactly one default Action.'); } this.defaultAction = action; diff --git a/packages/@aws-cdk/aws-elasticloadbalancingv2/lib/shared/base-target-group.ts b/packages/@aws-cdk/aws-elasticloadbalancingv2/lib/shared/base-target-group.ts index 1d368d7e17b8f..bb7d211cac80d 100644 --- a/packages/@aws-cdk/aws-elasticloadbalancingv2/lib/shared/base-target-group.ts +++ b/packages/@aws-cdk/aws-elasticloadbalancingv2/lib/shared/base-target-group.ts @@ -315,7 +315,7 @@ export abstract class TargetGroupBase extends cdk.Construct implements ITargetGr const ret = super.validate(); if (this.targetType === undefined && this.targetsJson.length === 0) { - this.node.addWarning("When creating an empty TargetGroup, you should specify a 'targetType' (this warning may become an error in the future)."); + cdk.Annotations.of(this).addWarning("When creating an empty TargetGroup, you should specify a 'targetType' (this warning may become an error in the future)."); } if (this.targetType !== TargetType.LAMBDA && this.vpc === undefined) { diff --git a/packages/@aws-cdk/aws-elasticloadbalancingv2/test/alb/conditions.test.ts b/packages/@aws-cdk/aws-elasticloadbalancingv2/test/alb/conditions.test.ts new file mode 100644 index 0000000000000..f900f04c19158 --- /dev/null +++ b/packages/@aws-cdk/aws-elasticloadbalancingv2/test/alb/conditions.test.ts @@ -0,0 +1,21 @@ +import '@aws-cdk/assert/jest'; +import * as elbv2 from '../../lib'; + +describe('tests', () => { + + test('pathPatterns length greater than 5 will throw exception', () => { + //GIVEN + const array = ['/u1', '/u2', '/u3', '/u4', '/u5']; + + //WHEN + elbv2.ListenerCondition.pathPatterns(array); // 
Does not throw + array.push('/u6'); + + // THEN + expect(() => { + elbv2.ListenerCondition.pathPatterns(array); + }).toThrow(/A rule can only have '5' condition values/); + + }); + +}); diff --git a/packages/@aws-cdk/aws-elasticloadbalancingv2/test/alb/load-balancer.test.ts b/packages/@aws-cdk/aws-elasticloadbalancingv2/test/alb/load-balancer.test.ts index 4ec206e8d9616..41063924e863c 100644 --- a/packages/@aws-cdk/aws-elasticloadbalancingv2/test/alb/load-balancer.test.ts +++ b/packages/@aws-cdk/aws-elasticloadbalancingv2/test/alb/load-balancer.test.ts @@ -314,4 +314,24 @@ describe('tests', () => { const listener = alb.addListener('Listener', { port: 80 }); expect(() => listener.addTargets('Targets', { port: 8080 })).not.toThrow(); }); + + test.only('can add secondary security groups', () => { + const stack = new cdk.Stack(); + const vpc = new ec2.Vpc(stack, 'Stack'); + + const alb = new elbv2.ApplicationLoadBalancer(stack, 'LB', { + vpc, + securityGroup: new ec2.SecurityGroup(stack, 'SecurityGroup1', { vpc }), + }); + alb.addSecurityGroup(new ec2.SecurityGroup(stack, 'SecurityGroup2', { vpc })); + + // THEN + expect(stack).toHaveResource('AWS::ElasticLoadBalancingV2::LoadBalancer', { + SecurityGroups: [ + { 'Fn::GetAtt': ['SecurityGroup1F554B36F', 'GroupId'] }, + { 'Fn::GetAtt': ['SecurityGroup23BE86BB7', 'GroupId'] }, + ], + Type: 'application', + }); + }); }); diff --git a/packages/@aws-cdk/aws-elasticloadbalancingv2/test/nlb/target-group.test.ts b/packages/@aws-cdk/aws-elasticloadbalancingv2/test/nlb/target-group.test.ts index 692d76e5f42ac..6f50cd19be618 100644 --- a/packages/@aws-cdk/aws-elasticloadbalancingv2/test/nlb/target-group.test.ts +++ b/packages/@aws-cdk/aws-elasticloadbalancingv2/test/nlb/target-group.test.ts @@ -91,4 +91,117 @@ describe('tests', () => { }); }).toThrow(); }); + + test('Throws error for invalid health check interval', () => { + const app = new cdk.App(); + const stack = new cdk.Stack(app, 'Stack'); + const vpc = new ec2.Vpc(stack, 
'Vpc'); + + new elbv2.NetworkTargetGroup(stack, 'Group', { + vpc, + port: 80, + healthCheck: { + interval: cdk.Duration.seconds(5), + }, + }); + + expect(() => { + app.synth(); + }).toThrow(/Health check interval '5' not supported. Must be one of the following values '10,30'./); + }); + + test('Throws error for invalid health check protocol', () => { + const app = new cdk.App(); + const stack = new cdk.Stack(app, 'Stack'); + const vpc = new ec2.Vpc(stack, 'Vpc'); + + new elbv2.NetworkTargetGroup(stack, 'Group', { + vpc, + port: 80, + healthCheck: { + protocol: elbv2.Protocol.UDP, + }, + }); + + expect(() => { + app.synth(); + }).toThrow(/Health check protocol 'UDP' is not supported. Must be one of \[HTTP, HTTPS, TCP\]/); + }); + + test('Throws error for health check path property when protocol does not support it', () => { + const app = new cdk.App(); + const stack = new cdk.Stack(app, 'Stack'); + const vpc = new ec2.Vpc(stack, 'Vpc'); + + new elbv2.NetworkTargetGroup(stack, 'Group', { + vpc, + port: 80, + healthCheck: { + path: '/my-path', + protocol: elbv2.Protocol.TCP, + }, + }); + + expect(() => { + app.synth(); + }).toThrow(/'TCP' health checks do not support the path property. Must be one of \[HTTP, HTTPS\]/); + }); + + test('Throws error for invalid health check healthy threshold', () => { + const app = new cdk.App(); + const stack = new cdk.Stack(app, 'Stack'); + const vpc = new ec2.Vpc(stack, 'Vpc'); + + new elbv2.NetworkTargetGroup(stack, 'Group', { + vpc, + port: 80, + healthCheck: { + protocol: elbv2.Protocol.TCP, + healthyThresholdCount: 11, + }, + }); + + expect(() => { + app.synth(); + }).toThrow(/Healthy Threshold Count '11' not supported. 
Must be a number between 2 and 10./); + }); + + test('Throws error for invalid health check unhealthy threshold', () => { + const app = new cdk.App(); + const stack = new cdk.Stack(app, 'Stack'); + const vpc = new ec2.Vpc(stack, 'Vpc'); + + new elbv2.NetworkTargetGroup(stack, 'Group', { + vpc, + port: 80, + healthCheck: { + protocol: elbv2.Protocol.TCP, + unhealthyThresholdCount: 1, + }, + }); + + expect(() => { + app.synth(); + }).toThrow(/Unhealthy Threshold Count '1' not supported. Must be a number between 2 and 10./); + }); + + test('Throws error for unequal healthy and unhealthy threshold counts', () => { + const app = new cdk.App(); + const stack = new cdk.Stack(app, 'Stack'); + const vpc = new ec2.Vpc(stack, 'Vpc'); + + new elbv2.NetworkTargetGroup(stack, 'Group', { + vpc, + port: 80, + healthCheck: { + protocol: elbv2.Protocol.TCP, + healthyThresholdCount: 5, + unhealthyThresholdCount: 3, + }, + }); + + expect(() => { + app.synth(); + }).toThrow(/Healthy and Unhealthy Threshold Counts must be the same: 5 is not equal to 3./); + }); }); diff --git a/packages/@aws-cdk/aws-events-targets/lib/ecs-task.ts b/packages/@aws-cdk/aws-events-targets/lib/ecs-task.ts index 77fdae5d37208..3a31758c3a8c0 100644 --- a/packages/@aws-cdk/aws-events-targets/lib/ecs-task.ts +++ b/packages/@aws-cdk/aws-events-targets/lib/ecs-task.ts @@ -2,6 +2,7 @@ import * as ec2 from '@aws-cdk/aws-ec2'; import * as ecs from '@aws-cdk/aws-ecs'; import * as events from '@aws-cdk/aws-events'; import * as iam from '@aws-cdk/aws-iam'; +import * as cdk from '@aws-cdk/core'; import { ContainerOverride } from './ecs-task-properties'; import { singletonEventRole } from './util'; @@ -115,7 +116,7 @@ export class EcsTask implements events.IRuleTarget { // Security groups are only configurable with the "awsvpc" network mode. 
if (this.taskDefinition.networkMode !== ecs.NetworkMode.AWS_VPC) { if (props.securityGroup !== undefined || props.securityGroups !== undefined) { - this.taskDefinition.node.addWarning('security groups are ignored when network mode is not awsvpc'); + cdk.Annotations.of(this.taskDefinition).addWarning('security groups are ignored when network mode is not awsvpc'); } return; } diff --git a/packages/@aws-cdk/aws-globalaccelerator/test/globalaccelerator-security-group.test.ts b/packages/@aws-cdk/aws-globalaccelerator/test/globalaccelerator-security-group.test.ts index 053e4da712caa..c7eb0d87f8fa8 100644 --- a/packages/@aws-cdk/aws-globalaccelerator/test/globalaccelerator-security-group.test.ts +++ b/packages/@aws-cdk/aws-globalaccelerator/test/globalaccelerator-security-group.test.ts @@ -58,6 +58,7 @@ test('custom resource exists', () => { InstallLatestAwsSdk: true, }, DependsOn: [ + 'GlobalAcceleratorSGCustomResourceCustomResourcePolicyF3294553', 'GroupC77FDACD', ], }, ResourcePart.CompleteDefinition)); diff --git a/packages/@aws-cdk/aws-iam/lib/unknown-principal.ts b/packages/@aws-cdk/aws-iam/lib/unknown-principal.ts index da30dbf08227e..1228ba26ff585 100644 --- a/packages/@aws-cdk/aws-iam/lib/unknown-principal.ts +++ b/packages/@aws-cdk/aws-iam/lib/unknown-principal.ts @@ -1,4 +1,4 @@ -import { ConcreteDependable, IConstruct, Stack } from '@aws-cdk/core'; +import { Annotations, ConcreteDependable, IConstruct, Stack } from '@aws-cdk/core'; import { PolicyStatement } from './policy-statement'; import { AddToPrincipalPolicyResult, IPrincipal, PrincipalPolicyFragment } from './principals'; @@ -40,7 +40,7 @@ export class UnknownPrincipal implements IPrincipal { public addToPrincipalPolicy(statement: PolicyStatement): AddToPrincipalPolicyResult { const stack = Stack.of(this.resource); const repr = JSON.stringify(stack.resolve(statement)); - this.resource.node.addWarning(`Add statement to this resource's role: ${repr}`); + Annotations.of(this.resource).addWarning(`Add 
statement to this resource's role: ${repr}`); // Pretend we did the work. The human will do it for us, eventually. return { statementAdded: true, policyDependable: new ConcreteDependable() }; } diff --git a/packages/@aws-cdk/aws-kms/lib/key.ts b/packages/@aws-cdk/aws-kms/lib/key.ts index 94c65b5c1ee59..cad47395d791b 100644 --- a/packages/@aws-cdk/aws-kms/lib/key.ts +++ b/packages/@aws-cdk/aws-kms/lib/key.ts @@ -1,5 +1,5 @@ import * as iam from '@aws-cdk/aws-iam'; -import { Construct, IResource, RemovalPolicy, Resource, Stack } from '@aws-cdk/core'; +import { Construct, IConstruct, IResource, RemovalPolicy, Resource, Stack } from '@aws-cdk/core'; import { Alias } from './alias'; import { CfnKey } from './kms.generated'; @@ -207,11 +207,18 @@ abstract class KeyBase extends Resource implements IKey { * undefined otherwise */ private granteeStackDependsOnKeyStack(grantee: iam.IGrantable): string | undefined { - if (!(Construct.isConstruct(grantee))) { + const grantPrincipal = grantee.grantPrincipal; + if (!(Construct.isConstruct(grantPrincipal))) { + return undefined; + } + // this logic should only apply to newly created + // (= not imported) resources + if (!this.principalIsANewlyCreatedResource(grantPrincipal)) { return undefined; } + // return undefined; const keyStack = Stack.of(this); - const granteeStack = Stack.of(grantee); + const granteeStack = Stack.of(grantPrincipal); if (keyStack === granteeStack) { return undefined; } @@ -220,6 +227,14 @@ abstract class KeyBase extends Resource implements IKey { : undefined; } + private principalIsANewlyCreatedResource(principal: IConstruct): boolean { + // yes, this sucks + // this is just a temporary stopgap to stem the bleeding while we work on a proper fix + return principal instanceof iam.Role || + principal instanceof iam.User || + principal instanceof iam.Group; + } + private isGranteeFromAnotherRegion(grantee: iam.IGrantable): boolean { if (!(Construct.isConstruct(grantee))) { return false; diff --git 
a/packages/@aws-cdk/aws-lambda-nodejs/README.md b/packages/@aws-cdk/aws-lambda-nodejs/README.md index 48cfc6407e7a4..25a27d85042f9 100644 --- a/packages/@aws-cdk/aws-lambda-nodejs/README.md +++ b/packages/@aws-cdk/aws-lambda-nodejs/README.md @@ -64,6 +64,24 @@ new lambda.NodejsFunction(this, 'my-handler', { }); ``` +### Project root +The `NodejsFunction` tries to automatically determine your project root, that is +the root of your node project. This is usually where the top level `node_modules` +folder of your project is located. When bundling in a Docker container, the +project root is used as the source (`/asset-input`) for the volume mounted in +the container. + +The following folders are considered by walking up parent folders starting from +the current working directory (order matters): +* the folder containing your `.git` folder +* the folder containing a `yarn.lock` file +* the folder containing a `package-lock.json` file +* the folder containing a `package.json` file + +Alternatively, you can specify the `projectRoot` prop manually. In this case you +need to ensure that this path includes `entry` and any module/dependencies used +by your function. Otherwise bundling will fail. + ### Configuring Parcel The `NodejsFunction` construct exposes some [Parcel](https://parceljs.org/) options via properties: `minify`, `sourceMaps` and `cacheDir`. diff --git a/packages/@aws-cdk/aws-lambda-nodejs/lib/bundling.ts b/packages/@aws-cdk/aws-lambda-nodejs/lib/bundling.ts index b05bfd254c3d1..d6a0368ad450b 100644 --- a/packages/@aws-cdk/aws-lambda-nodejs/lib/bundling.ts +++ b/packages/@aws-cdk/aws-lambda-nodejs/lib/bundling.ts @@ -115,7 +115,11 @@ export class Bundling { */ public static parcel(options: ParcelOptions): lambda.AssetCode { // Find project root - const projectRoot = options.projectRoot ?? findUp(`.git${path.sep}`); + const projectRoot = options.projectRoot + ?? findUp(`.git${path.sep}`) + ?? findUp(LockFile.YARN) + ?? findUp(LockFile.NPM) + ?? 
findUp('package.json'); if (!projectRoot) { throw new Error('Cannot find project root. Please specify it with `projectRoot`.'); } diff --git a/packages/@aws-cdk/aws-lambda-nodejs/parcel/Dockerfile b/packages/@aws-cdk/aws-lambda-nodejs/parcel/Dockerfile index f3c90a65be151..40081228fd554 100644 --- a/packages/@aws-cdk/aws-lambda-nodejs/parcel/Dockerfile +++ b/packages/@aws-cdk/aws-lambda-nodejs/parcel/Dockerfile @@ -9,7 +9,7 @@ RUN npm install --global yarn # Install parcel 2 (fix the version since it's still in beta) # install at "/" so that node_modules will be in the path for /asset-input ARG PARCEL_VERSION=2.0.0-beta.1 -RUN cd / && npm install parcel@$PARCEL_VERSION +RUN cd / && npm install parcel@$PARCEL_VERSION --no-package-lock # Ensure all users can write to npm cache RUN mkdir /tmp/npm-cache && \ diff --git a/packages/@aws-cdk/aws-lambda-nodejs/test/bundling.test.ts b/packages/@aws-cdk/aws-lambda-nodejs/test/bundling.test.ts index 1c2ff90357d95..1ff71b6422ece 100644 --- a/packages/@aws-cdk/aws-lambda-nodejs/test/bundling.test.ts +++ b/packages/@aws-cdk/aws-lambda-nodejs/test/bundling.test.ts @@ -303,3 +303,17 @@ test('LocalBundler.runsLocally with incorrect parcel version', () => { expect(LocalBundler.runsLocally).toBe(false); }); + +test('Project root detection', () => { + findUpMock.mockImplementation(() => undefined); + + expect(() => Bundling.parcel({ + entry: '/project/folder/entry.ts', + runtime: Runtime.NODEJS_12_X, + })).toThrow(/Cannot find project root/); + + expect(findUpMock).toHaveBeenNthCalledWith(1, `.git${path.sep}`); + expect(findUpMock).toHaveBeenNthCalledWith(2, LockFile.YARN); + expect(findUpMock).toHaveBeenNthCalledWith(3, LockFile.NPM); + expect(findUpMock).toHaveBeenNthCalledWith(4, 'package.json'); +}); diff --git a/packages/@aws-cdk/aws-lambda-nodejs/test/docker.test.ts b/packages/@aws-cdk/aws-lambda-nodejs/test/docker.test.ts index bce8c270e99d3..dfb8d07b0fb4a 100644 --- a/packages/@aws-cdk/aws-lambda-nodejs/test/docker.test.ts +++ 
b/packages/@aws-cdk/aws-lambda-nodejs/test/docker.test.ts @@ -14,6 +14,18 @@ test('parcel is available', async () => { expect(proc.status).toEqual(0); }); +test('parcel is installed without a package-lock.json file', async () => { + // We don't want a lock file at / to prevent Parcel from considering that /asset-input + // is part of a monorepo. + // See https://github.com/aws/aws-cdk/pull/10039#issuecomment-682738396 + const proc = spawnSync('docker', [ + 'run', 'parcel', + 'sh', '-c', + 'test ! -f /package-lock.json', + ]); + expect(proc.status).toEqual(0); +}); + test('can npm install with non root user', async () => { const proc = spawnSync('docker', [ 'run', '-u', '1000:1000', diff --git a/packages/@aws-cdk/aws-lambda/lib/function.ts b/packages/@aws-cdk/aws-lambda/lib/function.ts index 38de4583abeb0..7ab131b094c56 100644 --- a/packages/@aws-cdk/aws-lambda/lib/function.ts +++ b/packages/@aws-cdk/aws-lambda/lib/function.ts @@ -4,7 +4,7 @@ import * as ec2 from '@aws-cdk/aws-ec2'; import * as iam from '@aws-cdk/aws-iam'; import * as logs from '@aws-cdk/aws-logs'; import * as sqs from '@aws-cdk/aws-sqs'; -import { CfnResource, Construct, Duration, Fn, Lazy, Stack } from '@aws-cdk/core'; +import { Annotations, CfnResource, Construct, Duration, Fn, Lazy, Stack } from '@aws-cdk/core'; import { Code, CodeConfig } from './code'; import { EventInvokeConfigOptions } from './event-invoke-config'; import { IEventSource } from './event-source'; @@ -775,7 +775,7 @@ export class Function extends FunctionBase { for (const [key, config] of envEntries) { if (config.removeInEdge) { delete this.environment[key]; - this.node.addInfo(`Removed ${key} environment variable for Lambda@Edge compatibility`); + Annotations.of(this).addInfo(`Removed ${key} environment variable for Lambda@Edge compatibility`); } } const envKeys = Object.keys(this.environment); diff --git a/packages/@aws-cdk/aws-rds/README.md b/packages/@aws-cdk/aws-rds/README.md index 1d0bfbe9b947a..1a5d4165d7409 100644 --- 
a/packages/@aws-cdk/aws-rds/README.md +++ b/packages/@aws-cdk/aws-rds/README.md @@ -26,7 +26,7 @@ your instances will be launched privately or publicly: ```ts const cluster = new rds.DatabaseCluster(this, 'Database', { - engine: rds.DatabaseClusterEngine.AURORA, + engine: rds.DatabaseClusterEngine.auroraMysql({ version: rds.AuroraMysqlEngineVersion.VER_2_08_1 }), masterUser: { username: 'clusteradmin' }, @@ -41,27 +41,30 @@ const cluster = new rds.DatabaseCluster(this, 'Database', { }); ``` -To use a specific version of the engine -(which is recommended, in order to avoid surprise updates when RDS add support for a newer version of the engine), -use the static factory methods on `DatabaseClusterEngine`: - -```typescript -new rds.DatabaseCluster(this, 'Database', { - engine: rds.DatabaseClusterEngine.aurora({ - version: rds.AuroraEngineVersion.VER_1_17_9, // different version class for each engine type - }), - ... -}); -``` - If there isn't a constant for the exact version you want to use, all of the `Version` classes have a static `of` method that can be used to create an arbitrary version. +```ts +const customEngineVersion = rds.AuroraMysqlEngineVersion.of('5.7.mysql_aurora.2.08.1'); +``` + By default, the master password will be generated and stored in AWS Secrets Manager with auto-generated description. Your cluster will be empty by default. To add a default database upon construction, specify the `defaultDatabaseName` attribute. +Use `DatabaseClusterFromSnapshot` to create a cluster from a snapshot: + +```ts +new rds.DatabaseClusterFromSnapshot(stack, 'Database', { + engine: rds.DatabaseClusterEngine.aurora({ version: rds.AuroraEngineVersion.VER_1_22_2 }), + instanceProps: { + vpc, + }, + snapshotIdentifier: 'mySnapshot', +}); +``` + ### Starting an instance database To set up a instance database, define a `DatabaseInstance`. 
You must @@ -70,9 +73,9 @@ your instances will be launched privately or publicly: ```ts const instance = new rds.DatabaseInstance(this, 'Instance', { - engine: rds.DatabaseInstanceEngine.ORACLE_SE1, + engine: rds.DatabaseInstanceEngine.oracleSe2({ version: rds.OracleEngineVersion.VER_19_0_0_0_2020_04_R1 }), // optional, defaults to m5.large - instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL), + instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE3, ec2.InstanceSize.SMALL), masterUsername: 'syscdk', vpc, vpcSubnets: { @@ -81,23 +84,14 @@ const instance = new rds.DatabaseInstance(this, 'Instance', { }); ``` -By default, the master password will be generated and stored in AWS Secrets Manager. - -To use a specific version of the engine -(which is recommended, in order to avoid surprise updates when RDS add support for a newer version of the engine), -use the static factory methods on `DatabaseInstanceEngine`: +If there isn't a constant for the exact engine version you want to use, +all of the `Version` classes have a static `of` method that can be used to create an arbitrary version. -```typescript -const instance = new rds.DatabaseInstance(this, 'Instance', { - engine: rds.DatabaseInstanceEngine.oracleSe2({ - version: rds.OracleEngineVersion.VER_19, // different version class for each engine type - }), - ... -}); +```ts +const customEngineVersion = rds.OracleEngineVersion.of('19.0.0.0.ru-2020-04.rur-2020-04.r1', '19'); ``` -If there isn't a constant for the exact version you want to use, -all of the `Version` classes have a static `of` method that can be used to create an arbitrary version. +By default, the master password will be generated and stored in AWS Secrets Manager. To use the storage auto scaling option of RDS you can specify the maximum allocated storage. This is the upper limit to which RDS can automatically scale the storage. 
More info can be found @@ -106,7 +100,7 @@ Example for max storage configuration: ```ts const instance = new rds.DatabaseInstance(this, 'Instance', { - engine: rds.DatabaseInstanceEngine.ORACLE_SE1, + engine: rds.DatabaseInstanceEngine.postgres({ version: rds.PostgresEngineVersion.VER_12_3 }), // optional, defaults to m5.large instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL), masterUsername: 'syscdk', @@ -121,7 +115,7 @@ a source database respectively: ```ts new rds.DatabaseInstanceFromSnapshot(stack, 'Instance', { snapshotIdentifier: 'my-snapshot', - engine: rds.DatabaseInstanceEngine.POSTGRES, + engine: rds.DatabaseInstanceEngine.postgres({ version: rds.PostgresEngineVersion.VER_12_3 }), // optional, defaults to m5.large instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.LARGE), vpc, @@ -261,12 +255,15 @@ on configuring users for each available database engine. ### Metrics -Database instances expose metrics (`cloudwatch.Metric`): +Database instances and clusters both expose metrics (`cloudwatch.Metric`): ```ts // The number of database connections in use (average over 5 minutes) const dbConnections = instance.metricDatabaseConnections(); +// Average CPU utilization over 5 minutes +const cpuUtilization = cluster.metricCPUUtilization(); + // The average amount of time taken per disk I/O operation (average over 1 minute) const readLatency = instance.metric('ReadLatency', { statistic: 'Average', periodSec: 60 }); ``` @@ -366,8 +363,8 @@ that are available for a particular Amazon RDS DB instance. 
const vpc: ec2.IVpc = ...; const securityGroup: ec2.ISecurityGroup = ...; new rds.OptionGroup(stack, 'Options', { - engine: DatabaseInstanceEngine.oracleSe({ - version: OracleLegacyEngineVersion.VER_11_2, + engine: rds.DatabaseInstanceEngine.oracleSe2({ + version: rds.OracleEngineVersion.VER_19, }), configurations: [ { diff --git a/packages/@aws-cdk/aws-rds/lib/cluster-ref.ts b/packages/@aws-cdk/aws-rds/lib/cluster-ref.ts index 0db0389b9cb67..a464e2a0fd5c0 100644 --- a/packages/@aws-cdk/aws-rds/lib/cluster-ref.ts +++ b/packages/@aws-cdk/aws-rds/lib/cluster-ref.ts @@ -46,38 +46,50 @@ export interface IDatabaseCluster extends IResource, ec2.IConnectable, secretsma */ export interface DatabaseClusterAttributes { /** - * The database port + * Identifier for the cluster */ - readonly port: number; + readonly clusterIdentifier: string; /** - * The security groups of the database cluster + * The database port + * + * @default - none */ - readonly securityGroups: ec2.ISecurityGroup[]; + readonly port?: number; /** - * Identifier for the cluster + * The security groups of the database cluster + * + * @default - no security groups */ - readonly clusterIdentifier: string; + readonly securityGroups?: ec2.ISecurityGroup[]; /** * Identifier for the instances + * + * @default - no instance identifiers */ - readonly instanceIdentifiers: string[]; + readonly instanceIdentifiers?: string[]; // Actual underlying type: DBInstanceId[], but we have to type it more loosely for Java's benefit. 
/** * Cluster endpoint address + * + * @default - no endpoint address */ - readonly clusterEndpointAddress: string; + readonly clusterEndpointAddress?: string; /** * Reader endpoint address + * + * @default - no reader address */ - readonly readerEndpointAddress: string; + readonly readerEndpointAddress?: string; /** * Endpoint addresses of individual instances + * + * @default - no instance endpoints */ - readonly instanceEndpointAddresses: string[]; + readonly instanceEndpointAddresses?: string[]; } diff --git a/packages/@aws-cdk/aws-rds/lib/cluster.ts b/packages/@aws-cdk/aws-rds/lib/cluster.ts index 4ddba423ba8ea..9939a6de78a57 100644 --- a/packages/@aws-cdk/aws-rds/lib/cluster.ts +++ b/packages/@aws-cdk/aws-rds/lib/cluster.ts @@ -4,7 +4,7 @@ import * as kms from '@aws-cdk/aws-kms'; import * as logs from '@aws-cdk/aws-logs'; import * as s3 from '@aws-cdk/aws-s3'; import * as secretsmanager from '@aws-cdk/aws-secretsmanager'; -import { CfnDeletionPolicy, Construct, Duration, RemovalPolicy, Resource, Token } from '@aws-cdk/core'; +import { Annotations, CfnDeletionPolicy, Construct, Duration, RemovalPolicy, Resource, Token } from '@aws-cdk/core'; import { IClusterEngine } from './cluster-engine'; import { DatabaseClusterAttributes, IDatabaseCluster } from './cluster-ref'; import { DatabaseSecret } from './database-secret'; @@ -12,12 +12,12 @@ import { Endpoint } from './endpoint'; import { IParameterGroup } from './parameter-group'; import { BackupProps, InstanceProps, Login, PerformanceInsightRetention, RotationMultiUserOptions } from './props'; import { DatabaseProxy, DatabaseProxyOptions, ProxyTarget } from './proxy'; -import { CfnDBCluster, CfnDBInstance, CfnDBSubnetGroup } from './rds.generated'; +import { CfnDBCluster, CfnDBClusterProps, CfnDBInstance, CfnDBSubnetGroup } from './rds.generated'; /** - * Properties for a new database cluster + * Common properties for a new database cluster or cluster from snapshot. 
*/ -export interface DatabaseClusterProps { +interface DatabaseClusterBaseProps { /** * What kind of database to start */ @@ -37,11 +37,6 @@ export interface DatabaseClusterProps { */ readonly instanceProps: InstanceProps; - /** - * Username and password for the administrative user - */ - readonly masterUser: Login; - /** * Backup settings * @@ -90,21 +85,6 @@ export interface DatabaseClusterProps { */ readonly deletionProtection?: boolean; - /** - * Whether to enable storage encryption. - * - * @default - true if storageEncryptionKey is provided, false otherwise - */ - readonly storageEncrypted?: boolean - - /** - * The KMS key for storage encryption. - * If specified, {@link storageEncrypted} will be set to `true`. - * - * @default - if storageEncrypted is true then the default master key, no key otherwise - */ - readonly storageEncryptionKey?: kms.IKey; - /** * A preferred maintenance window day/time range. Should be specified as a range ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). * @@ -237,7 +217,7 @@ export interface DatabaseClusterProps { /** * A new or imported clustered database. */ -abstract class DatabaseClusterBase extends Resource implements IDatabaseCluster { +export abstract class DatabaseClusterBase extends Resource implements IDatabaseCluster { /** * Identifier of the cluster */ @@ -289,120 +269,42 @@ abstract class DatabaseClusterBase extends Resource implements IDatabaseCluster } /** - * Create a clustered database with a given number of instances. 
- * - * @resource AWS::RDS::DBCluster + * Abstract base for ``DatabaseCluster`` and ``DatabaseClusterFromSnapshot`` */ -export class DatabaseCluster extends DatabaseClusterBase { - /** - * Import an existing DatabaseCluster from properties - */ - public static fromDatabaseClusterAttributes(scope: Construct, id: string, attrs: DatabaseClusterAttributes): IDatabaseCluster { - class Import extends DatabaseClusterBase implements IDatabaseCluster { - public readonly defaultPort = ec2.Port.tcp(attrs.port); - public readonly connections = new ec2.Connections({ - securityGroups: attrs.securityGroups, - defaultPort: this.defaultPort, - }); - public readonly clusterIdentifier = attrs.clusterIdentifier; - public readonly instanceIdentifiers: string[] = []; - public readonly clusterEndpoint = new Endpoint(attrs.clusterEndpointAddress, attrs.port); - public readonly clusterReadEndpoint = new Endpoint(attrs.readerEndpointAddress, attrs.port); - public readonly instanceEndpoints = attrs.instanceEndpointAddresses.map(a => new Endpoint(a, attrs.port)); - } - - return new Import(scope, id); - } - - /** - * Identifier of the cluster - */ - public readonly clusterIdentifier: string; +abstract class DatabaseClusterNew extends DatabaseClusterBase { - /** - * Identifiers of the replicas - */ public readonly instanceIdentifiers: string[] = []; - - /** - * The endpoint to use for read/write operations - */ - public readonly clusterEndpoint: Endpoint; - - /** - * Endpoint to use for load-balanced read-only operations. - */ - public readonly clusterReadEndpoint: Endpoint; - - /** - * Endpoints which address each individual replica. 
- */ public readonly instanceEndpoints: Endpoint[] = []; - /** - * Access to the network connections - */ - public readonly connections: ec2.Connections; + protected readonly newCfnProps: CfnDBClusterProps; + protected readonly subnetGroup: CfnDBSubnetGroup; + protected readonly securityGroups: ec2.ISecurityGroup[]; - /** - * The secret attached to this cluster - */ - public readonly secret?: secretsmanager.ISecret; - - private readonly singleUserRotationApplication: secretsmanager.SecretRotationApplication; - private readonly multiUserRotationApplication: secretsmanager.SecretRotationApplication; - - /** - * The VPC where the DB subnet group is created. - */ - private readonly vpc: ec2.IVpc; - - /** - * The subnets used by the DB subnet group. - * - * @default - the Vpc default strategy if not specified. - */ - private readonly vpcSubnets?: ec2.SubnetSelection; - - constructor(scope: Construct, id: string, props: DatabaseClusterProps) { + constructor(scope: Construct, id: string, props: DatabaseClusterBaseProps) { super(scope, id); - this.vpc = props.instanceProps.vpc; - this.vpcSubnets = props.instanceProps.vpcSubnets; - const { subnetIds } = props.instanceProps.vpc.selectSubnets(props.instanceProps.vpcSubnets); // Cannot test whether the subnets are in different AZs, but at least we can test the amount. if (subnetIds.length < 2) { - this.node.addError(`Cluster requires at least 2 subnets, got ${subnetIds.length}`); + Annotations.of(this).addError(`Cluster requires at least 2 subnets, got ${subnetIds.length}`); } - const subnetGroup = new CfnDBSubnetGroup(this, 'Subnets', { + this.subnetGroup = new CfnDBSubnetGroup(this, 'Subnets', { dbSubnetGroupDescription: `Subnets for ${id} database`, subnetIds, }); if (props.removalPolicy === RemovalPolicy.RETAIN) { - subnetGroup.applyRemovalPolicy(RemovalPolicy.RETAIN); + this.subnetGroup.applyRemovalPolicy(RemovalPolicy.RETAIN); } - const securityGroups = props.instanceProps.securityGroups ?? 
[ + this.securityGroups = props.instanceProps.securityGroups ?? [ new ec2.SecurityGroup(this, 'SecurityGroup', { description: 'RDS security group', vpc: props.instanceProps.vpc, }), ]; - let secret: DatabaseSecret | undefined; - if (!props.masterUser.password) { - secret = new DatabaseSecret(this, 'Secret', { - username: props.masterUser.username, - encryptionKey: props.masterUser.encryptionKey, - }); - } - - this.singleUserRotationApplication = props.engine.singleUserRotationApplication; - this.multiUserRotationApplication = props.engine.multiUserRotationApplication; - const clusterAssociatedRoles: CfnDBCluster.DBClusterRoleProperty[] = []; let { s3ImportRole, s3ExportRole } = this.setupS3ImportExport(props); if (s3ImportRole) { @@ -421,44 +323,216 @@ export class DatabaseCluster extends DatabaseClusterBase { const clusterParameterGroup = props.parameterGroup ?? clusterEngineBindConfig.parameterGroup; const clusterParameterGroupConfig = clusterParameterGroup?.bindToCluster({}); - const cluster = new CfnDBCluster(this, 'Resource', { + this.newCfnProps = { // Basic engine: props.engine.engineType, engineVersion: props.engine.engineVersion?.fullVersion, dbClusterIdentifier: props.clusterIdentifier, - dbSubnetGroupName: subnetGroup.ref, - vpcSecurityGroupIds: securityGroups.map(sg => sg.securityGroupId), + dbSubnetGroupName: this.subnetGroup.ref, + vpcSecurityGroupIds: this.securityGroups.map(sg => sg.securityGroupId), port: props.port ?? clusterEngineBindConfig.port, dbClusterParameterGroupName: clusterParameterGroupConfig?.parameterGroupName, associatedRoles: clusterAssociatedRoles.length > 0 ? clusterAssociatedRoles : undefined, deletionProtection: props.deletionProtection, // Admin - masterUsername: secret ? secret.secretValueFromJson('username').toString() : props.masterUser.username, - masterUserPassword: secret - ? secret.secretValueFromJson('password').toString() - : (props.masterUser.password - ? 
props.masterUser.password.toString() - : undefined), backupRetentionPeriod: props.backup?.retention?.toDays(), preferredBackupWindow: props.backup?.preferredWindow, preferredMaintenanceWindow: props.preferredMaintenanceWindow, databaseName: props.defaultDatabaseName, enableCloudwatchLogsExports: props.cloudwatchLogsExports, - // Encryption - kmsKeyId: props.storageEncryptionKey?.keyArn, - storageEncrypted: props.storageEncryptionKey ? true : props.storageEncrypted, - }); + }; + } + protected setRemovalPolicy(cluster: CfnDBCluster, removalPolicy?: RemovalPolicy) { // if removalPolicy was not specified, // leave it as the default, which is Snapshot - if (props.removalPolicy) { - cluster.applyRemovalPolicy(props.removalPolicy); + if (removalPolicy) { + cluster.applyRemovalPolicy(removalPolicy); } else { // The CFN default makes sense for DeletionPolicy, // but doesn't cover UpdateReplacePolicy. // Fix that here. cluster.cfnOptions.updateReplacePolicy = CfnDeletionPolicy.SNAPSHOT; } + } + + private setupS3ImportExport(props: DatabaseClusterBaseProps): { s3ImportRole?: IRole, s3ExportRole?: IRole } { + let s3ImportRole = props.s3ImportRole; + if (props.s3ImportBuckets && props.s3ImportBuckets.length > 0) { + if (props.s3ImportRole) { + throw new Error('Only one of s3ImportRole or s3ImportBuckets must be specified, not both.'); + } + + s3ImportRole = new Role(this, 'S3ImportRole', { + assumedBy: new ServicePrincipal('rds.amazonaws.com'), + }); + for (const bucket of props.s3ImportBuckets) { + bucket.grantRead(s3ImportRole); + } + } + + let s3ExportRole = props.s3ExportRole; + if (props.s3ExportBuckets && props.s3ExportBuckets.length > 0) { + if (props.s3ExportRole) { + throw new Error('Only one of s3ExportRole or s3ExportBuckets must be specified, not both.'); + } + + s3ExportRole = new Role(this, 'S3ExportRole', { + assumedBy: new ServicePrincipal('rds.amazonaws.com'), + }); + for (const bucket of props.s3ExportBuckets) { + bucket.grantReadWrite(s3ExportRole); + } + } + 
+ return { s3ImportRole, s3ExportRole }; + } +} + +/** + * Represents an imported database cluster. + */ +class ImportedDatabaseCluster extends DatabaseClusterBase implements IDatabaseCluster { + public readonly clusterIdentifier: string; + public readonly connections: ec2.Connections; + + private readonly _clusterEndpoint?: Endpoint; + private readonly _clusterReadEndpoint?: Endpoint; + private readonly _instanceIdentifiers?: string[]; + private readonly _instanceEndpoints?: Endpoint[]; + + constructor(scope: Construct, id: string, attrs: DatabaseClusterAttributes) { + super(scope, id); + + this.clusterIdentifier = attrs.clusterIdentifier; + + const defaultPort = attrs.port ? ec2.Port.tcp(attrs.port) : undefined; + this.connections = new ec2.Connections({ + securityGroups: attrs.securityGroups, + defaultPort, + }); + + this._clusterEndpoint = (attrs.clusterEndpointAddress && attrs.port) ? new Endpoint(attrs.clusterEndpointAddress, attrs.port) : undefined; + this._clusterReadEndpoint = (attrs.readerEndpointAddress && attrs.port) ? new Endpoint(attrs.readerEndpointAddress, attrs.port) : undefined; + this._instanceIdentifiers = attrs.instanceIdentifiers; + this._instanceEndpoints = (attrs.instanceEndpointAddresses && attrs.port) + ? 
attrs.instanceEndpointAddresses.map(addr => new Endpoint(addr, attrs.port!)) + : undefined; + } + + public get clusterEndpoint() { + if (!this._clusterEndpoint) { + throw new Error('Cannot access `clusterEndpoint` of an imported cluster without an endpoint address and port'); + } + return this._clusterEndpoint; + } + + public get clusterReadEndpoint() { + if (!this._clusterReadEndpoint) { + throw new Error('Cannot access `clusterReadEndpoint` of an imported cluster without a readerEndpointAddress and port'); + } + return this._clusterReadEndpoint; + } + + public get instanceIdentifiers() { + if (!this._instanceIdentifiers) { + throw new Error('Cannot access `instanceIdentifiers` of an imported cluster without provided instanceIdentifiers'); + } + return this._instanceIdentifiers; + } + + public get instanceEndpoints() { + if (!this._instanceEndpoints) { + throw new Error('Cannot access `instanceEndpoints` of an imported cluster without instanceEndpointAddresses and port'); + } + return this._instanceEndpoints; + } +} + +/** + * Properties for a new database cluster + */ +export interface DatabaseClusterProps extends DatabaseClusterBaseProps { + /** + * Username and password for the administrative user + */ + readonly masterUser: Login; + + /** + * Whether to enable storage encryption. + * + * @default - true if storageEncryptionKey is provided, false otherwise + */ + readonly storageEncrypted?: boolean + + /** + * The KMS key for storage encryption. + * If specified, {@link storageEncrypted} will be set to `true`. + * + * @default - if storageEncrypted is true then the default master key, no key otherwise + */ + readonly storageEncryptionKey?: kms.IKey; +} + +/** + * Create a clustered database with a given number of instances. 
+ * + * @resource AWS::RDS::DBCluster + */ +export class DatabaseCluster extends DatabaseClusterNew { + /** + * Import an existing DatabaseCluster from properties + */ + public static fromDatabaseClusterAttributes(scope: Construct, id: string, attrs: DatabaseClusterAttributes): IDatabaseCluster { + return new ImportedDatabaseCluster(scope, id, attrs); + } + + public readonly clusterIdentifier: string; + public readonly clusterEndpoint: Endpoint; + public readonly clusterReadEndpoint: Endpoint; + public readonly connections: ec2.Connections; + + /** + * The secret attached to this cluster + */ + public readonly secret?: secretsmanager.ISecret; + + private readonly vpc: ec2.IVpc; + private readonly vpcSubnets?: ec2.SubnetSelection; + + private readonly singleUserRotationApplication: secretsmanager.SecretRotationApplication; + private readonly multiUserRotationApplication: secretsmanager.SecretRotationApplication; + + constructor(scope: Construct, id: string, props: DatabaseClusterProps) { + super(scope, id, props); + + this.vpc = props.instanceProps.vpc; + this.vpcSubnets = props.instanceProps.vpcSubnets; + + this.singleUserRotationApplication = props.engine.singleUserRotationApplication; + this.multiUserRotationApplication = props.engine.multiUserRotationApplication; + + let secret: DatabaseSecret | undefined; + if (!props.masterUser.password) { + secret = new DatabaseSecret(this, 'Secret', { + username: props.masterUser.username, + encryptionKey: props.masterUser.encryptionKey, + }); + } + + const cluster = new CfnDBCluster(this, 'Resource', { + ...this.newCfnProps, + // Admin + masterUsername: secret ? secret.secretValueFromJson('username').toString() : props.masterUser.username, + masterUserPassword: secret + ? secret.secretValueFromJson('password').toString() + : (props.masterUser.password + ? props.masterUser.password.toString() + : undefined), + // Encryption + kmsKeyId: props.storageEncryptionKey?.keyArn, + storageEncrypted: props.storageEncryptionKey ? 
true : props.storageEncrypted, + }); this.clusterIdentifier = cluster.ref; @@ -466,20 +540,21 @@ export class DatabaseCluster extends DatabaseClusterBase { const portAttribute = Token.asNumber(cluster.attrEndpointPort); this.clusterEndpoint = new Endpoint(cluster.attrEndpointAddress, portAttribute); this.clusterReadEndpoint = new Endpoint(cluster.attrReadEndpointAddress, portAttribute); + this.connections = new ec2.Connections({ + securityGroups: this.securityGroups, + defaultPort: ec2.Port.tcp(this.clusterEndpoint.port), + }); - this.setLogRetention(props); + this.setRemovalPolicy(cluster, props.removalPolicy); if (secret) { this.secret = secret.attach(this); } - this.createInstances(props, cluster, subnetGroup, portAttribute); - - const defaultPort = ec2.Port.tcp(this.clusterEndpoint.port); - this.connections = new ec2.Connections({ securityGroups, defaultPort }); + setLogRetention(this, props); + createInstances(this, props, this.subnetGroup); } - /** * Adds the single user rotation of the master password to this cluster. * @@ -524,131 +599,169 @@ export class DatabaseCluster extends DatabaseClusterBase { target: this, }); } +} - private setupS3ImportExport(props: DatabaseClusterProps): { s3ImportRole?: IRole, s3ExportRole?: IRole } { - let s3ImportRole = props.s3ImportRole; - if (props.s3ImportBuckets && props.s3ImportBuckets.length > 0) { - if (props.s3ImportRole) { - throw new Error('Only one of s3ImportRole or s3ImportBuckets must be specified, not both.'); - } +/** + * Properties for ``DatabaseClusterFromSnapshot`` + */ +export interface DatabaseClusterFromSnapshotProps extends DatabaseClusterBaseProps { + /** + * The identifier for the DB instance snapshot or DB cluster snapshot to restore from. + * You can use either the name or the Amazon Resource Name (ARN) to specify a DB cluster snapshot. + * However, you can use only the ARN to specify a DB instance snapshot. 
+ */ + readonly snapshotIdentifier: string; +} - s3ImportRole = new Role(this, 'S3ImportRole', { - assumedBy: new ServicePrincipal('rds.amazonaws.com'), - }); - for (const bucket of props.s3ImportBuckets) { - bucket.grantRead(s3ImportRole); - } - } +/** + * A database cluster restored from a snapshot. + * + * @resource AWS::RDS::DBCluster + */ +export class DatabaseClusterFromSnapshot extends DatabaseClusterNew { + public readonly clusterIdentifier: string; + public readonly clusterEndpoint: Endpoint; + public readonly clusterReadEndpoint: Endpoint; + public readonly connections: ec2.Connections; - let s3ExportRole = props.s3ExportRole; - if (props.s3ExportBuckets && props.s3ExportBuckets.length > 0) { - if (props.s3ExportRole) { - throw new Error('Only one of s3ExportRole or s3ExportBuckets must be specified, not both.'); - } + constructor(scope: Construct, id: string, props: DatabaseClusterFromSnapshotProps) { + super(scope, id, props); - s3ExportRole = new Role(this, 'S3ExportRole', { - assumedBy: new ServicePrincipal('rds.amazonaws.com'), - }); - for (const bucket of props.s3ExportBuckets) { - bucket.grantReadWrite(s3ExportRole); - } - } + const cluster = new CfnDBCluster(this, 'Resource', { + ...this.newCfnProps, + snapshotIdentifier: props.snapshotIdentifier, + }); - return { s3ImportRole, s3ExportRole }; - } + this.clusterIdentifier = cluster.ref; - private createInstances(props: DatabaseClusterProps, cluster: CfnDBCluster, subnetGroup: CfnDBSubnetGroup, portAttribute: number) { - const instanceCount = props.instances != null ? 
props.instances : 2; - if (instanceCount < 1) { - throw new Error('At least one instance is required'); - } + // create a number token that represents the port of the cluster + const portAttribute = Token.asNumber(cluster.attrEndpointPort); + this.clusterEndpoint = new Endpoint(cluster.attrEndpointAddress, portAttribute); + this.clusterReadEndpoint = new Endpoint(cluster.attrReadEndpointAddress, portAttribute); + this.connections = new ec2.Connections({ + securityGroups: this.securityGroups, + defaultPort: ec2.Port.tcp(this.clusterEndpoint.port), + }); - const instanceProps = props.instanceProps; - // Get the actual subnet objects so we can depend on internet connectivity. - const internetConnected = instanceProps.vpc.selectSubnets(instanceProps.vpcSubnets).internetConnectivityEstablished; - - let monitoringRole; - if (props.monitoringInterval && props.monitoringInterval.toSeconds()) { - monitoringRole = props.monitoringRole || new Role(this, 'MonitoringRole', { - assumedBy: new ServicePrincipal('monitoring.rds.amazonaws.com'), - managedPolicies: [ - ManagedPolicy.fromAwsManagedPolicyName('service-role/AmazonRDSEnhancedMonitoringRole'), - ], - }); + this.setRemovalPolicy(cluster, props.removalPolicy); + + setLogRetention(this, props); + createInstances(this, props, this.subnetGroup); + } +} + +/** + * Sets up CloudWatch log retention if configured. + * A function rather than protected member to prevent exposing ``DatabaseClusterBaseProps``. 
+ */ +function setLogRetention(cluster: DatabaseClusterNew, props: DatabaseClusterBaseProps) { + if (props.cloudwatchLogsExports) { + const unsupportedLogTypes = props.cloudwatchLogsExports.filter(logType => !props.engine.supportedLogTypes.includes(logType)); + if (unsupportedLogTypes.length > 0) { + throw new Error(`Unsupported logs for the current engine type: ${unsupportedLogTypes.join(',')}`); } - const enablePerformanceInsights = instanceProps.enablePerformanceInsights - || instanceProps.performanceInsightRetention !== undefined || instanceProps.performanceInsightEncryptionKey !== undefined; - if (enablePerformanceInsights && instanceProps.enablePerformanceInsights === false) { - throw new Error('`enablePerformanceInsights` disabled, but `performanceInsightRetention` or `performanceInsightEncryptionKey` was set'); + if (props.cloudwatchLogsRetention) { + for (const log of props.cloudwatchLogsExports) { + new logs.LogRetention(cluster, `LogRetention${log}`, { + logGroupName: `/aws/rds/cluster/${cluster.clusterIdentifier}/${log}`, + retention: props.cloudwatchLogsRetention, + role: props.cloudwatchLogsRetentionRole, + }); + } } + } +} - const instanceType = instanceProps.instanceType ?? ec2.InstanceType.of(ec2.InstanceClass.T3, ec2.InstanceSize.MEDIUM); - const instanceParameterGroupConfig = instanceProps.parameterGroup?.bindToInstance({}); - for (let i = 0; i < instanceCount; i++) { - const instanceIndex = i + 1; - const instanceIdentifier = props.instanceIdentifierBase != null ? `${props.instanceIdentifierBase}${instanceIndex}` : - props.clusterIdentifier != null ? 
`${props.clusterIdentifier}instance${instanceIndex}` : - undefined; - - const publiclyAccessible = instanceProps.vpcSubnets && instanceProps.vpcSubnets.subnetType === ec2.SubnetType.PUBLIC; - - const instance = new CfnDBInstance(this, `Instance${instanceIndex}`, { - // Link to cluster - engine: props.engine.engineType, - engineVersion: props.engine.engineVersion?.fullVersion, - dbClusterIdentifier: cluster.ref, - dbInstanceIdentifier: instanceIdentifier, - // Instance properties - dbInstanceClass: databaseInstanceType(instanceType), - publiclyAccessible, - enablePerformanceInsights: enablePerformanceInsights || instanceProps.enablePerformanceInsights, // fall back to undefined if not set - performanceInsightsKmsKeyId: instanceProps.performanceInsightEncryptionKey?.keyArn, - performanceInsightsRetentionPeriod: enablePerformanceInsights - ? (instanceProps.performanceInsightRetention || PerformanceInsightRetention.DEFAULT) - : undefined, - // This is already set on the Cluster. Unclear to me whether it should be repeated or not. Better yes. - dbSubnetGroupName: subnetGroup.ref, - dbParameterGroupName: instanceParameterGroupConfig?.parameterGroupName, - monitoringInterval: props.monitoringInterval && props.monitoringInterval.toSeconds(), - monitoringRoleArn: monitoringRole && monitoringRole.roleArn, - }); +/** Output from the createInstances method; used to set instance identifiers and endpoints */ +interface InstanceConfig { + readonly instanceIdentifiers: string[]; + readonly instanceEndpoints: Endpoint[]; +} - // If removalPolicy isn't explicitly set, - // it's Snapshot for Cluster. - // Because of that, in this case, - // we can safely use the CFN default of Delete for DbInstances with dbClusterIdentifier set. - if (props.removalPolicy) { - instance.applyRemovalPolicy(props.removalPolicy); - } +/** + * Creates the instances for the cluster. 
+ * A function rather than a protected method on ``DatabaseClusterNew`` to avoid exposing + * ``DatabaseClusterNew`` and ``DatabaseClusterBaseProps`` in the API. + */ +function createInstances(cluster: DatabaseClusterNew, props: DatabaseClusterBaseProps, subnetGroup: CfnDBSubnetGroup): InstanceConfig { + const instanceCount = props.instances != null ? props.instances : 2; + if (instanceCount < 1) { + throw new Error('At least one instance is required'); + } - // We must have a dependency on the NAT gateway provider here to create - // things in the right order. - instance.node.addDependency(internetConnected); + const instanceIdentifiers: string[] = []; + const instanceEndpoints: Endpoint[] = []; + const portAttribute = cluster.clusterEndpoint.port; + const instanceProps = props.instanceProps; + + // Get the actual subnet objects so we can depend on internet connectivity. + const internetConnected = instanceProps.vpc.selectSubnets(instanceProps.vpcSubnets).internetConnectivityEstablished; + + let monitoringRole; + if (props.monitoringInterval && props.monitoringInterval.toSeconds()) { + monitoringRole = props.monitoringRole || new Role(cluster, 'MonitoringRole', { + assumedBy: new ServicePrincipal('monitoring.rds.amazonaws.com'), + managedPolicies: [ + ManagedPolicy.fromAwsManagedPolicyName('service-role/AmazonRDSEnhancedMonitoringRole'), + ], + }); + } - this.instanceIdentifiers.push(instance.ref); - this.instanceEndpoints.push(new Endpoint(instance.attrEndpointAddress, portAttribute)); - } + const enablePerformanceInsights = instanceProps.enablePerformanceInsights + || instanceProps.performanceInsightRetention !== undefined || instanceProps.performanceInsightEncryptionKey !== undefined; + if (enablePerformanceInsights && instanceProps.enablePerformanceInsights === false) { + throw new Error('`enablePerformanceInsights` disabled, but `performanceInsightRetention` or `performanceInsightEncryptionKey` was set'); } - private setLogRetention(props: 
DatabaseClusterProps) { - if (props.cloudwatchLogsExports) { - const unsupportedLogTypes = props.cloudwatchLogsExports.filter(logType => !props.engine.supportedLogTypes.includes(logType)); - if (unsupportedLogTypes.length > 0) { - throw new Error(`Unsupported logs for the current engine type: ${unsupportedLogTypes.join(',')}`); - } + const instanceType = instanceProps.instanceType ?? ec2.InstanceType.of(ec2.InstanceClass.T3, ec2.InstanceSize.MEDIUM); + const instanceParameterGroupConfig = instanceProps.parameterGroup?.bindToInstance({}); + for (let i = 0; i < instanceCount; i++) { + const instanceIndex = i + 1; + const instanceIdentifier = props.instanceIdentifierBase != null ? `${props.instanceIdentifierBase}${instanceIndex}` : + props.clusterIdentifier != null ? `${props.clusterIdentifier}instance${instanceIndex}` : + undefined; - if (props.cloudwatchLogsRetention) { - for (const log of props.cloudwatchLogsExports) { - new logs.LogRetention(this, `LogRetention${log}`, { - logGroupName: `/aws/rds/cluster/${this.clusterIdentifier}/${log}`, - retention: props.cloudwatchLogsRetention, - role: props.cloudwatchLogsRetentionRole, - }); - } - } + const publiclyAccessible = instanceProps.vpcSubnets && instanceProps.vpcSubnets.subnetType === ec2.SubnetType.PUBLIC; + + const instance = new CfnDBInstance(cluster, `Instance${instanceIndex}`, { + // Link to cluster + engine: props.engine.engineType, + engineVersion: props.engine.engineVersion?.fullVersion, + dbClusterIdentifier: cluster.clusterIdentifier, + dbInstanceIdentifier: instanceIdentifier, + // Instance properties + dbInstanceClass: databaseInstanceType(instanceType), + publiclyAccessible, + enablePerformanceInsights: enablePerformanceInsights || instanceProps.enablePerformanceInsights, // fall back to undefined if not set + performanceInsightsKmsKeyId: instanceProps.performanceInsightEncryptionKey?.keyArn, + performanceInsightsRetentionPeriod: enablePerformanceInsights + ? 
(instanceProps.performanceInsightRetention || PerformanceInsightRetention.DEFAULT) + : undefined, + // This is already set on the Cluster. Unclear to me whether it should be repeated or not. Better yes. + dbSubnetGroupName: subnetGroup.ref, + dbParameterGroupName: instanceParameterGroupConfig?.parameterGroupName, + monitoringInterval: props.monitoringInterval && props.monitoringInterval.toSeconds(), + monitoringRoleArn: monitoringRole && monitoringRole.roleArn, + }); + + // If removalPolicy isn't explicitly set, + // it's Snapshot for Cluster. + // Because of that, in this case, + // we can safely use the CFN default of Delete for DbInstances with dbClusterIdentifier set. + if (props.removalPolicy) { + instance.applyRemovalPolicy(props.removalPolicy); } + + // We must have a dependency on the NAT gateway provider here to create + // things in the right order. + instance.node.addDependency(internetConnected); + + instanceIdentifiers.push(instance.ref); + instanceEndpoints.push(new Endpoint(instance.attrEndpointAddress, portAttribute)); } + + return { instanceEndpoints, instanceIdentifiers }; } /** diff --git a/packages/@aws-cdk/aws-rds/lib/instance-engine.ts b/packages/@aws-cdk/aws-rds/lib/instance-engine.ts index df715c9e0387a..5c0062aba2771 100644 --- a/packages/@aws-cdk/aws-rds/lib/instance-engine.ts +++ b/packages/@aws-cdk/aws-rds/lib/instance-engine.ts @@ -507,6 +507,8 @@ class PostgresInstanceEngine extends InstanceEngineBase { * (those returned by {@link DatabaseInstanceEngine.oracleSe} * and {@link DatabaseInstanceEngine.oracleSe1}). * Note: RDS will stop allowing creating new databases with this version in August 2020. + * + * @deprecated instances can no longer be created with these engine versions. See https://forums.aws.amazon.com/ann.jspa?annID=7341 */ export class OracleLegacyEngineVersion { /** Version "11.2" (only a major version, without a specific minor version). 
*/ @@ -710,12 +712,15 @@ interface OracleInstanceEngineProps { /** * Properties for Oracle Standard Edition instance engines. * Used in {@link DatabaseInstanceEngine.oracleSe}. + * + * @deprecated instances can no longer be created with this engine. See https://forums.aws.amazon.com/ann.jspa?annID=7341 */ export interface OracleSeInstanceEngineProps { /** The exact version of the engine to use. */ readonly version: OracleLegacyEngineVersion; } +/** @deprecated instances can no longer be created with this engine. See https://forums.aws.amazon.com/ann.jspa?annID=7341 */ class OracleSeInstanceEngine extends OracleInstanceEngineBase { constructor(version?: OracleLegacyEngineVersion) { super({ @@ -735,12 +740,15 @@ class OracleSeInstanceEngine extends OracleInstanceEngineBase { /** * Properties for Oracle Standard Edition 1 instance engines. * Used in {@link DatabaseInstanceEngine.oracleSe1}. + * + * @deprecated instances can no longer be created with this engine. See https://forums.aws.amazon.com/ann.jspa?annID=7341 */ export interface OracleSe1InstanceEngineProps { /** The exact version of the engine to use. */ readonly version: OracleLegacyEngineVersion; } +/** @deprecated instances can no longer be created with this engine. See https://forums.aws.amazon.com/ann.jspa?annID=7341 */ class OracleSe1InstanceEngine extends OracleInstanceEngineBase { constructor(version?: OracleLegacyEngineVersion) { super({ @@ -1033,16 +1041,14 @@ export class DatabaseInstanceEngine { /** * The unversioned 'oracle-se1' instance engine. * - * @deprecated using unversioned engines is an availability risk. - * We recommend using versioned engines created using the {@link oracleSe1()} method + * @deprecated instances can no longer be created with this engine. See https://forums.aws.amazon.com/ann.jspa?annID=7341 */ public static readonly ORACLE_SE1: IInstanceEngine = new OracleSe1InstanceEngine(); /** * The unversioned 'oracle-se' instance engine. 
* - * @deprecated using unversioned engines is an availability risk. - * We recommend using versioned engines created using the {@link oracleSe()} method + * @deprecated instances can no longer be created with this engine. See https://forums.aws.amazon.com/ann.jspa?annID=7341 */ public static readonly ORACLE_SE: IInstanceEngine = new OracleSeInstanceEngine(); @@ -1101,12 +1107,18 @@ export class DatabaseInstanceEngine { return new PostgresInstanceEngine(props.version); } - /** Creates a new Oracle Standard Edition instance engine. */ + /** + * Creates a new Oracle Standard Edition instance engine. + * @deprecated instances can no longer be created with this engine. See https://forums.aws.amazon.com/ann.jspa?annID=7341 + */ public static oracleSe(props: OracleSeInstanceEngineProps): IInstanceEngine { return new OracleSeInstanceEngine(props.version); } - /** Creates a new Oracle Standard Edition 1 instance engine. */ + /** + * Creates a new Oracle Standard Edition 1 instance engine. + * @deprecated instances can no longer be created with this engine. 
See https://forums.aws.amazon.com/ann.jspa?annID=7341 + */ public static oracleSe1(props: OracleSe1InstanceEngineProps): IInstanceEngine { return new OracleSe1InstanceEngine(props.version); } diff --git a/packages/@aws-cdk/aws-rds/package.json b/packages/@aws-cdk/aws-rds/package.json index 7c65dfad99e40..90a92079eb6e5 100644 --- a/packages/@aws-cdk/aws-rds/package.json +++ b/packages/@aws-cdk/aws-rds/package.json @@ -105,6 +105,7 @@ "exclude": [ "props-physical-name:@aws-cdk/aws-rds.ParameterGroupProps", "props-physical-name:@aws-cdk/aws-rds.DatabaseClusterProps", + "props-physical-name:@aws-cdk/aws-rds.DatabaseClusterFromSnapshotProps", "props-physical-name:@aws-cdk/aws-rds.DatabaseInstanceProps", "props-physical-name:@aws-cdk/aws-rds.DatabaseInstanceFromSnapshotProps", "props-physical-name:@aws-cdk/aws-rds.DatabaseInstanceReadReplicaProps", diff --git a/packages/@aws-cdk/aws-rds/test/integ.cluster-rotation.lit.expected.json b/packages/@aws-cdk/aws-rds/test/integ.cluster-rotation.lit.expected.json index 348dba3e65ae7..e58745e098767 100644 --- a/packages/@aws-cdk/aws-rds/test/integ.cluster-rotation.lit.expected.json +++ b/packages/@aws-cdk/aws-rds/test/integ.cluster-rotation.lit.expected.json @@ -765,7 +765,7 @@ "Properties": { "Location": { "ApplicationId": "arn:aws:serverlessrepo:us-east-1:297356227824:applications/SecretsManagerRDSMySQLRotationSingleUser", - "SemanticVersion": "1.1.3" + "SemanticVersion": "1.1.60" }, "Parameters": { "endpoint": { @@ -812,4 +812,4 @@ } } } -} +} \ No newline at end of file diff --git a/packages/@aws-cdk/aws-rds/test/integ.instance.lit.expected.json b/packages/@aws-cdk/aws-rds/test/integ.instance.lit.expected.json index 9f591e2399a62..478b874b4d079 100644 --- a/packages/@aws-cdk/aws-rds/test/integ.instance.lit.expected.json +++ b/packages/@aws-cdk/aws-rds/test/integ.instance.lit.expected.json @@ -359,8 +359,8 @@ "ParameterGroup5E32DECB": { "Type": "AWS::RDS::DBParameterGroup", "Properties": { - "Description": "Parameter group for 
oracle-se1-11.2", - "Family": "oracle-se1-11.2", + "Description": "Parameter group for oracle-se2-19", + "Family": "oracle-se2-19", "Parameters": { "open_cursors": "2500" } @@ -394,11 +394,11 @@ "OptionGroupACA43DC1": { "Type": "AWS::RDS::OptionGroup", "Properties": { - "EngineName": "oracle-se1", - "MajorEngineVersion": "11.2", + "EngineName": "oracle-se2", + "MajorEngineVersion": "19", "OptionConfigurations": [ { - "OptionName": "XMLDB" + "OptionName": "LOCATOR" }, { "OptionName": "OEM", @@ -413,7 +413,7 @@ ] } ], - "OptionGroupDescription": "Option group for oracle-se1 11.2" + "OptionGroupDescription": "Option group for oracle-se2 19" } }, "InstanceSubnetGroupF2CBA54F": { @@ -644,7 +644,8 @@ "listener" ], "EnablePerformanceInsights": true, - "Engine": "oracle-se1", + "Engine": "oracle-se2", + "EngineVersion": "19.0.0.0.ru-2020-04.rur-2020-04.r1", "Iops": 1000, "LicenseModel": "bring-your-own-license", "MasterUsername": { @@ -813,7 +814,7 @@ "Properties": { "Location": { "ApplicationId": "arn:aws:serverlessrepo:us-east-1:297356227824:applications/SecretsManagerRDSOracleRotationSingleUser", - "SemanticVersion": "1.1.3" + "SemanticVersion": "1.1.60" }, "Parameters": { "endpoint": { @@ -965,9 +966,11 @@ "LogRetentionaae0aa3c5b4d4f87b02d85b201efdd8aFD4BFC8A": { "Type": "AWS::Lambda::Function", "Properties": { + "Handler": "index.handler", + "Runtime": "nodejs10.x", "Code": { "S3Bucket": { - "Ref": "AssetParameters11aa2ce8971716ca7c8d28d472ab5e937131e78e136d0de8f4997fb11c4de847S3Bucket46EF559D" + "Ref": "AssetParameters74a1cab76f5603c5e27101cb3809d8745c50f708b0f4b497ed0910eb533d437bS3Bucket48EF98C9" }, "S3Key": { "Fn::Join": [ @@ -980,7 +983,7 @@ "Fn::Split": [ "||", { - "Ref": "AssetParameters11aa2ce8971716ca7c8d28d472ab5e937131e78e136d0de8f4997fb11c4de847S3VersionKey68B7BF84" + "Ref": "AssetParameters74a1cab76f5603c5e27101cb3809d8745c50f708b0f4b497ed0910eb533d437bS3VersionKeyF33C73AF" } ] } @@ -993,7 +996,7 @@ "Fn::Split": [ "||", { - "Ref": 
"AssetParameters11aa2ce8971716ca7c8d28d472ab5e937131e78e136d0de8f4997fb11c4de847S3VersionKey68B7BF84" + "Ref": "AssetParameters74a1cab76f5603c5e27101cb3809d8745c50f708b0f4b497ed0910eb533d437bS3VersionKeyF33C73AF" } ] } @@ -1003,14 +1006,12 @@ ] } }, - "Handler": "index.handler", "Role": { "Fn::GetAtt": [ "LogRetentionaae0aa3c5b4d4f87b02d85b201efdd8aServiceRole9741ECFB", "Arn" ] - }, - "Runtime": "nodejs10.x" + } }, "DependsOn": [ "LogRetentionaae0aa3c5b4d4f87b02d85b201efdd8aServiceRoleDefaultPolicyADDA7DEB", @@ -1108,17 +1109,17 @@ } }, "Parameters": { - "AssetParameters11aa2ce8971716ca7c8d28d472ab5e937131e78e136d0de8f4997fb11c4de847S3Bucket46EF559D": { + "AssetParameters74a1cab76f5603c5e27101cb3809d8745c50f708b0f4b497ed0910eb533d437bS3Bucket48EF98C9": { "Type": "String", - "Description": "S3 bucket for asset \"11aa2ce8971716ca7c8d28d472ab5e937131e78e136d0de8f4997fb11c4de847\"" + "Description": "S3 bucket for asset \"74a1cab76f5603c5e27101cb3809d8745c50f708b0f4b497ed0910eb533d437b\"" }, - "AssetParameters11aa2ce8971716ca7c8d28d472ab5e937131e78e136d0de8f4997fb11c4de847S3VersionKey68B7BF84": { + "AssetParameters74a1cab76f5603c5e27101cb3809d8745c50f708b0f4b497ed0910eb533d437bS3VersionKeyF33C73AF": { "Type": "String", - "Description": "S3 key for asset version \"11aa2ce8971716ca7c8d28d472ab5e937131e78e136d0de8f4997fb11c4de847\"" + "Description": "S3 key for asset version \"74a1cab76f5603c5e27101cb3809d8745c50f708b0f4b497ed0910eb533d437b\"" }, - "AssetParameters11aa2ce8971716ca7c8d28d472ab5e937131e78e136d0de8f4997fb11c4de847ArtifactHash27BA7171": { + "AssetParameters74a1cab76f5603c5e27101cb3809d8745c50f708b0f4b497ed0910eb533d437bArtifactHash976CF1BD": { "Type": "String", - "Description": "Artifact hash for asset \"11aa2ce8971716ca7c8d28d472ab5e937131e78e136d0de8f4997fb11c4de847\"" + "Description": "Artifact hash for asset \"74a1cab76f5603c5e27101cb3809d8745c50f708b0f4b497ed0910eb533d437b\"" } } -} +} \ No newline at end of file diff --git 
a/packages/@aws-cdk/aws-rds/test/integ.instance.lit.ts b/packages/@aws-cdk/aws-rds/test/integ.instance.lit.ts index f98c65a0950f3..7f36806c35230 100644 --- a/packages/@aws-cdk/aws-rds/test/integ.instance.lit.ts +++ b/packages/@aws-cdk/aws-rds/test/integ.instance.lit.ts @@ -18,7 +18,7 @@ class DatabaseInstanceStack extends cdk.Stack { /// !show // Set open cursors with parameter group const parameterGroup = new rds.ParameterGroup(this, 'ParameterGroup', { - engine: rds.DatabaseInstanceEngine.ORACLE_SE1, + engine: rds.DatabaseInstanceEngine.oracleSe2({ version: rds.OracleEngineVersion.VER_19_0_0_0_2020_04_R1 }), parameters: { open_cursors: '2500', }, @@ -26,10 +26,10 @@ class DatabaseInstanceStack extends cdk.Stack { /// Add XMLDB and OEM with option group const optionGroup = new rds.OptionGroup(this, 'OptionGroup', { - engine: rds.DatabaseInstanceEngine.ORACLE_SE1, + engine: rds.DatabaseInstanceEngine.oracleSe2({ version: rds.OracleEngineVersion.VER_19_0_0_0_2020_04_R1 }), configurations: [ { - name: 'XMLDB', + name: 'LOCATOR', }, { name: 'OEM', @@ -44,7 +44,7 @@ class DatabaseInstanceStack extends cdk.Stack { // Database instance with production values const instance = new rds.DatabaseInstance(this, 'Instance', { - engine: rds.DatabaseInstanceEngine.ORACLE_SE1, + engine: rds.DatabaseInstanceEngine.oracleSe2({ version: rds.OracleEngineVersion.VER_19_0_0_0_2020_04_R1 }), licenseModel: rds.LicenseModel.BRING_YOUR_OWN_LICENSE, instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE3, ec2.InstanceSize.MEDIUM), multiAz: true, diff --git a/packages/@aws-cdk/aws-rds/test/test.cluster.ts b/packages/@aws-cdk/aws-rds/test/test.cluster.ts index 09637a8deea1c..8320b7a5e25c4 100644 --- a/packages/@aws-cdk/aws-rds/test/test.cluster.ts +++ b/packages/@aws-cdk/aws-rds/test/test.cluster.ts @@ -6,7 +6,10 @@ import * as logs from '@aws-cdk/aws-logs'; import * as s3 from '@aws-cdk/aws-s3'; import * as cdk from '@aws-cdk/core'; import { Test } from 'nodeunit'; -import { 
AuroraMysqlEngineVersion, AuroraPostgresEngineVersion, DatabaseCluster, DatabaseClusterEngine, ParameterGroup, PerformanceInsightRetention } from '../lib'; +import { + AuroraEngineVersion, AuroraMysqlEngineVersion, AuroraPostgresEngineVersion, DatabaseCluster, DatabaseClusterEngine, + DatabaseClusterFromSnapshot, ParameterGroup, PerformanceInsightRetention, +} from '../lib'; export = { 'creating a Cluster also creates 2 DB Instances'(test: Test) { @@ -479,6 +482,84 @@ export = { test.done(); }, + 'can import a cluster with minimal attributes'(test: Test) { + const stack = testStack(); + + const cluster = DatabaseCluster.fromDatabaseClusterAttributes(stack, 'Database', { + clusterIdentifier: 'identifier', + }); + + test.equals(cluster.clusterIdentifier, 'identifier'); + + test.done(); + }, + + 'minimal imported cluster throws on accessing attributes for unprovided parameters'(test: Test) { + const stack = testStack(); + + const cluster = DatabaseCluster.fromDatabaseClusterAttributes(stack, 'Database', { + clusterIdentifier: 'identifier', + }); + + test.throws(() => cluster.clusterEndpoint, /Cannot access `clusterEndpoint` of an imported cluster/); + test.throws(() => cluster.clusterReadEndpoint, /Cannot access `clusterReadEndpoint` of an imported cluster/); + test.throws(() => cluster.instanceIdentifiers, /Cannot access `instanceIdentifiers` of an imported cluster/); + test.throws(() => cluster.instanceEndpoints, /Cannot access `instanceEndpoints` of an imported cluster/); + + test.done(); + }, + + 'imported cluster can access properties if attributes are provided'(test: Test) { + const stack = testStack(); + + const cluster = DatabaseCluster.fromDatabaseClusterAttributes(stack, 'Database', { + clusterEndpointAddress: 'addr', + clusterIdentifier: 'identifier', + instanceEndpointAddresses: ['instance-addr'], + instanceIdentifiers: ['identifier'], + port: 3306, + readerEndpointAddress: 'reader-address', + securityGroups: [ec2.SecurityGroup.fromSecurityGroupId(stack, 
'SG', 'sg-123456789', { + allowAllOutbound: false, + })], + }); + + test.equals(cluster.clusterEndpoint.socketAddress, 'addr:3306'); + test.equals(cluster.clusterReadEndpoint.socketAddress, 'reader-address:3306'); + test.deepEqual(cluster.instanceIdentifiers, ['identifier']); + test.deepEqual(cluster.instanceEndpoints.map(endpoint => endpoint.socketAddress), ['instance-addr:3306']); + + test.done(); + }, + + 'cluster supports metrics'(test: Test) { + const stack = testStack(); + const vpc = new ec2.Vpc(stack, 'VPC'); + + const cluster = new DatabaseCluster(stack, 'Database', { + engine: DatabaseClusterEngine.auroraMysql({ version: AuroraMysqlEngineVersion.VER_5_7_12 }), + masterUser: { + username: 'admin', + password: cdk.SecretValue.plainText('tooshort'), + }, + instanceProps: { + vpc, + }, + }); + + test.deepEqual(stack.resolve(cluster.metricCPUUtilization()), { + dimensions: { DBClusterIdentifier: { Ref: 'DatabaseB269D8BB' } }, + namespace: 'AWS/RDS', + metricName: 'CPUUtilization', + period: cdk.Duration.minutes(5), + statistic: 'Average', + account: '12345', + region: 'us-test-1', + }); + + test.done(); + }, + 'cluster with enabled monitoring'(test: Test) { // GIVEN const stack = testStack(); @@ -1310,6 +1391,37 @@ export = { test.done(); }, + + 'create a cluster from a snapshot'(test: Test) { + const stack = testStack(); + const vpc = new ec2.Vpc(stack, 'VPC'); + + // WHEN + new DatabaseClusterFromSnapshot(stack, 'Database', { + engine: DatabaseClusterEngine.aurora({ version: AuroraEngineVersion.VER_1_22_2 }), + instanceProps: { + vpc, + }, + snapshotIdentifier: 'mySnapshot', + }); + + // THEN + expect(stack).to(haveResource('AWS::RDS::DBCluster', { + Properties: { + Engine: 'aurora', + EngineVersion: '5.6.mysql_aurora.1.22.2', + DBSubnetGroupName: { Ref: 'DatabaseSubnets56F17B9A' }, + VpcSecurityGroupIds: [{ 'Fn::GetAtt': ['DatabaseSecurityGroup5C91FDCB', 'GroupId'] }], + SnapshotIdentifier: 'mySnapshot', + }, + DeletionPolicy: ABSENT, + UpdateReplacePolicy: 
'Snapshot', + }, ResourcePart.CompleteDefinition)); + + expect(stack).to(countResources('AWS::RDS::DBInstance', 2)); + + test.done(); + }, }; function testStack() { diff --git a/packages/@aws-cdk/aws-rds/test/test.instance.ts b/packages/@aws-cdk/aws-rds/test/test.instance.ts index 3ba4d67cb1b93..074bb1438b3e2 100644 --- a/packages/@aws-cdk/aws-rds/test/test.instance.ts +++ b/packages/@aws-cdk/aws-rds/test/test.instance.ts @@ -22,7 +22,7 @@ export = { 'create a DB instance'(test: Test) { // WHEN new rds.DatabaseInstance(stack, 'Instance', { - engine: rds.DatabaseInstanceEngine.ORACLE_SE1, + engine: rds.DatabaseInstanceEngine.oracleSe2({ version: rds.OracleEngineVersion.VER_19_0_0_0_2020_04_R1 }), licenseModel: rds.LicenseModel.BRING_YOUR_OWN_LICENSE, instanceType: ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.MEDIUM), multiAz: true, @@ -64,7 +64,8 @@ export = { 'listener', ], EnablePerformanceInsights: true, - Engine: 'oracle-se1', + Engine: 'oracle-se2', + EngineVersion: '19.0.0.0.ru-2020-04.rur-2020-04.r1', Iops: 1000, LicenseModel: 'bring-your-own-license', MasterUsername: { @@ -197,7 +198,7 @@ export = { 'instance with option and parameter group'(test: Test) { const optionGroup = new rds.OptionGroup(stack, 'OptionGroup', { - engine: rds.DatabaseInstanceEngine.ORACLE_SE1, + engine: rds.DatabaseInstanceEngine.oracleSe2({ version: rds.OracleEngineVersion.VER_19_0_0_0_2020_04_R1 }), configurations: [ { name: 'XMLDB', diff --git a/packages/@aws-cdk/aws-route53-patterns/README.md b/packages/@aws-cdk/aws-route53-patterns/README.md index a652b8ee153f6..0b21ed1340580 100644 --- a/packages/@aws-cdk/aws-route53-patterns/README.md +++ b/packages/@aws-cdk/aws-route53-patterns/README.md @@ -27,7 +27,7 @@ The `HttpsRedirect` constructs creates: * Amazon CloudFront distribution - makes website available from data centres around the world * Amazon S3 bucket - empty bucket used for website hosting redirect (`websiteRedirect`) capabilities. 
-* Amazon Route 53 Alias record - routes traffic to the CloudFront distribution +* Amazon Route 53 A/AAAA Alias records - routes traffic to the CloudFront distribution * AWS Certificate Manager certificate - SSL/TLS certificate used by CloudFront for your domain diff --git a/packages/@aws-cdk/aws-route53-patterns/lib/website-redirect.ts b/packages/@aws-cdk/aws-route53-patterns/lib/website-redirect.ts index 1dd1c97c11e66..450a535fa7c6a 100644 --- a/packages/@aws-cdk/aws-route53-patterns/lib/website-redirect.ts +++ b/packages/@aws-cdk/aws-route53-patterns/lib/website-redirect.ts @@ -1,7 +1,7 @@ import * as crypto from 'crypto'; import { DnsValidatedCertificate, ICertificate } from '@aws-cdk/aws-certificatemanager'; import { CloudFrontWebDistribution, OriginProtocolPolicy, PriceClass, ViewerProtocolPolicy } from '@aws-cdk/aws-cloudfront'; -import { ARecord, IHostedZone, RecordTarget } from '@aws-cdk/aws-route53'; +import { ARecord, AaaaRecord, IHostedZone, RecordTarget } from '@aws-cdk/aws-route53'; import { CloudFrontTarget } from '@aws-cdk/aws-route53-targets'; import { Bucket, RedirectProtocol } from '@aws-cdk/aws-s3'; import { Construct, RemovalPolicy, Stack, Token } from '@aws-cdk/core'; @@ -97,11 +97,13 @@ export class HttpsRedirect extends Construct { domainNames.forEach((domainName) => { const hash = crypto.createHash('md5').update(domainName).digest('hex').substr(0, 6); - new ARecord(this, `RedirectAliasRecord${hash}`, { + const aliasProps = { recordName: domainName, zone: props.zone, target: RecordTarget.fromAlias(new CloudFrontTarget(redirectDist)), - }); + }; + new ARecord(this, `RedirectAliasRecord${hash}`, aliasProps); + new AaaaRecord(this, `RedirectAliasRecordSix${hash}`, aliasProps); }); } } diff --git a/packages/@aws-cdk/aws-route53-patterns/test/bucket-website-target.test.ts b/packages/@aws-cdk/aws-route53-patterns/test/bucket-website-target.test.ts index ebbbf7dbba52c..a46d4b8707da9 100644 --- 
a/packages/@aws-cdk/aws-route53-patterns/test/bucket-website-target.test.ts +++ b/packages/@aws-cdk/aws-route53-patterns/test/bucket-website-target.test.ts @@ -35,10 +35,22 @@ test('create HTTPS redirect', () => { }, }); expect(stack).toHaveResource('AWS::Route53::RecordSet', { + Type: 'A', Name: 'foo.example.com.', HostedZoneId: 'ID', }); expect(stack).toHaveResource('AWS::Route53::RecordSet', { + Type: 'AAAA', + Name: 'foo.example.com.', + HostedZoneId: 'ID', + }); + expect(stack).toHaveResource('AWS::Route53::RecordSet', { + Type: 'A', + Name: 'baz.example.com.', + HostedZoneId: 'ID', + }); + expect(stack).toHaveResource('AWS::Route53::RecordSet', { + Type: 'AAAA', Name: 'baz.example.com.', HostedZoneId: 'ID', }); @@ -68,6 +80,11 @@ test('create HTTPS redirect for apex', () => { }, }); expect(stack).toHaveResource('AWS::Route53::RecordSet', { + Type: 'A', + Name: 'example.com.', + }); + expect(stack).toHaveResource('AWS::Route53::RecordSet', { + Type: 'AAAA', Name: 'example.com.', }); }); diff --git a/packages/@aws-cdk/aws-s3-deployment/lib/bucket-deployment.ts b/packages/@aws-cdk/aws-s3-deployment/lib/bucket-deployment.ts index 74b2478658fe7..cdd4e1413f766 100644 --- a/packages/@aws-cdk/aws-s3-deployment/lib/bucket-deployment.ts +++ b/packages/@aws-cdk/aws-s3-deployment/lib/bucket-deployment.ts @@ -8,7 +8,6 @@ import * as s3 from '@aws-cdk/aws-s3'; import * as cdk from '@aws-cdk/core'; import { ISource, SourceConfig } from './source'; -const now = Date.now(); const handlerCodeBundle = path.join(__dirname, '..', 'lambda', 'bundle.zip'); const handlerSourceDirectory = path.join(__dirname, '..', 'lambda', 'src'); @@ -129,7 +128,7 @@ export interface BucketDeploymentProps { * @default - The objects in the distribution will not expire. 
* @see https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html#SysMetadata */ - readonly expires?: Expires; + readonly expires?: cdk.Expiration; /** * System-defined x-amz-server-side-encryption metadata to be set on all objects in the deployment. * @default - Server side encryption is not used. @@ -271,7 +270,7 @@ function mapSystemMetadata(metadata: BucketDeploymentProps) { const res: { [key: string]: string } = {}; if (metadata.cacheControl) { res['cache-control'] = metadata.cacheControl.map(c => c.value).join(', '); } - if (metadata.expires) { res.expires = metadata.expires.value; } + if (metadata.expires) { res.expires = metadata.expires.date.toUTCString(); } if (metadata.contentDisposition) { res['content-disposition'] = metadata.contentDisposition; } if (metadata.contentEncoding) { res['content-encoding'] = metadata.contentEncoding; } if (metadata.contentLanguage) { res['content-language'] = metadata.contentLanguage; } @@ -330,6 +329,8 @@ export enum StorageClass { /** * Used for HTTP expires header, which influences downstream caches. Does NOT influence deletion of the object. 
* @see https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html#SysMetadata + * + * @deprecated use core.Expiration */ export class Expires { /** @@ -348,7 +349,7 @@ export class Expires { * Expire once the specified duration has passed since deployment time * @param t the duration to wait before expiring */ - public static after(t: cdk.Duration) { return Expires.atDate(new Date(now + t.toMilliseconds())); } + public static after(t: cdk.Duration) { return Expires.atDate(new Date(Date.now() + t.toMilliseconds())); } public static fromString(s: string) { return new Expires(s); } diff --git a/packages/@aws-cdk/aws-s3-deployment/test/bucket-deployment.test.ts b/packages/@aws-cdk/aws-s3-deployment/test/bucket-deployment.test.ts index 09b169298c0f3..3ba33793ab8b3 100644 --- a/packages/@aws-cdk/aws-s3-deployment/test/bucket-deployment.test.ts +++ b/packages/@aws-cdk/aws-s3-deployment/test/bucket-deployment.test.ts @@ -304,6 +304,7 @@ test('system metadata is correctly transformed', () => { // GIVEN const stack = new cdk.Stack(); const bucket = new s3.Bucket(stack, 'Dest'); + const expiration = cdk.Expiration.after(cdk.Duration.hours(12)); // WHEN new s3deploy.BucketDeployment(stack, 'Deploy', { @@ -318,7 +319,7 @@ test('system metadata is correctly transformed', () => { serverSideEncryptionCustomerAlgorithm: 'rot13', websiteRedirectLocation: 'example', cacheControl: [s3deploy.CacheControl.setPublic(), s3deploy.CacheControl.maxAge(cdk.Duration.hours(1))], - expires: s3deploy.Expires.after(cdk.Duration.hours(12)), + expires: expiration, }); // THEN @@ -331,7 +332,7 @@ test('system metadata is correctly transformed', () => { 'sse': 'aws:kms', 'sse-kms-key-id': 'mykey', 'cache-control': 'public, max-age=3600', - 'expires': s3deploy.Expires.after(cdk.Duration.hours(12)).value, + 'expires': expiration.date.toUTCString(), 'sse-c-copy-source': 'rot13', 'website-redirect': 'example', }, @@ -339,11 +340,10 @@ test('system metadata is correctly transformed', () => { }); 
test('expires type has correct values', () => { - expect(s3deploy.Expires.atDate(new Date('Sun, 26 Jan 2020 00:53:20 GMT')).value).toEqual('Sun, 26 Jan 2020 00:53:20 GMT'); - expect(s3deploy.Expires.atTimestamp(1580000000000).value).toEqual('Sun, 26 Jan 2020 00:53:20 GMT'); - expect(Math.abs(new Date(s3deploy.Expires.after(cdk.Duration.minutes(10)).value).getTime() - (Date.now() + 600000)) < 15000).toBeTruthy(); - expect(s3deploy.Expires.fromString('Tue, 04 Feb 2020 08:45:33 GMT').value).toEqual('Tue, 04 Feb 2020 08:45:33 GMT'); - + expect(cdk.Expiration.atDate(new Date('Sun, 26 Jan 2020 00:53:20 GMT')).date.toUTCString()).toEqual('Sun, 26 Jan 2020 00:53:20 GMT'); + expect(cdk.Expiration.atTimestamp(1580000000000).date.toUTCString()).toEqual('Sun, 26 Jan 2020 00:53:20 GMT'); + expect(Math.abs(new Date(cdk.Expiration.after(cdk.Duration.minutes(10)).date.toUTCString()).getTime() - (Date.now() + 600000)) < 15000).toBeTruthy(); + expect(cdk.Expiration.fromString('Tue, 04 Feb 2020 08:45:33 GMT').date.toUTCString()).toEqual('Tue, 04 Feb 2020 08:45:33 GMT'); }); test('cache control type has correct values', () => { diff --git a/packages/@aws-cdk/aws-s3/test/test.aspect.ts b/packages/@aws-cdk/aws-s3/test/test.aspect.ts index e85e4d243dca0..a1a94a44b0f1d 100644 --- a/packages/@aws-cdk/aws-s3/test/test.aspect.ts +++ b/packages/@aws-cdk/aws-s3/test/test.aspect.ts @@ -44,7 +44,7 @@ class BucketVersioningChecker implements cdk.IAspect { if (node instanceof s3.CfnBucket) { if (!node.versioningConfiguration || (!cdk.Tokenization.isResolvable(node.versioningConfiguration) && node.versioningConfiguration.status !== 'Enabled')) { - node.node.addError('Bucket versioning is not enabled'); + cdk.Annotations.of(node).addError('Bucket versioning is not enabled'); } } } diff --git a/packages/@aws-cdk/aws-secretsmanager/README.md b/packages/@aws-cdk/aws-secretsmanager/README.md index 540cc9a7fa0be..e8a511ecef269 100644 --- a/packages/@aws-cdk/aws-secretsmanager/README.md +++ 
b/packages/@aws-cdk/aws-secretsmanager/README.md @@ -43,7 +43,7 @@ A secret can set `RemovalPolicy`. If it set to `RETAIN`, that removing a secret ### Grant permission to use the secret to a role -You must grant permission to a resource for that resource to be allowed to +You must grant permission to a resource for that resource to be allowed to use a secret. This can be achieved with the `Secret.grantRead` and/or `Secret.grantUpdate` method, depending on your need: @@ -86,6 +86,7 @@ new SecretRotation(this, 'SecretRotation', { secret: mySecret, target: myDatabase, // a Connectable vpc: myVpc, // The VPC where the secret rotation application will be deployed + excludeCharacters: ` ;+%{}` + `@'"\`/\\#`, // A string of characters to never use when generating new passwords. Example is a superset of the characters which will break DMS endpoints and characters which cause problems in BASH scripts. }); ``` diff --git a/packages/@aws-cdk/aws-secretsmanager/lib/secret-rotation.ts b/packages/@aws-cdk/aws-secretsmanager/lib/secret-rotation.ts index a64d65f146d70..6ef72114413cf 100644 --- a/packages/@aws-cdk/aws-secretsmanager/lib/secret-rotation.ts +++ b/packages/@aws-cdk/aws-secretsmanager/lib/secret-rotation.ts @@ -22,84 +22,84 @@ export class SecretRotationApplication { /** * Conducts an AWS SecretsManager secret rotation for RDS MariaDB using the single user rotation scheme */ - public static readonly MARIADB_ROTATION_SINGLE_USER = new SecretRotationApplication('SecretsManagerRDSMariaDBRotationSingleUser', '1.1.3'); + public static readonly MARIADB_ROTATION_SINGLE_USER = new SecretRotationApplication('SecretsManagerRDSMariaDBRotationSingleUser', '1.1.60'); /** * Conducts an AWS SecretsManager secret rotation for RDS MariaDB using the multi user rotation scheme */ - public static readonly MARIADB_ROTATION_MULTI_USER = new SecretRotationApplication('SecretsManagerRDSMariaDBRotationMultiUser', '1.1.3', { + public static readonly MARIADB_ROTATION_MULTI_USER = new 
SecretRotationApplication('SecretsManagerRDSMariaDBRotationMultiUser', '1.1.60', { isMultiUser: true, }); /** * Conducts an AWS SecretsManager secret rotation for RDS MySQL using the single user rotation scheme */ - public static readonly MYSQL_ROTATION_SINGLE_USER = new SecretRotationApplication('SecretsManagerRDSMySQLRotationSingleUser', '1.1.3'); + public static readonly MYSQL_ROTATION_SINGLE_USER = new SecretRotationApplication('SecretsManagerRDSMySQLRotationSingleUser', '1.1.60'); /** * Conducts an AWS SecretsManager secret rotation for RDS MySQL using the multi user rotation scheme */ - public static readonly MYSQL_ROTATION_MULTI_USER = new SecretRotationApplication('SecretsManagerRDSMySQLRotationMultiUser', '1.1.3', { + public static readonly MYSQL_ROTATION_MULTI_USER = new SecretRotationApplication('SecretsManagerRDSMySQLRotationMultiUser', '1.1.60', { isMultiUser: true, }); /** * Conducts an AWS SecretsManager secret rotation for RDS Oracle using the single user rotation scheme */ - public static readonly ORACLE_ROTATION_SINGLE_USER = new SecretRotationApplication('SecretsManagerRDSOracleRotationSingleUser', '1.1.3'); + public static readonly ORACLE_ROTATION_SINGLE_USER = new SecretRotationApplication('SecretsManagerRDSOracleRotationSingleUser', '1.1.60'); /** * Conducts an AWS SecretsManager secret rotation for RDS Oracle using the multi user rotation scheme */ - public static readonly ORACLE_ROTATION_MULTI_USER = new SecretRotationApplication('SecretsManagerRDSOracleRotationMultiUser', '1.1.3', { + public static readonly ORACLE_ROTATION_MULTI_USER = new SecretRotationApplication('SecretsManagerRDSOracleRotationMultiUser', '1.1.60', { isMultiUser: true, }); /** * Conducts an AWS SecretsManager secret rotation for RDS PostgreSQL using the single user rotation scheme */ - public static readonly POSTGRES_ROTATION_SINGLE_USER = new SecretRotationApplication('SecretsManagerRDSPostgreSQLRotationSingleUser', '1.1.3'); + public static readonly 
POSTGRES_ROTATION_SINGLE_USER = new SecretRotationApplication('SecretsManagerRDSPostgreSQLRotationSingleUser', '1.1.60'); /** * Conducts an AWS SecretsManager secret rotation for RDS PostgreSQL using the multi user rotation scheme */ - public static readonly POSTGRES_ROTATION_MULTI_USER = new SecretRotationApplication('SecretsManagerRDSPostgreSQLRotationMultiUser', '1.1.3', { + public static readonly POSTGRES_ROTATION_MULTI_USER = new SecretRotationApplication('SecretsManagerRDSPostgreSQLRotationMultiUser', '1.1.60', { isMultiUser: true, }); /** * Conducts an AWS SecretsManager secret rotation for RDS SQL Server using the single user rotation scheme */ - public static readonly SQLSERVER_ROTATION_SINGLE_USER = new SecretRotationApplication('SecretsManagerRDSSQLServerRotationSingleUser', '1.1.3'); + public static readonly SQLSERVER_ROTATION_SINGLE_USER = new SecretRotationApplication('SecretsManagerRDSSQLServerRotationSingleUser', '1.1.60'); /** * Conducts an AWS SecretsManager secret rotation for RDS SQL Server using the multi user rotation scheme */ - public static readonly SQLSERVER_ROTATION_MULTI_USER = new SecretRotationApplication('SecretsManagerRDSSQLServerRotationMultiUser', '1.1.3', { + public static readonly SQLSERVER_ROTATION_MULTI_USER = new SecretRotationApplication('SecretsManagerRDSSQLServerRotationMultiUser', '1.1.60', { isMultiUser: true, }); /** * Conducts an AWS SecretsManager secret rotation for Amazon Redshift using the single user rotation scheme */ - public static readonly REDSHIFT_ROTATION_SINGLE_USER = new SecretRotationApplication('SecretsManagerRedshiftRotationSingleUser', '1.1.3'); + public static readonly REDSHIFT_ROTATION_SINGLE_USER = new SecretRotationApplication('SecretsManagerRedshiftRotationSingleUser', '1.1.60'); /** * Conducts an AWS SecretsManager secret rotation for Amazon Redshift using the multi user rotation scheme */ - public static readonly REDSHIFT_ROTATION_MULTI_USER = new 
SecretRotationApplication('SecretsManagerRedshiftRotationMultiUser', '1.1.3', { + public static readonly REDSHIFT_ROTATION_MULTI_USER = new SecretRotationApplication('SecretsManagerRedshiftRotationMultiUser', '1.1.60', { isMultiUser: true, }); /** * Conducts an AWS SecretsManager secret rotation for MongoDB using the single user rotation scheme */ - public static readonly MONGODB_ROTATION_SINGLE_USER = new SecretRotationApplication('SecretsManagerMongoDBRotationSingleUser', '1.1.3'); + public static readonly MONGODB_ROTATION_SINGLE_USER = new SecretRotationApplication('SecretsManagerMongoDBRotationSingleUser', '1.1.60'); /** * Conducts an AWS SecretsManager secret rotation for MongoDB using the multi user rotation scheme */ - public static readonly MONGODB_ROTATION_MULTI_USER = new SecretRotationApplication('SecretsManagerMongoDBRotationMultiUser', '1.1.3', { + public static readonly MONGODB_ROTATION_MULTI_USER = new SecretRotationApplication('SecretsManagerMongoDBRotationMultiUser', '1.1.60', { isMultiUser: true, }); @@ -193,6 +193,13 @@ export interface SecretRotationProps { * @default - a new security group is created */ readonly securityGroup?: ec2.ISecurityGroup; + + /** + * Characters which should not appear in the generated password + * + * @default - no additional characters are explicitly excluded + */ + readonly excludeCharacters?: string; } /** @@ -226,6 +233,10 @@ export class SecretRotation extends Construct { vpcSecurityGroupIds: securityGroup.securityGroupId, }; + if (props.excludeCharacters) { + parameters.excludeCharacters = props.excludeCharacters; + } + if (props.secret.encryptionKey) { parameters.kmsKeyArn = props.secret.encryptionKey.keyArn; } diff --git a/packages/@aws-cdk/aws-secretsmanager/test/test.secret-rotation.ts b/packages/@aws-cdk/aws-secretsmanager/test/test.secret-rotation.ts index 463853ad8af08..79351afc059f3 100644 --- a/packages/@aws-cdk/aws-secretsmanager/test/test.secret-rotation.ts +++ 
b/packages/@aws-cdk/aws-secretsmanager/test/test.secret-rotation.ts @@ -14,6 +14,7 @@ export = { defaultPort: ec2.Port.tcp(3306), securityGroups: [new ec2.SecurityGroup(stack, 'SecurityGroup', { vpc })], }); + const excludeCharacters = ' ;+%{}' + '@\'"`/\\#'; // DMS and BASH problem chars // WHEN new secretsmanager.SecretRotation(stack, 'SecretRotation', { @@ -21,6 +22,7 @@ export = { secret, target, vpc, + excludeCharacters: excludeCharacters, }); // THEN @@ -65,7 +67,7 @@ export = { expect(stack).to(haveResource('AWS::Serverless::Application', { Location: { ApplicationId: 'arn:aws:serverlessrepo:us-east-1:297356227824:applications/SecretsManagerRDSMySQLRotationSingleUser', - SemanticVersion: '1.1.3', + SemanticVersion: '1.1.60', }, Parameters: { endpoint: { @@ -84,6 +86,7 @@ export = { ], }, functionName: 'SecretRotation', + excludeCharacters: excludeCharacters, vpcSecurityGroupIds: { 'Fn::GetAtt': [ 'SecretRotationSecurityGroup9985012B', diff --git a/packages/@aws-cdk/aws-ses-actions/lib/lambda.ts b/packages/@aws-cdk/aws-ses-actions/lib/lambda.ts index 3fa7d418e2ac4..290211bf1806f 100644 --- a/packages/@aws-cdk/aws-ses-actions/lib/lambda.ts +++ b/packages/@aws-cdk/aws-ses-actions/lib/lambda.ts @@ -71,7 +71,7 @@ export class Lambda implements ses.IReceiptRuleAction { rule.node.addDependency(permission); } else { // eslint-disable-next-line max-len - rule.node.addWarning('This rule is using a Lambda action with an imported function. Ensure permission is given to SES to invoke that function.'); + cdk.Annotations.of(rule).addWarning('This rule is using a Lambda action with an imported function. 
Ensure permission is given to SES to invoke that function.'); } return { diff --git a/packages/@aws-cdk/aws-ses-actions/lib/s3.ts b/packages/@aws-cdk/aws-ses-actions/lib/s3.ts index 5ac6ce5f373c9..35014d7a381f9 100644 --- a/packages/@aws-cdk/aws-ses-actions/lib/s3.ts +++ b/packages/@aws-cdk/aws-ses-actions/lib/s3.ts @@ -65,7 +65,7 @@ export class S3 implements ses.IReceiptRuleAction { if (policy) { // The bucket could be imported rule.node.addDependency(policy); } else { - rule.node.addWarning('This rule is using a S3 action with an imported bucket. Ensure permission is given to SES to write to that bucket.'); + cdk.Annotations.of(rule).addWarning('This rule is using a S3 action with an imported bucket. Ensure permission is given to SES to write to that bucket.'); } // Allow SES to use KMS master key diff --git a/packages/@aws-cdk/cfnspec/lib/augmentations/AWS_RDS_DBCluster.json b/packages/@aws-cdk/cfnspec/lib/augmentations/AWS_RDS_DBCluster.json new file mode 100644 index 0000000000000..854d6c6677047 --- /dev/null +++ b/packages/@aws-cdk/cfnspec/lib/augmentations/AWS_RDS_DBCluster.json @@ -0,0 +1,70 @@ +{ + "options": { + "classFile": "cluster", + "class": "DatabaseClusterBase", + "interfaceFile": "cluster-ref", + "interface": "IDatabaseCluster" + }, + "metrics": { + "namespace": "AWS/RDS", + "dimensions": { "DBClusterIdentifier": "this.clusterIdentifier" }, + "metrics": [ + { + "name": "CPUUtilization", + "documentation": "The percentage of CPU utilization." + }, + { + "name": "DatabaseConnections", + "documentation": "The number of database connections in use." + }, + { + "name": "Deadlocks", + "documentation": "The average number of deadlocks in the database per second." + }, + { + "name": "EngineUptime", + "documentation": "The amount of time that the instance has been running, in seconds." + }, + { + "name": "FreeableMemory", + "documentation": "The amount of available random access memory, in bytes." 
+ }, + { + "name": "FreeLocalStorage", + "documentation": "The amount of local storage available, in bytes." + }, + { + "name": "NetworkReceiveThroughput", + "documentation": "The amount of network throughput received from clients by each instance, in bytes per second." + }, + { + "name": "NetworkThroughput", + "documentation": "The amount of network throughput both received from and transmitted to clients by each instance, in bytes per second." + }, + { + "name": "NetworkTransmitThroughput", + "documentation": "The amount of network throughput sent to clients by each instance, in bytes per second." + }, + { + "name": "SnapshotStorageUsed", + "documentation": "The total amount of backup storage in bytes consumed by all Aurora snapshots outside its backup retention window." + }, + { + "name": "TotalBackupStorageBilled", + "documentation": "The total amount of backup storage in bytes for which you are billed." + }, + { + "name": "VolumeBytesUsed", + "documentation": "The amount of storage used by your Aurora DB instance, in bytes." + }, + { + "name": "VolumeReadIOPs", + "documentation": "The number of billed read I/O operations from a cluster volume, reported at 5-minute intervals." + }, + { + "name": "VolumeWriteIOPs", + "documentation": "The number of write disk I/O operations to the cluster volume, reported at 5-minute intervals." + } + ] + } +} diff --git a/packages/@aws-cdk/cfnspec/lib/schema/augmentation.ts b/packages/@aws-cdk/cfnspec/lib/schema/augmentation.ts index 29de1490e0759..c6d805b190e44 100644 --- a/packages/@aws-cdk/cfnspec/lib/schema/augmentation.ts +++ b/packages/@aws-cdk/cfnspec/lib/schema/augmentation.ts @@ -30,6 +30,13 @@ export interface AugmentationOptions { */ class?: string; + /** + * The name of the file containing the interface to be "augmented". + * + * @default - same as ``classFile``. + */ + interfaceFile?: string; + /** * The name of the interface to be "augmented". 
* diff --git a/packages/@aws-cdk/cloudformation-include/README.md b/packages/@aws-cdk/cloudformation-include/README.md index fb1675d9d579f..52da55a585b02 100644 --- a/packages/@aws-cdk/cloudformation-include/README.md +++ b/packages/@aws-cdk/cloudformation-include/README.md @@ -61,9 +61,22 @@ const cfnTemplate = new cfn_inc.CfnInclude(this, 'Template', { }); ``` +**Note**: different YAML parsers sometimes don't agree on what exactly constitutes valid YAML. +If you get a YAML exception when including your template, +try converting it to JSON, and including that file instead. +If you're downloading your template from the CloudFormation AWS Console, +you can easily get it in JSON format by clicking the 'View in Designer' +button on the 'Template' tab - +once in Designer, select JSON in the "Choose template language" +radio buttons on the bottom pane. + This will add all resources from `my-template.json` / `my-template.yaml` into the CDK application, preserving their original logical IDs from the template file. +Note that this including process will _not_ execute any +[CloudFormation transforms](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-macros.html) - +including the [Serverless transform](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/transform-aws-serverless.html). + Any resource from the included template can be retrieved by referring to it by its logical ID from the template. 
If you know the class of the CDK object that corresponds to that resource, you can cast the returned object to the correct type: @@ -113,114 +126,100 @@ const bucket = s3.Bucket.fromBucketName(this, 'L2Bucket', cfnBucket.ref); // bucket is of type s3.IBucket ``` -## Parameters - -If your template uses [CloudFormation Parameters](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html), -you can retrieve them from your template: - -```typescript -import * as core from '@aws-cdk/core'; - -const param: core.CfnParameter = cfnTemplate.getParameter('MyParameter'); -``` - -The `CfnParameter` object is mutable, -and any changes you make to it will be reflected in the resulting template: +## Non-resource template elements + +In addition to resources, +you can also retrieve and mutate all other template elements: + +* [Parameters](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html): + + ```typescript + import * as core from '@aws-cdk/core'; + + const param: core.CfnParameter = cfnTemplate.getParameter('MyParameter'); + + // mutating the parameter + param.default = 'MyDefault'; + ``` + +* [Conditions](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/conditions-section-structure.html): + + ```typescript + import * as core from '@aws-cdk/core'; + + const condition: core.CfnCondition = cfnTemplate.getCondition('MyCondition'); + + // mutating the condition + condition.expression = core.Fn.conditionEquals(1, 2); + ``` + +* [Mappings](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/mappings-section-structure.html): + + ```typescript + import * as core from '@aws-cdk/core'; + + const mapping: core.CfnMapping = cfnTemplate.getMapping('MyMapping'); + + // mutating the mapping + mapping.setValue('my-region', 'AMI', 'ami-04681a1dbd79675a5'); + ``` + +* [Service Catalog template 
Rules](https://docs.aws.amazon.com/servicecatalog/latest/adminguide/reference-template_constraint_rules.html): + + ```typescript + import * as core from '@aws-cdk/core'; + + const rule: core.CfnRule = cfnTemplate.getRule('MyRule'); + + // mutating the rule + rule.addAssertion(core.Fn.conditionContains(['m1.small'], myParameter.value), + 'MyParameter has to be m1.small'); + ``` + +* [Outputs](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/outputs-section-structure.html): + + ```typescript + import * as core from '@aws-cdk/core'; + + const output: core.CfnOutput = cfnTemplate.getOutput('MyOutput'); + + // mutating the output + output.value = cfnBucket.attrArn; + ``` + +* [Hooks for blue-green deployments](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/blue-green.html): + + ```typescript + import * as core from '@aws-cdk/core'; + + const hook: core.CfnHook = cfnTemplate.getHook('MyOutput'); + + // mutating the hook + const codeDeployHook = hook as core.CfnCodeDeployBlueGreenHook; + codeDeployHook.serviceRole = myRole.roleArn; + ``` + +## Parameter replacement + +If your existing template uses CloudFormation Parameters, +you may want to remove them in favor of build-time values. +You can do that using the `parameters` property: ```typescript -param.default = 'MyDefault'; -``` - -You can also provide values for them when including the template: - -```typescript -new inc.CfnInclude(stack, 'includeTemplate', { - templateFile: 'path/to/my/template' +new inc.CfnInclude(this, 'includeTemplate', { + templateFile: 'path/to/my/template', parameters: { 'MyParam': 'my-value', }, }); ``` -This will replace all references to `MyParam` with the string 'my-value', -and `MyParam` will be removed from the Parameters section of the template. 
- -## Conditions - -If your template uses [CloudFormation Conditions](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/conditions-section-structure.html), -you can retrieve them from your template: - -```typescript -import * as core from '@aws-cdk/core'; - -const condition: core.CfnCondition = cfnTemplate.getCondition('MyCondition'); -``` - -The `CfnCondition` object is mutable, -and any changes you make to it will be reflected in the resulting template: - -```typescript -condition.expression = core.Fn.conditionEquals(1, 2); -``` - -## Mappings - -If your template uses [CloudFormation Mappings](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/mappings-section-structure.html), -you can retrieve them from your template: - -```typescript -import * as core from '@aws-cdk/core'; - -const mapping: core.CfnMapping = cfnTemplate.getMapping('MyMapping'); -``` - -The `CfnMapping` object is mutable, -and any changes you make to it will be reflected in the resulting template: - -```typescript -mapping.setValue('my-region', 'AMI', 'ami-04681a1dbd79675a5'); -``` - -## Rules - -If your template uses [Service Catalog template Rules](https://docs.aws.amazon.com/servicecatalog/latest/adminguide/reference-template_constraint_rules.html), -you can retrieve them from your template: - -```typescript -import * as core from '@aws-cdk/core'; - -const rule: core.CfnRule = cfnTemplate.getRule('MyRule'); -``` - -The `CfnRule` object is mutable, -and any changes you make to it will be reflected in the resulting template: - -```typescript -rule.addAssertion(core.Fn.conditionContains(['m1.small'], myParameter.value), - 'MyParameter has to be m1.small'); -``` - -## Outputs - -If your template uses [CloudFormation Outputs](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/outputs-section-structure.html), -you can retrieve them from your template: - -```typescript -import * as core from '@aws-cdk/core'; - -const output: core.CfnOutput = 
cfnTemplate.getOutput('MyOutput'); -``` - -The `CfnOutput` object is mutable, -and any changes you make to it will be reflected in the resulting template: - -```typescript -output.value = cfnBucket.attrArn; -``` +This will replace all references to `MyParam` with the string `'my-value'`, +and `MyParam` will be removed from the 'Parameters' section of the template. ## Nested Stacks -This module also support templates that use [nested stacks](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-nested-stacks.html). +This module also supports templates that use [nested stacks](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-nested-stacks.html). For example, if you have the following parent template: @@ -249,10 +248,11 @@ where the child template pointed to by `https://my-s3-template-source.s3.amazona } ``` -You can include both the parent stack and the nested stack in your CDK application as follows: +You can include both the parent stack, +and the nested stack in your CDK application as follows: ```typescript -const parentTemplate = new inc.CfnInclude(stack, 'ParentStack', { +const parentTemplate = new inc.CfnInclude(this, 'ParentStack', { templateFile: 'path/to/my-parent-template.json', nestedStacks: { 'ChildStack': { @@ -262,6 +262,14 @@ const parentTemplate = new inc.CfnInclude(stack, 'ParentStack', { }); ``` +Here, `path/to/my-nested-template.json` +represents the path on disk to the downloaded template file from the original template URL of the nested stack +(`https://my-s3-template-source.s3.amazonaws.com/child-stack.json`). +In the CDK application, +this file will be turned into an [Asset](https://docs.aws.amazon.com/cdk/latest/guide/assets.html), +and the `TemplateURL` property of the nested stack resource +will be modified to point to that asset. 
+ The included nested stack can be accessed with the `getNestedStack` method: ```typescript @@ -270,7 +278,8 @@ const childStack: core.NestedStack = includedChildStack.stack; const childTemplate: cfn_inc.CfnInclude = includedChildStack.includedTemplate; ``` -Now you can reference resources from `ChildStack` and modify them like any other included template: +Now you can reference resources from `ChildStack`, +and modify them like any other included template: ```typescript const cfnBucket = childTemplate.getResource('MyBucket') as s3.CfnBucket; @@ -289,3 +298,32 @@ role.addToPolicy(new iam.PolicyStatement({ resources: [cfnBucket.attrArn], })); ``` + +## Vending CloudFormation templates as Constructs + +In many cases, there are existing CloudFormation templates that are not entire applications, +but more like specialized fragments, implementing a particular pattern or best practice. +If you have templates like that, +you can use the `CfnInclude` class to vend them as CDK Constructs: + +```ts +import * as path from 'path'; + +export class MyConstruct extends Construct { + constructor(scope: Construct, id: string) { + super(scope, id); + + // include a template inside the Construct + new cfn_inc.CfnInclude(this, 'MyConstruct', { + templateFile: path.join(__dirname, 'my-template.json'), + preserveLogicalIds: false, // <--- !!! + }); + } +} +``` + +Notice the `preserveLogicalIds` parameter - +it makes sure the logical IDs of all the included template elements are re-named using CDK's algorithm, +guaranteeing they are unique within your application. +Without that parameter passed, +instantiating `MyConstruct` twice in the same Stack would result in duplicated logical IDs. 
diff --git a/packages/@aws-cdk/cloudformation-include/lib/cfn-include.ts b/packages/@aws-cdk/cloudformation-include/lib/cfn-include.ts index 2c135abba3377..14549f452eccb 100644 --- a/packages/@aws-cdk/cloudformation-include/lib/cfn-include.ts +++ b/packages/@aws-cdk/cloudformation-include/lib/cfn-include.ts @@ -14,6 +14,20 @@ export interface CfnIncludeProps { */ readonly templateFile: string; + /** + * Whether the resources should have the same logical IDs in the resulting CDK template + * as they did in the original CloudFormation template file. + * If you're vending a Construct using an existing CloudFormation template, + * make sure to pass this as `false`. + * + * **Note**: regardless of whether this option is true or false, + * the {@link CfnInclude.getResource} and related methods always uses the original logical ID of the resource/element, + * as specified in the template file. + * + * @default true + */ + readonly preserveLogicalIds?: boolean; + /** * Specifies the template files that define nested stacks that should be included. * @@ -46,12 +60,12 @@ export interface CfnIncludeProps { */ export interface IncludedNestedStack { /** - * The NestedStack object which respresents the scope of the template. + * The NestedStack object which represents the scope of the template. */ readonly stack: core.NestedStack; /** - * The CfnInclude that respresents the template, which can + * The CfnInclude that represents the template, which can * be used to access Resources and other template elements. 
*/ readonly includedTemplate: CfnInclude; @@ -72,6 +86,8 @@ export class CfnInclude extends core.CfnElement { private readonly mappings: { [mappingName: string]: core.CfnMapping } = {}; private readonly rules: { [ruleName: string]: core.CfnRule } = {}; private readonly rulesScope: core.Construct; + private readonly hooks: { [hookName: string]: core.CfnHook } = {}; + private readonly hooksScope: core.Construct; private readonly outputs: { [logicalId: string]: core.CfnOutput } = {}; private readonly nestedStacks: { [logicalId: string]: IncludedNestedStack } = {}; private readonly nestedStacksToInclude: { [name: string]: CfnIncludeProps }; @@ -86,8 +102,7 @@ export class CfnInclude extends core.CfnElement { // read the template into a JS object this.template = futils.readYamlSync(props.templateFile); - // ToDo implement preserveLogicalIds=false - this.preserveLogicalIds = true; + this.preserveLogicalIds = props.preserveLogicalIds ?? true; // check if all user specified parameter values exist in the template for (const logicalId of Object.keys(this.parametersToReplace)) { @@ -131,6 +146,12 @@ export class CfnInclude extends core.CfnElement { } } + // instantiate the Hooks + this.hooksScope = new core.Construct(this, '$Hooks'); + for (const hookName of Object.keys(this.template.Hooks || {})) { + this.createHook(hookName); + } + const outputScope = new core.Construct(this, '$Ouputs'); for (const logicalId of Object.keys(this.template.Outputs || {})) { this.createOutput(logicalId, outputScope); @@ -250,6 +271,24 @@ export class CfnInclude extends core.CfnElement { return ret; } + /** + * Returns the CfnHook object from the 'Hooks' + * section of the included CloudFormation template with the given logical ID. + * Any modifications performed on the returned object will be reflected in the resulting CDK template. + * + * If a Hook with the given logical ID is not present in the template, + * an exception will be thrown. 
+ * + * @param hookLogicalId the logical ID of the Hook in the included CloudFormation template's 'Hooks' section + */ + public getHook(hookLogicalId: string): core.CfnHook { + const ret = this.hooks[hookLogicalId]; + if (!ret) { + throw new Error(`Hook with logical ID '${hookLogicalId}' was not found in the template`); + } + return ret; + } + /** * Returns the NestedStack with name logicalId. * For a nested stack to be returned by this method, it must be specified in the {@link CfnIncludeProps.nestedStacks} @@ -301,6 +340,7 @@ export class CfnInclude extends core.CfnElement { case 'Resources': case 'Parameters': case 'Rules': + case 'Hooks': case 'Outputs': // these are rendered as a side effect of instantiating the L1s break; @@ -326,7 +366,7 @@ export class CfnInclude extends core.CfnElement { mapping: cfnParser.parseValue(this.template.Mappings[mappingName]), }); this.mappings[mappingName] = cfnMapping; - cfnMapping.overrideLogicalId(mappingName); + this.overrideLogicalIdIfNeeded(cfnMapping, mappingName); } private createParameter(logicalId: string): void { @@ -357,7 +397,7 @@ export class CfnInclude extends core.CfnElement { noEcho: expression.NoEcho, }); - cfnParameter.overrideLogicalId(logicalId); + this.overrideLogicalIdIfNeeded(cfnParameter, logicalId); this.parameters[logicalId] = cfnParameter; } @@ -384,7 +424,47 @@ export class CfnInclude extends core.CfnElement { assertions: ruleProperties.Assertions, }); this.rules[ruleName] = rule; - rule.overrideLogicalId(ruleName); + this.overrideLogicalIdIfNeeded(rule, ruleName); + } + + private createHook(hookName: string): void { + const self = this; + const cfnParser = new cfn_parse.CfnParser({ + finder: { + findResource(lId): core.CfnResource | undefined { + return self.resources[lId]; + }, + findRefTarget(elementName: string): core.CfnElement | undefined { + return self.resources[elementName] ?? 
self.parameters[elementName]; + }, + findCondition(conditionName: string): core.CfnCondition | undefined { + return self.conditions[conditionName]; + }, + findMapping(mappingName): core.CfnMapping | undefined { + return self.mappings[mappingName]; + }, + }, + parameters: this.parametersToReplace, + }); + const hookAttributes = this.template.Hooks[hookName]; + + let hook: core.CfnHook; + switch (hookAttributes.Type) { + case 'AWS::CodeDeploy::BlueGreen': + hook = (core.CfnCodeDeployBlueGreenHook as any)._fromCloudFormation(this.hooksScope, hookName, hookAttributes, { + parser: cfnParser, + }); + break; + default: { + const hookProperties = cfnParser.parseValue(hookAttributes.Properties) ?? {}; + hook = new core.CfnHook(this.hooksScope, hookName, { + type: hookAttributes.Type, + properties: hookProperties, + }); + } + } + this.hooks[hookName] = hook; + this.overrideLogicalIdIfNeeded(hook, hookName); } private createOutput(logicalId: string, scope: core.Construct): void { @@ -422,7 +502,7 @@ export class CfnInclude extends core.CfnElement { })(), }); - cfnOutput.overrideLogicalId(logicalId); + this.overrideLogicalIdIfNeeded(cfnOutput, logicalId); this.outputs[logicalId] = cfnOutput; } @@ -455,8 +535,7 @@ export class CfnInclude extends core.CfnElement { expression: cfnParser.parseValue(this.template.Conditions[conditionName]), }); - // ToDo handle renaming of the logical IDs of the conditions - cfnCondition.overrideLogicalId(conditionName); + this.overrideLogicalIdIfNeeded(cfnCondition, conditionName); this.conditions[conditionName] = cfnCondition; return cfnCondition; } @@ -533,11 +612,7 @@ export class CfnInclude extends core.CfnElement { } } - if (this.preserveLogicalIds) { - // override the logical ID to match the original template - l1Instance.overrideLogicalId(logicalId); - } - + this.overrideLogicalIdIfNeeded(l1Instance, logicalId); this.resources[logicalId] = l1Instance; return l1Instance; } @@ -585,4 +660,10 @@ export class CfnInclude extends core.CfnElement 
{ } return ret; } + + private overrideLogicalIdIfNeeded(element: core.CfnElement, id: string): void { + if (this.preserveLogicalIds) { + element.overrideLogicalId(id); + } + } } diff --git a/packages/@aws-cdk/cloudformation-include/lib/file-utils.ts b/packages/@aws-cdk/cloudformation-include/lib/file-utils.ts index eca3a7a0110ba..e78a6e46bde8a 100644 --- a/packages/@aws-cdk/cloudformation-include/lib/file-utils.ts +++ b/packages/@aws-cdk/cloudformation-include/lib/file-utils.ts @@ -37,23 +37,29 @@ const shortForms: yaml_types.Schema.CustomTag[] = [ makeTagForCfnIntrinsic('Ref', false), makeTagForCfnIntrinsic('Condition', false), makeTagForCfnIntrinsic('GetAtt', true, (_doc: yaml.Document, cstNode: yaml_cst.CST.Node): any => { - // The position of the leftmost period and opening bracket tell us what syntax is being used - // If no brackets are found, then the dot notation is being used; the leftmost dot separates the - // logical ID from the attribute. - // - // If a bracket is found, then the list notation is being used; if present, the leftmost dot separates the - // logical ID from the attribute. - const firstDot = cstNode.toString().indexOf('.'); - const firstBracket = cstNode.toString().indexOf('['); + const parsedArguments = parseYamlStrWithCfnTags(cstNode.toString().substring('!GetAtt'.length)); - return { - 'Fn::GetAtt': firstDot !== -1 && firstBracket === -1 - ? [ - cstNode.toString().substring('!GetAtt '.length, firstDot), - parseYamlStrWithCfnTags((cstNode.toString().substring(firstDot + 1))), - ] - : parseYamlStrWithCfnTags(cstNode.toString().substring('!GetAtt'.length)), - }; + let value: any; + if (typeof parsedArguments === 'string') { + // if the arguments to !GetAtt are a string, + // the part before the first '.' is the logical ID, + // and the rest is the attribute name + // (which can contain '.') + const firstDot = parsedArguments.indexOf('.'); + if (firstDot === -1) { + throw new Error(`Short-form Fn::GetAtt must contain a '.' 
in its string argument, got: '${parsedArguments}'`); + } + value = [ + parsedArguments.substring(0, firstDot), + parsedArguments.substring(firstDot + 1), // the + 1 is to skip the actual '.' + ]; + } else { + // this is the form where the arguments to Fn::GetAtt are already an array - + // in this case, nothing more to do + value = parsedArguments; + } + + return { 'Fn::GetAtt': value }; }), ); diff --git a/packages/@aws-cdk/cloudformation-include/test/invalid-templates.test.ts b/packages/@aws-cdk/cloudformation-include/test/invalid-templates.test.ts index ffbfcd81ffcc8..17bf6b9a3261e 100644 --- a/packages/@aws-cdk/cloudformation-include/test/invalid-templates.test.ts +++ b/packages/@aws-cdk/cloudformation-include/test/invalid-templates.test.ts @@ -125,13 +125,19 @@ describe('CDK Include', () => { }).toThrow(/Element referenced in Fn::Sub expression with logical ID: '' was not found in the template/); }); - test('throws an error when a template supplies an invalid string to a number parameter', () => { + test("throws an exception for a template with a non-number string passed to a property with type 'number'", () => { includeTestTemplate(stack, 'alphabetical-string-passed-to-number.json'); expect(() => { SynthUtils.synthesize(stack); }).toThrow(/"abc" should be a number/); }); + + test('throws an exception for a template with a short-form Fn::GetAtt whose string argument does not contain a dot', () => { + expect(() => { + includeTestTemplate(stack, 'short-form-get-att-no-dot.yaml'); + }).toThrow(/Short-form Fn::GetAtt must contain a '.' 
in its string argument, got: 'Bucket1Arn'/); + }); }); function includeTestTemplate(scope: core.Construct, testTemplate: string): inc.CfnInclude { diff --git a/packages/@aws-cdk/cloudformation-include/test/test-templates/hook-code-deploy-blue-green-ecs.json b/packages/@aws-cdk/cloudformation-include/test/test-templates/hook-code-deploy-blue-green-ecs.json new file mode 100644 index 0000000000000..fc4abcab4e1ad --- /dev/null +++ b/packages/@aws-cdk/cloudformation-include/test/test-templates/hook-code-deploy-blue-green-ecs.json @@ -0,0 +1,95 @@ +{ + "Hooks": { + "RandomHook": { + "Type": "UnknownToday" + }, + "EcsBlueGreenCodeDeployHook": { + "Type": "AWS::CodeDeploy::BlueGreen", + "Properties": { + "ServiceRole": "CodeDeployServiceRoleName", + "Applications": [ + { + "Target": { + "Type": "AWS::ECS::Service", + "LogicalID": "MyService" + }, + "ECSAttributes": { + "TaskDefinitions": [ + "MyTaskDefinition", "MyTaskDefinition" + ], + "TaskSets": [ + "MyTaskSet", "MyTaskSet" + ], + "TrafficRouting": { + "ProdTrafficRoute": { + "Type": "AWS::ElasticLoadBalancingV2::Listener", + "LogicalID": "AlbListener" + }, + "TestTrafficRoute": { + "Type": "AWS::ElasticLoadBalancingV2::Listener", + "LogicalID": "AlbListener" + }, + "TargetGroups": [ + "AlbTargetGroup", "AlbTargetGroup" + ] + } + } + } + ], + "TrafficRoutingConfig": { + "Type": "AllAtOnce", + "TimeBasedCanary": { + "StepPercentage": 1, + "BakeTimeMins": "2" + }, + "TimeBasedLinear": { + "StepPercentage": "3", + "BakeTimeMins": 4 + } + }, + "AdditionalOptions": { + "TerminationWaitTimeInMinutes": 5 + }, + "LifecycleEventHooks": { + "BeforeInstall": "f1", + "AfterInstall": "f2", + "AfterAllowTestTraffic": "f3", + "BeforeAllowTraffic": "f4", + "AfterAllowTraffic": "f5" + } + } + } + }, + "Resources": { + "MyService": { + "Type": "AWS::ECS::Service" + }, + "MyTaskDefinition": { + "Type": "AWS::ECS::TaskDefinition" + }, + "MyTaskSet": { + "Type": "AWS::ECS::TaskSet", + "Properties": { + "Cluster": "my-cluster", + "Service": 
{ "Ref": "MyService" }, + "TaskDefinition": { "Fn::Sub": "${MyTaskDefinition}" } + } + }, + "AlbTargetGroup": { + "Type": "AWS::ElasticLoadBalancingV2::TargetGroup" + }, + "AlbListener": { + "Type": "AWS::ElasticLoadBalancingV2::Listener", + "Properties": { + "Port": 80, + "Protocol": "HTTP", + "DefaultActions": [ + { + "Type": "forward" + } + ], + "LoadBalancerArn": "my-lb" + } + } + } +} diff --git a/packages/@aws-cdk/cloudformation-include/test/test-templates/invalid/short-form-get-att-no-dot.yaml b/packages/@aws-cdk/cloudformation-include/test/test-templates/invalid/short-form-get-att-no-dot.yaml new file mode 100644 index 0000000000000..d91ae5a5cbcf2 --- /dev/null +++ b/packages/@aws-cdk/cloudformation-include/test/test-templates/invalid/short-form-get-att-no-dot.yaml @@ -0,0 +1,7 @@ +Resources: + Bucket1: + Type: AWS::S3::Bucket + Bucket2: + Type: AWS::S3::Bucket + Metadata: + Bucket1Name: !GetAtt Bucket1Arn diff --git a/packages/@aws-cdk/cloudformation-include/test/test-templates/yaml/short-form-get-att.yaml b/packages/@aws-cdk/cloudformation-include/test/test-templates/yaml/short-form-get-att.yaml index ede387067361a..146f04045d380 100644 --- a/packages/@aws-cdk/cloudformation-include/test/test-templates/yaml/short-form-get-att.yaml +++ b/packages/@aws-cdk/cloudformation-include/test/test-templates/yaml/short-form-get-att.yaml @@ -16,9 +16,9 @@ Resources: Type: AWS::S3::Bucket Properties: BucketName: !GetAtt Bucket0.Arn - AccessControl: !GetAtt [ ELB, SourceSecurityGroup.GroupName ] + AccessControl: !GetAtt [ELB, SourceSecurityGroup.GroupName] Bucket2: Type: AWS::S3::Bucket Properties: BucketName: !GetAtt [ Bucket1, Arn ] - AccessControl: !GetAtt ELB.SourceSecurityGroup.GroupName + AccessControl: !GetAtt 'ELB.SourceSecurityGroup.GroupName' diff --git a/packages/@aws-cdk/cloudformation-include/test/valid-templates.test.ts b/packages/@aws-cdk/cloudformation-include/test/valid-templates.test.ts index 35127627410f5..9e9d41284d8b7 100644 --- 
a/packages/@aws-cdk/cloudformation-include/test/valid-templates.test.ts +++ b/packages/@aws-cdk/cloudformation-include/test/valid-templates.test.ts @@ -114,7 +114,7 @@ describe('CDK Include', () => { ); }); - xtest('correctly changes the logical IDs, including references, if imported with preserveLogicalIds=false', () => { + test('correctly changes the logical IDs, including references, if imported with preserveLogicalIds=false', () => { const cfnTemplate = includeTestTemplate(stack, 'bucket-with-encryption-key.json', { preserveLogicalIds: false, }); @@ -177,6 +177,11 @@ describe('CDK Include', () => { ], }, }, + "Metadata": { + "Object1": "Location1", + "KeyRef": { "Ref": "MyScopeKey7673692F" }, + "KeyArn": { "Fn::GetAtt": ["MyScopeKey7673692F", "Arn"] }, + }, "DeletionPolicy": "Retain", "UpdateReplacePolicy": "Retain", }, @@ -781,6 +786,24 @@ describe('CDK Include', () => { }).toThrow(/Rule with name 'DoesNotExist' was not found in the template/); }); + test('can ingest a template that contains Hooks, and allows retrieving those Hooks', () => { + const cfnTemplate = includeTestTemplate(stack, 'hook-code-deploy-blue-green-ecs.json'); + const hook = cfnTemplate.getHook('EcsBlueGreenCodeDeployHook'); + + expect(hook).toBeDefined(); + expect(stack).toMatchTemplate( + loadTestFileToJsObject('hook-code-deploy-blue-green-ecs.json'), + ); + }); + + test("throws an exception when attempting to retrieve a Hook that doesn't exist in the template", () => { + const cfnTemplate = includeTestTemplate(stack, 'hook-code-deploy-blue-green-ecs.json'); + + expect(() => { + cfnTemplate.getHook('DoesNotExist'); + }).toThrow(/Hook with logical ID 'DoesNotExist' was not found in the template/); + }); + test('replaces references to parameters with the user-specified values in Resources, Conditions, Metadata, and Options sections', () => { includeTestTemplate(stack, 'parameter-references.json', { parameters: { @@ -828,6 +851,31 @@ describe('CDK Include', () => { }); }); + test('replaces 
parameters with falsey values in Ref expressions', () => { + includeTestTemplate(stack, 'resource-attribute-creation-policy.json', { + parameters: { + 'CountParameter': 0, + }, + }); + + expect(stack).toMatchTemplate({ + "Resources": { + "Bucket": { + "Type": "AWS::S3::Bucket", + "CreationPolicy": { + "AutoScalingCreationPolicy": { + "MinSuccessfulInstancesPercent": 50, + }, + "ResourceSignal": { + "Count": 0, + "Timeout": "PT5H4M3S", + }, + }, + }, + }, + }); + }); + test('replaces parameters in Fn::Sub expressions', () => { includeTestTemplate(stack, 'fn-sub-parameters.json', { parameters: { @@ -875,6 +923,25 @@ describe('CDK Include', () => { }); }); + test('replaces parameters with falsey values in Fn::Sub expressions', () => { + includeTestTemplate(stack, 'fn-sub-parameters.json', { + parameters: { + 'MyParam': '', + }, + }); + + expect(stack).toMatchTemplate({ + "Resources": { + "Bucket": { + "Type": "AWS::S3::Bucket", + "Properties": { + "BucketName": { "Fn::Sub": "" }, + }, + }, + }, + }); + }); + test('throws an exception when parameters are passed a resource name', () => { expect(() => { includeTestTemplate(stack, 'bucket-with-parameters.json', { @@ -918,7 +985,7 @@ function includeTestTemplate(scope: core.Construct, testTemplate: string, props: return new inc.CfnInclude(scope, 'MyScope', { templateFile: _testTemplateFilePath(testTemplate), parameters: props.parameters, - // preserveLogicalIds: props.preserveLogicalIds, + preserveLogicalIds: props.preserveLogicalIds, }); } diff --git a/packages/@aws-cdk/cloudformation-include/test/yaml-templates.test.ts b/packages/@aws-cdk/cloudformation-include/test/yaml-templates.test.ts index 99339a064a9e1..84f3fb43ab4bc 100644 --- a/packages/@aws-cdk/cloudformation-include/test/yaml-templates.test.ts +++ b/packages/@aws-cdk/cloudformation-include/test/yaml-templates.test.ts @@ -254,8 +254,8 @@ describe('CDK Include', () => { }); }); - // Note that this yaml template fails validation. 
It is unclear how to invoke !Transform. test('can ingest a template with the short form !Transform function', () => { + // Note that this yaml template fails validation. It is unclear how to invoke !Transform. includeTestTemplate(stack, 'invalid/short-form-transform.yaml'); expect(stack).toMatchTemplate({ diff --git a/packages/@aws-cdk/core/lib/cfn-codedeploy-blue-green-hook.ts b/packages/@aws-cdk/core/lib/cfn-codedeploy-blue-green-hook.ts new file mode 100644 index 0000000000000..67f1b1b489fc5 --- /dev/null +++ b/packages/@aws-cdk/core/lib/cfn-codedeploy-blue-green-hook.ts @@ -0,0 +1,513 @@ +import { CfnHook } from './cfn-hook'; +import { FromCloudFormationOptions } from './cfn-parse'; +import { CfnResource } from './cfn-resource'; +import { Construct } from './construct-compat'; + +/** + * The possible types of traffic shifting for the blue-green deployment configuration. + * The type of the {@link CfnTrafficRoutingConfig.type} property. + */ +export enum CfnTrafficRoutingType { + /** + * Switch from blue to green at once. + */ + ALL_AT_ONCE = 'AllAtOnce', + + /** + * Specifies a configuration that shifts traffic from blue to green in two increments. + */ + TIME_BASED_CANARY = 'TimeBasedCanary', + + /** + * Specifies a configuration that shifts traffic from blue to green in equal increments, + * with an equal number of minutes between each increment. + */ + TIME_BASED_LINEAR = 'TimeBasedLinear', +} + +/** + * The traffic routing configuration if {@link CfnTrafficRoutingConfig.type} + * is {@link CfnTrafficRoutingType.TIME_BASED_CANARY}. + */ +export interface CfnTrafficRoutingTimeBasedCanary { + /** + * The percentage of traffic to shift in the first increment of a time-based canary deployment. + * The step percentage must be 14% or greater. + * + * @default 15 + */ + readonly stepPercentage?: number; + + /** + * The number of minutes between the first and second traffic shifts of a time-based canary deployment. 
+ * + * @default 5 + */ + readonly bakeTimeMins?: number; +} + +/** + * The traffic routing configuration if {@link CfnTrafficRoutingConfig.type} + * is {@link CfnTrafficRoutingType.TIME_BASED_LINEAR}. + */ +export interface CfnTrafficRoutingTimeBasedLinear { + /** + * The percentage of traffic that is shifted at the start of each increment of a time-based linear deployment. + * The step percentage must be 14% or greater. + * + * @default 15 + */ + readonly stepPercentage?: number; + + /** + * The number of minutes between the first and second traffic shifts of a time-based linear deployment. + * + * @default 5 + */ + readonly bakeTimeMins?: number; +} + +/** + * Traffic routing configuration settings. + * The type of the {@link CfnCodeDeployBlueGreenHookProps.trafficRoutingConfig} property. + */ +export interface CfnTrafficRoutingConfig { + /** + * The type of traffic shifting used by the blue-green deployment configuration. + */ + readonly type: CfnTrafficRoutingType; + + /** + * The configuration for traffic routing when {@link type} is + * {@link CfnTrafficRoutingType.TIME_BASED_CANARY}. + * + * @default - none + */ + readonly timeBasedCanary?: CfnTrafficRoutingTimeBasedCanary; + + /** + * The configuration for traffic routing when {@link type} is + * {@link CfnTrafficRoutingType.TIME_BASED_LINEAR}. + * + * @default - none + */ + readonly timeBasedLinear?: CfnTrafficRoutingTimeBasedLinear; +} + +/** + * Additional options for the blue/green deployment. + * The type of the {@link CfnCodeDeployBlueGreenHookProps.additionalOptions} property. + */ +export interface CfnCodeDeployBlueGreenAdditionalOptions { + /** + * Specifies time to wait, in minutes, before terminating the blue resources. + * + * @default - 5 minutes + */ + readonly terminationWaitTimeInMinutes?: number; +} + +/** + * Lifecycle events for blue-green deployments. + * The type of the {@link CfnCodeDeployBlueGreenHookProps.lifecycleEventHooks} property. 
+ */ +export interface CfnCodeDeployBlueGreenLifecycleEventHooks { + /** + * Function to use to run tasks before the replacement task set is created. + * + * @default - none + */ + readonly beforeInstall?: string; + + /** + * Function to use to run tasks after the replacement task set is created and one of the target groups is associated with it. + * + * @default - none + */ + readonly afterInstall?: string; + + /** + * Function to use to run tasks after the test listener serves traffic to the replacement task set. + * + * @default - none + */ + readonly afterAllowTestTraffic?: string; + + /** + * Function to use to run tasks after the second target group is associated with the replacement task set, + * but before traffic is shifted to the replacement task set. + * + * @default - none + */ + readonly beforeAllowTraffic?: string; + + /** + * Function to use to run tasks after the second target group serves traffic to the replacement task set. + * + * @default - none + */ + readonly afterAllowTraffic?: string; +} + +/** + * Type of the {@link CfnCodeDeployBlueGreenApplication.target} property. + */ +export interface CfnCodeDeployBlueGreenApplicationTarget { + /** + * The resource type of the target being deployed. + * Right now, the only allowed value is 'AWS::ECS::Service'. + */ + readonly type: string; + + /** + * The logical id of the target resource. + */ + readonly logicalId: string; +} + +/** + * A traffic route, + * representing where the traffic is being directed to. + */ +export interface CfnTrafficRoute { + /** + * The resource type of the route. + * Today, the only allowed value is 'AWS::ElasticLoadBalancingV2::Listener'. + */ + readonly type: string; + + /** + * The logical id of the target resource. + */ + readonly logicalId: string; +} + +/** + * Type of the {@link CfnCodeDeployBlueGreenEcsAttributes.trafficRouting} property. 
+ */ +export interface CfnTrafficRouting { + /** + * The listener to be used by your load balancer to direct traffic to your target groups. + */ + readonly prodTrafficRoute: CfnTrafficRoute; + + /** + * The listener to be used by your load balancer to direct traffic to your target groups. + */ + readonly testTrafficRoute: CfnTrafficRoute; + + /** + * The logical IDs of the blue and green, respectively, + * AWS::ElasticLoadBalancingV2::TargetGroup target groups. + */ + readonly targetGroups: string[]; +} + +/** + * The attributes of the ECS Service being deployed. + * Type of the {@link CfnCodeDeployBlueGreenApplication.ecsAttributes} property. + */ +export interface CfnCodeDeployBlueGreenEcsAttributes { + /** + * The logical IDs of the blue and green, respectively, + * AWS::ECS::TaskDefinition task definitions. + */ + readonly taskDefinitions: string[]; + + /** + * The logical IDs of the blue and green, respectively, + * AWS::ECS::TaskSet task sets. + */ + readonly taskSets: string[]; + + /** + * The traffic routing configuration. + */ + readonly trafficRouting: CfnTrafficRouting; +} + +/** + * The application actually being deployed. + * Type of the {@link CfnCodeDeployBlueGreenHookProps.applications} property. + */ +export interface CfnCodeDeployBlueGreenApplication { + /** + * The target that is being deployed. + */ + readonly target: CfnCodeDeployBlueGreenApplicationTarget; + + /** + * The detailed attributes of the deployed target. + */ + readonly ecsAttributes: CfnCodeDeployBlueGreenEcsAttributes; +} + +/** + * Construction properties of {@link CfnCodeDeployBlueGreenHook}. + */ +export interface CfnCodeDeployBlueGreenHookProps { + /** + * The IAM Role for CloudFormation to use to perform blue-green deployments. + */ + readonly serviceRole: string; + + /** + * Properties of the Amazon ECS applications being deployed. + */ + readonly applications: CfnCodeDeployBlueGreenApplication[]; + + /** + * Traffic routing configuration settings. 
+ * + * @default - time-based canary traffic shifting, with a 15% step percentage and a five minute bake time + */ + readonly trafficRoutingConfig?: CfnTrafficRoutingConfig; + + /** + * Additional options for the blue/green deployment. + * + * @default - no additional options + */ + readonly additionalOptions?: CfnCodeDeployBlueGreenAdditionalOptions; + + /** + * Use lifecycle event hooks to specify a Lambda function that CodeDeploy can call to validate a deployment. + * You can use the same function or a different one for deployment lifecycle events. + * Following completion of the validation tests, + * the Lambda {@link CfnCodeDeployBlueGreenLifecycleEventHooks.afterAllowTraffic} + * function calls back CodeDeploy and delivers a result of 'Succeeded' or 'Failed'. + * + * @default - no lifecycle event hooks + */ + readonly lifecycleEventHooks?: CfnCodeDeployBlueGreenLifecycleEventHooks; +} + +/** + * A CloudFormation Hook for CodeDeploy blue-green ECS deployments. + * + * @see https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/blue-green.html#blue-green-template-reference + */ +export class CfnCodeDeployBlueGreenHook extends CfnHook { + /** + * A factory method that creates a new instance of this class from an object + * containing the CloudFormation properties of this resource. + * Used in the @aws-cdk/cloudformation-include module. 
+ * + * @internal + */ + public static _fromCloudFormation(scope: Construct, id: string, hookAttributes: any, + options: FromCloudFormationOptions): CfnCodeDeployBlueGreenHook { + + hookAttributes = hookAttributes || {}; + const hookProperties = options.parser.parseValue(hookAttributes.Properties); + return new CfnCodeDeployBlueGreenHook(scope, id, { + serviceRole: hookProperties?.ServiceRole, + applications: hookProperties?.Applications?.map(applicationFromCloudFormation), + trafficRoutingConfig: { + type: hookProperties?.TrafficRoutingConfig?.Type, + timeBasedCanary: { + stepPercentage: hookProperties?.TrafficRoutingConfig?.TimeBasedCanary?.StepPercentage, + bakeTimeMins: hookProperties?.TrafficRoutingConfig?.TimeBasedCanary?.BakeTimeMins, + }, + timeBasedLinear: { + stepPercentage: hookProperties?.TrafficRoutingConfig?.TimeBasedLinear?.StepPercentage, + bakeTimeMins: hookProperties?.TrafficRoutingConfig?.TimeBasedLinear?.BakeTimeMins, + }, + }, + additionalOptions: { + terminationWaitTimeInMinutes: hookProperties?.AdditionalOptions?.TerminationWaitTimeInMinutes, + }, + lifecycleEventHooks: { + beforeInstall: hookProperties?.LifecycleEventHooks?.BeforeInstall, + afterInstall: hookProperties?.LifecycleEventHooks?.AfterInstall, + afterAllowTestTraffic: hookProperties?.LifecycleEventHooks?.AfterAllowTestTraffic, + beforeAllowTraffic: hookProperties?.LifecycleEventHooks?.BeforeAllowTraffic, + afterAllowTraffic: hookProperties?.LifecycleEventHooks?.AfterAllowTraffic, + }, + }); + + function applicationFromCloudFormation(app: any) { + const target = findResource(app?.Target?.LogicalID); + const taskDefinitions: Array | undefined = app?.ECSAttributes?.TaskDefinitions?.map( + (td: any) => findResource(td)); + const taskSets: Array | undefined = app?.ECSAttributes?.TaskSets?.map( + (ts: any) => findResource(ts)); + const prodTrafficRoute = findResource(app?.ECSAttributes?.TrafficRouting?.ProdTrafficRoute?.LogicalID); + const testTrafficRoute = 
findResource(app?.ECSAttributes?.TrafficRouting?.TestTrafficRoute?.LogicalID); + const targetGroups: Array | undefined = app?.ECSAttributes?.TrafficRouting?.TargetGroups?.map( + (tg: any) => findResource(tg)); + + return { + target: { + type: app?.Target?.Type, + logicalId: target?.logicalId, + }, + ecsAttributes: { + taskDefinitions: taskDefinitions?.map(td => td?.logicalId), + taskSets: taskSets?.map(ts => ts?.logicalId), + trafficRouting: { + prodTrafficRoute: { + type: app?.ECSAttributes?.TrafficRouting?.ProdTrafficRoute?.Type, + logicalId: prodTrafficRoute?.logicalId, + }, + testTrafficRoute: { + type: app?.ECSAttributes?.TrafficRouting?.TestTrafficRoute?.Type, + logicalId: testTrafficRoute?.logicalId, + }, + targetGroups: targetGroups?.map((tg) => tg?.logicalId), + }, + }, + }; + } + + function findResource(logicalId: string | undefined): CfnResource | undefined { + if (logicalId == null) { + return undefined; + } + const ret = options.parser.finder.findResource(logicalId); + if (!ret) { + throw new Error(`Hook '${id}' references resource '${logicalId}' that was not found in the template`); + } + return ret; + } + } + + private _serviceRole: string; + private _applications: CfnCodeDeployBlueGreenApplication[]; + private _trafficRoutingConfig?: CfnTrafficRoutingConfig; + private _additionalOptions?: CfnCodeDeployBlueGreenAdditionalOptions; + private _lifecycleEventHooks?: CfnCodeDeployBlueGreenLifecycleEventHooks; + + /** + * Creates a new CodeDeploy blue-green ECS Hook. 
+ * + * @param scope the scope to create the hook in (usually the containing Stack object) + * @param id the identifier of the construct - will be used to generate the logical ID of the Hook + * @param props the properties of the Hook + */ + constructor(scope: Construct, id: string, props: CfnCodeDeployBlueGreenHookProps) { + super(scope, id, { + type: 'AWS::CodeDeploy::BlueGreen', + // we render the properties ourselves + }); + + this._serviceRole = props.serviceRole; + this._applications = props.applications; + this._trafficRoutingConfig = props.trafficRoutingConfig; + this._additionalOptions = props.additionalOptions; + this._lifecycleEventHooks = props.lifecycleEventHooks; + } + + /** + * The IAM Role for CloudFormation to use to perform blue-green deployments. + */ + public get serviceRole(): string { + return this._serviceRole; + } + + public set serviceRole(serviceRole: string) { + this._serviceRole = serviceRole; + } + + /** + * Properties of the Amazon ECS applications being deployed. + */ + public get applications(): CfnCodeDeployBlueGreenApplication[] { + return this._applications; + } + + public set applications(value: CfnCodeDeployBlueGreenApplication[]) { + this._applications = value; + } + + /** + * Traffic routing configuration settings. + * + * @default - time-based canary traffic shifting, with a 15% step percentage and a five minute bake time + */ + public get trafficRoutingConfig(): CfnTrafficRoutingConfig | undefined { + return this._trafficRoutingConfig; + } + + public set trafficRoutingConfig(value: CfnTrafficRoutingConfig | undefined) { + this._trafficRoutingConfig = value; + } + + /** + * Additional options for the blue/green deployment. 
+ * + * @default - no additional options + */ + public get additionalOptions(): CfnCodeDeployBlueGreenAdditionalOptions | undefined { + return this._additionalOptions; + } + + public set additionalOptions(value: CfnCodeDeployBlueGreenAdditionalOptions | undefined) { + this._additionalOptions = value; + } + + /** + * Use lifecycle event hooks to specify a Lambda function that CodeDeploy can call to validate a deployment. + * You can use the same function or a different one for deployment lifecycle events. + * Following completion of the validation tests, + * the Lambda {@link CfnCodeDeployBlueGreenLifecycleEventHooks.afterAllowTraffic} + * function calls back CodeDeploy and delivers a result of 'Succeeded' or 'Failed'. + * + * @default - no lifecycle event hooks + */ + public get lifecycleEventHooks(): CfnCodeDeployBlueGreenLifecycleEventHooks | undefined { + return this._lifecycleEventHooks; + } + + public set lifecycleEventHooks(value: CfnCodeDeployBlueGreenLifecycleEventHooks | undefined) { + this._lifecycleEventHooks = value; + } + + protected renderProperties(_props?: { [p: string]: any }): { [p: string]: any } | undefined { + return { + ServiceRole: this.serviceRole, + Applications: this.applications.map((app) => ({ + Target: { + Type: app.target.type, + LogicalID: app.target.logicalId, + }, + ECSAttributes: { + TaskDefinitions: app.ecsAttributes.taskDefinitions, + TaskSets: app.ecsAttributes.taskSets, + TrafficRouting: { + ProdTrafficRoute: { + Type: app.ecsAttributes.trafficRouting.prodTrafficRoute.type, + LogicalID: app.ecsAttributes.trafficRouting.prodTrafficRoute.logicalId, + }, + TestTrafficRoute: { + Type: app.ecsAttributes.trafficRouting.testTrafficRoute.type, + LogicalID: app.ecsAttributes.trafficRouting.testTrafficRoute.logicalId, + }, + TargetGroups: app.ecsAttributes.trafficRouting.targetGroups, + }, + }, + })), + TrafficRoutingConfig: { + Type: this.trafficRoutingConfig?.type, + TimeBasedCanary: { + StepPercentage: 
this.trafficRoutingConfig?.timeBasedCanary?.stepPercentage, + BakeTimeMins: this.trafficRoutingConfig?.timeBasedCanary?.bakeTimeMins, + }, + TimeBasedLinear: { + StepPercentage: this.trafficRoutingConfig?.timeBasedLinear?.stepPercentage, + BakeTimeMins: this.trafficRoutingConfig?.timeBasedLinear?.bakeTimeMins, + }, + }, + AdditionalOptions: { + TerminationWaitTimeInMinutes: this.additionalOptions?.terminationWaitTimeInMinutes, + }, + LifecycleEventHooks: { + BeforeInstall: this.lifecycleEventHooks?.beforeInstall, + AfterInstall: this.lifecycleEventHooks?.afterInstall, + AfterAllowTestTraffic: this.lifecycleEventHooks?.afterAllowTestTraffic, + BeforeAllowTraffic: this.lifecycleEventHooks?.beforeAllowTraffic, + AfterAllowTraffic: this.lifecycleEventHooks?.afterAllowTraffic, + }, + }; + } +} diff --git a/packages/@aws-cdk/core/lib/cfn-hook.ts b/packages/@aws-cdk/core/lib/cfn-hook.ts new file mode 100644 index 0000000000000..8e83ae4e8da28 --- /dev/null +++ b/packages/@aws-cdk/core/lib/cfn-hook.ts @@ -0,0 +1,60 @@ +import { CfnElement } from './cfn-element'; +import { Construct } from './construct-compat'; +import { ignoreEmpty } from './util'; + +/** + * Construction properties of {@link CfnHook}. + */ +export interface CfnHookProps { + /** + * The type of the hook + * (for example, "AWS::CodeDeploy::BlueGreen"). + */ + readonly type: string; + + /** + * The properties of the hook. + * + * @default - no properties + */ + readonly properties?: { [name: string]: any }; +} + +/** + * Represents a CloudFormation resource. + */ +export class CfnHook extends CfnElement { + /** + * The type of the hook + * (for example, "AWS::CodeDeploy::BlueGreen"). + */ + public readonly type: string; + + private readonly _cfnHookProperties?: { [name: string]: any }; + + /** + * Creates a new Hook object. 
+ */ + constructor(scope: Construct, id: string, props: CfnHookProps) { + super(scope, id); + + this.type = props.type; + this._cfnHookProperties = props.properties; + } + + /** @internal */ + public _toCloudFormation(): object { + return { + Hooks: { + [this.logicalId]: { + Type: this.type, + Properties: ignoreEmpty(this.renderProperties(this._cfnHookProperties)), + }, + }, + }; + } + + protected renderProperties(props?: {[key: string]: any}): { [key: string]: any } | undefined { + return props; + } +} diff --git a/packages/@aws-cdk/core/lib/cfn-parse.ts b/packages/@aws-cdk/core/lib/cfn-parse.ts index 4ae2eb69746ba..742880f615e1b 100644 --- a/packages/@aws-cdk/core/lib/cfn-parse.ts +++ b/packages/@aws-cdk/core/lib/cfn-parse.ts @@ -268,7 +268,6 @@ export class CfnParser { } public handleAttributes(resource: CfnResource, resourceAttributes: any, logicalId: string): void { - const finder = this.options.finder; const cfnOptions = resource.cfnOptions; cfnOptions.creationPolicy = this.parseCreationPolicy(resourceAttributes.CreationPolicy); @@ -279,7 +278,7 @@ export class CfnParser { // handle Condition if (resourceAttributes.Condition) { - const condition = finder.findCondition(resourceAttributes.Condition); + const condition = this.finder.findCondition(resourceAttributes.Condition); if (!condition) { throw new Error(`Resource '${logicalId}' uses Condition '${resourceAttributes.Condition}' that doesn't exist`); } @@ -291,7 +290,7 @@ export class CfnParser { const dependencies: string[] = Array.isArray(resourceAttributes.DependsOn) ? 
resourceAttributes.DependsOn : [resourceAttributes.DependsOn]; for (const dep of dependencies) { - const depResource = finder.findResource(dep); + const depResource = this.finder.findResource(dep); if (!depResource) { throw new Error(`Resource '${logicalId}' depends on '${dep}' that doesn't exist`); } @@ -411,7 +410,7 @@ export class CfnParser { if (typeof cfnValue === 'object') { // an object can be either a CFN intrinsic, or an actual object const cfnIntrinsic = this.parseIfCfnIntrinsic(cfnValue); - if (cfnIntrinsic) { + if (cfnIntrinsic !== undefined) { return cfnIntrinsic; } const ret: any = {}; @@ -424,6 +423,10 @@ export class CfnParser { return cfnValue; } + public get finder(): ICfnFinder { + return this.options.finder; + } + private parseIfCfnIntrinsic(object: any): any { const key = this.looksLikeCfnIntrinsic(object); switch (key) { @@ -432,10 +435,10 @@ export class CfnParser { case 'Ref': { const refTarget = object[key]; const specialRef = this.specialCaseRefs(refTarget); - if (specialRef) { + if (specialRef !== undefined) { return specialRef; } else { - const refElement = this.options.finder.findRefTarget(refTarget); + const refElement = this.finder.findRefTarget(refTarget); if (!refElement) { throw new Error(`Element used in Ref expression with logical ID: '${refTarget}' not found`); } @@ -445,7 +448,7 @@ export class CfnParser { case 'Fn::GetAtt': { // Fn::GetAtt takes a 2-element list as its argument const value = object[key]; - const target = this.options.finder.findResource(value[0]); + const target = this.finder.findResource(value[0]); if (!target) { throw new Error(`Resource used in GetAtt expression with logical ID: '${value[0]}' not found`); } @@ -469,7 +472,7 @@ export class CfnParser { case 'Fn::FindInMap': { const value = this.parseValue(object[key]); // the first argument to FindInMap is the mapping name - const mapping = this.options.finder.findMapping(value[0]); + const mapping = this.finder.findMapping(value[0]); if (!mapping) { throw 
new Error(`Mapping used in FindInMap expression with name '${value[0]}' was not found in the template`); } @@ -503,7 +506,7 @@ export class CfnParser { // Fn::If takes a 3-element list as its argument, // where the first element is the name of a Condition const value = this.parseValue(object[key]); - const condition = this.options.finder.findCondition(value[0]); + const condition = this.finder.findCondition(value[0]); if (!condition) { throw new Error(`Condition '${value[0]}' used in an Fn::If expression does not exist in the template`); } @@ -541,7 +544,7 @@ export class CfnParser { } case 'Condition': { // a reference to a Condition from another Condition - const condition = this.options.finder.findCondition(object[key]); + const condition = this.finder.findCondition(object[key]); if (!condition) { throw new Error(`Referenced Condition with name '${object[key]}' was not found in the template`); } @@ -593,21 +596,21 @@ export class CfnParser { // since it's not in the map, check if it's a pseudo parameter const specialRef = this.specialCaseSubRefs(refTarget); - if (specialRef) { + if (specialRef !== undefined) { return leftHalf + specialRef + this.parseFnSubString(rightHalf, map); } const dotIndex = refTarget.indexOf('.'); const isRef = dotIndex === -1; if (isRef) { - const refElement = this.options.finder.findRefTarget(refTarget); + const refElement = this.finder.findRefTarget(refTarget); if (!refElement) { throw new Error(`Element referenced in Fn::Sub expression with logical ID: '${refTarget}' was not found in the template`); } return leftHalf + CfnReference.for(refElement, 'Ref', true).toString() + this.parseFnSubString(rightHalf, map); } else { const targetId = refTarget.substring(0, dotIndex); - const refResource = this.options.finder.findResource(targetId); + const refResource = this.finder.findResource(targetId); if (!refResource) { throw new Error(`Resource referenced in Fn::Sub expression with logical ID: '${targetId}' was not found in the template`); } 
@@ -630,7 +633,7 @@ export class CfnParser { // fail here - this substitution is not allowed throw new Error(`Cannot substitute parameter '${parameterName}' used in Fn::ValueOf expression with attribute '${value[1]}'`); } - const param = this.options.finder.findRefTarget(parameterName); + const param = this.finder.findRefTarget(parameterName); if (!param) { throw new Error(`Rule references parameter '${parameterName}' which was not found in the template`); } diff --git a/packages/@aws-cdk/core/lib/expiration.ts b/packages/@aws-cdk/core/lib/expiration.ts new file mode 100644 index 0000000000000..4c0875923a99e --- /dev/null +++ b/packages/@aws-cdk/core/lib/expiration.ts @@ -0,0 +1,63 @@ +import { Duration } from './duration'; +/** + * Represents a date of expiration. + * + * The amount can be specified either as a Date object, timestamp, Duration or string. + */ +export class Expiration { + /** + * Expire at the specified date + * @param d date to expire at + */ + public static atDate(d: Date) { return new Expiration(d); } + + /** + * Expire at the specified timestamp + * @param t timestamp in unix milliseconds + */ + public static atTimestamp(t: number) { return Expiration.atDate(new Date(t)); } + + /** + * Expire once the specified duration has passed since deployment time + * @param t the duration to wait before expiring + */ + public static after(t: Duration) { return Expiration.atDate(new Date(Date.now() + t.toMilliseconds())); } + + /** + * Expire at specified date, represented as a string + * + * @param s the string that represents date to expire at + */ + public static fromString(s: string) { return new Expiration(new Date(s)); } + + /** + * Expiration value as a Date object + */ + public readonly date: Date; + + private constructor(date: Date) { + this.date = date; + } + + /** + * Expiration Value in a formatted Unix Epoch Time in seconds + */ + public toEpoch(): number { + return Math.round(this.date.getTime() / 1000); + } + /** + * Check if Expiration 
expires before input + * @param t the duration to check against + */ + public isBefore(t: Duration): boolean { + return this.date < new Date(Date.now() + t.toMilliseconds()); + } + + /** + * Check if Expiration expires after input + * @param t the duration to check against + */ + public isAfter( t: Duration ): boolean { + return this.date > new Date(Date.now() + t.toMilliseconds()); + } +} diff --git a/packages/@aws-cdk/core/lib/feature-flags.ts b/packages/@aws-cdk/core/lib/feature-flags.ts new file mode 100644 index 0000000000000..924283af30fcc --- /dev/null +++ b/packages/@aws-cdk/core/lib/feature-flags.ts @@ -0,0 +1,29 @@ +import * as cxapi from '@aws-cdk/cx-api'; +import { Construct } from '../lib/construct-compat'; + +/** + * Features that are implemented behind a flag in order to preserve backwards + * compatibility for existing apps. The list of flags are available in the + * `@aws-cdk/cx-api` module. + * + * The state of the flag for this application is stored as a CDK context variable. + */ +export class FeatureFlags { + /** + * Inspect feature flags on the construct node's context. + */ + public static of(scope: Construct) { + return new FeatureFlags(scope); + } + + private constructor(private readonly construct: Construct) {} + + /** + * Check whether a feature flag is enabled. If configured, the flag is present in + * the construct node context. Falls back to the defaults defined in the `cx-api` + * module. + */ + public isEnabled(featureFlag: string): boolean | undefined { + return this.construct.node.tryGetContext(featureFlag) ?? 
cxapi.futureFlagDefault(featureFlag); + } +} \ No newline at end of file diff --git a/packages/@aws-cdk/core/lib/index.ts b/packages/@aws-cdk/core/lib/index.ts index 2ea4c92f79db4..d63f847fe2687 100644 --- a/packages/@aws-cdk/core/lib/index.ts +++ b/packages/@aws-cdk/core/lib/index.ts @@ -13,6 +13,8 @@ export * from './stack-synthesizers'; export * from './reference'; export * from './cfn-condition'; export * from './cfn-fn'; +export * from './cfn-hook'; +export * from './cfn-codedeploy-blue-green-hook'; export * from './cfn-include'; export * from './cfn-mapping'; export * from './cfn-output'; @@ -30,6 +32,7 @@ export * from './cfn-json'; export * from './removal-policy'; export * from './arn'; export * from './duration'; +export * from './expiration'; export * from './size'; export * from './stack-trace'; @@ -58,6 +61,8 @@ export * from './custom-resource-provider'; export * from './cfn-capabilities'; export * from './cloudformation.generated'; +export * from './feature-flags'; + // WARNING: Should not be exported, but currently is because of a bug. See the // class description for more information. 
export * from './private/intrinsic'; diff --git a/packages/@aws-cdk/core/lib/private/refs.ts b/packages/@aws-cdk/core/lib/private/refs.ts index 6521df69ed585..0fdc5e1bed40f 100644 --- a/packages/@aws-cdk/core/lib/private/refs.ts +++ b/packages/@aws-cdk/core/lib/private/refs.ts @@ -7,6 +7,7 @@ import { CfnElement } from '../cfn-element'; import { CfnOutput } from '../cfn-output'; import { CfnParameter } from '../cfn-parameter'; import { Construct, IConstruct } from '../construct-compat'; +import { FeatureFlags } from '../feature-flags'; import { Reference } from '../reference'; import { IResolvable } from '../resolvable'; import { Stack } from '../stack'; @@ -201,7 +202,7 @@ function getCreateExportsScope(stack: Stack) { } function generateExportName(stackExports: Construct, id: string) { - const stackRelativeExports = stackExports.node.tryGetContext(cxapi.STACK_RELATIVE_EXPORTS_CONTEXT); + const stackRelativeExports = FeatureFlags.of(stackExports).isEnabled(cxapi.STACK_RELATIVE_EXPORTS_CONTEXT); const stack = Stack.of(stackExports); const components = [ diff --git a/packages/@aws-cdk/core/lib/private/synthesis.ts b/packages/@aws-cdk/core/lib/private/synthesis.ts index b4d2368ae6c99..c67fa14d5b75e 100644 --- a/packages/@aws-cdk/core/lib/private/synthesis.ts +++ b/packages/@aws-cdk/core/lib/private/synthesis.ts @@ -123,7 +123,7 @@ function synthesizeTree(root: IConstruct, builder: cxapi.CloudAssemblyBuilder) { }; if (construct instanceof Stack) { - construct._synthesizeTemplate(session); + construct.synthesizer.synthesize(session); } else if (construct instanceof TreeMetadata) { construct._synthesizeTree(session); } diff --git a/packages/@aws-cdk/core/lib/stack-synthesizers/bootstrapless-synthesizer.ts b/packages/@aws-cdk/core/lib/stack-synthesizers/bootstrapless-synthesizer.ts index a1149f91e4990..16ea69c1b2302 100644 --- a/packages/@aws-cdk/core/lib/stack-synthesizers/bootstrapless-synthesizer.ts +++ 
b/packages/@aws-cdk/core/lib/stack-synthesizers/bootstrapless-synthesizer.ts @@ -1,6 +1,6 @@ import { DockerImageAssetLocation, DockerImageAssetSource, FileAssetLocation, FileAssetSource } from '../assets'; import { ISynthesisSession } from '../construct-compat'; -import { addStackArtifactToAssembly, assertBound } from './_shared'; +import { assertBound } from './_shared'; import { DefaultStackSynthesizer } from './default-synthesizer'; /** @@ -35,6 +35,7 @@ export class BootstraplessSynthesizer extends DefaultStackSynthesizer { super({ deployRoleArn: props.deployRoleArn, cloudFormationExecutionRole: props.cloudFormationExecutionRoleArn, + generateBootstrapVersionRule: false, }); } @@ -46,15 +47,17 @@ export class BootstraplessSynthesizer extends DefaultStackSynthesizer { throw new Error('Cannot add assets to a Stack that uses the BootstraplessSynthesizer'); } - public synthesizeStackArtifacts(session: ISynthesisSession): void { + public synthesize(session: ISynthesisSession): void { assertBound(this.stack); + this.synthesizeStackTemplate(this.stack, session); + // do _not_ treat the template as an asset, // because this synthesizer doesn't have a bootstrap bucket to put it in - addStackArtifactToAssembly(session, this.stack, { + this.emitStackArtifact(this.stack, session, { assumeRoleArn: this.deployRoleArn, cloudFormationExecutionRoleArn: this.cloudFormationExecutionRoleArn, requiresBootstrapStackVersion: 1, - }, []); + }); } } diff --git a/packages/@aws-cdk/core/lib/stack-synthesizers/default-synthesizer.ts b/packages/@aws-cdk/core/lib/stack-synthesizers/default-synthesizer.ts index ad5ea2512f049..15f2d96e3b8cd 100644 --- a/packages/@aws-cdk/core/lib/stack-synthesizers/default-synthesizer.ts +++ b/packages/@aws-cdk/core/lib/stack-synthesizers/default-synthesizer.ts @@ -9,8 +9,8 @@ import { CfnRule } from '../cfn-rule'; import { ISynthesisSession } from '../construct-compat'; import { Stack } from '../stack'; import { Token } from '../token'; -import { 
addStackArtifactToAssembly, assertBound, contentHash } from './_shared'; -import { IStackSynthesizer } from './types'; +import { assertBound, contentHash } from './_shared'; +import { StackSynthesizer } from './stack-synthesizer'; export const BOOTSTRAP_QUALIFIER_CONTEXT = '@aws-cdk/core:bootstrapQualifier'; @@ -140,6 +140,16 @@ export interface DefaultStackSynthesizerProps { * @default - Value of context key '@aws-cdk/core:bootstrapQualifier' if set, otherwise `DefaultStackSynthesizer.DEFAULT_QUALIFIER` */ readonly qualifier?: string; + + /** + * Whether to add a Rule to the stack template verifying the bootstrap stack version + * + * This generally should be left set to `true`, unless you explicitly + * want to be able to deploy to an unbootstrapped environment. + * + * @default true + */ + readonly generateBootstrapVersionRule?: boolean; } /** @@ -151,7 +161,7 @@ export interface DefaultStackSynthesizerProps { * * Requires the environment to have been bootstrapped with Bootstrap Stack V2. */ -export class DefaultStackSynthesizer implements IStackSynthesizer { +export class DefaultStackSynthesizer extends StackSynthesizer { /** * Default ARN qualifier */ @@ -199,17 +209,20 @@ export class DefaultStackSynthesizer implements IStackSynthesizer { private _cloudFormationExecutionRoleArn?: string; private fileAssetPublishingRoleArn?: string; private imageAssetPublishingRoleArn?: string; + private qualifier?: string; private readonly files: NonNullable = {}; private readonly dockerImages: NonNullable = {}; constructor(private readonly props: DefaultStackSynthesizerProps = {}) { + super(); } public bind(stack: Stack): void { this._stack = stack; const qualifier = this.props.qualifier ?? stack.node.tryGetContext(BOOTSTRAP_QUALIFIER_CONTEXT) ?? 
DefaultStackSynthesizer.DEFAULT_QUALIFIER; + this.qualifier = qualifier; // Function to replace placeholders in the input string as much as possible // @@ -234,8 +247,6 @@ export class DefaultStackSynthesizer implements IStackSynthesizer { this.fileAssetPublishingRoleArn = specialize(this.props.fileAssetPublishingRoleArn ?? DefaultStackSynthesizer.DEFAULT_FILE_ASSET_PUBLISHING_ROLE_ARN); this.imageAssetPublishingRoleArn = specialize(this.props.imageAssetPublishingRoleArn ?? DefaultStackSynthesizer.DEFAULT_IMAGE_ASSET_PUBLISHING_ROLE_ARN); /* eslint-enable max-len */ - - addBootstrapVersionRule(stack, MIN_BOOTSTRAP_STACK_VERSION, qualifier); } public addFileAsset(asset: FileAssetSource): FileAssetLocation { @@ -309,20 +320,36 @@ export class DefaultStackSynthesizer implements IStackSynthesizer { }; } - public synthesizeStackArtifacts(session: ISynthesisSession): void { + /** + * Synthesize the associated stack to the session + */ + public synthesize(session: ISynthesisSession): void { assertBound(this.stack); + assertBound(this.qualifier); + + // Must be done here -- if it's done in bind() (called in the Stack's constructor) + // then it will become impossible to set context after that. + // + // If it's done AFTER _synthesizeTemplate(), then the template won't contain the + // right constructs. + if (this.props.generateBootstrapVersionRule ?? 
true) { + addBootstrapVersionRule(this.stack, MIN_BOOTSTRAP_STACK_VERSION, this.qualifier); + } + + this.synthesizeStackTemplate(this.stack, session); // Add the stack's template to the artifact manifest const templateManifestUrl = this.addStackTemplateToAssetManifest(session); const artifactId = this.writeAssetManifest(session); - addStackArtifactToAssembly(session, this.stack, { + this.emitStackArtifact(this.stack, session, { assumeRoleArn: this._deployRoleArn, cloudFormationExecutionRoleArn: this._cloudFormationExecutionRoleArn, stackTemplateAssetObjectUrl: templateManifestUrl, requiresBootstrapStackVersion: MIN_BOOTSTRAP_STACK_VERSION, - }, [artifactId]); + additionalDependencies: [artifactId], + }); } /** @@ -471,6 +498,11 @@ function stackLocationOrInstrinsics(stack: Stack) { * so we encode this rule into the template in a way that CloudFormation will check it. */ function addBootstrapVersionRule(stack: Stack, requiredVersion: number, qualifier: string) { + // Because of https://github.com/aws/aws-cdk/blob/master/packages/@aws-cdk/assert/lib/synth-utils.ts#L74 + // synthesize() may be called more than once on a stack in unit tests, and the below would break + // if we execute it a second time. Guard against the constructs already existing. 
+ if (stack.node.tryFindChild('BootstrapVersion')) { return; } + const param = new CfnParameter(stack, 'BootstrapVersion', { type: 'AWS::SSM::Parameter::Value', description: 'Version of the CDK Bootstrap resources in this environment, automatically retrieved from SSM Parameter Store.', diff --git a/packages/@aws-cdk/core/lib/stack-synthesizers/index.ts b/packages/@aws-cdk/core/lib/stack-synthesizers/index.ts index b4ad67384729d..db5f8e4d3f656 100644 --- a/packages/@aws-cdk/core/lib/stack-synthesizers/index.ts +++ b/packages/@aws-cdk/core/lib/stack-synthesizers/index.ts @@ -3,3 +3,4 @@ export * from './default-synthesizer'; export * from './legacy'; export * from './bootstrapless-synthesizer'; export * from './nested'; +export * from './stack-synthesizer'; diff --git a/packages/@aws-cdk/core/lib/stack-synthesizers/legacy.ts b/packages/@aws-cdk/core/lib/stack-synthesizers/legacy.ts index e8bbe5317dd3c..bacf1514a8b4b 100644 --- a/packages/@aws-cdk/core/lib/stack-synthesizers/legacy.ts +++ b/packages/@aws-cdk/core/lib/stack-synthesizers/legacy.ts @@ -5,8 +5,8 @@ import { Fn } from '../cfn-fn'; import { Construct, ISynthesisSession } from '../construct-compat'; import { FileAssetParameters } from '../private/asset-parameters'; import { Stack } from '../stack'; -import { addStackArtifactToAssembly, assertBound } from './_shared'; -import { IStackSynthesizer } from './types'; +import { assertBound } from './_shared'; +import { StackSynthesizer } from './stack-synthesizer'; /** * The well-known name for the docker image asset ECR repository. All docker @@ -32,7 +32,7 @@ const ASSETS_ECR_REPOSITORY_NAME_OVERRIDE_CONTEXT_KEY = 'assets-ecr-repository-n * This is the only StackSynthesizer that supports customizing asset behavior * by overriding `Stack.addFileAsset()` and `Stack.addDockerImageAsset()`. 
*/ -export class LegacyStackSynthesizer implements IStackSynthesizer { +export class LegacyStackSynthesizer extends StackSynthesizer { private stack?: Stack; private cycle = false; @@ -94,11 +94,16 @@ export class LegacyStackSynthesizer implements IStackSynthesizer { } } - public synthesizeStackArtifacts(session: ISynthesisSession): void { + /** + * Synthesize the associated stack to the session + */ + public synthesize(session: ISynthesisSession): void { assertBound(this.stack); + this.synthesizeStackTemplate(this.stack, session); + // Just do the default stuff, nothing special - addStackArtifactToAssembly(session, this.stack, {}, []); + this.emitStackArtifact(this.stack, session); } private doAddDockerImageAsset(asset: DockerImageAssetSource): DockerImageAssetLocation { diff --git a/packages/@aws-cdk/core/lib/stack-synthesizers/nested.ts b/packages/@aws-cdk/core/lib/stack-synthesizers/nested.ts index 8841618823aa9..bc909775fee8a 100644 --- a/packages/@aws-cdk/core/lib/stack-synthesizers/nested.ts +++ b/packages/@aws-cdk/core/lib/stack-synthesizers/nested.ts @@ -1,6 +1,8 @@ import { DockerImageAssetLocation, DockerImageAssetSource, FileAssetLocation, FileAssetSource } from '../assets'; import { ISynthesisSession } from '../construct-compat'; import { Stack } from '../stack'; +import { assertBound } from './_shared'; +import { StackSynthesizer } from './stack-synthesizer'; import { IStackSynthesizer } from './types'; /** @@ -8,12 +10,15 @@ import { IStackSynthesizer } from './types'; * * Interoperates with the StackSynthesizer of the parent stack. 
*/ -export class NestedStackSynthesizer implements IStackSynthesizer { +export class NestedStackSynthesizer extends StackSynthesizer { + private stack?: Stack; + constructor(private readonly parentDeployment: IStackSynthesizer) { + super(); } - public bind(_stack: Stack): void { - // Nothing to do + public bind(stack: Stack): void { + this.stack = stack; } public addFileAsset(asset: FileAssetSource): FileAssetLocation { @@ -28,8 +33,10 @@ export class NestedStackSynthesizer implements IStackSynthesizer { return this.parentDeployment.addDockerImageAsset(asset); } - public synthesizeStackArtifacts(_session: ISynthesisSession): void { - // Do not emit Nested Stack as a cloud assembly artifact. + public synthesize(session: ISynthesisSession): void { + assertBound(this.stack); + // Synthesize the template, but don't emit as a cloud assembly artifact. // It will be registered as an S3 asset of its parent instead. + this.synthesizeStackTemplate(this.stack, session); } } diff --git a/packages/@aws-cdk/core/lib/stack-synthesizers/stack-synthesizer.ts b/packages/@aws-cdk/core/lib/stack-synthesizers/stack-synthesizer.ts new file mode 100644 index 0000000000000..fde6ed053059e --- /dev/null +++ b/packages/@aws-cdk/core/lib/stack-synthesizers/stack-synthesizer.ts @@ -0,0 +1,109 @@ +import { DockerImageAssetLocation, DockerImageAssetSource, FileAssetLocation, FileAssetSource } from '../assets'; +import { ISynthesisSession } from '../construct-compat'; +import { Stack } from '../stack'; +import { addStackArtifactToAssembly } from './_shared'; +import { IStackSynthesizer } from './types'; + +/** + * Base class for implementing an IStackSynthesizer + * + * This class needs to exist to provide public surface area for external + * implementations of stack synthesizers. The protected methods give + * access to functions that are otherwise @_internal to the framework + * and could not be accessed by external implementors. 
+ */ +export abstract class StackSynthesizer implements IStackSynthesizer { + /** + * Bind to the stack this environment is going to be used on + * + * Must be called before any of the other methods are called. + */ + public abstract bind(stack: Stack): void; + + /** + * Register a File Asset + * + * Returns the parameters that can be used to refer to the asset inside the template. + */ + public abstract addFileAsset(asset: FileAssetSource): FileAssetLocation; + + /** + * Register a Docker Image Asset + * + * Returns the parameters that can be used to refer to the asset inside the template. + */ + public abstract addDockerImageAsset(asset: DockerImageAssetSource): DockerImageAssetLocation; + + /** + * Synthesize the associated stack to the session + */ + public abstract synthesize(session: ISynthesisSession): void; + + /** + * Have the stack write out its template + */ + protected synthesizeStackTemplate(stack: Stack, session: ISynthesisSession): void { + stack._synthesizeTemplate(session); + } + + + /** + * Write the stack artifact to the session + * + * Use default settings to add a CloudFormationStackArtifact artifact to + * the given synthesis session. + */ + protected emitStackArtifact(stack: Stack, session: ISynthesisSession, options: SynthesizeStackArtifactOptions = {}) { + addStackArtifactToAssembly(session, stack, options ?? {}, options.additionalDependencies ?? []); + } +} + +/** + * Stack artifact options + * + * A subset of `cxschema.AwsCloudFormationStackProperties` of optional settings that need to be + * configurable by synthesizers, plus `additionalDependencies`. + */ +export interface SynthesizeStackArtifactOptions { + /** + * Identifiers of additional dependencies + * + * @default - No additional dependencies + */ + readonly additionalDependencies?: string[]; + + /** + * Values for CloudFormation stack parameters that should be passed when the stack is deployed. 
+ * + * @default - No parameters + */ + readonly parameters?: { [id: string]: string }; + + /** + * The role that needs to be assumed to deploy the stack + * + * @default - No role is assumed (current credentials are used) + */ + readonly assumeRoleArn?: string; + + /** + * The role that is passed to CloudFormation to execute the change set + * + * @default - No role is passed (currently assumed role/credentials are used) + */ + readonly cloudFormationExecutionRoleArn?: string; + + /** + * If the stack template has already been included in the asset manifest, its asset URL + * + * @default - Not uploaded yet, upload just before deploying + */ + readonly stackTemplateAssetObjectUrl?: string; + + /** + * Version of bootstrap stack required to deploy this stack + * + * @default - No bootstrap stack required + */ + readonly requiresBootstrapStackVersion?: number; +} \ No newline at end of file diff --git a/packages/@aws-cdk/core/lib/stack-synthesizers/types.ts b/packages/@aws-cdk/core/lib/stack-synthesizers/types.ts index c7f5fce1a7cbf..425aee7b7af5a 100644 --- a/packages/@aws-cdk/core/lib/stack-synthesizers/types.ts +++ b/packages/@aws-cdk/core/lib/stack-synthesizers/types.ts @@ -28,9 +28,7 @@ export interface IStackSynthesizer { addDockerImageAsset(asset: DockerImageAssetSource): DockerImageAssetLocation; /** - * Synthesize all artifacts required for the stack into the session - * - * @experimental + * Synthesize the associated stack to the session */ - synthesizeStackArtifacts(session: ISynthesisSession): void; + synthesize(session: ISynthesisSession): void; } diff --git a/packages/@aws-cdk/core/lib/stack.ts b/packages/@aws-cdk/core/lib/stack.ts index de26587a2a861..2aad06f44c721 100644 --- a/packages/@aws-cdk/core/lib/stack.ts +++ b/packages/@aws-cdk/core/lib/stack.ts @@ -13,6 +13,7 @@ import { CfnResource, TagType } from './cfn-resource'; import { Construct, IConstruct, ISynthesisSession } from './construct-compat'; import { ContextProvider } from 
'./context-provider'; import { Environment } from './environment'; +import { FeatureFlags } from './feature-flags'; import { CLOUDFORMATION_TOKEN_RESOLVER, CloudFormationLang } from './private/cloudformation-lang'; import { LogicalIDs } from './private/logical-id'; import { resolve } from './private/resolve'; @@ -168,8 +169,8 @@ export class Stack extends Construct implements ITaggable { return c; } - if (!c.node.scope) { - throw new Error(`No stack could be identified for the construct at path ${construct.node.path}`); + if (Stage.isStage(c) || !c.node.scope) { + throw new Error(`${construct.constructor?.name ?? 'Construct'} at '${construct.node.path}' should be created in the scope of a Stack, but no Stack found`); } return _lookup(c.node.scope); @@ -358,14 +359,16 @@ export class Stack extends Construct implements ITaggable { // // Also use the new behavior if we are using the new CI/CD-ready synthesizer; that way // people only have to flip one flag. - // eslint-disable-next-line max-len - this.artifactId = this.node.tryGetContext(cxapi.ENABLE_STACK_NAME_DUPLICATES_CONTEXT) || this.node.tryGetContext(cxapi.NEW_STYLE_STACK_SYNTHESIS_CONTEXT) + const featureFlags = FeatureFlags.of(this); + const stackNameDupeContext = featureFlags.isEnabled(cxapi.ENABLE_STACK_NAME_DUPLICATES_CONTEXT); + const newStyleSynthesisContext = featureFlags.isEnabled(cxapi.NEW_STYLE_STACK_SYNTHESIS_CONTEXT); + this.artifactId = (stackNameDupeContext || newStyleSynthesisContext) ? this.generateStackArtifactId() : this.stackName; this.templateFile = `${this.artifactId}.template.json`; - this.synthesizer = props.synthesizer ?? (this.node.tryGetContext(cxapi.NEW_STYLE_STACK_SYNTHESIS_CONTEXT) + this.synthesizer = props.synthesizer ?? (newStyleSynthesisContext ? 
new DefaultStackSynthesizer() : new LegacyStackSynthesizer()); this.synthesizer.bind(this); @@ -725,9 +728,6 @@ export class Stack extends Construct implements ITaggable { for (const ctx of this._missingContext) { builder.addMissing(ctx); } - - // Delegate adding artifacts to the Synthesizer - this.synthesizer.synthesizeStackArtifacts(session); } /** diff --git a/packages/@aws-cdk/core/package.json b/packages/@aws-cdk/core/package.json index e71ef1f3f3841..2b4572467a42d 100644 --- a/packages/@aws-cdk/core/package.json +++ b/packages/@aws-cdk/core/package.json @@ -41,6 +41,10 @@ "duration-prop-type:@aws-cdk/core.ResourceSignal.timeout", "props-no-any:@aws-cdk/core.CfnParameterProps.default", "props-no-cfn-types:@aws-cdk/core.CfnRuleProps.assertions", + "props-no-cfn-types:@aws-cdk/core.CfnCodeDeployBlueGreenHookProps.applications", + "props-no-cfn-types:@aws-cdk/core.CfnCodeDeployBlueGreenHookProps.additionalOptions", + "props-no-cfn-types:@aws-cdk/core.CfnCodeDeployBlueGreenHookProps.lifecycleEventHooks", + "props-no-cfn-types:@aws-cdk/core.CfnCodeDeployBlueGreenHookProps.trafficRoutingConfig", "construct-ctor:@aws-cdk/core.Stack..params[1]", "docs-public-apis:@aws-cdk/core.ScopedAws.urlSuffix", "docs-public-apis:@aws-cdk/core.TagType.NOT_TAGGABLE", diff --git a/packages/@aws-cdk/core/test/stack-synthesis/test.new-style-synthesis.ts b/packages/@aws-cdk/core/test/stack-synthesis/test.new-style-synthesis.ts index 7d88ce7c0be49..8028658b4c7dc 100644 --- a/packages/@aws-cdk/core/test/stack-synthesis/test.new-style-synthesis.ts +++ b/packages/@aws-cdk/core/test/stack-synthesis/test.new-style-synthesis.ts @@ -36,7 +36,7 @@ export = { // THEN -- the S3 url is advertised on the stack artifact const stackArtifact = asm.getStackArtifact('Stack'); - const templateHash = '040a6374d4c48c0db867f1d4f95c69b12d28e69c3b8a9903a1db1ec651dcf480'; + const templateHash = last(stackArtifact.stackTemplateAssetObjectUrl?.split('/')); test.equals(stackArtifact.stackTemplateAssetObjectUrl, 
`s3://cdk-hnb659fds-assets-\${AWS::AccountId}-\${AWS::Region}/${templateHash}`); @@ -83,6 +83,24 @@ export = { test.done(); }, + 'version check is not added to template if disabled'(test: Test) { + // GIVEN + stack = new Stack(app, 'Stack2', { + synthesizer: new DefaultStackSynthesizer({ + generateBootstrapVersionRule: false, + }), + }); + new CfnResource(stack, 'Resource', { + type: 'Some::Resource', + }); + + // THEN + const template = app.synth().getStackByName('Stack2').template; + test.equal(template?.Rules?.CheckBootstrapVersion, undefined); + + test.done(); + }, + 'add file asset'(test: Test) { // WHEN const location = stack.synthesizer.addFileAsset({ @@ -221,3 +239,7 @@ function readAssetManifest(asm: cxapi.CloudAssembly): cxschema.AssetManifest { return JSON.parse(fs.readFileSync(manifestArtifact.file, { encoding: 'utf-8' })); } + +function last(xs?: A[]): A | undefined { + return xs ? xs[xs.length - 1] : undefined; +} \ No newline at end of file diff --git a/packages/@aws-cdk/core/test/test.cfn-resource.ts b/packages/@aws-cdk/core/test/test.cfn-resource.ts index 5033af4b21598..19bd494b485d5 100644 --- a/packages/@aws-cdk/core/test/test.cfn-resource.ts +++ b/packages/@aws-cdk/core/test/test.cfn-resource.ts @@ -124,6 +124,35 @@ export = nodeunit.testCase({ // No DependsOn! 
}); + test.done(); + }, + + 'CfnResource cannot be created outside Stack'(test: nodeunit.Test) { + const app = new core.App(); + test.throws(() => { + new core.CfnResource(app, 'Resource', { + type: 'Some::Resource', + }); + }, /should be created in the scope of a Stack, but no Stack found/); + + + test.done(); + }, + + /** + * Stages start a new scope, which does not count as a Stack anymore + */ + 'CfnResource cannot be in Stage in Stack'(test: nodeunit.Test) { + const app = new core.App(); + const stack = new core.Stack(app, 'Stack'); + const stage = new core.Stage(stack, 'Stage'); + test.throws(() => { + new core.CfnResource(stage, 'Resource', { + type: 'Some::Resource', + }); + }, /should be created in the scope of a Stack, but no Stack found/); + + test.done(); }, }); diff --git a/packages/@aws-cdk/core/test/test.expiration.ts b/packages/@aws-cdk/core/test/test.expiration.ts new file mode 100644 index 0000000000000..2aa9078f3448c --- /dev/null +++ b/packages/@aws-cdk/core/test/test.expiration.ts @@ -0,0 +1,49 @@ +import * as nodeunit from 'nodeunit'; +import { Duration, Expiration } from '../lib'; + +export = nodeunit.testCase({ + 'from string'(test: nodeunit.Test) { + const date = new Date('Sun, 26 Jan 2020 00:53:20 GMT'); + test.equal(Expiration.fromString('Sun, 26 Jan 2020 00:53:20 GMT').date.getDate(), date.getDate()); + test.done(); + }, + + 'at specified date'(test: nodeunit.Test) { + const date = new Date('Sun, 26 Jan 2020 00:53:20 GMT'); + test.equal(Expiration.atDate(new Date('Sun, 26 Jan 2020 00:53:20 GMT')).date.toUTCString(), 'Sun, 26 Jan 2020 00:53:20 GMT'); + test.equal(Expiration.atDate(new Date(1580000000000)).date.toUTCString(), 'Sun, 26 Jan 2020 00:53:20 GMT'); + test.equal(Expiration.atDate(new Date(date)).date.toUTCString(), 'Sun, 26 Jan 2020 00:53:20 GMT'); + test.done(); + }, + + 'at time stamp'(test: nodeunit.Test) { + test.equal(Expiration.atDate(new Date(1580000000000)).date.toUTCString(), 'Sun, 26 Jan 2020 00:53:20 GMT'); + 
test.done(); + }, + + 'after'(test: nodeunit.Test) { + test.ok(Math.abs(new Date(Expiration.after(Duration.minutes(10)).date.toUTCString()).getTime() - (Date.now() + 600000)) < 15000); + test.done(); + }, + + 'toEpoch returns correct value'(test: nodeunit.Test) { + const date = new Date('Sun, 26 Jan 2020 00:53:20 GMT'); + test.equal(Expiration.atDate(date).toEpoch(), 1580000000); + test.done(); + }, + + 'isBefore'(test: nodeunit.Test) { + const expire = Expiration.after(Duration.days(2)); + test.ok(!expire.isBefore(Duration.days(1))); + test.ok(expire.isBefore(Duration.days(3))); + test.done(); + }, + + 'isAfter'(test: nodeunit.Test) { + const expire = Expiration.after(Duration.days(2)); + test.ok(expire.isAfter(Duration.days(1))); + test.ok(!expire.isAfter(Duration.days(3))); + test.done(); + }, + +}); diff --git a/packages/@aws-cdk/core/test/test.feature-flags.ts b/packages/@aws-cdk/core/test/test.feature-flags.ts new file mode 100644 index 0000000000000..abb1723e6a3dc --- /dev/null +++ b/packages/@aws-cdk/core/test/test.feature-flags.ts @@ -0,0 +1,31 @@ +import * as cxapi from '@aws-cdk/cx-api'; +import { Test } from 'nodeunit'; +import { FeatureFlags, Stack } from '../lib'; + +export = { + isEnabled: { + 'returns true when the flag is enabled'(test: Test) { + const stack = new Stack(); + stack.node.setContext(cxapi.NEW_STYLE_STACK_SYNTHESIS_CONTEXT, true); + + const actual = FeatureFlags.of(stack).isEnabled(cxapi.NEW_STYLE_STACK_SYNTHESIS_CONTEXT); + test.equals(actual, true); + test.done(); + }, + + 'falls back to the default'(test: Test) { + const stack = new Stack(); + + test.equals(FeatureFlags.of(stack).isEnabled(cxapi.NEW_STYLE_STACK_SYNTHESIS_CONTEXT), + cxapi.futureFlagDefault(cxapi.NEW_STYLE_STACK_SYNTHESIS_CONTEXT)); + test.done(); + }, + + 'invalid flag'(test: Test) { + const stack = new Stack(); + + test.equals(FeatureFlags.of(stack).isEnabled('non-existent-flag'), undefined); + test.done(); + }, + }, +} \ No newline at end of file diff --git 
a/packages/@aws-cdk/core/test/test.stack.ts b/packages/@aws-cdk/core/test/test.stack.ts index e553da7018732..a27a992b5fdfd 100644 --- a/packages/@aws-cdk/core/test/test.stack.ts +++ b/packages/@aws-cdk/core/test/test.stack.ts @@ -2,7 +2,7 @@ import * as cxapi from '@aws-cdk/cx-api'; import { Test } from 'nodeunit'; import { App, CfnCondition, CfnInclude, CfnOutput, CfnParameter, - CfnResource, Construct, Lazy, ScopedAws, Stack, validateString, ISynthesisSession, Tags, + CfnResource, Construct, Lazy, ScopedAws, Stack, validateString, ISynthesisSession, Tags, LegacyStackSynthesizer, DefaultStackSynthesizer, } from '../lib'; import { Intrinsic } from '../lib/private/intrinsic'; import { resolveReferences } from '../lib/private/refs'; @@ -747,7 +747,7 @@ export = { 'Stack.of() throws when there is no parent Stack'(test: Test) { const root = new Construct(undefined as any, 'Root'); const construct = new Construct(root, 'Construct'); - test.throws(() => Stack.of(construct), /No stack could be identified for the construct at path/); + test.throws(() => Stack.of(construct), /should be created in the scope of a Stack, but no Stack found/); test.done(); }, @@ -951,6 +951,30 @@ export = { test.ok(called, 'synthesize() not called for Stack'); test.done(); }, + + 'context can be set on a stack using a LegacySynthesizer'(test: Test) { + // WHEN + const stack = new Stack(undefined, undefined, { + synthesizer: new LegacyStackSynthesizer(), + }); + stack.node.setContext('something', 'value'); + + // THEN: no exception + + test.done(); + }, + + 'context can be set on a stack using a DefaultSynthesizer'(test: Test) { + // WHEN + const stack = new Stack(undefined, undefined, { + synthesizer: new DefaultStackSynthesizer(), + }); + stack.node.setContext('something', 'value'); + + // THEN: no exception + + test.done(); + }, }; class StackWithPostProcessor extends Stack { diff --git a/packages/@aws-cdk/custom-resources/lib/aws-custom-resource/aws-custom-resource.ts 
b/packages/@aws-cdk/custom-resources/lib/aws-custom-resource/aws-custom-resource.ts index 37f61ccb65260..db40e3269ca17 100644 --- a/packages/@aws-cdk/custom-resources/lib/aws-custom-resource/aws-custom-resource.ts +++ b/packages/@aws-cdk/custom-resources/lib/aws-custom-resource/aws-custom-resource.ts @@ -324,24 +324,31 @@ export class AwsCustomResource extends cdk.Construct implements iam.IGrantable { }); this.grantPrincipal = provider.grantPrincipal; + // Create the policy statements for the custom resource function role, or use the user-provided ones + const statements = []; if (props.policy.statements.length !== 0) { // Use custom statements provided by the user for (const statement of props.policy.statements) { - provider.addToRolePolicy(statement); + statements.push(statement); } } else { // Derive statements from AWS SDK calls for (const call of [props.onCreate, props.onUpdate, props.onDelete]) { if (call) { - provider.addToRolePolicy(new iam.PolicyStatement({ + const statement = new iam.PolicyStatement({ actions: [awsSdkToIamAction(call.service, call.action)], resources: props.policy.resources, - })); + }); + statements.push(statement); } } - } - + const policy = new iam.Policy(this, 'CustomResourcePolicy', { + statements: statements, + }); + if (provider.role !== undefined) { + policy.attachToRole(provider.role); + } const create = props.onCreate || props.onUpdate; this.customResource = new cdk.CustomResource(this, 'Resource', { resourceType: props.resourceType || 'Custom::AWS', @@ -354,6 +361,10 @@ export class AwsCustomResource extends cdk.Construct implements iam.IGrantable { installLatestAwsSdk: props.installLatestAwsSdk ?? 
true, }, }); + + // If the policy was deleted first, then the function might lose permissions to delete the custom resource + // This is here so that the policy doesn't get removed before onDelete is called + this.customResource.node.addDependency(policy); } /** diff --git a/packages/@aws-cdk/custom-resources/test/aws-custom-resource/aws-custom-resource.test.ts b/packages/@aws-cdk/custom-resources/test/aws-custom-resource/aws-custom-resource.test.ts index 184e72c4ed4ca..76d7ec9fa585d 100644 --- a/packages/@aws-cdk/custom-resources/test/aws-custom-resource/aws-custom-resource.test.ts +++ b/packages/@aws-cdk/custom-resources/test/aws-custom-resource/aws-custom-resource.test.ts @@ -286,11 +286,6 @@ test('implements IGrantable', () => { expect(stack).toHaveResource('AWS::IAM::Policy', { PolicyDocument: { Statement: [ - { - Action: 'service:Action', - Effect: 'Allow', - Resource: '*', - }, { Action: 'iam:PassRole', Effect: 'Allow', @@ -564,3 +559,52 @@ test('can specify function name', () => { FunctionName: 'my-cool-function', }); }); + +test('separate policies per custom resource', () => { + // GIVEN + const stack = new cdk.Stack(); + + // WHEN + new AwsCustomResource(stack, 'Custom1', { + onCreate: { + service: 'service1', + action: 'action1', + physicalResourceId: PhysicalResourceId.of('id1'), + }, + policy: AwsCustomResourcePolicy.fromSdkCalls({ resources: AwsCustomResourcePolicy.ANY_RESOURCE }), + }); + new AwsCustomResource(stack, 'Custom2', { + onCreate: { + service: 'service2', + action: 'action2', + physicalResourceId: PhysicalResourceId.of('id2'), + }, + policy: AwsCustomResourcePolicy.fromSdkCalls({ resources: AwsCustomResourcePolicy.ANY_RESOURCE }), + }); + + // THEN + expect(stack).toHaveResource('AWS::IAM::Policy', { + PolicyDocument: { + Statement: [ + { + Action: 'service1:Action1', + Effect: 'Allow', + Resource: '*', + }, + ], + Version: '2012-10-17', + }, + }); + expect(stack).toHaveResource('AWS::IAM::Policy', { + PolicyDocument: { + Statement: [ + { 
+ Action: 'service2:Action2', + Effect: 'Allow', + Resource: '*', + }, + ], + Version: '2012-10-17', + }, + }); +}); diff --git a/packages/@aws-cdk/custom-resources/test/aws-custom-resource/integ.aws-custom-resource.expected.json b/packages/@aws-cdk/custom-resources/test/aws-custom-resource/integ.aws-custom-resource.expected.json index f8527c3bdd8eb..81da053efe11d 100644 --- a/packages/@aws-cdk/custom-resources/test/aws-custom-resource/integ.aws-custom-resource.expected.json +++ b/packages/@aws-cdk/custom-resources/test/aws-custom-resource/integ.aws-custom-resource.expected.json @@ -45,7 +45,10 @@ "InstallLatestAwsSdk": true }, "UpdateReplacePolicy": "Delete", - "DeletionPolicy": "Delete" + "DeletionPolicy": "Delete", + "DependsOn": [ + "PublishCustomResourcePolicyDF696FCA" + ] }, "AWS679f53fac002430cb0da5b7982bd2287ServiceRoleC1EA0FF2": { "Type": "AWS::IAM::Role", @@ -78,37 +81,6 @@ ] } }, - "AWS679f53fac002430cb0da5b7982bd2287ServiceRoleDefaultPolicyD28E1A5E": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyDocument": { - "Statement": [ - { - "Action": "sns:Publish", - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": "sns:ListTopics", - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": "ssm:GetParameter", - "Effect": "Allow", - "Resource": "*" - } - ], - "Version": "2012-10-17" - }, - "PolicyName": "AWS679f53fac002430cb0da5b7982bd2287ServiceRoleDefaultPolicyD28E1A5E", - "Roles": [ - { - "Ref": "AWS679f53fac002430cb0da5b7982bd2287ServiceRoleC1EA0FF2" - } - ] - } - }, "AWS679f53fac002430cb0da5b7982bd22872D164C4C": { "Type": "AWS::Lambda::Function", "Properties": { @@ -161,7 +133,6 @@ "Timeout": 120 }, "DependsOn": [ - "AWS679f53fac002430cb0da5b7982bd2287ServiceRoleDefaultPolicyD28E1A5E", "AWS679f53fac002430cb0da5b7982bd2287ServiceRoleC1EA0FF2" ] }, @@ -191,6 +162,7 @@ "InstallLatestAwsSdk": true }, "DependsOn": [ + "ListTopicsCustomResourcePolicy31A8396A", "TopicBFC7AF6E" ], "UpdateReplacePolicy": "Delete", @@ -241,7 +213,44 @@ 
"InstallLatestAwsSdk": true }, "UpdateReplacePolicy": "Delete", - "DeletionPolicy": "Delete" + "DeletionPolicy": "Delete", + "DependsOn": [ + "GetParameterCustomResourcePolicyD8E5D455" + ] + }, + "PublishCustomResourcePolicyDF696FCA": { + "Type": "AWS::IAM::Policy", + "Properties": { + "PolicyDocument": { + "Statement": [{"Action":"sns:Publish","Effect":"Allow","Resource":"*"}], + "Version": "2012-10-17" + }, + "PolicyName": "PublishCustomResourcePolicyDF696FCA", + "Roles": [{"Ref":"AWS679f53fac002430cb0da5b7982bd2287ServiceRoleC1EA0FF2"}] + } + }, + "ListTopicsCustomResourcePolicy31A8396A": { + "Type": "AWS::IAM::Policy", + "Properties": { + "PolicyDocument": { + "Statement": [{"Action":"sns:ListTopics","Effect":"Allow","Resource":"*"}], + "Version": "2012-10-17" + }, + "PolicyName": "ListTopicsCustomResourcePolicy31A8396A", + "Roles": [{"Ref":"AWS679f53fac002430cb0da5b7982bd2287ServiceRoleC1EA0FF2"}] + }, + "DependsOn": ["TopicBFC7AF6E"] + }, + "GetParameterCustomResourcePolicyD8E5D455": { + "Type": "AWS::IAM::Policy", + "Properties": { + "PolicyDocument": { + "Statement": [{"Action":"ssm:GetParameter","Effect":"Allow","Resource":"*"}], + "Version": "2012-10-17" + }, + "PolicyName": "GetParameterCustomResourcePolicyD8E5D455", + "Roles": [{"Ref":"AWS679f53fac002430cb0da5b7982bd2287ServiceRoleC1EA0FF2"}] + } } }, "Parameters": { diff --git a/packages/@aws-cdk/cx-api/lib/artifacts/cloudformation-artifact.ts b/packages/@aws-cdk/cx-api/lib/artifacts/cloudformation-artifact.ts index 66a39b250ca1c..167d589753b63 100644 --- a/packages/@aws-cdk/cx-api/lib/artifacts/cloudformation-artifact.ts +++ b/packages/@aws-cdk/cx-api/lib/artifacts/cloudformation-artifact.ts @@ -114,12 +114,19 @@ export class CloudFormationStackArtifact extends CloudArtifact { this.originalName = this.stackName; } + /** + * Full path to the template file + */ + public get templateFullPath() { + return path.join(this.assembly.directory, this.templateFile); + } + /** * The CloudFormation template for 
this stack. */ public get template(): any { if (this._template === undefined) { - this._template = JSON.parse(fs.readFileSync(path.join(this.assembly.directory, this.templateFile), 'utf-8')); + this._template = JSON.parse(fs.readFileSync(this.templateFullPath, 'utf-8')); } return this._template; } diff --git a/packages/@aws-cdk/cx-api/lib/cloud-assembly.ts b/packages/@aws-cdk/cx-api/lib/cloud-assembly.ts index 6ab4b595ae36b..8155d8a2c33ca 100644 --- a/packages/@aws-cdk/cx-api/lib/cloud-assembly.ts +++ b/packages/@aws-cdk/cx-api/lib/cloud-assembly.ts @@ -172,13 +172,22 @@ export class CloudAssembly { * @returns all the CloudFormation stack artifacts that are included in this assembly. */ public get stacks(): CloudFormationStackArtifact[] { - const result = new Array(); - for (const a of this.artifacts) { - if (a instanceof CloudFormationStackArtifact) { - result.push(a); - } + return this.artifacts.filter(isCloudFormationStackArtifact); + + function isCloudFormationStackArtifact(x: any): x is CloudFormationStackArtifact { + return x instanceof CloudFormationStackArtifact; + } + } + + /** + * The nested assembly artifacts in this assembly + */ + public get nestedAssemblies(): NestedCloudAssemblyArtifact[] { + return this.artifacts.filter(isNestedCloudAssemblyArtifact); + + function isNestedCloudAssemblyArtifact(x: any): x is NestedCloudAssemblyArtifact { + return x instanceof NestedCloudAssemblyArtifact; } - return result; } private validateDeps() { diff --git a/packages/@aws-cdk/cx-api/lib/features.ts b/packages/@aws-cdk/cx-api/lib/features.ts index 5483e64f92021..0a4ee8b8d02b0 100644 --- a/packages/@aws-cdk/cx-api/lib/features.ts +++ b/packages/@aws-cdk/cx-api/lib/features.ts @@ -63,5 +63,20 @@ export const FUTURE_FLAGS = { [STACK_RELATIVE_EXPORTS_CONTEXT]: 'true', // We will advertise this flag when the feature is complete - // [NEW_STYLE_STACK_SYNTHESIS]: 'true', + // [NEW_STYLE_STACK_SYNTHESIS_CONTEXT]: 'true', }; + +/** + * The set of defaults that should be 
applied if the feature flag is not + * explicitly configured. + */ +const FUTURE_FLAGS_DEFAULTS: { [key: string]: boolean } = { + [ENABLE_STACK_NAME_DUPLICATES_CONTEXT]: false, + [ENABLE_DIFF_NO_FAIL_CONTEXT]: false, + [STACK_RELATIVE_EXPORTS_CONTEXT]: false, + [NEW_STYLE_STACK_SYNTHESIS_CONTEXT]: false, +}; + +export function futureFlagDefault(flag: string): boolean { + return FUTURE_FLAGS_DEFAULTS[flag]; +} \ No newline at end of file diff --git a/packages/@aws-cdk/cx-api/test/features.test.ts b/packages/@aws-cdk/cx-api/test/features.test.ts new file mode 100644 index 0000000000000..fbff6c236b984 --- /dev/null +++ b/packages/@aws-cdk/cx-api/test/features.test.ts @@ -0,0 +1,7 @@ +import * as feats from '../lib/features'; + +test('all future flags have defaults configured', () => { + Object.keys(feats.FUTURE_FLAGS).forEach(flag => { + expect(typeof(feats.futureFlagDefault(flag))).toEqual('boolean'); + }); +}); \ No newline at end of file diff --git a/packages/@aws-cdk/pipelines/README.md b/packages/@aws-cdk/pipelines/README.md index 8f19d40253bfb..0fe49601d145a 100644 --- a/packages/@aws-cdk/pipelines/README.md +++ b/packages/@aws-cdk/pipelines/README.md @@ -384,6 +384,34 @@ files from several sources: * Directoy from the source repository * Additional compiled artifacts from the synth step +### Controlling IAM permissions + +IAM permissions can be added to the execution role of a `ShellScriptAction` in +two ways. + +Either pass additional policy statements in the `rolePolicyStatements` property: + +```ts +new ShellScriptAction({ + // ... + rolePolicyStatements: [ + new iam.PolicyStatement({ + actions: ['s3:GetObject'], + resources: ['*'], + }), + ], +})); +``` + +The Action can also be used as a Grantable after having been added to a Pipeline: + +```ts +const action = new ShellScriptAction({ /* ... 
*/ }); +pipeline.addStage('Test').addActions(action); + +bucket.grantRead(action); +``` + #### Additional files from the source repository Bringing in additional files from the source repository is appropriate if the diff --git a/packages/@aws-cdk/pipelines/lib/pipeline.ts b/packages/@aws-cdk/pipelines/lib/pipeline.ts index 6355d836c15c8..9777bcdd4af4d 100644 --- a/packages/@aws-cdk/pipelines/lib/pipeline.ts +++ b/packages/@aws-cdk/pipelines/lib/pipeline.ts @@ -1,7 +1,7 @@ import * as path from 'path'; import * as codepipeline from '@aws-cdk/aws-codepipeline'; import * as iam from '@aws-cdk/aws-iam'; -import { App, CfnOutput, Construct, PhysicalName, Stack, Stage, Aspects } from '@aws-cdk/core'; +import { Annotations, App, CfnOutput, Construct, PhysicalName, Stack, Stage, Aspects } from '@aws-cdk/core'; import { AssetType, DeployCdkStackAction, PublishAssetsAction, UpdatePipelineAction } from './actions'; import { appOf, assemblyBuilderOf } from './private/construct-internals'; import { AddStageOptions, AssetPublishingCommand, CdkStage, StackOutput } from './stage'; @@ -266,7 +266,7 @@ export class CdkPipeline extends Construct { const depAction = stackActions.find(s => s.stackArtifactId === depId); if (depAction === undefined) { - this.node.addWarning(`Stack '${stackAction.stackName}' depends on stack ` + + Annotations.of(this).addWarning(`Stack '${stackAction.stackName}' depends on stack ` + `'${depId}', but that dependency is not deployed through the pipeline!`); } else if (!(depAction.executeRunOrder < stackAction.prepareRunOrder)) { yield `Stack '${stackAction.stackName}' depends on stack ` + @@ -336,6 +336,13 @@ class AssetPublishing extends Construct { // FIXME: this is silly, we need the relative path here but no easy way to get it const relativePath = path.relative(this.myCxAsmRoot, command.assetManifestPath); + // The path cannot be outside the asm root. 
I don't really understand how this could ever + // come to pass, but apparently it has (see https://github.com/aws/aws-cdk/issues/9766). + // Add a sanity check here so we can catch it more quickly next time. + if (relativePath.startsWith(`..${path.sep}`)) { + throw new Error(`The asset manifest (${command.assetManifestPath}) cannot be outside the Cloud Assembly directory (${this.myCxAsmRoot}). Please report this error at https://github.com/aws/aws-cdk/issues to help us debug why this is happening.`); + } + // Late-binding here (rather than in the constructor) to prevent creating the role in cases where no asset actions are created. if (!this.assetRoles[command.assetType]) { this.generateAssetRole(command.assetType); diff --git a/packages/@aws-cdk/pipelines/lib/stage.ts b/packages/@aws-cdk/pipelines/lib/stage.ts index e916d8131c7a2..4f599ae52bf09 100644 --- a/packages/@aws-cdk/pipelines/lib/stage.ts +++ b/packages/@aws-cdk/pipelines/lib/stage.ts @@ -72,6 +72,12 @@ export class CdkStage extends Construct { public addApplication(appStage: Stage, options: AddStageOptions = {}) { const asm = appStage.synth(); + if (asm.stacks.length === 0) { + // If we don't check here, a more puzzling "stage contains no actions" + // error will be thrown come deployment time. 
+ throw new Error(`The given Stage construct ('${appStage.node.path}') should contain at least one Stack`); + } + const sortedTranches = topologicalSort(asm.stacks, stack => stack.id, stack => stack.dependencies.map(d => d.id)); diff --git a/packages/@aws-cdk/pipelines/lib/synths/simple-synth-action.ts b/packages/@aws-cdk/pipelines/lib/synths/simple-synth-action.ts index 62bc8299bdb4b..88d36ae81d3eb 100644 --- a/packages/@aws-cdk/pipelines/lib/synths/simple-synth-action.ts +++ b/packages/@aws-cdk/pipelines/lib/synths/simple-synth-action.ts @@ -1,10 +1,11 @@ +import * as crypto from 'crypto'; import * as path from 'path'; import * as codebuild from '@aws-cdk/aws-codebuild'; import * as codepipeline from '@aws-cdk/aws-codepipeline'; import * as codepipeline_actions from '@aws-cdk/aws-codepipeline-actions'; import * as events from '@aws-cdk/aws-events'; -import { PolicyStatement } from '@aws-cdk/aws-iam'; -import { Construct } from '@aws-cdk/core'; +import * as iam from '@aws-cdk/aws-iam'; +import { Construct, Stack } from '@aws-cdk/core'; import { cloudAssemblyBuildSpecDir } from '../private/construct-internals'; import { copyEnvironmentVariables, filterEmpty } from './_util'; @@ -86,7 +87,7 @@ export interface SimpleSynthOptions { * * @default - No policy statements added to CodeBuild Project Role */ - readonly rolePolicyStatements?: PolicyStatement[]; + readonly rolePolicyStatements?: iam.PolicyStatement[]; } /** @@ -171,7 +172,7 @@ export interface AdditionalArtifact { /** * A standard synth with a generated buildspec */ -export class SimpleSynthAction implements codepipeline.IAction { +export class SimpleSynthAction implements codepipeline.IAction, iam.IGrantable { /** * Create a standard NPM synth action @@ -205,6 +206,7 @@ export class SimpleSynthAction implements codepipeline.IAction { private _action?: codepipeline_actions.CodeBuildAction; private _actionProperties: codepipeline.ActionProperties; + private _project?: codebuild.IProject; constructor(private 
readonly props: SimpleSynthActionProps) { // A number of actionProperties get read before bind() is even called (so before we @@ -253,6 +255,16 @@ export class SimpleSynthAction implements codepipeline.IAction { return this._actionProperties; } + /** + * Project generated to run the synth command + */ + public get project(): codebuild.IProject { + if (!this._project) { + throw new Error('Project becomes available after SimpleSynthAction has been bound to a stage'); + } + return this._project; + } + /** * Exists to implement IAction */ @@ -262,32 +274,48 @@ export class SimpleSynthAction implements codepipeline.IAction { const testCommands = this.props.testCommands ?? []; const synthCommand = this.props.synthCommand; - const project = new codebuild.PipelineProject(scope, 'CdkBuildProject', { - projectName: this.props.projectName ?? this.props.projectName, - environment: { buildImage: codebuild.LinuxBuildImage.STANDARD_4_0, ...this.props.environment }, - buildSpec: codebuild.BuildSpec.fromObject({ - version: '0.2', - phases: { - pre_build: { - commands: filterEmpty([ - this.props.subdirectory ? `cd ${this.props.subdirectory}` : '', - ...installCommands, - ]), - }, - build: { - commands: filterEmpty([ - ...buildCommands, - ...testCommands, - synthCommand, - ]), - }, + const buildSpec = codebuild.BuildSpec.fromObject({ + version: '0.2', + phases: { + pre_build: { + commands: filterEmpty([ + this.props.subdirectory ? 
`cd ${this.props.subdirectory}` : '', + ...installCommands, + ]), + }, + build: { + commands: filterEmpty([ + ...buildCommands, + ...testCommands, + synthCommand, + ]), }, - artifacts: renderArtifacts(this), - }), - environmentVariables: { - ...copyEnvironmentVariables(...this.props.copyEnvironmentVariables || []), - ...this.props.environmentVariables, }, + artifacts: renderArtifacts(this), + }); + + const environment = { buildImage: codebuild.LinuxBuildImage.STANDARD_4_0, ...this.props.environment }; + + const environmentVariables = { + ...copyEnvironmentVariables(...this.props.copyEnvironmentVariables || []), + ...this.props.environmentVariables, + }; + + // A hash over the values that make the CodeBuild Project unique (and necessary + // to restart the pipeline if one of them changes). projectName is not necessary to include + // here because the pipeline will definitely restart if projectName changes. + // (Resolve tokens) + const projectConfigHash = hash(Stack.of(scope).resolve({ + environment, + buildSpecString: buildSpec.toBuildSpec(), + environmentVariables, + })); + + const project = new codebuild.PipelineProject(scope, 'CdkBuildProject', { + projectName: this.props.projectName, + environment, + buildSpec, + environmentVariables, }); if (this.props.rolePolicyStatements !== undefined) { @@ -296,10 +324,20 @@ export class SimpleSynthAction implements codepipeline.IAction { }); } + this._project = project; + this._action = new codepipeline_actions.CodeBuildAction({ actionName: this.actionProperties.actionName, input: this.props.sourceArtifact, outputs: [this.props.cloudAssemblyArtifact, ...(this.props.additionalArtifacts ?? []).map(a => a.artifact)], + + // Inclusion of the hash here will lead to the pipeline structure for any changes + // made the config of the underlying CodeBuild Project. + // Hence, the pipeline will be restarted. This is necessary if the users + // adds (for example) build or test commands to the buildspec. 
+ environmentVariables: { + _PROJECT_CONFIG_HASH: { value: projectConfigHash }, + }, project, }); this._actionProperties = this._action.actionProperties; @@ -339,6 +377,13 @@ export class SimpleSynthAction implements codepipeline.IAction { } } + /** + * The CodeBuild Project's principal + */ + public get grantPrincipal(): iam.IPrincipal { + return this.project.grantPrincipal; + } + /** * Exists to implement IAction */ @@ -411,4 +456,10 @@ export interface StandardYarnSynthOptions extends SimpleSynthOptions { * @default 'npx cdk synth' */ readonly synthCommand?: string; -} \ No newline at end of file +} + +function hash(obj: A) { + const d = crypto.createHash('sha256'); + d.update(JSON.stringify(obj)); + return d.digest('hex'); +} diff --git a/packages/@aws-cdk/pipelines/lib/validation/shell-script-action.ts b/packages/@aws-cdk/pipelines/lib/validation/shell-script-action.ts index b5d1d820b8a6b..ae4f8367f90eb 100644 --- a/packages/@aws-cdk/pipelines/lib/validation/shell-script-action.ts +++ b/packages/@aws-cdk/pipelines/lib/validation/shell-script-action.ts @@ -2,6 +2,7 @@ import * as codebuild from '@aws-cdk/aws-codebuild'; import * as codepipeline from '@aws-cdk/aws-codepipeline'; import * as codepipeline_actions from '@aws-cdk/aws-codepipeline-actions'; import * as events from '@aws-cdk/aws-events'; +import * as iam from '@aws-cdk/aws-iam'; import { Construct } from '@aws-cdk/core'; import { StackOutput } from '../stage'; @@ -64,12 +65,19 @@ export interface ShellScriptActionProps { * @default 100 */ readonly runOrder?: number; + + /** + * Additional policy statements to add to the execution role + * + * @default - No policy statements + */ + readonly rolePolicyStatements?: iam.PolicyStatement[]; } /** * Validate a revision using shell commands */ -export class ShellScriptAction implements codepipeline.IAction { +export class ShellScriptAction implements codepipeline.IAction, iam.IGrantable { private _project?: codebuild.IProject; private _action?: 
codepipeline_actions.CodeBuildAction; @@ -99,6 +107,13 @@ export class ShellScriptAction implements codepipeline.IAction { } } + /** + * The CodeBuild Project's principal + */ + public get grantPrincipal(): iam.IPrincipal { + return this.project.grantPrincipal; + } + /** * Exists to implement IAction */ @@ -147,6 +162,9 @@ export class ShellScriptAction implements codepipeline.IAction { }, }), }); + for (const statement of this.props.rolePolicyStatements ?? []) { + this._project.addToRolePolicy(statement); + } this._action = new codepipeline_actions.CodeBuildAction({ actionName: this.props.actionName, diff --git a/packages/@aws-cdk/pipelines/test/builds.test.ts b/packages/@aws-cdk/pipelines/test/builds.test.ts index c270aa176cad9..41c1187105700 100644 --- a/packages/@aws-cdk/pipelines/test/builds.test.ts +++ b/packages/@aws-cdk/pipelines/test/builds.test.ts @@ -1,6 +1,8 @@ -import { arrayWith, deepObjectLike, encodedJson } from '@aws-cdk/assert'; +import { arrayWith, deepObjectLike, encodedJson, objectLike, Capture } from '@aws-cdk/assert'; import '@aws-cdk/assert/jest'; +import * as cbuild from '@aws-cdk/aws-codebuild'; import * as codepipeline from '@aws-cdk/aws-codepipeline'; +import * as s3 from '@aws-cdk/aws-s3'; import { Stack } from '@aws-cdk/core'; import * as cdkp from '../lib'; import { PIPELINE_ENV, TestApp, TestGitHubNpmPipeline } from './testutil'; @@ -176,9 +178,6 @@ test.each([['npm'], ['yarn']])('%s can have its install command overridden', (np test('Standard (NPM) synth can output additional artifacts', () => { // WHEN - sourceArtifact = new codepipeline.Artifact(); - cloudAssemblyArtifact = new codepipeline.Artifact('CloudAsm'); - const addlArtifact = new codepipeline.Artifact('IntegTest'); new TestGitHubNpmPipeline(pipelineStack, 'Cdk', { sourceArtifact, @@ -219,6 +218,110 @@ test('Standard (NPM) synth can output additional artifacts', () => { }); }); +test('Pipeline action contains a hash that changes as the buildspec changes', () => { + const 
hash1 = synthWithAction((sa, cxa) => cdkp.SimpleSynthAction.standardNpmSynth({ + sourceArtifact: sa, + cloudAssemblyArtifact: cxa, + })); + + // To make sure the hash is not just random :) + const hash1prime = synthWithAction((sa, cxa) => cdkp.SimpleSynthAction.standardNpmSynth({ + sourceArtifact: sa, + cloudAssemblyArtifact: cxa, + })); + + const hash2 = synthWithAction((sa, cxa) => cdkp.SimpleSynthAction.standardNpmSynth({ + sourceArtifact: sa, + cloudAssemblyArtifact: cxa, + installCommand: 'do install', + })); + const hash3 = synthWithAction((sa, cxa) => cdkp.SimpleSynthAction.standardNpmSynth({ + sourceArtifact: sa, + cloudAssemblyArtifact: cxa, + environment: { + computeType: cbuild.ComputeType.LARGE, + }, + })); + const hash4 = synthWithAction((sa, cxa) => cdkp.SimpleSynthAction.standardNpmSynth({ + sourceArtifact: sa, + cloudAssemblyArtifact: cxa, + environmentVariables: { + xyz: { value: 'SOME-VALUE' }, + }, + })); + + expect(hash1).toEqual(hash1prime); + + expect(hash1).not.toEqual(hash2); + expect(hash1).not.toEqual(hash3); + expect(hash1).not.toEqual(hash4); + expect(hash2).not.toEqual(hash3); + expect(hash2).not.toEqual(hash4); + expect(hash3).not.toEqual(hash4); + + function synthWithAction(cb: (sourceArtifact: codepipeline.Artifact, cloudAssemblyArtifact: codepipeline.Artifact) => codepipeline.IAction) { + const _app = new TestApp({ outdir: 'testcdk.out' }); + const _pipelineStack = new Stack(_app, 'PipelineStack', { env: PIPELINE_ENV }); + const _sourceArtifact = new codepipeline.Artifact(); + const _cloudAssemblyArtifact = new codepipeline.Artifact('CloudAsm'); + + new TestGitHubNpmPipeline(_pipelineStack, 'Cdk', { + sourceArtifact: _sourceArtifact, + cloudAssemblyArtifact: _cloudAssemblyArtifact, + synthAction: cb(_sourceArtifact, _cloudAssemblyArtifact), + }); + + const theHash = Capture.aString(); + expect(_pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { + Stages: arrayWith({ + Name: 'Build', + Actions: [ + objectLike({ + 
Name: 'Synth', + Configuration: objectLike({ + EnvironmentVariables: encodedJson([ + { + name: '_PROJECT_CONFIG_HASH', + type: 'PLAINTEXT', + value: theHash.capture(), + }, + ]), + }), + }), + ], + }), + }); + + return theHash.capturedValue; + } +}); + +test('SimpleSynthAction is IGrantable', () => { + // GIVEN + const synthAction = cdkp.SimpleSynthAction.standardNpmSynth({ + sourceArtifact, + cloudAssemblyArtifact, + }); + new TestGitHubNpmPipeline(pipelineStack, 'Cdk', { + sourceArtifact, + cloudAssemblyArtifact, + synthAction, + }); + const bucket = new s3.Bucket(pipelineStack, 'Bucket'); + + // WHEN + bucket.grantRead(synthAction); + + // THEN + expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', { + PolicyDocument: { + Statement: arrayWith(deepObjectLike({ + Action: ['s3:GetObject*', 's3:GetBucket*', 's3:List*'], + })), + }, + }); +}); + function npmYarnBuild(npmYarn: string) { if (npmYarn === 'npm') { return cdkp.SimpleSynthAction.standardNpmSynth; } if (npmYarn === 'yarn') { return cdkp.SimpleSynthAction.standardYarnSynth; } diff --git a/packages/@aws-cdk/pipelines/test/cross-environment-infra.test.ts b/packages/@aws-cdk/pipelines/test/cross-environment-infra.test.ts index 15ee878022574..dac6ebb595f0b 100644 --- a/packages/@aws-cdk/pipelines/test/cross-environment-infra.test.ts +++ b/packages/@aws-cdk/pipelines/test/cross-environment-infra.test.ts @@ -45,6 +45,24 @@ test('in a cross-account/cross-region setup, artifact bucket can be read by depl })), }, }); + + // And the key to go along with it + expect(supportStack).toHaveResourceLike('AWS::KMS::Key', { + KeyPolicy: { + Statement: arrayWith(objectLike({ + Action: arrayWith('kms:Decrypt', 'kms:DescribeKey'), + Principal: { + AWS: { + 'Fn::Join': ['', [ + 'arn:', + { Ref: 'AWS::Partition' }, + stringLike('*-deploy-role-*'), + ]], + }, + }, + })), + }, + }); }); test('in a cross-account/same-region setup, artifact bucket can be read by deploy role', () => { diff --git 
a/packages/@aws-cdk/pipelines/test/integ.pipeline-with-assets.expected.json b/packages/@aws-cdk/pipelines/test/integ.pipeline-with-assets.expected.json index bf0e04a7f96b9..4c8786774af7e 100644 --- a/packages/@aws-cdk/pipelines/test/integ.pipeline-with-assets.expected.json +++ b/packages/@aws-cdk/pipelines/test/integ.pipeline-with-assets.expected.json @@ -32,211 +32,6 @@ } }, "Resources": { - "PipelineUpdatePipelineSelfMutationRole57E559E8": { - "Type": "AWS::IAM::Role", - "Properties": { - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "codebuild.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - } - } - }, - "PipelineUpdatePipelineSelfMutationRoleDefaultPolicyA225DA4E": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyDocument": { - "Statement": [ - { - "Action": [ - "logs:CreateLogGroup", - "logs:CreateLogStream", - "logs:PutLogEvents" - ], - "Effect": "Allow", - "Resource": [ - { - "Fn::Join": [ - "", - [ - "arn:", - { - "Ref": "AWS::Partition" - }, - ":logs:test-region:12345678:log-group:/aws/codebuild/", - { - "Ref": "PipelineUpdatePipelineSelfMutationDAA41400" - } - ] - ] - }, - { - "Fn::Join": [ - "", - [ - "arn:", - { - "Ref": "AWS::Partition" - }, - ":logs:test-region:12345678:log-group:/aws/codebuild/", - { - "Ref": "PipelineUpdatePipelineSelfMutationDAA41400" - }, - ":*" - ] - ] - } - ] - }, - { - "Action": [ - "codebuild:CreateReportGroup", - "codebuild:CreateReport", - "codebuild:UpdateReport", - "codebuild:BatchPutTestCases" - ], - "Effect": "Allow", - "Resource": { - "Fn::Join": [ - "", - [ - "arn:", - { - "Ref": "AWS::Partition" - }, - ":codebuild:test-region:12345678:report-group/", - { - "Ref": "PipelineUpdatePipelineSelfMutationDAA41400" - }, - "-*" - ] - ] - } - }, - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Resource": [ - "arn:*:iam::*:role/*-deploy-role-*", - "arn:*:iam::*:role/*-publishing-role-*" - ] - }, - { - "Action": 
"cloudformation:DescribeStacks", - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": "s3:ListBucket", - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "s3:GetObject*", - "s3:GetBucket*", - "s3:List*" - ], - "Effect": "Allow", - "Resource": [ - { - "Fn::GetAtt": [ - "PipelineArtifactsBucketAEA9A052", - "Arn" - ] - }, - { - "Fn::Join": [ - "", - [ - { - "Fn::GetAtt": [ - "PipelineArtifactsBucketAEA9A052", - "Arn" - ] - }, - "/*" - ] - ] - } - ] - }, - { - "Action": [ - "kms:Decrypt", - "kms:DescribeKey" - ], - "Effect": "Allow", - "Resource": { - "Fn::GetAtt": [ - "PipelineArtifactsBucketEncryptionKeyF5BF0670", - "Arn" - ] - } - }, - { - "Action": [ - "kms:Decrypt", - "kms:Encrypt", - "kms:ReEncrypt*", - "kms:GenerateDataKey*" - ], - "Effect": "Allow", - "Resource": { - "Fn::GetAtt": [ - "PipelineArtifactsBucketEncryptionKeyF5BF0670", - "Arn" - ] - } - } - ], - "Version": "2012-10-17" - }, - "PolicyName": "PipelineUpdatePipelineSelfMutationRoleDefaultPolicyA225DA4E", - "Roles": [ - { - "Ref": "PipelineUpdatePipelineSelfMutationRole57E559E8" - } - ] - } - }, - "PipelineUpdatePipelineSelfMutationDAA41400": { - "Type": "AWS::CodeBuild::Project", - "Properties": { - "Artifacts": { - "Type": "CODEPIPELINE" - }, - "Environment": { - "ComputeType": "BUILD_GENERAL1_SMALL", - "Image": "aws/codebuild/standard:4.0", - "PrivilegedMode": false, - "Type": "LINUX_CONTAINER" - }, - "ServiceRole": { - "Fn::GetAtt": [ - "PipelineUpdatePipelineSelfMutationRole57E559E8", - "Arn" - ] - }, - "Source": { - "BuildSpec": "{\n \"version\": \"0.2\",\n \"phases\": {\n \"install\": {\n \"commands\": \"npm install -g aws-cdk\"\n },\n \"build\": {\n \"commands\": [\n \"cdk -a . 
deploy PipelineStack --require-approval=never --verbose\"\n ]\n }\n }\n}", - "Type": "CODEPIPELINE" - }, - "EncryptionKey": { - "Fn::GetAtt": [ - "PipelineArtifactsBucketEncryptionKeyF5BF0670", - "Arn" - ] - } - } - }, "PipelineArtifactsBucketEncryptionKeyF5BF0670": { "Type": "AWS::KMS::Key", "Properties": { @@ -693,7 +488,8 @@ "Configuration": { "ProjectName": { "Ref": "PipelineBuildSynthCdkBuildProject6BEFA8E6" - } + }, + "EnvironmentVariables": "[{\"name\":\"_PROJECT_CONFIG_HASH\",\"type\":\"PLAINTEXT\",\"value\":\"7f09efece2ae66563f27569c480f9602225200051089f57486c5fa0b52095956\"}]" }, "InputArtifacts": [ { @@ -1490,6 +1286,211 @@ } } }, + "PipelineUpdatePipelineSelfMutationRole57E559E8": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "codebuild.amazonaws.com" + } + } + ], + "Version": "2012-10-17" + } + } + }, + "PipelineUpdatePipelineSelfMutationRoleDefaultPolicyA225DA4E": { + "Type": "AWS::IAM::Policy", + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents" + ], + "Effect": "Allow", + "Resource": [ + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":logs:test-region:12345678:log-group:/aws/codebuild/", + { + "Ref": "PipelineUpdatePipelineSelfMutationDAA41400" + } + ] + ] + }, + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":logs:test-region:12345678:log-group:/aws/codebuild/", + { + "Ref": "PipelineUpdatePipelineSelfMutationDAA41400" + }, + ":*" + ] + ] + } + ] + }, + { + "Action": [ + "codebuild:CreateReportGroup", + "codebuild:CreateReport", + "codebuild:UpdateReport", + "codebuild:BatchPutTestCases" + ], + "Effect": "Allow", + "Resource": { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":codebuild:test-region:12345678:report-group/", + { + "Ref": 
"PipelineUpdatePipelineSelfMutationDAA41400" + }, + "-*" + ] + ] + } + }, + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Resource": [ + "arn:*:iam::*:role/*-deploy-role-*", + "arn:*:iam::*:role/*-publishing-role-*" + ] + }, + { + "Action": "cloudformation:DescribeStacks", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "s3:ListBucket", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "s3:GetObject*", + "s3:GetBucket*", + "s3:List*" + ], + "Effect": "Allow", + "Resource": [ + { + "Fn::GetAtt": [ + "PipelineArtifactsBucketAEA9A052", + "Arn" + ] + }, + { + "Fn::Join": [ + "", + [ + { + "Fn::GetAtt": [ + "PipelineArtifactsBucketAEA9A052", + "Arn" + ] + }, + "/*" + ] + ] + } + ] + }, + { + "Action": [ + "kms:Decrypt", + "kms:DescribeKey" + ], + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "PipelineArtifactsBucketEncryptionKeyF5BF0670", + "Arn" + ] + } + }, + { + "Action": [ + "kms:Decrypt", + "kms:Encrypt", + "kms:ReEncrypt*", + "kms:GenerateDataKey*" + ], + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "PipelineArtifactsBucketEncryptionKeyF5BF0670", + "Arn" + ] + } + } + ], + "Version": "2012-10-17" + }, + "PolicyName": "PipelineUpdatePipelineSelfMutationRoleDefaultPolicyA225DA4E", + "Roles": [ + { + "Ref": "PipelineUpdatePipelineSelfMutationRole57E559E8" + } + ] + } + }, + "PipelineUpdatePipelineSelfMutationDAA41400": { + "Type": "AWS::CodeBuild::Project", + "Properties": { + "Artifacts": { + "Type": "CODEPIPELINE" + }, + "Environment": { + "ComputeType": "BUILD_GENERAL1_SMALL", + "Image": "aws/codebuild/standard:4.0", + "PrivilegedMode": false, + "Type": "LINUX_CONTAINER" + }, + "ServiceRole": { + "Fn::GetAtt": [ + "PipelineUpdatePipelineSelfMutationRole57E559E8", + "Arn" + ] + }, + "Source": { + "BuildSpec": "{\n \"version\": \"0.2\",\n \"phases\": {\n \"install\": {\n \"commands\": \"npm install -g aws-cdk\"\n },\n \"build\": {\n \"commands\": [\n \"cdk -a . 
deploy PipelineStack --require-approval=never --verbose\"\n ]\n }\n }\n}", + "Type": "CODEPIPELINE" + }, + "EncryptionKey": { + "Fn::GetAtt": [ + "PipelineArtifactsBucketEncryptionKeyF5BF0670", + "Arn" + ] + } + } + }, "PipelineAssetsFileRole59943A77": { "Type": "AWS::IAM::Role", "Properties": { @@ -1694,4 +1695,4 @@ } } } -} +} \ No newline at end of file diff --git a/packages/@aws-cdk/pipelines/test/integ.pipeline.expected.json b/packages/@aws-cdk/pipelines/test/integ.pipeline.expected.json index d608c7a22b842..06d75a873fd53 100644 --- a/packages/@aws-cdk/pipelines/test/integ.pipeline.expected.json +++ b/packages/@aws-cdk/pipelines/test/integ.pipeline.expected.json @@ -32,211 +32,6 @@ } }, "Resources": { - "PipelineUpdatePipelineSelfMutationRole57E559E8": { - "Type": "AWS::IAM::Role", - "Properties": { - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": "codebuild.amazonaws.com" - } - } - ], - "Version": "2012-10-17" - } - } - }, - "PipelineUpdatePipelineSelfMutationRoleDefaultPolicyA225DA4E": { - "Type": "AWS::IAM::Policy", - "Properties": { - "PolicyDocument": { - "Statement": [ - { - "Action": [ - "logs:CreateLogGroup", - "logs:CreateLogStream", - "logs:PutLogEvents" - ], - "Effect": "Allow", - "Resource": [ - { - "Fn::Join": [ - "", - [ - "arn:", - { - "Ref": "AWS::Partition" - }, - ":logs:test-region:12345678:log-group:/aws/codebuild/", - { - "Ref": "PipelineUpdatePipelineSelfMutationDAA41400" - } - ] - ] - }, - { - "Fn::Join": [ - "", - [ - "arn:", - { - "Ref": "AWS::Partition" - }, - ":logs:test-region:12345678:log-group:/aws/codebuild/", - { - "Ref": "PipelineUpdatePipelineSelfMutationDAA41400" - }, - ":*" - ] - ] - } - ] - }, - { - "Action": [ - "codebuild:CreateReportGroup", - "codebuild:CreateReport", - "codebuild:UpdateReport", - "codebuild:BatchPutTestCases" - ], - "Effect": "Allow", - "Resource": { - "Fn::Join": [ - "", - [ - "arn:", - { - "Ref": "AWS::Partition" - }, 
- ":codebuild:test-region:12345678:report-group/", - { - "Ref": "PipelineUpdatePipelineSelfMutationDAA41400" - }, - "-*" - ] - ] - } - }, - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Resource": [ - "arn:*:iam::*:role/*-deploy-role-*", - "arn:*:iam::*:role/*-publishing-role-*" - ] - }, - { - "Action": "cloudformation:DescribeStacks", - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": "s3:ListBucket", - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "s3:GetObject*", - "s3:GetBucket*", - "s3:List*" - ], - "Effect": "Allow", - "Resource": [ - { - "Fn::GetAtt": [ - "PipelineArtifactsBucketAEA9A052", - "Arn" - ] - }, - { - "Fn::Join": [ - "", - [ - { - "Fn::GetAtt": [ - "PipelineArtifactsBucketAEA9A052", - "Arn" - ] - }, - "/*" - ] - ] - } - ] - }, - { - "Action": [ - "kms:Decrypt", - "kms:DescribeKey" - ], - "Effect": "Allow", - "Resource": { - "Fn::GetAtt": [ - "PipelineArtifactsBucketEncryptionKeyF5BF0670", - "Arn" - ] - } - }, - { - "Action": [ - "kms:Decrypt", - "kms:Encrypt", - "kms:ReEncrypt*", - "kms:GenerateDataKey*" - ], - "Effect": "Allow", - "Resource": { - "Fn::GetAtt": [ - "PipelineArtifactsBucketEncryptionKeyF5BF0670", - "Arn" - ] - } - } - ], - "Version": "2012-10-17" - }, - "PolicyName": "PipelineUpdatePipelineSelfMutationRoleDefaultPolicyA225DA4E", - "Roles": [ - { - "Ref": "PipelineUpdatePipelineSelfMutationRole57E559E8" - } - ] - } - }, - "PipelineUpdatePipelineSelfMutationDAA41400": { - "Type": "AWS::CodeBuild::Project", - "Properties": { - "Artifacts": { - "Type": "CODEPIPELINE" - }, - "Environment": { - "ComputeType": "BUILD_GENERAL1_SMALL", - "Image": "aws/codebuild/standard:4.0", - "PrivilegedMode": false, - "Type": "LINUX_CONTAINER" - }, - "ServiceRole": { - "Fn::GetAtt": [ - "PipelineUpdatePipelineSelfMutationRole57E559E8", - "Arn" - ] - }, - "Source": { - "BuildSpec": "{\n \"version\": \"0.2\",\n \"phases\": {\n \"install\": {\n \"commands\": \"npm install -g aws-cdk\"\n },\n \"build\": {\n \"commands\": 
[\n \"cdk -a . deploy PipelineStack --require-approval=never --verbose\"\n ]\n }\n }\n}", - "Type": "CODEPIPELINE" - }, - "EncryptionKey": { - "Fn::GetAtt": [ - "PipelineArtifactsBucketEncryptionKeyF5BF0670", - "Arn" - ] - } - } - }, "PipelineArtifactsBucketEncryptionKeyF5BF0670": { "Type": "AWS::KMS::Key", "Properties": { @@ -649,7 +444,8 @@ "Configuration": { "ProjectName": { "Ref": "PipelineBuildSynthCdkBuildProject6BEFA8E6" - } + }, + "EnvironmentVariables": "[{\"name\":\"_PROJECT_CONFIG_HASH\",\"type\":\"PLAINTEXT\",\"value\":\"7f09efece2ae66563f27569c480f9602225200051089f57486c5fa0b52095956\"}]" }, "InputArtifacts": [ { @@ -1388,6 +1184,211 @@ ] } } + }, + "PipelineUpdatePipelineSelfMutationRole57E559E8": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "codebuild.amazonaws.com" + } + } + ], + "Version": "2012-10-17" + } + } + }, + "PipelineUpdatePipelineSelfMutationRoleDefaultPolicyA225DA4E": { + "Type": "AWS::IAM::Policy", + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents" + ], + "Effect": "Allow", + "Resource": [ + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":logs:test-region:12345678:log-group:/aws/codebuild/", + { + "Ref": "PipelineUpdatePipelineSelfMutationDAA41400" + } + ] + ] + }, + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":logs:test-region:12345678:log-group:/aws/codebuild/", + { + "Ref": "PipelineUpdatePipelineSelfMutationDAA41400" + }, + ":*" + ] + ] + } + ] + }, + { + "Action": [ + "codebuild:CreateReportGroup", + "codebuild:CreateReport", + "codebuild:UpdateReport", + "codebuild:BatchPutTestCases" + ], + "Effect": "Allow", + "Resource": { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + 
":codebuild:test-region:12345678:report-group/", + { + "Ref": "PipelineUpdatePipelineSelfMutationDAA41400" + }, + "-*" + ] + ] + } + }, + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Resource": [ + "arn:*:iam::*:role/*-deploy-role-*", + "arn:*:iam::*:role/*-publishing-role-*" + ] + }, + { + "Action": "cloudformation:DescribeStacks", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "s3:ListBucket", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "s3:GetObject*", + "s3:GetBucket*", + "s3:List*" + ], + "Effect": "Allow", + "Resource": [ + { + "Fn::GetAtt": [ + "PipelineArtifactsBucketAEA9A052", + "Arn" + ] + }, + { + "Fn::Join": [ + "", + [ + { + "Fn::GetAtt": [ + "PipelineArtifactsBucketAEA9A052", + "Arn" + ] + }, + "/*" + ] + ] + } + ] + }, + { + "Action": [ + "kms:Decrypt", + "kms:DescribeKey" + ], + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "PipelineArtifactsBucketEncryptionKeyF5BF0670", + "Arn" + ] + } + }, + { + "Action": [ + "kms:Decrypt", + "kms:Encrypt", + "kms:ReEncrypt*", + "kms:GenerateDataKey*" + ], + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "PipelineArtifactsBucketEncryptionKeyF5BF0670", + "Arn" + ] + } + } + ], + "Version": "2012-10-17" + }, + "PolicyName": "PipelineUpdatePipelineSelfMutationRoleDefaultPolicyA225DA4E", + "Roles": [ + { + "Ref": "PipelineUpdatePipelineSelfMutationRole57E559E8" + } + ] + } + }, + "PipelineUpdatePipelineSelfMutationDAA41400": { + "Type": "AWS::CodeBuild::Project", + "Properties": { + "Artifacts": { + "Type": "CODEPIPELINE" + }, + "Environment": { + "ComputeType": "BUILD_GENERAL1_SMALL", + "Image": "aws/codebuild/standard:4.0", + "PrivilegedMode": false, + "Type": "LINUX_CONTAINER" + }, + "ServiceRole": { + "Fn::GetAtt": [ + "PipelineUpdatePipelineSelfMutationRole57E559E8", + "Arn" + ] + }, + "Source": { + "BuildSpec": "{\n \"version\": \"0.2\",\n \"phases\": {\n \"install\": {\n \"commands\": \"npm install -g aws-cdk\"\n },\n \"build\": {\n \"commands\": [\n 
\"cdk -a . deploy PipelineStack --require-approval=never --verbose\"\n ]\n }\n }\n}", + "Type": "CODEPIPELINE" + }, + "EncryptionKey": { + "Fn::GetAtt": [ + "PipelineArtifactsBucketEncryptionKeyF5BF0670", + "Arn" + ] + } + } } } -} +} \ No newline at end of file diff --git a/packages/@aws-cdk/pipelines/test/pipeline.test.ts b/packages/@aws-cdk/pipelines/test/pipeline.test.ts index 7719620706cc6..7d4455959a8eb 100644 --- a/packages/@aws-cdk/pipelines/test/pipeline.test.ts +++ b/packages/@aws-cdk/pipelines/test/pipeline.test.ts @@ -42,6 +42,13 @@ test('references stack template in subassembly', () => { }); }); +test('obvious error is thrown when stage contains no stacks', () => { + // WHEN + expect(() => { + pipeline.addApplicationStage(new Stage(app, 'EmptyStage')); + }).toThrow(/should contain at least one Stack/); +}); + test('action has right settings for same-env deployment', () => { // WHEN pipeline.addApplicationStage(new OneStackApp(app, 'Same')); diff --git a/packages/@aws-cdk/pipelines/test/validation.test.ts b/packages/@aws-cdk/pipelines/test/validation.test.ts index 9c311af6c5b47..ae1b44f2671f2 100644 --- a/packages/@aws-cdk/pipelines/test/validation.test.ts +++ b/packages/@aws-cdk/pipelines/test/validation.test.ts @@ -1,6 +1,8 @@ import { anything, arrayWith, deepObjectLike, encodedJson } from '@aws-cdk/assert'; import '@aws-cdk/assert/jest'; import * as codepipeline from '@aws-cdk/aws-codepipeline'; +import * as iam from '@aws-cdk/aws-iam'; +import * as s3 from '@aws-cdk/aws-s3'; import { CfnOutput, Construct, Stack, Stage, StageProps } from '@aws-cdk/core'; import * as cdkp from '../lib'; import { } from './testmatchers'; @@ -173,6 +175,54 @@ test('can use additional files from build', () => { }); }); +test('add policy statements to ShellScriptAction', () => { + // WHEN + pipeline.addStage('Test').addActions(new cdkp.ShellScriptAction({ + actionName: 'Boop', + additionalArtifacts: [integTestArtifact], + commands: ['true'], + rolePolicyStatements: [ + 
new iam.PolicyStatement({ + actions: ['s3:Banana'], + resources: ['*'], + }), + ], + })); + + // THEN + expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', { + PolicyDocument: { + Statement: arrayWith(deepObjectLike({ + Action: 's3:Banana', + Resource: '*', + })), + }, + }); +}); + +test('ShellScriptAction is IGrantable', () => { + // GIVEN + const action = new cdkp.ShellScriptAction({ + actionName: 'Boop', + additionalArtifacts: [integTestArtifact], + commands: ['true'], + }); + pipeline.addStage('Test').addActions(action); + const bucket = new s3.Bucket(pipelineStack, 'Bucket'); + + // WHEN + bucket.grantRead(action); + + // THEN + expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', { + PolicyDocument: { + Statement: arrayWith(deepObjectLike({ + Action: ['s3:GetObject*', 's3:GetBucket*', 's3:List*'], + })), + }, + }); +}); + class AppWithStackOutput extends Stage { public readonly output: CfnOutput; diff --git a/packages/aws-cdk-lib/.eslintrc.js b/packages/aws-cdk-lib/.eslintrc.js new file mode 100644 index 0000000000000..61dd8dd001f63 --- /dev/null +++ b/packages/aws-cdk-lib/.eslintrc.js @@ -0,0 +1,3 @@ +const baseConfig = require('cdk-build-tools/config/eslintrc'); +baseConfig.parserOptions.project = __dirname + '/tsconfig.json'; +module.exports = baseConfig; diff --git a/packages/aws-cdk-lib/.gitignore b/packages/aws-cdk-lib/.gitignore new file mode 100644 index 0000000000000..129f2f8e0bc37 --- /dev/null +++ b/packages/aws-cdk-lib/.gitignore @@ -0,0 +1,19 @@ +*.js +*.d.ts +!deps.js +!gen.js +lib/ +tsconfig.json +.jsii +*.tsbuildinfo + +dist +.LAST_PACKAGE +.LAST_BUILD +*.snk +!.eslintrc.js + +# Ignore barrel import entry points +/*.ts + +junit.xml \ No newline at end of file diff --git a/packages/aws-cdk-lib/.npmignore b/packages/aws-cdk-lib/.npmignore new file mode 100644 index 0000000000000..b5bc540300d0f --- /dev/null +++ b/packages/aws-cdk-lib/.npmignore @@ -0,0 +1,26 @@ +# The basics +*.ts +*.tgz +*.snk +!*.d.ts +!*.js + +# Coverage 
+coverage +.nyc_output +.nycrc + +# Build gear +build-tools +dist +.LAST_BUILD +.LAST_PACKAGE + +tsconfig.json +*.tsbuildinfo + +!.jsii +.eslintrc.js +# exclude cdk artifacts +**/cdk.out +junit.xml \ No newline at end of file diff --git a/packages/aws-cdk-lib/LICENSE b/packages/aws-cdk-lib/LICENSE new file mode 100644 index 0000000000000..b71ec1688783a --- /dev/null +++ b/packages/aws-cdk-lib/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018-2020 Amazon.com, Inc. 
or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/packages/aws-cdk-lib/NOTICE b/packages/aws-cdk-lib/NOTICE new file mode 100644 index 0000000000000..bfccac9a7f69c --- /dev/null +++ b/packages/aws-cdk-lib/NOTICE @@ -0,0 +1,2 @@ +AWS Cloud Development Kit (AWS CDK) +Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. diff --git a/packages/aws-cdk-lib/README.md b/packages/aws-cdk-lib/README.md new file mode 100644 index 0000000000000..de21ae0684304 --- /dev/null +++ b/packages/aws-cdk-lib/README.md @@ -0,0 +1,52 @@ +# AWS Cloud Development Kit Library + +[![experimental](http://badges.github.io/stability-badges/dist/experimental.svg)](http://github.com/badges/stability-badges) + +The AWS CDK construct library provides APIs to define your CDK application and add +CDK constructs to the application. + +## Usage + +### Upgrade from CDK 1.x + +When upgrading from CDK 1.x, remove all dependencies to individual CDK packages +from your dependencies file and follow the rest of the sections. + +### Installation + +To use this package, you need to declare this package and the `constructs` package as +dependencies. + +According to the kind of project you are developing: +- For projects that are CDK libraries, declare them both under the `devDependencies` + **and** `peerDependencies` sections. +- For CDK apps, declare them under the `dependencies` section only. 
+ +### Use in your code + +#### Classic import + +You can use a classic import to get access to each service namespaces: + +```ts +import { core, aws_s3 as s3 } from 'aws-cdk-lib'; + +const app = new core.App(); +const stack = new core.Stack(app, 'TestStack'); + +new s3.Bucket(stack, 'TestBucket'); +``` + +#### Barrel import + +Alternatively, you can use "barrel" imports: + +```ts +import { App, Stack } from 'aws-cdk-lib'; +import { Bucket } from 'aws-cdk-lib/aws-s3'; + +const app = new App(); +const stack = new Stack(app, 'TestStack'); + +new Bucket(stack, 'TestBucket'); +``` diff --git a/packages/aws-cdk-lib/package.json b/packages/aws-cdk-lib/package.json new file mode 100644 index 0000000000000..17d3ce816c69a --- /dev/null +++ b/packages/aws-cdk-lib/package.json @@ -0,0 +1,276 @@ +{ + "name": "aws-cdk-lib", + "private": "true", + "version": "0.0.0", + "description": "The AWS Cloud Development Kit library", + "main": "lib/index.js", + "types": "lib/index.d.ts", + "repository": { + "type": "git", + "url": "https://github.com/aws/aws-cdk.git", + "directory": "packages/aws-cdk-lib" + }, + "stability": "experimental", + "maturity": "developer-preview", + "scripts": { + "gen": "ubergen", + "build": "cdk-build", + "lint": "cdk-lint", + "test": "echo done", + "package": "cdk-package", + "pkglint": "pkglint -f", + "build+test": "npm run build && npm test", + "build+test+package": "npm run build+test && npm run package", + "watch": "cdk-watch", + "compat": "cdk-compat" + }, + "awslint": { + "exclude": [ + "*:*" + ] + }, + "cdk-build": { + "eslint": { + "disable": true + }, + "pre": [ + "npm run gen" + ] + }, + "pkglint": { + "exclude": [ + "package-info/maturity", + "jsii/java", + "jsii/python", + "jsii/dotnet" + ] + }, + "jsii": { + "excludeTypescript": [ + "build-tools/*" + ], + "outdir": "dist", + "targets": { + "dotnet": { + "namespace": "Amazon.CDK.Lib", + "packageId": "Amazon.CDK.Lib", + "iconUrl": 
"https://raw.githubusercontent.com/aws/aws-cdk/master/logo/default-256-dark.png", + "versionSuffix": "-devpreview", + "signAssembly": true, + "assemblyOriginatorKeyFile": "../../key.snk" + }, + "java": { + "package": "software.amazon.awscdk.lib", + "maven": { + "groupId": "software.amazon.awscdk", + "artifactId": "lib", + "versionSuffix": ".DEVPREVIEW" + } + }, + "python": { + "distName": "aws-cdk.lib", + "module": "aws_cdk.lib" + } + }, + "projectReferences": false + }, + "author": { + "name": "Amazon Web Services", + "url": "https://aws.amazon.com", + "organization": true + }, + "license": "Apache-2.0", + "bundledDependencies": [ + "case", + "fs-extra", + "jsonschema", + "minimatch", + "semver", + "yaml" + ], + "dependencies": { + "case": "1.6.3", + "fs-extra": "^9.0.1", + "jsonschema": "^1.2.5", + "minimatch": "^3.0.4", + "semver": "^7.2.2", + "yaml": "1.10.0" + }, + "devDependencies": { + "constructs": "^3.0.4", + "@aws-cdk/alexa-ask": "0.0.0", + "@aws-cdk/app-delivery": "0.0.0", + "@aws-cdk/assets": "0.0.0", + "@aws-cdk/aws-accessanalyzer": "0.0.0", + "@aws-cdk/aws-acmpca": "0.0.0", + "@aws-cdk/aws-amazonmq": "0.0.0", + "@aws-cdk/aws-amplify": "0.0.0", + "@aws-cdk/aws-apigateway": "0.0.0", + "@aws-cdk/aws-apigatewayv2": "0.0.0", + "@aws-cdk/aws-appconfig": "0.0.0", + "@aws-cdk/aws-applicationautoscaling": "0.0.0", + "@aws-cdk/aws-appmesh": "0.0.0", + "@aws-cdk/aws-appstream": "0.0.0", + "@aws-cdk/aws-appsync": "0.0.0", + "@aws-cdk/aws-athena": "0.0.0", + "@aws-cdk/aws-autoscaling": "0.0.0", + "@aws-cdk/aws-autoscaling-common": "0.0.0", + "@aws-cdk/aws-autoscaling-hooktargets": "0.0.0", + "@aws-cdk/aws-autoscalingplans": "0.0.0", + "@aws-cdk/aws-backup": "0.0.0", + "@aws-cdk/aws-batch": "0.0.0", + "@aws-cdk/aws-budgets": "0.0.0", + "@aws-cdk/aws-cassandra": "0.0.0", + "@aws-cdk/aws-ce": "0.0.0", + "@aws-cdk/aws-certificatemanager": "0.0.0", + "@aws-cdk/aws-chatbot": "0.0.0", + "@aws-cdk/aws-cloud9": "0.0.0", + "@aws-cdk/aws-cloudformation": "0.0.0", + 
"@aws-cdk/aws-cloudfront": "0.0.0", + "@aws-cdk/aws-cloudfront-origins": "0.0.0", + "@aws-cdk/aws-cloudtrail": "0.0.0", + "@aws-cdk/aws-cloudwatch": "0.0.0", + "@aws-cdk/aws-cloudwatch-actions": "0.0.0", + "@aws-cdk/aws-codebuild": "0.0.0", + "@aws-cdk/aws-codecommit": "0.0.0", + "@aws-cdk/aws-codedeploy": "0.0.0", + "@aws-cdk/aws-codeguruprofiler": "0.0.0", + "@aws-cdk/aws-codepipeline": "0.0.0", + "@aws-cdk/aws-codepipeline-actions": "0.0.0", + "@aws-cdk/aws-codestar": "0.0.0", + "@aws-cdk/aws-codestarconnections": "0.0.0", + "@aws-cdk/aws-codestarnotifications": "0.0.0", + "@aws-cdk/aws-cognito": "0.0.0", + "@aws-cdk/aws-config": "0.0.0", + "@aws-cdk/aws-datapipeline": "0.0.0", + "@aws-cdk/aws-dax": "0.0.0", + "@aws-cdk/aws-detective": "0.0.0", + "@aws-cdk/aws-directoryservice": "0.0.0", + "@aws-cdk/aws-dlm": "0.0.0", + "@aws-cdk/aws-dms": "0.0.0", + "@aws-cdk/aws-docdb": "0.0.0", + "@aws-cdk/aws-dynamodb": "0.0.0", + "@aws-cdk/aws-ec2": "0.0.0", + "@aws-cdk/aws-ecr": "0.0.0", + "@aws-cdk/aws-ecr-assets": "0.0.0", + "@aws-cdk/aws-ecs": "0.0.0", + "@aws-cdk/aws-ecs-patterns": "0.0.0", + "@aws-cdk/aws-efs": "0.0.0", + "@aws-cdk/aws-eks": "0.0.0", + "@aws-cdk/aws-eks-legacy": "0.0.0", + "@aws-cdk/aws-elasticache": "0.0.0", + "@aws-cdk/aws-elasticbeanstalk": "0.0.0", + "@aws-cdk/aws-elasticloadbalancing": "0.0.0", + "@aws-cdk/aws-elasticloadbalancingv2": "0.0.0", + "@aws-cdk/aws-elasticloadbalancingv2-actions": "0.0.0", + "@aws-cdk/aws-elasticloadbalancingv2-targets": "0.0.0", + "@aws-cdk/aws-elasticsearch": "0.0.0", + "@aws-cdk/aws-emr": "0.0.0", + "@aws-cdk/aws-events": "0.0.0", + "@aws-cdk/aws-events-targets": "0.0.0", + "@aws-cdk/aws-eventschemas": "0.0.0", + "@aws-cdk/aws-fms": "0.0.0", + "@aws-cdk/aws-fsx": "0.0.0", + "@aws-cdk/aws-gamelift": "0.0.0", + "@aws-cdk/aws-globalaccelerator": "0.0.0", + "@aws-cdk/aws-glue": "0.0.0", + "@aws-cdk/aws-greengrass": "0.0.0", + "@aws-cdk/aws-guardduty": "0.0.0", + "@aws-cdk/aws-iam": "0.0.0", + 
"@aws-cdk/aws-imagebuilder": "0.0.0", + "@aws-cdk/aws-inspector": "0.0.0", + "@aws-cdk/aws-iot": "0.0.0", + "@aws-cdk/aws-iot1click": "0.0.0", + "@aws-cdk/aws-iotanalytics": "0.0.0", + "@aws-cdk/aws-iotevents": "0.0.0", + "@aws-cdk/aws-iotthingsgraph": "0.0.0", + "@aws-cdk/aws-kinesis": "0.0.0", + "@aws-cdk/aws-kinesisanalytics": "0.0.0", + "@aws-cdk/aws-kinesisfirehose": "0.0.0", + "@aws-cdk/aws-kms": "0.0.0", + "@aws-cdk/aws-lakeformation": "0.0.0", + "@aws-cdk/aws-lambda": "0.0.0", + "@aws-cdk/aws-lambda-destinations": "0.0.0", + "@aws-cdk/aws-lambda-event-sources": "0.0.0", + "@aws-cdk/aws-lambda-nodejs": "0.0.0", + "@aws-cdk/aws-lambda-python": "0.0.0", + "@aws-cdk/aws-logs": "0.0.0", + "@aws-cdk/aws-logs-destinations": "0.0.0", + "@aws-cdk/aws-macie": "0.0.0", + "@aws-cdk/aws-managedblockchain": "0.0.0", + "@aws-cdk/aws-mediaconvert": "0.0.0", + "@aws-cdk/aws-medialive": "0.0.0", + "@aws-cdk/aws-mediastore": "0.0.0", + "@aws-cdk/aws-msk": "0.0.0", + "@aws-cdk/aws-neptune": "0.0.0", + "@aws-cdk/aws-networkmanager": "0.0.0", + "@aws-cdk/aws-opsworks": "0.0.0", + "@aws-cdk/aws-opsworkscm": "0.0.0", + "@aws-cdk/aws-pinpoint": "0.0.0", + "@aws-cdk/aws-pinpointemail": "0.0.0", + "@aws-cdk/aws-qldb": "0.0.0", + "@aws-cdk/aws-ram": "0.0.0", + "@aws-cdk/aws-rds": "0.0.0", + "@aws-cdk/aws-redshift": "0.0.0", + "@aws-cdk/aws-resourcegroups": "0.0.0", + "@aws-cdk/aws-robomaker": "0.0.0", + "@aws-cdk/aws-route53": "0.0.0", + "@aws-cdk/aws-route53-patterns": "0.0.0", + "@aws-cdk/aws-route53-targets": "0.0.0", + "@aws-cdk/aws-route53resolver": "0.0.0", + "@aws-cdk/aws-s3": "0.0.0", + "@aws-cdk/aws-s3-assets": "0.0.0", + "@aws-cdk/aws-s3-deployment": "0.0.0", + "@aws-cdk/aws-s3-notifications": "0.0.0", + "@aws-cdk/aws-sagemaker": "0.0.0", + "@aws-cdk/aws-sam": "0.0.0", + "@aws-cdk/aws-sdb": "0.0.0", + "@aws-cdk/aws-secretsmanager": "0.0.0", + "@aws-cdk/aws-securityhub": "0.0.0", + "@aws-cdk/aws-servicecatalog": "0.0.0", + "@aws-cdk/aws-servicediscovery": "0.0.0", + 
"@aws-cdk/aws-ses": "0.0.0", + "@aws-cdk/aws-ses-actions": "0.0.0", + "@aws-cdk/aws-sns": "0.0.0", + "@aws-cdk/aws-sns-subscriptions": "0.0.0", + "@aws-cdk/aws-sqs": "0.0.0", + "@aws-cdk/aws-ssm": "0.0.0", + "@aws-cdk/aws-stepfunctions": "0.0.0", + "@aws-cdk/aws-stepfunctions-tasks": "0.0.0", + "@aws-cdk/aws-synthetics": "0.0.0", + "@aws-cdk/aws-transfer": "0.0.0", + "@aws-cdk/aws-waf": "0.0.0", + "@aws-cdk/aws-wafregional": "0.0.0", + "@aws-cdk/aws-wafv2": "0.0.0", + "@aws-cdk/aws-workspaces": "0.0.0", + "@aws-cdk/cloud-assembly-schema": "0.0.0", + "@aws-cdk/cloudformation-include": "0.0.0", + "@aws-cdk/core": "0.0.0", + "@aws-cdk/custom-resources": "0.0.0", + "@aws-cdk/cx-api": "0.0.0", + "@aws-cdk/pipelines": "0.0.0", + "@aws-cdk/region-info": "0.0.0", + "@types/fs-extra": "^8.1.1", + "@types/node": "^10.17.28", + "cdk-build-tools": "0.0.0", + "fs-extra": "^9.0.1", + "pkglint": "0.0.0", + "ts-node": "^8.10.2", + "typescript": "~3.8.3", + "ubergen": "0.0.0" + }, + "peerDependencies": { + "constructs": "^3.0.4" + }, + "homepage": "https://github.com/aws/aws-cdk", + "engines": { + "node": ">= 10.13.0 <13 || >=13.7.0" + }, + "keywords": [ + "aws", + "cdk" + ], + "awscdkio": { + "announce": false + } +} diff --git a/packages/aws-cdk/CONTRIBUTING.md b/packages/aws-cdk/CONTRIBUTING.md index 2ad4991eef00c..b5416406b7136 100644 --- a/packages/aws-cdk/CONTRIBUTING.md +++ b/packages/aws-cdk/CONTRIBUTING.md @@ -42,6 +42,22 @@ To run regression tests in the source tree: $ test/integ/test-cli-regression-against-current-code.sh [-t '...'] ``` +Integ tests can run in parallel across multiple regions. Set the `AWS_REGIONS` +environment variable to a comma-separate list of regions: + +``` +$ env AWS_REGIONS=us-west-2,us-west-1,eu-central-1,eu-west-2,eu-west-3 test/integ/run-against-repo test/integ/cli/test.sh +``` + +Elements from the list of region will be exclusively allocated to one test at +a time. 
The tests will run in parallel up to the concurrency limit imposed by +jest (default of 5, controllable by `--maxConcurrency`) and the available +number of elements. Regions may be repeated in the list in which case more +than one test will run at a time in that region. + +If `AWS_REGIONS` is not set, all tests will sequentially run in the one +region set in `AWS_REGION`. + ### CLI integration tests CLI tests will exercise a number of common CLI scenarios, and deploy actual diff --git a/packages/aws-cdk/bin/cdk.ts b/packages/aws-cdk/bin/cdk.ts index e03dc68db94d0..2bade13a3f6e8 100644 --- a/packages/aws-cdk/bin/cdk.ts +++ b/packages/aws-cdk/bin/cdk.ts @@ -87,7 +87,8 @@ async function parseCommandLineArguments() { .option('require-approval', { type: 'string', choices: [RequireApproval.Never, RequireApproval.AnyChange, RequireApproval.Broadening], desc: 'What security-sensitive changes need manual approval' }) .option('ci', { type: 'boolean', desc: 'Force CI detection', default: process.env.CI !== undefined }) .option('notification-arns', { type: 'array', desc: 'ARNs of SNS topics that CloudFormation will notify with stack related events', nargs: 1, requiresArg: true }) - .option('tags', { type: 'array', alias: 't', desc: 'Tags to add to the stack (KEY=VALUE), overrides tags from Cloud Assembly', nargs: 1, requiresArg: true }) + // @deprecated(v2) -- tags are part of the Cloud Assembly and tags specified here will be overwritten on the next deployment + .option('tags', { type: 'array', alias: 't', desc: 'Tags to add to the stack (KEY=VALUE), overrides tags from Cloud Assembly (deprecated)', nargs: 1, requiresArg: true }) .option('execute', { type: 'boolean', desc: 'Whether to execute ChangeSet (--no-execute will NOT execute the ChangeSet)', default: true }) .option('force', { alias: 'f', type: 'boolean', desc: 'Always deploy stack even if templates are identical', default: false }) .option('parameters', { type: 'array', desc: 'Additional parameters passed to 
CloudFormation at deploy time (STACK:KEY=VALUE)', nargs: 1, requiresArg: true, default: {} }) @@ -224,13 +225,14 @@ async function initCommandLine() { return await cli.list(args.STACKS, { long: args.long }); case 'diff': + const enableDiffNoFail = isFeatureEnabled(configuration, cxapi.ENABLE_DIFF_NO_FAIL); return await cli.diff({ stackNames: args.STACKS, exclusively: args.exclusively, templatePath: args.template, strict: args.strict, contextLines: args.contextLines, - fail: args.fail || !configuration.context.get(cxapi.ENABLE_DIFF_NO_FAIL), + fail: args.fail || !enableDiffNoFail, }); case 'bootstrap': @@ -241,13 +243,14 @@ async function initCommandLine() { // anticipation of flipping the switch, in user messaging we still call it // "new" bootstrapping. let source: BootstrapSource = { source: 'legacy' }; + const newStyleStackSynthesis = isFeatureEnabled(configuration, cxapi.NEW_STYLE_STACK_SYNTHESIS_CONTEXT); if (args.template) { print(`Using bootstrapping template from ${args.template}`); source = { source: 'custom', templateFile: args.template }; } else if (process.env.CDK_NEW_BOOTSTRAP) { print('CDK_NEW_BOOTSTRAP set, using new-style bootstrapping'); source = { source: 'default' }; - } else if (configuration.context.get(cxapi.NEW_STYLE_STACK_SYNTHESIS_CONTEXT)) { + } else if (newStyleStackSynthesis) { print(`'${cxapi.NEW_STYLE_STACK_SYNTHESIS_CONTEXT}' context set, using new-style bootstrapping`); source = { source: 'default' }; } @@ -336,6 +339,10 @@ async function initCommandLine() { } } +function isFeatureEnabled(configuration: Configuration, featureFlag: string) { + return configuration.context.get(featureFlag) ?? 
cxapi.futureFlagDefault(featureFlag); +} + /** * Translate a Yargs input array to something that makes more sense in a programming language * model (telling the difference between absence and an empty array) diff --git a/packages/aws-cdk/lib/api/aws-auth/aws-sdk-inifile.ts b/packages/aws-cdk/lib/api/aws-auth/aws-sdk-inifile.ts index af12393fff381..012bb80470039 100644 --- a/packages/aws-cdk/lib/api/aws-auth/aws-sdk-inifile.ts +++ b/packages/aws-cdk/lib/api/aws-auth/aws-sdk-inifile.ts @@ -20,7 +20,11 @@ import * as AWS from 'aws-sdk'; * `getProfilesFromSharedConfig` overwrites ALL `config` data with `credentials` * data, so we also need to do extra work to fish the `region` out of the config. * + * 3. The 'credential_source' option is not supported. Meaning credentials + * for assume-role cannot be fetched using EC2/ESC metadata. + * * See https://github.com/aws/aws-sdk-js/issues/3418 for all the gory details. + * See https://github.com/aws/aws-sdk-js/issues/1916 for some more glory details. 
*/ export class PatchedSharedIniFileCredentials extends AWS.SharedIniFileCredentials { declare private profile: string; @@ -53,40 +57,30 @@ export class PatchedSharedIniFileCredentials extends AWS.SharedIniFileCredential var roleSessionName = roleProfile.role_session_name; var externalId = roleProfile.external_id; var mfaSerial = roleProfile.mfa_serial; - var sourceProfileName = roleProfile.source_profile; - - if (!sourceProfileName) { - throw (AWS as any).util.error( - new Error('source_profile is not set using profile ' + this.profile), - { code: 'SharedIniFileCredentialsProviderFailure' }, - ); - } + var sourceProfile = roleProfile.source_profile; + var credentialSource = roleProfile.credential_source; - var sourceProfileExistanceTest = creds[sourceProfileName]; + const credentialError = (AWS as any).util.error( + new Error(`When using 'role_arn' in profile ('${this.profile}'), you must also configure exactly one of 'source_profile' or 'credential_source'`), + { code: 'SharedIniFileCredentialsProviderFailure' }, + ); - if (typeof sourceProfileExistanceTest !== 'object') { - throw (AWS as any).util.error( - new Error('source_profile ' + sourceProfileName + ' using profile ' - + this.profile + ' does not exist'), - { code: 'SharedIniFileCredentialsProviderFailure' }, - ); + if (sourceProfile && credentialSource) { + throw credentialError; } - var sourceCredentials = new AWS.SharedIniFileCredentials( - (AWS as any).util.merge(this.options || {}, { - profile: sourceProfileName, - preferStaticCredentials: true, - }), - ); + if (!sourceProfile && !credentialSource) { + throw credentialError; + } - // --------- THIS IS NEW ---------------------- const profiles = loadProfilesProper(this.filename); const region = profiles[this.profile]?.region ?? profiles.default?.region ?? 'us-east-1'; - // --------- /THIS IS NEW ---------------------- + + const stsCreds = sourceProfile ? 
this.sourceProfileCredentials(sourceProfile, creds) : this.credentialSourceCredentials(credentialSource); this.roleArn = roleArn; var sts = new AWS.STS({ - credentials: sourceCredentials, + credentials: stsCreds, region, httpOptions: this.httpOptions, }); @@ -126,6 +120,49 @@ export class PatchedSharedIniFileCredentials extends AWS.SharedIniFileCredential sts.assumeRole(roleParams, callback); } + + private sourceProfileCredentials(sourceProfile: string, profiles: Record>) { + + var sourceProfileExistanceTest = profiles[sourceProfile]; + + if (typeof sourceProfileExistanceTest !== 'object') { + throw (AWS as any).util.error( + new Error('source_profile ' + sourceProfile + ' using profile ' + + this.profile + ' does not exist'), + { code: 'SharedIniFileCredentialsProviderFailure' }, + ); + } + + return new AWS.SharedIniFileCredentials( + (AWS as any).util.merge(this.options || {}, { + profile: sourceProfile, + preferStaticCredentials: true, + }), + ); + + } + + // the aws-sdk for js does not support 'credential_source' (https://github.com/aws/aws-sdk-js/issues/1916) + // so unfortunately we need to implement this ourselves. + private credentialSourceCredentials(sourceCredential: string) { + + // see https://docs.aws.amazon.com/credref/latest/refdocs/setting-global-credential_source.html + switch (sourceCredential) { + case 'Environment': { + return new AWS.EnvironmentCredentials('AWS'); + } + case 'Ec2InstanceMetadata': { + return new AWS.EC2MetadataCredentials(); + } + case 'EcsContainer': { + return new AWS.ECSCredentials(); + } + default: { + throw new Error(`credential_source ${sourceCredential} in profile ${this.profile} is unsupported. 
choose one of [Environment, Ec2InstanceMetadata, EcsContainer]`); + } + } + + } } /** diff --git a/packages/aws-cdk/lib/api/bootstrap/bootstrap-template.yaml b/packages/aws-cdk/lib/api/bootstrap/bootstrap-template.yaml index 8e9d40462f3e3..feeee078030d6 100644 --- a/packages/aws-cdk/lib/api/bootstrap/bootstrap-template.yaml +++ b/packages/aws-cdk/lib/api/bootstrap/bootstrap-template.yaml @@ -341,6 +341,7 @@ Resources: - cloudformation:DescribeStackEvents - cloudformation:GetTemplate - cloudformation:DeleteStack + - cloudformation:UpdateTerminationProtection - sts:GetCallerIdentity Resource: "*" Effect: Allow diff --git a/packages/aws-cdk/lib/api/cxapp/cloud-executable.ts b/packages/aws-cdk/lib/api/cxapp/cloud-executable.ts index 9a6bd42593b44..859bf10c6e2e6 100644 --- a/packages/aws-cdk/lib/api/cxapp/cloud-executable.ts +++ b/packages/aws-cdk/lib/api/cxapp/cloud-executable.ts @@ -1,3 +1,4 @@ +import { promises as fs } from 'fs'; import * as cxapi from '@aws-cdk/cx-api'; import { RegionInfo } from '@aws-cdk/region-info'; import * as contextproviders from '../../context-providers'; @@ -89,54 +90,76 @@ export class CloudExecutable { } } - if (trackVersions && assembly.runtime) { - const modules = formatModules(assembly.runtime); - for (const stack of assembly.stacks) { - if (!stack.template.Resources) { - stack.template.Resources = {}; - } - const resourcePresent = stack.environment.region === cxapi.UNKNOWN_REGION - || RegionInfo.get(stack.environment.region).cdkMetadataResourceAvailable; - if (resourcePresent) { - if (!stack.template.Resources.CDKMetadata) { - stack.template.Resources.CDKMetadata = { - Type: 'AWS::CDK::Metadata', - Properties: { - Modules: modules, - }, - }; - if (stack.environment.region === cxapi.UNKNOWN_REGION) { - stack.template.Conditions = stack.template.Conditions || {}; - const condName = 'CDKMetadataAvailable'; - if (!stack.template.Conditions[condName]) { - stack.template.Conditions[condName] = _makeCdkMetadataAvailableCondition(); - 
stack.template.Resources.CDKMetadata.Condition = condName; - } else { - warning(`The stack ${stack.id} already includes a ${condName} condition`); - } - } - } else { - warning(`The stack ${stack.id} already includes a CDKMetadata resource`); - } - } - } + if (trackVersions) { + // @deprecated(v2): remove this 'if' block and all code referenced by it. + // This should honestly not be done here. The framework + // should (and will, shortly) synthesize this information directly into + // the template. However, in order to support old framework versions + // that don't synthesize this info yet, we can only remove this code + // once we break backwards compatibility. + await this.addMetadataResource(assembly); } return new CloudAssembly(assembly); + } + } + + /** + * Modify the templates in the assembly in-place to add metadata resource declarations + */ + private async addMetadataResource(rootAssembly: cxapi.CloudAssembly) { + if (!rootAssembly.runtime) { return; } + + const modules = formatModules(rootAssembly.runtime); + await processAssembly(rootAssembly); + + async function processAssembly(assembly: cxapi.CloudAssembly) { + for (const stack of assembly.stacks) { + await processStack(stack); + } + for (const nested of assembly.nestedAssemblies) { + await processAssembly(nested.nestedAssembly); + } + } - function formatModules(runtime: cxapi.RuntimeInfo): string { - const modules = new Array(); + async function processStack(stack: cxapi.CloudFormationStackArtifact) { + const resourcePresent = stack.environment.region === cxapi.UNKNOWN_REGION + || RegionInfo.get(stack.environment.region).cdkMetadataResourceAvailable; + if (!resourcePresent) { return; } - // inject toolkit version to list of modules - // eslint-disable-next-line @typescript-eslint/no-require-imports - const toolkitVersion = require('../../../package.json').version; - modules.push(`aws-cdk=${toolkitVersion}`); + if (!stack.template.Resources) { + stack.template.Resources = {}; + } + if 
(stack.template.Resources.CDKMetadata) { + warning(`The stack ${stack.id} already includes a CDKMetadata resource`); + return; + } - for (const key of Object.keys(runtime.libraries).sort()) { - modules.push(`${key}=${runtime.libraries[key]}`); + stack.template.Resources.CDKMetadata = { + Type: 'AWS::CDK::Metadata', + Properties: { + Modules: modules, + }, + }; + + if (stack.environment.region === cxapi.UNKNOWN_REGION) { + stack.template.Conditions = stack.template.Conditions || {}; + const condName = 'CDKMetadataAvailable'; + if (!stack.template.Conditions[condName]) { + stack.template.Conditions[condName] = _makeCdkMetadataAvailableCondition(); + stack.template.Resources.CDKMetadata.Condition = condName; + } else { + warning(`The stack ${stack.id} already includes a ${condName} condition`); } - return modules.join(','); } + + // The template has changed in-memory, but the file on disk remains unchanged so far. + // The CLI *might* later on deploy the in-memory version (if it's <50kB) or use the + // on-disk version (if it's >50kB). + // + // Be sure to flush the changes we just made back to disk. The on-disk format is always + // JSON. 
+ await fs.writeFile(stack.templateFullPath, JSON.stringify(stack.template, undefined, 2), { encoding: 'utf-8' }); } } } @@ -185,4 +208,18 @@ function _inGroupsOf(array: T[], maxGroup: number): T[][] { result.push(array.slice(i, i + maxGroup)); } return result; -} \ No newline at end of file +} + +function formatModules(runtime: cxapi.RuntimeInfo): string { + const modules = new Array(); + + // inject toolkit version to list of modules + // eslint-disable-next-line @typescript-eslint/no-require-imports + const toolkitVersion = require('../../../package.json').version; + modules.push(`aws-cdk=${toolkitVersion}`); + + for (const key of Object.keys(runtime.libraries).sort()) { + modules.push(`${key}=${runtime.libraries[key]}`); + } + return modules.join(','); +} diff --git a/packages/aws-cdk/lib/init-templates/lib/typescript/lib/index.template.ts b/packages/aws-cdk/lib/init-templates/lib/typescript/lib/index.template.ts index 00b081cfb75df..d05d444fc8f09 100644 --- a/packages/aws-cdk/lib/init-templates/lib/typescript/lib/index.template.ts +++ b/packages/aws-cdk/lib/init-templates/lib/typescript/lib/index.template.ts @@ -1,32 +1,14 @@ -import * as sns from '@aws-cdk/aws-sns'; -import * as subs from '@aws-cdk/aws-sns-subscriptions'; -import * as sqs from '@aws-cdk/aws-sqs'; import * as cdk from '@aws-cdk/core'; export interface %name.PascalCased%Props { - /** - * The visibility timeout to be configured on the SQS Queue, in seconds. 
- * - * @default Duration.seconds(300) - */ - visibilityTimeout?: cdk.Duration; + // Define construct properties here } export class %name.PascalCased% extends cdk.Construct { - /** @returns the ARN of the SQS queue */ - public readonly queueArn: string; constructor(scope: cdk.Construct, id: string, props: %name.PascalCased%Props = {}) { super(scope, id); - const queue = new sqs.Queue(this, '%name.PascalCased%Queue', { - visibilityTimeout: props.visibilityTimeout || cdk.Duration.seconds(300) - }); - - const topic = new sns.Topic(this, '%name.PascalCased%Topic'); - - topic.addSubscription(new subs.SqsSubscription(queue)); - - this.queueArn = queue.queueArn; + // Define construct contents here } } diff --git a/packages/aws-cdk/lib/init-templates/lib/typescript/package.template.json b/packages/aws-cdk/lib/init-templates/lib/typescript/package.template.json index 498dd02b329a2..8d9ef219c74ca 100644 --- a/packages/aws-cdk/lib/init-templates/lib/typescript/package.template.json +++ b/packages/aws-cdk/lib/init-templates/lib/typescript/package.template.json @@ -20,9 +20,6 @@ "@aws-cdk/core": "%cdk-version%" }, "dependencies": { - "@aws-cdk/aws-sns": "%cdk-version%", - "@aws-cdk/aws-sns-subscriptions": "%cdk-version%", - "@aws-cdk/aws-sqs": "%cdk-version%", "@aws-cdk/core": "%cdk-version%" } } diff --git a/packages/aws-cdk/lib/init-templates/lib/typescript/test/%name%.test.template.ts b/packages/aws-cdk/lib/init-templates/lib/typescript/test/%name%.test.template.ts index 449eafcce5896..7dbd310b088f7 100644 --- a/packages/aws-cdk/lib/init-templates/lib/typescript/test/%name%.test.template.ts +++ b/packages/aws-cdk/lib/init-templates/lib/typescript/test/%name%.test.template.ts @@ -1,21 +1,15 @@ -import { expect as expectCDK, haveResource } from '@aws-cdk/assert'; +import { expect as expectCDK, countResources } from '@aws-cdk/assert'; import * as cdk from '@aws-cdk/core'; import * as %name.PascalCased% from '../lib/index'; -test('SQS Queue Created', () => { - const app = new 
cdk.App(); - const stack = new cdk.Stack(app, "TestStack"); - // WHEN - new %name.PascalCased%.%name.PascalCased%(stack, 'MyTestConstruct'); - // THEN - expectCDK(stack).to(haveResource("AWS::SQS::Queue")); -}); - +/* + * Example test + */ test('SNS Topic Created', () => { const app = new cdk.App(); const stack = new cdk.Stack(app, "TestStack"); // WHEN new %name.PascalCased%.%name.PascalCased%(stack, 'MyTestConstruct'); // THEN - expectCDK(stack).to(haveResource("AWS::SNS::Topic")); + expectCDK(stack).to(countResources("AWS::SNS::Topic",0)); }); diff --git a/packages/aws-cdk/test/account-cache.test.ts b/packages/aws-cdk/test/account-cache.test.ts index c9b5682f33124..f8860046df584 100644 --- a/packages/aws-cdk/test/account-cache.test.ts +++ b/packages/aws-cdk/test/account-cache.test.ts @@ -1,6 +1,7 @@ import * as path from 'path'; import * as fs from 'fs-extra'; import { AccountAccessKeyCache } from '../lib/api/aws-auth/account-cache'; +import { withMocked } from './util'; async function makeCache() { const dir = await fs.mkdtemp('/tmp/account-cache-test'); @@ -110,28 +111,3 @@ test(`cache is nuked if it exceeds ${AccountAccessKeyCache.MAX_ENTRIES} entries` await nukeCache(cacheDir); } }); - -function withMocked(obj: A, key: K, block: (fn: jest.Mocked[K]) => B): B { - const original = obj[key]; - const mockFn = jest.fn(); - (obj as any)[key] = mockFn; - - let ret; - try { - ret = block(mockFn as any); - } catch (e) { - obj[key] = original; - throw e; - } - - if (!isPromise(ret)) { - obj[key] = original; - return ret; - } - - return ret.finally(() => { obj[key] = original; }) as any; -} - -function isPromise(object: any): object is Promise { - return Promise.resolve(object) === object; -} \ No newline at end of file diff --git a/packages/aws-cdk/test/api/sdk-provider.test.ts b/packages/aws-cdk/test/api/sdk-provider.test.ts index e72225bbf82ee..3865925a7299a 100644 --- a/packages/aws-cdk/test/api/sdk-provider.test.ts +++ 
b/packages/aws-cdk/test/api/sdk-provider.test.ts @@ -7,6 +7,7 @@ import { PluginHost } from '../../lib'; import { ISDK, Mode, SdkProvider } from '../../lib/api/aws-auth'; import * as logging from '../../lib/logging'; import * as bockfs from '../bockfs'; +import { withMocked } from '../util'; // Mock promptly prompt to test MFA support jest.mock('promptly', () => ({ @@ -318,6 +319,160 @@ test('can assume role without a [default] profile', async () => { expect(account?.accountId).toEqual(`${uid}the_account_#`); }); +test('can assume role with ecs credentials', async () => { + + return withMocked(AWS.ECSCredentials.prototype, 'needsRefresh', async (needsRefresh) => { + + // GIVEN + bockfs({ + '/home/me/.bxt/credentials': dedent(` + `), + '/home/me/.bxt/config': dedent(` + [profile ecs] + role_arn=arn:aws:iam::12356789012:role/Assumable + credential_source = EcsContainer + `), + }); + + // Set environment variables that we want + process.env.AWS_CONFIG_FILE = bockfs.path('/home/me/.bxt/config'); + process.env.AWS_SHARED_CREDENTIALS_FILE = bockfs.path('/home/me/.bxt/credentials'); + + // WHEN + const provider = await SdkProvider.withAwsCliCompatibleDefaults({ + ...defaultCredOptions, + profile: 'ecs', + httpOptions: { + proxyAddress: 'http://DOESNTMATTER/', + }, + }); + + await provider.defaultAccount(); + + // THEN + // expect(account?.accountId).toEqual(`${uid}the_account_#`); + expect(needsRefresh).toHaveBeenCalled(); + + }); + +}); + +test('can assume role with ec2 credentials', async () => { + + return withMocked(AWS.EC2MetadataCredentials.prototype, 'needsRefresh', async (needsRefresh) => { + + // GIVEN + bockfs({ + '/home/me/.bxt/credentials': dedent(` + `), + '/home/me/.bxt/config': dedent(` + [profile ecs] + role_arn=arn:aws:iam::12356789012:role/Assumable + credential_source = Ec2InstanceMetadata + `), + }); + + // Set environment variables that we want + process.env.AWS_CONFIG_FILE = bockfs.path('/home/me/.bxt/config'); + 
process.env.AWS_SHARED_CREDENTIALS_FILE = bockfs.path('/home/me/.bxt/credentials'); + + // WHEN + const provider = await SdkProvider.withAwsCliCompatibleDefaults({ + ...defaultCredOptions, + profile: 'ecs', + httpOptions: { + proxyAddress: 'http://DOESNTMATTER/', + }, + }); + + await provider.defaultAccount(); + + // THEN + // expect(account?.accountId).toEqual(`${uid}the_account_#`); + expect(needsRefresh).toHaveBeenCalled(); + + }); + +}); + +test('can assume role with env credentials', async () => { + + return withMocked(AWS.EnvironmentCredentials.prototype, 'needsRefresh', async (needsRefresh) => { + + // GIVEN + bockfs({ + '/home/me/.bxt/credentials': dedent(` + `), + '/home/me/.bxt/config': dedent(` + [profile ecs] + role_arn=arn:aws:iam::12356789012:role/Assumable + credential_source = Environment + `), + }); + + // Set environment variables that we want + process.env.AWS_CONFIG_FILE = bockfs.path('/home/me/.bxt/config'); + process.env.AWS_SHARED_CREDENTIALS_FILE = bockfs.path('/home/me/.bxt/credentials'); + + // WHEN + const provider = await SdkProvider.withAwsCliCompatibleDefaults({ + ...defaultCredOptions, + profile: 'ecs', + httpOptions: { + proxyAddress: 'http://DOESNTMATTER/', + }, + }); + + await provider.defaultAccount(); + + // THEN + // expect(account?.accountId).toEqual(`${uid}the_account_#`); + expect(needsRefresh).toHaveBeenCalled(); + + }); + +}); + +test('assume fails with unsupported credential_source', async () => { + // GIVEN + bockfs({ + '/home/me/.bxt/config': dedent(` + [profile assumable] + role_arn=arn:aws:iam::12356789012:role/Assumable + credential_source = unsupported + `), + }); + + SDKMock.mock('STS', 'assumeRole', (_request: AWS.STS.AssumeRoleRequest, cb: AwsCallback) => { + return cb(null, { + Credentials: { + AccessKeyId: `${uid}access`, // Needs UID in here otherwise key will be cached + Expiration: new Date(Date.now() + 10000), + SecretAccessKey: 'b', + SessionToken: 'c', + }, + }); + }); + + // Set environment variables that 
we want + process.env.AWS_CONFIG_FILE = bockfs.path('/home/me/.bxt/config'); + process.env.AWS_SHARED_CREDENTIALS_FILE = bockfs.path('/home/me/.bxt/credentials'); + + // WHEN + const provider = await SdkProvider.withAwsCliCompatibleDefaults({ + ...defaultCredOptions, + profile: 'assumable', + httpOptions: { + proxyAddress: 'http://DOESNTMATTER/', + }, + }); + + const account = await provider.defaultAccount(); + + // THEN + expect(account?.accountId).toEqual(undefined); +}); + /** * Strip shared whitespace from the start of lines */ diff --git a/packages/aws-cdk/test/integ/cli-regression-patches/v1.62.0/NOTES.md b/packages/aws-cdk/test/integ/cli-regression-patches/v1.62.0/NOTES.md new file mode 100644 index 0000000000000..a6fe21e024e72 --- /dev/null +++ b/packages/aws-cdk/test/integ/cli-regression-patches/v1.62.0/NOTES.md @@ -0,0 +1,2 @@ +Tests now take longer than hour and cause token expiration. +Added creddentials refreshing in in aws-helper but the older tests don't have it. \ No newline at end of file diff --git a/packages/aws-cdk/test/integ/cli-regression-patches/v1.62.0/aws-helpers.js b/packages/aws-cdk/test/integ/cli-regression-patches/v1.62.0/aws-helpers.js new file mode 100644 index 0000000000000..038f1f0289fed --- /dev/null +++ b/packages/aws-cdk/test/integ/cli-regression-patches/v1.62.0/aws-helpers.js @@ -0,0 +1,245 @@ +"use strict"; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.outputFromStack = exports.deleteBucket = exports.deleteImageRepository = exports.emptyBucket = exports.sleep = exports.retry = exports.isBucketMissingError = exports.isStackMissingError = exports.stackStatus = exports.deleteStacks = exports.sts = exports.lambda = exports.iam = exports.sns = exports.ecr = exports.s3 = exports.cloudFormation = exports.testEnv = void 0; +const AWS = require("aws-sdk"); +const cdk_helpers_1 = require("./cdk-helpers"); + +function chainableCredentials(region) { + const profileName = process.env.AWS_PROFILE; + if 
(process.env.CODEBUILD_BUILD_ARN && profileName) { + // in codebuild we must assume the role that the cdk uses + // otherwise credentials will just be picked up by the normal sdk + // heuristics and expire after an hour. + // can't use '~' since the SDK doesn't seem to expand it...? + const configPath = `${process.env.HOME}/.aws/config`; + const ini = new AWS.IniLoader().loadFrom({ + filename: configPath, + isConfig: true, + }); + const profile = ini[profileName]; + if (!profile) { + throw new Error(`Profile '${profileName}' does not exist in config file (${configPath})`); + } + const arn = profile.role_arn; + const externalId = profile.external_id; + if (!arn) { + throw new Error(`role_arn does not exist in profile ${profileName}`); + } + if (!externalId) { + throw new Error(`external_id does not exist in profile ${externalId}`); + } + return new AWS.ChainableTemporaryCredentials({ + params: { + RoleArn: arn, + ExternalId: externalId, + RoleSessionName: 'integ-tests', + }, + stsConfig: { + region, + }, + masterCredentials: new AWS.ECSCredentials(), + }); + } + return undefined; +} + +exports.testEnv = async () => { + var _a, _b; + const region = (_b = (_a = process.env.AWS_REGION) !== null && _a !== void 0 ? _a : process.env.AWS_DEFAULT_REGION) !== null && _b !== void 0 ? 
_b : 'us-east-1'; + const sts = new AWS.STS({ + region: region, + credentials: chainableCredentials(region), + maxRetries: 8, + retryDelayOptions: { base: 500 }, + }); + const response = await sts.getCallerIdentity().promise(); + const ret = { + account: response.Account, + region, + }; + exports.testEnv = () => Promise.resolve(ret); + return ret; +}; + +exports.cloudFormation = makeAwsCaller(AWS.CloudFormation); +exports.s3 = makeAwsCaller(AWS.S3); +exports.ecr = makeAwsCaller(AWS.ECR); +exports.sns = makeAwsCaller(AWS.SNS); +exports.iam = makeAwsCaller(AWS.IAM); +exports.lambda = makeAwsCaller(AWS.Lambda); +exports.sts = makeAwsCaller(AWS.STS); +/** + * Perform an AWS call from nothing + * + * Create the correct client, do the call and resole the promise(). + */ +async function awsCall(ctor, call, request) { + const env = await exports.testEnv(); + const cfn = new ctor({ + region: env.region, + credentials: chainableCredentials(env.region), + maxRetries: 6, + retryDelayOptions: { + base: 500, + }, + }); + const response = cfn[call](request); + try { + return await response.promise(); + } + catch (e) { + const newErr = new Error(`${call}(${JSON.stringify(request)}): ${e.message}`); + newErr.code = e.code; + throw newErr; + } +} +/** + * Factory function to invoke 'awsCall' for specific services. 
+ * + * Not strictly necessary but calling this replaces a whole bunch of annoying generics you otherwise have to type: + * + * ```ts + * export function cloudFormation< + * C extends keyof ServiceCalls, + * >(call: C, request: First[C]>): Promise[C]>> { + * return awsCall(AWS.CloudFormation, call, request); + * } + * ``` + */ +function makeAwsCaller(ctor) { + return (call, request) => { + return awsCall(ctor, call, request); + }; +} +async function deleteStacks(...stackNames) { + if (stackNames.length === 0) { + return; + } + for (const stackName of stackNames) { + await exports.cloudFormation('updateTerminationProtection', { + EnableTerminationProtection: false, + StackName: stackName, + }); + await exports.cloudFormation('deleteStack', { + StackName: stackName, + }); + } + await retry(`Deleting ${stackNames}`, retry.forSeconds(600), async () => { + for (const stackName of stackNames) { + const status = await stackStatus(stackName); + if (status !== undefined && status.endsWith('_FAILED')) { + throw retry.abort(new Error(`'${stackName}' is in state '${status}'`)); + } + if (status !== undefined) { + throw new Error(`Delete of '${stackName}' not complete yet`); + } + } + }); +} +exports.deleteStacks = deleteStacks; +async function stackStatus(stackName) { + var _a; + try { + return (_a = (await exports.cloudFormation('describeStacks', { StackName: stackName })).Stacks) === null || _a === void 0 ? void 0 : _a[0].StackStatus; + } + catch (e) { + if (isStackMissingError(e)) { + return undefined; + } + throw e; + } +} +exports.stackStatus = stackStatus; +function isStackMissingError(e) { + return e.message.indexOf('does not exist') > -1; +} +exports.isStackMissingError = isStackMissingError; +function isBucketMissingError(e) { + return e.message.indexOf('does not exist') > -1; +} +exports.isBucketMissingError = isBucketMissingError; +/** + * Retry an async operation until a deadline is hit. 
+ * + * Use `retry.forSeconds()` to construct a deadline relative to right now. + * + * Exceptions will cause the operation to retry. Use `retry.abort` to annotate an exception + * to stop the retry and end in a failure. + */ +async function retry(operation, deadline, block) { + let i = 0; + cdk_helpers_1.log(`💈 ${operation}`); + while (true) { + try { + i++; + const ret = await block(); + cdk_helpers_1.log(`💈 ${operation}: succeeded after ${i} attempts`); + return ret; + } + catch (e) { + if (e.abort || Date.now() > deadline.getTime()) { + throw new Error(`${operation}: did not succeed after ${i} attempts: ${e}`); + } + cdk_helpers_1.log(`⏳ ${operation} (${e.message})`); + await sleep(5000); + } + } +} +exports.retry = retry; +/** + * Make a deadline for the `retry` function relative to the current time. + */ +retry.forSeconds = (seconds) => { + return new Date(Date.now() + seconds * 1000); +}; +/** + * Annotate an error to stop the retrying + */ +retry.abort = (e) => { + e.abort = true; + return e; +}; +async function sleep(ms) { + return new Promise(ok => setTimeout(ok, ms)); +} +exports.sleep = sleep; +async function emptyBucket(bucketName) { + const objects = await exports.s3('listObjects', { Bucket: bucketName }); + const deletes = (objects.Contents || []).map(obj => obj.Key || '').filter(d => !!d); + if (deletes.length === 0) { + return Promise.resolve(); + } + return exports.s3('deleteObjects', { + Bucket: bucketName, + Delete: { + Objects: deletes.map(d => ({ Key: d })), + Quiet: false, + }, + }); +} +exports.emptyBucket = emptyBucket; +async function deleteImageRepository(repositoryName) { + await exports.ecr('deleteRepository', { repositoryName, force: true }); +} +exports.deleteImageRepository = deleteImageRepository; +async function deleteBucket(bucketName) { + try { + await emptyBucket(bucketName); + await exports.s3('deleteBucket', { + Bucket: bucketName, + }); + } + catch (e) { + if (isBucketMissingError(e)) { + return; + } + throw e; + } +} 
+exports.deleteBucket = deleteBucket; +function outputFromStack(key, stack) { + var _a, _b; + return (_b = ((_a = stack.Outputs) !== null && _a !== void 0 ? _a : []).find(o => o.OutputKey === key)) === null || _b === void 0 ? void 0 : _b.OutputValue; +} +exports.outputFromStack = outputFromStack; +//# sourceMappingURL=data:application/json;base64,{"version":3,"file":"aws-helpers.js","sourceRoot":"","sources":["aws-helpers.ts"],"names":[],"mappings":";;;AAAA,+BAA+B;AAC/B,+CAAoC;AAOzB,QAAA,OAAO,GAAG,KAAK,IAAkB,EAAE;;IAC5C,MAAM,QAAQ,GAAG,MAAM,IAAI,GAAG,CAAC,GAAG,EAAE,CAAC,iBAAiB,EAAE,CAAC,OAAO,EAAE,CAAC;IAEnE,MAAM,GAAG,GAAQ;QACf,OAAO,EAAE,QAAQ,CAAC,OAAQ;QAC1B,MAAM,cAAE,OAAO,CAAC,GAAG,CAAC,UAAU,mCAAI,OAAO,CAAC,GAAG,CAAC,kBAAkB,mCAAI,WAAW;KAChF,CAAC;IAEF,eAAO,GAAG,GAAG,EAAE,CAAC,OAAO,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC;IACrC,OAAO,GAAG,CAAC;AACb,CAAC,CAAC;AAEW,QAAA,cAAc,GAAG,aAAa,CAAC,GAAG,CAAC,cAAc,CAAC,CAAC;AACnD,QAAA,EAAE,GAAG,aAAa,CAAC,GAAG,CAAC,EAAE,CAAC,CAAC;AAC3B,QAAA,GAAG,GAAG,aAAa,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC;AAC7B,QAAA,GAAG,GAAG,aAAa,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC;AAC7B,QAAA,GAAG,GAAG,aAAa,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC;AAC7B,QAAA,MAAM,GAAG,aAAa,CAAC,GAAG,CAAC,MAAM,CAAC,CAAC;AACnC,QAAA,GAAG,GAAG,aAAa,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC;AAE1C;;;;GAIG;AACH,KAAK,UAAU,OAAO,CAGpB,IAA4B,EAAE,IAAO,EAAE,OAAkC;IACzE,MAAM,GAAG,GAAG,MAAM,eAAO,EAAE,CAAC;IAE5B,MAAM,WAAW,GAAG,OAAO,CAAC,GAAG,CAAC,WAAW,CAAC;IAC5C,IAAI,KAAK,GAAG,SAAS,CAAC;IACtB,IAAI,OAAO,CAAC,GAAG,CAAC,mBAAmB,IAAI,WAAW,EAAE;QAElD,yDAAyD;QACzD,iEAAiE;QACjE,uCAAuC;QAEvC,4DAA4D;QAC5D,MAAM,UAAU,GAAG,GAAG,OAAO,CAAC,GAAG,CAAC,IAAI,cAAc,CAAC;QACrD,MAAM,GAAG,GAAG,IAAI,GAAG,CAAC,SAAS,EAAE,CAAC,QAAQ,CAAC;YACvC,QAAQ,EAAE,UAAU;YACpB,QAAQ,EAAE,IAAI;SACf,CAAC,CAAC;QAEH,MAAM,OAAO,GAAG,GAAG,CAAC,WAAW,CAAC,CAAC;QAEjC,IAAI,CAAC,OAAO,EAAE;YACZ,MAAM,IAAI,KAAK,CAAC,YAAY,WAAW,oCAAoC,UAAU,GAAG,CAAC,CAAC;SAC3F;QAED,MAAM,GAAG,GAAG,OAAO,CAAC,QAAQ,CAAC;QAC7B,MAAM,UAAU,GAAG,OAAO,CAAC,WAAW,CAAC;QAEvC,IAAI,CAAC,GAAG,EAAE;YACR,MAAM,IAAI,KAAK,CAAC,sCAAsC,W
AAW,EAAE,CAAC,CAAC;SACtE;QAED,IAAI,CAAC,UAAU,EAAE;YACf,MAAM,IAAI,KAAK,CAAC,yCAAyC,UAAU,EAAE,CAAC,CAAC;SACxE;QAED,KAAK,GAAG,IAAI,GAAG,CAAC,6BAA6B,CAAC;YAC5C,MAAM,EAAE;gBACN,OAAO,EAAE,GAAG;gBACZ,UAAU,EAAE,UAAU;gBACtB,eAAe,EAAE,aAAa;aAC/B;YACD,SAAS,EAAE;gBACT,MAAM,EAAE,GAAG,CAAC,MAAM;aACnB;YACD,iBAAiB,EAAE,IAAI,GAAG,CAAC,cAAc,EAAE;SAC5C,CAAC,CAAC;KAEJ;IAED,MAAM,GAAG,GAAG,IAAI,IAAI,CAAC;QACnB,MAAM,EAAE,GAAG,CAAC,MAAM;QAClB,WAAW,EAAE,KAAK;QAClB,UAAU,EAAE,CAAC;QACb,iBAAiB,EAAE;YACjB,IAAI,EAAE,GAAG;SACV;KACF,CAAC,CAAC;IAEH,MAAM,QAAQ,GAAG,GAAG,CAAC,IAAI,CAAC,CAAC,OAAO,CAAC,CAAC;IACpC,IAAI;QACF,OAAO,MAAM,QAAQ,CAAC,OAAO,EAAE,CAAC;KACjC;IAAC,OAAO,CAAC,EAAE;QACV,MAAM,MAAM,GAAG,IAAI,KAAK,CAAC,GAAG,IAAI,IAAI,IAAI,CAAC,SAAS,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC,OAAO,EAAE,CAAC,CAAC;QAC7E,MAAc,CAAC,IAAI,GAAG,CAAC,CAAC,IAAI,CAAC;QAC9B,MAAM,MAAM,CAAC;KACd;AACH,CAAC;AAED;;;;;;;;;;;;GAYG;AACH,SAAS,aAAa,CAAwB,IAA4B;IACxE,OAAO,CAAkC,IAAO,EAAE,OAAkC,EAAuC,EAAE;QAC3H,OAAO,OAAO,CAAC,IAAI,EAAE,IAAI,EAAE,OAAO,CAAC,CAAC;IACtC,CAAC,CAAC;AACJ,CAAC;AAyBM,KAAK,UAAU,YAAY,CAAC,GAAG,UAAoB;IACxD,IAAI,UAAU,CAAC,MAAM,KAAK,CAAC,EAAE;QAAE,OAAO;KAAE;IAExC,KAAK,MAAM,SAAS,IAAI,UAAU,EAAE;QAClC,MAAM,sBAAc,CAAC,6BAA6B,EAAE;YAClD,2BAA2B,EAAE,KAAK;YAClC,SAAS,EAAE,SAAS;SACrB,CAAC,CAAC;QACH,MAAM,sBAAc,CAAC,aAAa,EAAE;YAClC,SAAS,EAAE,SAAS;SACrB,CAAC,CAAC;KACJ;IAED,MAAM,KAAK,CAAC,YAAY,UAAU,EAAE,EAAE,KAAK,CAAC,UAAU,CAAC,GAAG,CAAC,EAAE,KAAK,IAAI,EAAE;QACtE,KAAK,MAAM,SAAS,IAAI,UAAU,EAAE;YAClC,MAAM,MAAM,GAAG,MAAM,WAAW,CAAC,SAAS,CAAC,CAAC;YAC5C,IAAI,MAAM,KAAK,SAAS,IAAI,MAAM,CAAC,QAAQ,CAAC,SAAS,CAAC,EAAE;gBACtD,MAAM,KAAK,CAAC,KAAK,CAAC,IAAI,KAAK,CAAC,IAAI,SAAS,kBAAkB,MAAM,GAAG,CAAC,CAAC,CAAC;aACxE;YACD,IAAI,MAAM,KAAK,SAAS,EAAE;gBACxB,MAAM,IAAI,KAAK,CAAC,cAAc,SAAS,oBAAoB,CAAC,CAAC;aAC9D;SACF;IACH,CAAC,CAAC,CAAC;AACL,CAAC;AAxBD,oCAwBC;AAEM,KAAK,UAAU,WAAW,CAAC,SAAiB;;IACjD,IAAI;QACF,aAAO,CAAC,MAAM,sBAAc,CAAC,gBAAgB,EAAE,EAAE,SAAS,EAAE,SAAS,EAAE,CAAC,CAAC,CAAC,MAAM,0CAAG,CAAC,EAAE,WAAW,CAAC;KACnG;IAAC,OAAO,CAAC,EAAE;QACV,IAAI,mBAAmB,CAA
C,CAAC,CAAC,EAAE;YAAE,OAAO,SAAS,CAAC;SAAE;QACjD,MAAM,CAAC,CAAC;KACT;AACH,CAAC;AAPD,kCAOC;AAED,SAAgB,mBAAmB,CAAC,CAAQ;IAC1C,OAAO,CAAC,CAAC,OAAO,CAAC,OAAO,CAAC,gBAAgB,CAAC,GAAG,CAAC,CAAC,CAAC;AAClD,CAAC;AAFD,kDAEC;AAED,SAAgB,oBAAoB,CAAC,CAAQ;IAC3C,OAAO,CAAC,CAAC,OAAO,CAAC,OAAO,CAAC,gBAAgB,CAAC,GAAG,CAAC,CAAC,CAAC;AAClD,CAAC;AAFD,oDAEC;AAED;;;;;;;GAOG;AACI,KAAK,UAAU,KAAK,CAAI,SAAiB,EAAE,QAAc,EAAE,KAAuB;IACvF,IAAI,CAAC,GAAG,CAAC,CAAC;IACV,iBAAG,CAAC,MAAM,SAAS,EAAE,CAAC,CAAC;IACvB,OAAO,IAAI,EAAE;QACX,IAAI;YACF,CAAC,EAAE,CAAC;YACJ,MAAM,GAAG,GAAG,MAAM,KAAK,EAAE,CAAC;YAC1B,iBAAG,CAAC,MAAM,SAAS,qBAAqB,CAAC,WAAW,CAAC,CAAC;YACtD,OAAO,GAAG,CAAC;SACZ;QAAC,OAAO,CAAC,EAAE;YACV,IAAI,CAAC,CAAC,KAAK,IAAI,IAAI,CAAC,GAAG,EAAE,GAAG,QAAQ,CAAC,OAAO,EAAG,EAAE;gBAC/C,MAAM,IAAI,KAAK,CAAC,GAAG,SAAS,2BAA2B,CAAC,cAAc,CAAC,EAAE,CAAC,CAAC;aAC5E;YACD,iBAAG,CAAC,KAAK,SAAS,KAAK,CAAC,CAAC,OAAO,GAAG,CAAC,CAAC;YACrC,MAAM,KAAK,CAAC,IAAI,CAAC,CAAC;SACnB;KACF;AACH,CAAC;AAjBD,sBAiBC;AAED;;GAEG;AACH,KAAK,CAAC,UAAU,GAAG,CAAC,OAAe,EAAQ,EAAE;IAC3C,OAAO,IAAI,IAAI,CAAC,IAAI,CAAC,GAAG,EAAE,GAAG,OAAO,GAAG,IAAI,CAAC,CAAC;AAC/C,CAAC,CAAC;AAEF;;GAEG;AACH,KAAK,CAAC,KAAK,GAAG,CAAC,CAAQ,EAAS,EAAE;IAC/B,CAAS,CAAC,KAAK,GAAG,IAAI,CAAC;IACxB,OAAO,CAAC,CAAC;AACX,CAAC,CAAC;AAEK,KAAK,UAAU,KAAK,CAAC,EAAU;IACpC,OAAO,IAAI,OAAO,CAAC,EAAE,CAAC,EAAE,CAAC,UAAU,CAAC,EAAE,EAAE,EAAE,CAAC,CAAC,CAAC;AAC/C,CAAC;AAFD,sBAEC;AAEM,KAAK,UAAU,WAAW,CAAC,UAAkB;IAClD,MAAM,OAAO,GAAG,MAAM,UAAE,CAAC,aAAa,EAAE,EAAE,MAAM,EAAE,UAAU,EAAE,CAAC,CAAC;IAChE,MAAM,OAAO,GAAG,CAAC,OAAO,CAAC,QAAQ,IAAI,EAAE,CAAC,CAAC,GAAG,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,CAAC,GAAG,IAAI,EAAE,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;IACpF,IAAI,OAAO,CAAC,MAAM,KAAK,CAAC,EAAE;QACxB,OAAO,OAAO,CAAC,OAAO,EAAE,CAAC;KAC1B;IACD,OAAO,UAAE,CAAC,eAAe,EAAE;QACzB,MAAM,EAAE,UAAU;QAClB,MAAM,EAAE;YACN,OAAO,EAAE,OAAO,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,EAAE,GAAG,EAAE,CAAC,EAAE,CAAC,CAAC;YACvC,KAAK,EAAE,KAAK;SACb;KACF,CAAC,CAAC;AACL,CAAC;AAbD,kCAaC;AAEM,KAAK,UAAU,qBAAqB,CAAC,cA
AsB;IAChE,MAAM,WAAG,CAAC,kBAAkB,EAAE,EAAE,cAAc,EAAE,KAAK,EAAE,IAAI,EAAE,CAAC,CAAC;AACjE,CAAC;AAFD,sDAEC;AAEM,KAAK,UAAU,YAAY,CAAC,UAAkB;IACnD,IAAI;QACF,MAAM,WAAW,CAAC,UAAU,CAAC,CAAC;QAC9B,MAAM,UAAE,CAAC,cAAc,EAAE;YACvB,MAAM,EAAE,UAAU;SACnB,CAAC,CAAC;KACJ;IAAC,OAAO,CAAC,EAAE;QACV,IAAI,oBAAoB,CAAC,CAAC,CAAC,EAAE;YAAE,OAAO;SAAE;QACxC,MAAM,CAAC,CAAC;KACT;AACH,CAAC;AAVD,oCAUC;AAED,SAAgB,eAAe,CAAC,GAAW,EAAE,KAA+B;;IAC1E,aAAO,OAAC,KAAK,CAAC,OAAO,mCAAI,EAAE,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,SAAS,KAAK,GAAG,CAAC,0CAAE,WAAW,CAAC;AAC3E,CAAC;AAFD,0CAEC","sourcesContent":["import * as AWS from 'aws-sdk';\nimport { log } from './cdk-helpers';\n\ninterface Env {\n  account: string;\n  region: string;\n}\n\nexport let testEnv = async (): Promise<Env> => {\n  const response = await new AWS.STS().getCallerIdentity().promise();\n\n  const ret: Env = {\n    account: response.Account!,\n    region: process.env.AWS_REGION ?? process.env.AWS_DEFAULT_REGION ?? 'us-east-1',\n  };\n\n  testEnv = () => Promise.resolve(ret);\n  return ret;\n};\n\nexport const cloudFormation = makeAwsCaller(AWS.CloudFormation);\nexport const s3 = makeAwsCaller(AWS.S3);\nexport const ecr = makeAwsCaller(AWS.ECR);\nexport const sns = makeAwsCaller(AWS.SNS);\nexport const iam = makeAwsCaller(AWS.IAM);\nexport const lambda = makeAwsCaller(AWS.Lambda);\nexport const sts = makeAwsCaller(AWS.STS);\n\n/**\n * Perform an AWS call from nothing\n *\n * Create the correct client, do the call and resole the promise().\n */\nasync function awsCall<\n  A extends AWS.Service,\n  B extends keyof ServiceCalls<A>,\n>(ctor: new (config: any) => A, call: B, request: First<ServiceCalls<A>[B]>): Promise<Second<ServiceCalls<A>[B]>> {\n  const env = await testEnv();\n\n  const profileName = process.env.AWS_PROFILE;\n  let creds = undefined;\n  if (process.env.CODEBUILD_BUILD_ARN && profileName) {\n\n    // in codebuild we must assume the role that the cdk uses\n    // otherwise credentials will just be picked up by the 
normal sdk\n    // heuristics and expire after an hour.\n\n    // can't use '~' since the SDK doesn't seem to expand it...?\n    const configPath = `${process.env.HOME}/.aws/config`;\n    const ini = new AWS.IniLoader().loadFrom({\n      filename: configPath,\n      isConfig: true,\n    });\n\n    const profile = ini[profileName];\n\n    if (!profile) {\n      throw new Error(`Profile '${profileName}' does not exist in config file (${configPath})`);\n    }\n\n    const arn = profile.role_arn;\n    const externalId = profile.external_id;\n\n    if (!arn) {\n      throw new Error(`role_arn does not exist in profile ${profileName}`);\n    }\n\n    if (!externalId) {\n      throw new Error(`external_id does not exist in profile ${externalId}`);\n    }\n\n    creds = new AWS.ChainableTemporaryCredentials({\n      params: {\n        RoleArn: arn,\n        ExternalId: externalId,\n        RoleSessionName: 'integ-tests',\n      },\n      stsConfig: {\n        region: env.region,\n      },\n      masterCredentials: new AWS.ECSCredentials(),\n    });\n\n  }\n\n  const cfn = new ctor({\n    region: env.region,\n    credentials: creds,\n    maxRetries: 6,\n    retryDelayOptions: {\n      base: 500,\n    },\n  });\n\n  const response = cfn[call](request);\n  try {\n    return await response.promise();\n  } catch (e) {\n    const newErr = new Error(`${call}(${JSON.stringify(request)}): ${e.message}`);\n    (newErr as any).code = e.code;\n    throw newErr;\n  }\n}\n\n/**\n * Factory function to invoke 'awsCall' for specific services.\n *\n * Not strictly necessary but calling this replaces a whole bunch of annoying generics you otherwise have to type:\n *\n * ```ts\n * export function cloudFormation<\n *   C extends keyof ServiceCalls<AWS.CloudFormation>,\n * >(call: C, request: First<ServiceCalls<AWS.CloudFormation>[C]>): Promise<Second<ServiceCalls<AWS.CloudFormation>[C]>> {\n *   return awsCall(AWS.CloudFormation, call, request);\n * }\n * ```\n */\nfunction makeAwsCaller<A 
extends AWS.Service>(ctor: new (config: any) => A) {\n  return <B extends keyof ServiceCalls<A>>(call: B, request: First<ServiceCalls<A>[B]>): Promise<Second<ServiceCalls<A>[B]>> => {\n    return awsCall(ctor, call, request);\n  };\n}\n\ntype ServiceCalls<T> = NoNayNever<SimplifiedService<T>>;\n// Map ever member in the type to the important AWS call overload, or to 'never'\ntype SimplifiedService<T> = {[k in keyof T]: AwsCallIO<T[k]>};\n// Remove all 'never' types from an object type\ntype NoNayNever<T> = Pick<T, {[k in keyof T]: T[k] extends never ? never : k }[keyof T]>;\n\n// Because of the overloads an AWS handler type looks like this:\n//\n//   {\n//      (params: INPUTSTRUCT, callback?: ((err: AWSError, data: {}) => void) | undefined): Request<OUTPUT, ...>;\n//      (callback?: ((err: AWS.AWSError, data: {}) => void) | undefined): AWS.Request<...>;\n//   }\n//\n// Get the first overload and extract the input and output struct types\ntype AwsCallIO<T> =\n  T extends {\n    (args: infer INPUT, callback?: ((err: AWS.AWSError, data: any) => void) | undefined): AWS.Request<infer OUTPUT, AWS.AWSError>;\n    (callback?: ((err: AWS.AWSError, data: {}) => void) | undefined): AWS.Request<any, any>;\n  } ? [INPUT, OUTPUT] : never;\n\ntype First<T> = T extends [any, any] ? T[0] : never;\ntype Second<T> = T extends [any, any] ? 
T[1] : never;\n\nexport async function deleteStacks(...stackNames: string[]) {\n  if (stackNames.length === 0) { return; }\n\n  for (const stackName of stackNames) {\n    await cloudFormation('updateTerminationProtection', {\n      EnableTerminationProtection: false,\n      StackName: stackName,\n    });\n    await cloudFormation('deleteStack', {\n      StackName: stackName,\n    });\n  }\n\n  await retry(`Deleting ${stackNames}`, retry.forSeconds(600), async () => {\n    for (const stackName of stackNames) {\n      const status = await stackStatus(stackName);\n      if (status !== undefined && status.endsWith('_FAILED')) {\n        throw retry.abort(new Error(`'${stackName}' is in state '${status}'`));\n      }\n      if (status !== undefined) {\n        throw new Error(`Delete of '${stackName}' not complete yet`);\n      }\n    }\n  });\n}\n\nexport async function stackStatus(stackName: string): Promise<string | undefined> {\n  try {\n    return (await cloudFormation('describeStacks', { StackName: stackName })).Stacks?.[0].StackStatus;\n  } catch (e) {\n    if (isStackMissingError(e)) { return undefined; }\n    throw e;\n  }\n}\n\nexport function isStackMissingError(e: Error) {\n  return e.message.indexOf('does not exist') > -1;\n}\n\nexport function isBucketMissingError(e: Error) {\n  return e.message.indexOf('does not exist') > -1;\n}\n\n/**\n * Retry an async operation until a deadline is hit.\n *\n * Use `retry.forSeconds()` to construct a deadline relative to right now.\n *\n * Exceptions will cause the operation to retry. 
Use `retry.abort` to annotate an exception\n * to stop the retry and end in a failure.\n */\nexport async function retry<A>(operation: string, deadline: Date, block: () => Promise<A>): Promise<A> {\n  let i = 0;\n  log(`💈 ${operation}`);\n  while (true) {\n    try {\n      i++;\n      const ret = await block();\n      log(`💈 ${operation}: succeeded after ${i} attempts`);\n      return ret;\n    } catch (e) {\n      if (e.abort || Date.now() > deadline.getTime( )) {\n        throw new Error(`${operation}: did not succeed after ${i} attempts: ${e}`);\n      }\n      log(`⏳ ${operation} (${e.message})`);\n      await sleep(5000);\n    }\n  }\n}\n\n/**\n * Make a deadline for the `retry` function relative to the current time.\n */\nretry.forSeconds = (seconds: number): Date => {\n  return new Date(Date.now() + seconds * 1000);\n};\n\n/**\n * Annotate an error to stop the retrying\n */\nretry.abort = (e: Error): Error => {\n  (e as any).abort = true;\n  return e;\n};\n\nexport async function sleep(ms: number) {\n  return new Promise(ok => setTimeout(ok, ms));\n}\n\nexport async function emptyBucket(bucketName: string) {\n  const objects = await s3('listObjects', { Bucket: bucketName });\n  const deletes = (objects.Contents || []).map(obj => obj.Key || '').filter(d => !!d);\n  if (deletes.length === 0) {\n    return Promise.resolve();\n  }\n  return s3('deleteObjects', {\n    Bucket: bucketName,\n    Delete: {\n      Objects: deletes.map(d => ({ Key: d })),\n      Quiet: false,\n    },\n  });\n}\n\nexport async function deleteImageRepository(repositoryName: string) {\n  await ecr('deleteRepository', { repositoryName, force: true });\n}\n\nexport async function deleteBucket(bucketName: string) {\n  try {\n    await emptyBucket(bucketName);\n    await s3('deleteBucket', {\n      Bucket: bucketName,\n    });\n  } catch (e) {\n    if (isBucketMissingError(e)) { return; }\n    throw e;\n  }\n}\n\nexport function outputFromStack(key: string, stack: AWS.CloudFormation.Stack): 
string | undefined {\n  return (stack.Outputs ?? []).find(o => o.OutputKey === key)?.OutputValue;\n}\n"]} \ No newline at end of file diff --git a/packages/aws-cdk/test/integ/cli/app/app.js b/packages/aws-cdk/test/integ/cli/app/app.js index ab5efffbded80..175b658949fc4 100644 --- a/packages/aws-cdk/test/integ/cli/app/app.js +++ b/packages/aws-cdk/test/integ/cli/app/app.js @@ -239,6 +239,14 @@ class ConditionalResourceStack extends cdk.Stack { } } +class SomeStage extends cdk.Stage { + constructor(parent, id, props) { + super(parent, id, props); + + new YourStack(this, 'StackInStage'); + } +} + const app = new cdk.App(); const defaultEnv = { @@ -286,4 +294,6 @@ new YourStack(app, `${stackPrefix}-termination-protection`, { terminationProtection: process.env.TERMINATION_PROTECTION !== 'FALSE' ? true : false, }); +new SomeStage(app, `${stackPrefix}-stage`); + app.synth(); diff --git a/packages/aws-cdk/test/integ/cli/aws-helpers.ts b/packages/aws-cdk/test/integ/cli/aws-helpers.ts index 05fbc6607983c..a8058ba6595ef 100644 --- a/packages/aws-cdk/test/integ/cli/aws-helpers.ts +++ b/packages/aws-cdk/test/integ/cli/aws-helpers.ts @@ -1,30 +1,113 @@ import * as AWS from 'aws-sdk'; -import { log } from './cdk-helpers'; -interface Env { - account: string; - region: string; -} +export class AwsClients { + public static async default(output: NodeJS.WritableStream) { + const region = process.env.AWS_REGION ?? process.env.AWS_DEFAULT_REGION ?? 'us-east-1'; + return AwsClients.forRegion(region, output); + } -export let testEnv = async (): Promise => { - const response = await new AWS.STS().getCallerIdentity().promise(); + public static async forRegion(region: string, output: NodeJS.WritableStream) { + return new AwsClients(region, output); + } - const ret: Env = { - account: response.Account!, - region: process.env.AWS_REGION ?? process.env.AWS_DEFAULT_REGION ?? 
'us-east-1', - }; + private readonly config: any; - testEnv = () => Promise.resolve(ret); - return ret; -}; + public readonly cloudFormation: AwsCaller; + public readonly s3: AwsCaller; + public readonly ecr: AwsCaller; + public readonly sns: AwsCaller; + public readonly iam: AwsCaller; + public readonly lambda: AwsCaller; + public readonly sts: AwsCaller; -export const cloudFormation = makeAwsCaller(AWS.CloudFormation); -export const s3 = makeAwsCaller(AWS.S3); -export const ecr = makeAwsCaller(AWS.ECR); -export const sns = makeAwsCaller(AWS.SNS); -export const iam = makeAwsCaller(AWS.IAM); -export const lambda = makeAwsCaller(AWS.Lambda); -export const sts = makeAwsCaller(AWS.STS); + constructor(public readonly region: string, private readonly output: NodeJS.WritableStream) { + this.config = { + credentials: chainableCredentials(this.region), + region: this.region, + maxRetries: 8, + retryDelayOptions: { base: 500 }, + stsRegionalEndpoints: 'regional', + }; + this.cloudFormation = makeAwsCaller(AWS.CloudFormation, this.config); + this.s3 = makeAwsCaller(AWS.S3, this.config); + this.ecr = makeAwsCaller(AWS.ECR, this.config); + this.sns = makeAwsCaller(AWS.SNS, this.config); + this.iam = makeAwsCaller(AWS.IAM, this.config); + this.lambda = makeAwsCaller(AWS.Lambda, this.config); + this.sts = makeAwsCaller(AWS.STS, this.config); + } + + public async account(): Promise { + // Reduce # of retries, we use this as a circuit breaker for detecting no-config + return (await new AWS.STS({ ...this.config, maxRetries: 1 }).getCallerIdentity().promise()).Account!; + } + + public async deleteStacks(...stackNames: string[]) { + if (stackNames.length === 0) { return; } + + for (const stackName of stackNames) { + await this.cloudFormation('updateTerminationProtection', { + EnableTerminationProtection: false, + StackName: stackName, + }); + await this.cloudFormation('deleteStack', { + StackName: stackName, + }); + } + + await retry(this.output, `Deleting ${stackNames}`, 
retry.forSeconds(600), async () => { + for (const stackName of stackNames) { + const status = await this.stackStatus(stackName); + if (status !== undefined && status.endsWith('_FAILED')) { + throw retry.abort(new Error(`'${stackName}' is in state '${status}'`)); + } + if (status !== undefined) { + throw new Error(`Delete of '${stackName}' not complete yet`); + } + } + }); + } + + public async stackStatus(stackName: string): Promise { + try { + return (await this.cloudFormation('describeStacks', { StackName: stackName })).Stacks?.[0].StackStatus; + } catch (e) { + if (isStackMissingError(e)) { return undefined; } + throw e; + } + } + + public async emptyBucket(bucketName: string) { + const objects = await this.s3('listObjects', { Bucket: bucketName }); + const deletes = (objects.Contents || []).map(obj => obj.Key || '').filter(d => !!d); + if (deletes.length === 0) { + return Promise.resolve(); + } + return this.s3('deleteObjects', { + Bucket: bucketName, + Delete: { + Objects: deletes.map(d => ({ Key: d })), + Quiet: false, + }, + }); + } + + public async deleteImageRepository(repositoryName: string) { + await this.ecr('deleteRepository', { repositoryName, force: true }); + } + + public async deleteBucket(bucketName: string) { + try { + await this.emptyBucket(bucketName); + await this.s3('deleteBucket', { + Bucket: bucketName, + }); + } catch (e) { + if (isBucketMissingError(e)) { return; } + throw e; + } + } +} /** * Perform an AWS call from nothing @@ -34,9 +117,8 @@ export const sts = makeAwsCaller(AWS.STS); async function awsCall< A extends AWS.Service, B extends keyof ServiceCalls, ->(ctor: new (config: any) => A, call: B, request: First[B]>): Promise[B]>> { - const env = await testEnv(); - const cfn = new ctor({ region: env.region, maxRetries: 6, retryDelayOptions: { base: 500 } }); +>(ctor: new (config: any) => A, config: any, call: B, request: First[B]>): Promise[B]>> { + const cfn = new ctor(config); const response = cfn[call](request); try { return await 
response.promise(); @@ -47,6 +129,8 @@ async function awsCall< } } +type AwsCaller = >(call: B, request: First[B]>) => Promise[B]>>; + /** * Factory function to invoke 'awsCall' for specific services. * @@ -60,9 +144,9 @@ async function awsCall< * } * ``` */ -function makeAwsCaller(ctor: new (config: any) => A) { +function makeAwsCaller(ctor: new (config: any) => A, config: any): AwsCaller { return >(call: B, request: First[B]>): Promise[B]>> => { - return awsCall(ctor, call, request); + return awsCall(ctor, config, call, request); }; } @@ -89,40 +173,6 @@ type AwsCallIO = type First = T extends [any, any] ? T[0] : never; type Second = T extends [any, any] ? T[1] : never; -export async function deleteStacks(...stackNames: string[]) { - if (stackNames.length === 0) { return; } - - for (const stackName of stackNames) { - await cloudFormation('updateTerminationProtection', { - EnableTerminationProtection: false, - StackName: stackName, - }); - await cloudFormation('deleteStack', { - StackName: stackName, - }); - } - - await retry(`Deleting ${stackNames}`, retry.forSeconds(600), async () => { - for (const stackName of stackNames) { - const status = await stackStatus(stackName); - if (status !== undefined && status.endsWith('_FAILED')) { - throw retry.abort(new Error(`'${stackName}' is in state '${status}'`)); - } - if (status !== undefined) { - throw new Error(`Delete of '${stackName}' not complete yet`); - } - } - }); -} - -export async function stackStatus(stackName: string): Promise { - try { - return (await cloudFormation('describeStacks', { StackName: stackName })).Stacks?.[0].StackStatus; - } catch (e) { - if (isStackMissingError(e)) { return undefined; } - throw e; - } -} export function isStackMissingError(e: Error) { return e.message.indexOf('does not exist') > -1; @@ -140,20 +190,20 @@ export function isBucketMissingError(e: Error) { * Exceptions will cause the operation to retry. 
Use `retry.abort` to annotate an exception * to stop the retry and end in a failure. */ -export async function retry(operation: string, deadline: Date, block: () => Promise): Promise { +export async function retry(output: NodeJS.WritableStream, operation: string, deadline: Date, block: () => Promise): Promise { let i = 0; - log(`💈 ${operation}`); + output.write(`💈 ${operation}\n`); while (true) { try { i++; const ret = await block(); - log(`💈 ${operation}: succeeded after ${i} attempts`); + output.write(`💈 ${operation}: succeeded after ${i} attempts\n`); return ret; } catch (e) { if (e.abort || Date.now() > deadline.getTime( )) { throw new Error(`${operation}: did not succeed after ${i} attempts: ${e}`); } - log(`⏳ ${operation} (${e.message})`); + output.write(`⏳ ${operation} (${e.message})\n`); await sleep(5000); } } @@ -178,37 +228,56 @@ export async function sleep(ms: number) { return new Promise(ok => setTimeout(ok, ms)); } -export async function emptyBucket(bucketName: string) { - const objects = await s3('listObjects', { Bucket: bucketName }); - const deletes = (objects.Contents || []).map(obj => obj.Key || '').filter(d => !!d); - if (deletes.length === 0) { - return Promise.resolve(); - } - return s3('deleteObjects', { - Bucket: bucketName, - Delete: { - Objects: deletes.map(d => ({ Key: d })), - Quiet: false, - }, - }); +export function outputFromStack(key: string, stack: AWS.CloudFormation.Stack): string | undefined { + return (stack.Outputs ?? 
[]).find(o => o.OutputKey === key)?.OutputValue; } -export async function deleteImageRepository(repositoryName: string) { - await ecr('deleteRepository', { repositoryName, force: true }); -} +function chainableCredentials(region: string): AWS.Credentials | undefined { -export async function deleteBucket(bucketName: string) { - try { - await emptyBucket(bucketName); - await s3('deleteBucket', { - Bucket: bucketName, + const profileName = process.env.AWS_PROFILE; + if (process.env.CODEBUILD_BUILD_ARN && profileName) { + + // in codebuild we must assume the role that the cdk uses + // otherwise credentials will just be picked up by the normal sdk + // heuristics and expire after an hour. + + // can't use '~' since the SDK doesn't seem to expand it...? + const configPath = `${process.env.HOME}/.aws/config`; + const ini = new AWS.IniLoader().loadFrom({ + filename: configPath, + isConfig: true, + }); + + const profile = ini[profileName]; + + if (!profile) { + throw new Error(`Profile '${profileName}' does not exist in config file (${configPath})`); + } + + const arn = profile.role_arn; + const externalId = profile.external_id; + + if (!arn) { + throw new Error(`role_arn does not exist in profile ${profileName}`); + } + + if (!externalId) { + throw new Error(`external_id does not exist in profile ${externalId}`); + } + + return new AWS.ChainableTemporaryCredentials({ + params: { + RoleArn: arn, + ExternalId: externalId, + RoleSessionName: 'integ-tests', + }, + stsConfig: { + region, + }, + masterCredentials: new AWS.ECSCredentials(), }); - } catch (e) { - if (isBucketMissingError(e)) { return; } - throw e; } -} -export function outputFromStack(key: string, stack: AWS.CloudFormation.Stack): string | undefined { - return (stack.Outputs ?? 
[]).find(o => o.OutputKey === key)?.OutputValue; + return undefined; + } diff --git a/packages/aws-cdk/test/integ/cli/bootstrapping.integtest.ts b/packages/aws-cdk/test/integ/cli/bootstrapping.integtest.ts index a250cd5551526..5c33732d630d3 100644 --- a/packages/aws-cdk/test/integ/cli/bootstrapping.integtest.ts +++ b/packages/aws-cdk/test/integ/cli/bootstrapping.integtest.ts @@ -1,62 +1,47 @@ import * as fs from 'fs'; import * as path from 'path'; -import { cloudFormation } from './aws-helpers'; -import { cdk, cdkDeploy, cleanup, fullStackName, prepareAppFixture, rememberToDeleteBucket, INTEG_TEST_DIR } from './cdk-helpers'; +import { randomString, withDefaultFixture } from './cdk-helpers'; import { integTest } from './test-helpers'; jest.setTimeout(600_000); -const QUALIFIER = randomString().substr(0, 10); +integTest('can bootstrap without execution', withDefaultFixture(async (fixture) => { + const bootstrapStackName = fixture.fullStackName('bootstrap-stack'); -beforeAll(async () => { - await prepareAppFixture(); -}); - -beforeEach(async () => { - await cleanup(); -}); - -afterEach(async () => { - await cleanup(); -}); - -integTest('can bootstrap without execution', async () => { - const bootstrapStackName = fullStackName('bootstrap-stack'); - - await cdk(['bootstrap', + await fixture.cdk(['bootstrap', '--toolkit-stack-name', bootstrapStackName, '--no-execute']); - const resp = await cloudFormation('describeStacks', { + const resp = await fixture.aws.cloudFormation('describeStacks', { StackName: bootstrapStackName, }); expect(resp.Stacks?.[0].StackStatus).toEqual('REVIEW_IN_PROGRESS'); -}); +})); -integTest('upgrade legacy bootstrap stack to new bootstrap stack while in use', async () => { - const bootstrapStackName = fullStackName('bootstrap-stack'); +integTest('upgrade legacy bootstrap stack to new bootstrap stack while in use', withDefaultFixture(async (fixture) => { + const bootstrapStackName = fixture.fullStackName('bootstrap-stack'); const 
legacyBootstrapBucketName = `aws-cdk-bootstrap-integ-test-legacy-bckt-${randomString()}`; const newBootstrapBucketName = `aws-cdk-bootstrap-integ-test-v2-bckt-${randomString()}`; - rememberToDeleteBucket(legacyBootstrapBucketName); // This one will leak - rememberToDeleteBucket(newBootstrapBucketName); // This one shouldn't leak if the test succeeds, but let's be safe in case it doesn't + fixture.rememberToDeleteBucket(legacyBootstrapBucketName); // This one will leak + fixture.rememberToDeleteBucket(newBootstrapBucketName); // This one shouldn't leak if the test succeeds, but let's be safe in case it doesn't // Legacy bootstrap - await cdk(['bootstrap', + await fixture.cdk(['bootstrap', '--toolkit-stack-name', bootstrapStackName, '--bootstrap-bucket-name', legacyBootstrapBucketName]); // Deploy stack that uses file assets - await cdkDeploy('lambda', { + await fixture.cdkDeploy('lambda', { options: ['--toolkit-stack-name', bootstrapStackName], }); // Upgrade bootstrap stack to "new" style - await cdk(['bootstrap', + await fixture.cdk(['bootstrap', '--toolkit-stack-name', bootstrapStackName, '--bootstrap-bucket-name', newBootstrapBucketName, - '--qualifier', QUALIFIER], { + '--qualifier', fixture.qualifier], { modEnv: { CDK_NEW_BOOTSTRAP: '1', }, @@ -64,20 +49,20 @@ integTest('upgrade legacy bootstrap stack to new bootstrap stack while in use', // (Force) deploy stack again // --force to bypass the check which says that the template hasn't changed. 
- await cdkDeploy('lambda', { + await fixture.cdkDeploy('lambda', { options: [ '--toolkit-stack-name', bootstrapStackName, '--force', ], }); -}); +})); -integTest('deploy new style synthesis to new style bootstrap', async () => { - const bootstrapStackName = fullStackName('bootstrap-stack'); +integTest('deploy new style synthesis to new style bootstrap', withDefaultFixture(async (fixture) => { + const bootstrapStackName = fixture.fullStackName('bootstrap-stack'); - await cdk(['bootstrap', + await fixture.cdk(['bootstrap', '--toolkit-stack-name', bootstrapStackName, - '--qualifier', QUALIFIER, + '--qualifier', fixture.qualifier, '--cloudformation-execution-policies', 'arn:aws:iam::aws:policy/AdministratorAccess'], { modEnv: { CDK_NEW_BOOTSTRAP: '1', @@ -85,21 +70,21 @@ integTest('deploy new style synthesis to new style bootstrap', async () => { }); // Deploy stack that uses file assets - await cdkDeploy('lambda', { + await fixture.cdkDeploy('lambda', { options: [ '--toolkit-stack-name', bootstrapStackName, - '--context', `@aws-cdk/core:bootstrapQualifier=${QUALIFIER}`, + '--context', `@aws-cdk/core:bootstrapQualifier=${fixture.qualifier}`, '--context', '@aws-cdk/core:newStyleStackSynthesis=1', ], }); -}); +})); -integTest('deploy new style synthesis to new style bootstrap (with docker image)', async () => { - const bootstrapStackName = fullStackName('bootstrap-stack'); +integTest('deploy new style synthesis to new style bootstrap (with docker image)', withDefaultFixture(async (fixture) => { + const bootstrapStackName = fixture.fullStackName('bootstrap-stack'); - await cdk(['bootstrap', + await fixture.cdk(['bootstrap', '--toolkit-stack-name', bootstrapStackName, - '--qualifier', QUALIFIER, + '--qualifier', fixture.qualifier, '--cloudformation-execution-policies', 'arn:aws:iam::aws:policy/AdministratorAccess'], { modEnv: { CDK_NEW_BOOTSTRAP: '1', @@ -107,21 +92,21 @@ integTest('deploy new style synthesis to new style bootstrap (with docker image) }); // Deploy stack 
that uses file assets - await cdkDeploy('docker', { + await fixture.cdkDeploy('docker', { options: [ '--toolkit-stack-name', bootstrapStackName, - '--context', `@aws-cdk/core:bootstrapQualifier=${QUALIFIER}`, + '--context', `@aws-cdk/core:bootstrapQualifier=${fixture.qualifier}`, '--context', '@aws-cdk/core:newStyleStackSynthesis=1', ], }); -}); +})); -integTest('deploy old style synthesis to new style bootstrap', async () => { - const bootstrapStackName = fullStackName('bootstrap-stack'); +integTest('deploy old style synthesis to new style bootstrap', withDefaultFixture(async (fixture) => { + const bootstrapStackName = fixture.fullStackName('bootstrap-stack'); - await cdk(['bootstrap', + await fixture.cdk(['bootstrap', '--toolkit-stack-name', bootstrapStackName, - '--qualifier', QUALIFIER, + '--qualifier', fixture.qualifier, '--cloudformation-execution-policies', 'arn:aws:iam::aws:policy/AdministratorAccess'], { modEnv: { CDK_NEW_BOOTSTRAP: '1', @@ -129,56 +114,56 @@ integTest('deploy old style synthesis to new style bootstrap', async () => { }); // Deploy stack that uses file assets - await cdkDeploy('lambda', { + await fixture.cdkDeploy('lambda', { options: [ '--toolkit-stack-name', bootstrapStackName, ], }); -}); +})); -integTest('deploying new style synthesis to old style bootstrap fails', async () => { - const bootstrapStackName = fullStackName('bootstrap-stack'); +integTest('deploying new style synthesis to old style bootstrap fails', withDefaultFixture(async (fixture) => { + const bootstrapStackName = fixture.fullStackName('bootstrap-stack'); - await cdk(['bootstrap', '--toolkit-stack-name', bootstrapStackName]); + await fixture.cdk(['bootstrap', '--toolkit-stack-name', bootstrapStackName]); // Deploy stack that uses file assets, this fails because the bootstrap stack // is version checked. 
- await expect(cdkDeploy('lambda', { + await expect(fixture.cdkDeploy('lambda', { options: [ '--toolkit-stack-name', bootstrapStackName, '--context', '@aws-cdk/core:newStyleStackSynthesis=1', ], })).rejects.toThrow('exited with error'); -}); +})); -integTest('can create a legacy bootstrap stack with --public-access-block-configuration=false', async () => { - const bootstrapStackName = fullStackName('bootstrap-stack-1'); +integTest('can create a legacy bootstrap stack with --public-access-block-configuration=false', withDefaultFixture(async (fixture) => { + const bootstrapStackName = fixture.fullStackName('bootstrap-stack-1'); - await cdk(['bootstrap', '-v', '--toolkit-stack-name', bootstrapStackName, '--public-access-block-configuration', 'false', '--tags', 'Foo=Bar']); + await fixture.cdk(['bootstrap', '-v', '--toolkit-stack-name', bootstrapStackName, '--public-access-block-configuration', 'false', '--tags', 'Foo=Bar']); - const response = await cloudFormation('describeStacks', { StackName: bootstrapStackName }); + const response = await fixture.aws.cloudFormation('describeStacks', { StackName: bootstrapStackName }); expect(response.Stacks?.[0].Tags).toEqual([ { Key: 'Foo', Value: 'Bar' }, ]); -}); +})); -integTest('can create multiple legacy bootstrap stacks', async () => { - const bootstrapStackName1 = fullStackName('bootstrap-stack-1'); - const bootstrapStackName2 = fullStackName('bootstrap-stack-2'); +integTest('can create multiple legacy bootstrap stacks', withDefaultFixture(async (fixture) => { + const bootstrapStackName1 = fixture.fullStackName('bootstrap-stack-1'); + const bootstrapStackName2 = fixture.fullStackName('bootstrap-stack-2'); // deploy two toolkit stacks into the same environment (see #1416) // one with tags - await cdk(['bootstrap', '-v', '--toolkit-stack-name', bootstrapStackName1, '--tags', 'Foo=Bar']); - await cdk(['bootstrap', '-v', '--toolkit-stack-name', bootstrapStackName2]); + await fixture.cdk(['bootstrap', '-v', 
'--toolkit-stack-name', bootstrapStackName1, '--tags', 'Foo=Bar']); + await fixture.cdk(['bootstrap', '-v', '--toolkit-stack-name', bootstrapStackName2]); - const response = await cloudFormation('describeStacks', { StackName: bootstrapStackName1 }); + const response = await fixture.aws.cloudFormation('describeStacks', { StackName: bootstrapStackName1 }); expect(response.Stacks?.[0].Tags).toEqual([ { Key: 'Foo', Value: 'Bar' }, ]); -}); +})); -integTest('can dump the template, modify and use it to deploy a custom bootstrap stack', async () => { - let template = await cdk(['bootstrap', '--show-template'], { +integTest('can dump the template, modify and use it to deploy a custom bootstrap stack', withDefaultFixture(async (fixture) => { + let template = await fixture.cdk(['bootstrap', '--show-template'], { captureStderr: false, modEnv: { CDK_NEW_BOOTSTRAP: '1', @@ -192,46 +177,41 @@ integTest('can dump the template, modify and use it to deploy a custom bootstrap ' Value: Template got twiddled', ].join('\n'); - const filename = path.join(INTEG_TEST_DIR, `${QUALIFIER}-template.yaml`); + const filename = path.join(fixture.integTestDir, `${fixture.qualifier}-template.yaml`); fs.writeFileSync(filename, template, { encoding: 'utf-8' }); - await cdk(['bootstrap', - '--toolkit-stack-name', fullStackName('bootstrap-stack'), - '--qualifier', QUALIFIER, + await fixture.cdk(['bootstrap', + '--toolkit-stack-name', fixture.fullStackName('bootstrap-stack'), + '--qualifier', fixture.qualifier, '--template', filename, '--cloudformation-execution-policies', 'arn:aws:iam::aws:policy/AdministratorAccess'], { modEnv: { CDK_NEW_BOOTSTRAP: '1', }, }); -}); +})); -integTest('switch on termination protection, switch is left alone on re-bootstrap', async () => { - const bootstrapStackName = fullStackName('bootstrap-stack'); +integTest('switch on termination protection, switch is left alone on re-bootstrap', withDefaultFixture(async (fixture) => { + const bootstrapStackName = 
fixture.fullStackName('bootstrap-stack'); - await cdk(['bootstrap', '-v', '--toolkit-stack-name', bootstrapStackName, + await fixture.cdk(['bootstrap', '-v', '--toolkit-stack-name', bootstrapStackName, '--termination-protection', 'true', - '--qualifier', QUALIFIER], { modEnv: { CDK_NEW_BOOTSTRAP: '1' } }); - await cdk(['bootstrap', '-v', '--toolkit-stack-name', bootstrapStackName, '--force'], { modEnv: { CDK_NEW_BOOTSTRAP: '1' } }); + '--qualifier', fixture.qualifier], { modEnv: { CDK_NEW_BOOTSTRAP: '1' } }); + await fixture.cdk(['bootstrap', '-v', '--toolkit-stack-name', bootstrapStackName, '--force'], { modEnv: { CDK_NEW_BOOTSTRAP: '1' } }); - const response = await cloudFormation('describeStacks', { StackName: bootstrapStackName }); + const response = await fixture.aws.cloudFormation('describeStacks', { StackName: bootstrapStackName }); expect(response.Stacks?.[0].EnableTerminationProtection).toEqual(true); -}); +})); -integTest('add tags, left alone on re-bootstrap', async () => { - const bootstrapStackName = fullStackName('bootstrap-stack'); +integTest('add tags, left alone on re-bootstrap', withDefaultFixture(async (fixture) => { + const bootstrapStackName = fixture.fullStackName('bootstrap-stack'); - await cdk(['bootstrap', '-v', '--toolkit-stack-name', bootstrapStackName, + await fixture.cdk(['bootstrap', '-v', '--toolkit-stack-name', bootstrapStackName, '--tags', 'Foo=Bar', - '--qualifier', QUALIFIER], { modEnv: { CDK_NEW_BOOTSTRAP: '1' } }); - await cdk(['bootstrap', '-v', '--toolkit-stack-name', bootstrapStackName, '--force'], { modEnv: { CDK_NEW_BOOTSTRAP: '1' } }); + '--qualifier', fixture.qualifier], { modEnv: { CDK_NEW_BOOTSTRAP: '1' } }); + await fixture.cdk(['bootstrap', '-v', '--toolkit-stack-name', bootstrapStackName, '--force'], { modEnv: { CDK_NEW_BOOTSTRAP: '1' } }); - const response = await cloudFormation('describeStacks', { StackName: bootstrapStackName }); + const response = await fixture.aws.cloudFormation('describeStacks', { StackName: 
bootstrapStackName }); expect(response.Stacks?.[0].Tags).toEqual([ { Key: 'Foo', Value: 'Bar' }, ]); -}); - -function randomString() { - // Crazy - return Math.random().toString(36).replace(/[^a-z0-9]+/g, ''); -} +})); \ No newline at end of file diff --git a/packages/aws-cdk/test/integ/cli/cdk-helpers.ts b/packages/aws-cdk/test/integ/cli/cdk-helpers.ts index b337f3916b9c2..8604135d895ef 100644 --- a/packages/aws-cdk/test/integ/cli/cdk-helpers.ts +++ b/packages/aws-cdk/test/integ/cli/cdk-helpers.ts @@ -1,19 +1,98 @@ import * as child_process from 'child_process'; +import * as fs from 'fs'; import * as os from 'os'; import * as path from 'path'; -import { cloudFormation, deleteBucket, deleteImageRepository, deleteStacks, emptyBucket, outputFromStack, testEnv } from './aws-helpers'; -import { writeOutput } from './corking'; +import { outputFromStack, AwsClients } from './aws-helpers'; +import { ResourcePool } from './resource-pool'; +import { TestContext } from './test-helpers'; -export const INTEG_TEST_DIR = path.join(os.tmpdir(), 'cdk-integ-test2'); +const REGIONS = process.env.AWS_REGIONS + ? process.env.AWS_REGIONS.split(',') + : [process.env.AWS_REGION ?? process.env.AWS_DEFAULT_REGION ?? 'us-east-1']; -// create a unique stack name prefix for this test test run. this is passed -// through an environment variable to app.js so that all stacks use this prefix. 
-const timestamp = new Date().toISOString().replace(/[^0-9]/g, ''); -export const STACK_NAME_PREFIX = `cdktest-${timestamp}`; +process.stdout.write(`Using regions: ${REGIONS}\n`); -process.stdout.write('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n'); -process.stdout.write(` All stacks created by this test run will have the prefix: ${STACK_NAME_PREFIX}\n`); -process.stdout.write('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n'); +const REGION_POOL = new ResourcePool(REGIONS); + + +export type AwsContext = { readonly aws: AwsClients }; + +/** + * Higher order function to execute a block with an AWS client setup + * + * Allocate the next region from the REGION pool and dispose it afterwards. + */ +export function withAws(block: (context: A & AwsContext) => Promise) { + return (context: A) => REGION_POOL.using(async (region) => { + const aws = await AwsClients.forRegion(region, context.output); + await sanityCheck(aws); + + return block({ ...context, aws }); + }); +} + +/** + * Higher order function to execute a block with a CDK app fixture + * + * Requires an AWS client to be passed in. + * + * For backwards compatibility with existing tests (so we don't have to change + * too much) the inner block is expecte to take a `TestFixture` object. 
+ */ +export function withCdkApp(block: (context: TestFixture) => Promise) { + return async (context: A) => { + const randy = randomString(); + const stackNamePrefix = `cdktest-${randy}`; + const integTestDir = path.join(os.tmpdir(), `cdk-integ-${randy}`); + + context.output.write(` Stack prefix: ${stackNamePrefix}\n`); + context.output.write(` Test directory: ${integTestDir}\n`); + context.output.write(` Region: ${context.aws.region}\n`); + + await cloneDirectory(path.join(__dirname, 'app'), integTestDir, context.output); + const fixture = new TestFixture( + integTestDir, + stackNamePrefix, + context.output, + context.aws); + + let success = true; + try { + await fixture.shell(['npm', 'install', + '@aws-cdk/core', + '@aws-cdk/aws-sns', + '@aws-cdk/aws-iam', + '@aws-cdk/aws-lambda', + '@aws-cdk/aws-ssm', + '@aws-cdk/aws-ecr-assets', + '@aws-cdk/aws-cloudformation', + '@aws-cdk/aws-ec2']); + + await ensureBootstrapped(fixture); + + await block(fixture); + } catch (e) { + success = false; + throw e; + } finally { + await fixture.dispose(success); + } + }; +} + +/** + * Default test fixture for most (all?) integ tests + * + * It's a composition of withAws/withCdkApp, expecting the test block to take a `TestFixture` + * object. + * + * We could have put `withAws(withCdkApp(fixture => { /... actual test here.../ }))` in every + * test declaration but centralizing it is going to make it convenient to modify in the future. + */ +export function withDefaultFixture(block: (context: TestFixture) => Promise) { + return withAws(withCdkApp(block)); + // ^~~~~~ this is disappointing TypeScript! Feels like you should have been able to derive this. 
+} export interface ShellOptions extends child_process.SpawnOptions { /** @@ -34,6 +113,11 @@ export interface ShellOptions extends child_process.SpawnOptions { * @default true */ captureStderr?: boolean; + + /** + * Pass output here + */ + output?: NodeJS.WritableStream; } export interface CdkCliOptions extends ShellOptions { @@ -41,132 +125,184 @@ export interface CdkCliOptions extends ShellOptions { neverRequireApproval?: boolean; } -export function log(x: string) { - process.stderr.write(x + '\n'); +/** + * Prepare a target dir byreplicating a source directory + */ +export async function cloneDirectory(source: string, target: string, output?: NodeJS.WritableStream) { + await shell(['rm', '-rf', target], { output }); + await shell(['mkdir', '-p', target], { output }); + await shell(['cp', '-R', source + '/*', target], { output }); } -export async function cdkDeploy(stackNames: string | string[], options: CdkCliOptions = {}) { - stackNames = typeof stackNames === 'string' ? [stackNames] : stackNames; +export class TestFixture { + public readonly qualifier = randomString().substr(0, 10); + private readonly bucketsToDelete = new Array(); - const neverRequireApproval = options.neverRequireApproval ?? true; + constructor( + public readonly integTestDir: string, + public readonly stackNamePrefix: string, + public readonly output: NodeJS.WritableStream, + public readonly aws: AwsClients) { + } - return await cdk(['deploy', - ...(neverRequireApproval ? ['--require-approval=never'] : []), // Default to no approval in an unattended test - ...(options.options ?? []), - ...fullStackName(stackNames)], options); -} + public log(s: string) { + this.output.write(`${s}\n`); + } -export async function cdkDestroy(stackNames: string | string[], options: CdkCliOptions = {}) { - stackNames = typeof stackNames === 'string' ? 
[stackNames] : stackNames; + public async shell(command: string[], options: Omit = {}): Promise { + return await shell(command, { + output: this.output, + cwd: this.integTestDir, + ...options, + }); + } - return await cdk(['destroy', - '-f', // We never want a prompt in an unattended test - ...(options.options ?? []), - ...fullStackName(stackNames)], options); -} + public async cdkDeploy(stackNames: string | string[], options: CdkCliOptions = {}) { + stackNames = typeof stackNames === 'string' ? [stackNames] : stackNames; -export async function cdk(args: string[], options: CdkCliOptions = {}) { - return await shell(['cdk', ...args], { - cwd: INTEG_TEST_DIR, - ...options, - modEnv: { - AWS_REGION: (await testEnv()).region, - AWS_DEFAULT_REGION: (await testEnv()).region, - STACK_NAME_PREFIX, - ...options.modEnv, - }, - }); -} + const neverRequireApproval = options.neverRequireApproval ?? true; -export function fullStackName(stackName: string): string; -export function fullStackName(stackNames: string[]): string[]; -export function fullStackName(stackNames: string | string[]): string | string[] { - if (typeof stackNames === 'string') { - return `${STACK_NAME_PREFIX}-${stackNames}`; - } else { - return stackNames.map(s => `${STACK_NAME_PREFIX}-${s}`); + return this.cdk(['deploy', + ...(neverRequireApproval ? ['--require-approval=never'] : []), // Default to no approval in an unattended test + ...(options.options ?? []), + ...this.fullStackName(stackNames)], options); } -} -/** - * Prepare a target dir byreplicating a source directory - */ -export async function cloneDirectory(source: string, target: string) { - await shell(['rm', '-rf', target]); - await shell(['mkdir', '-p', target]); - await shell(['cp', '-R', source + '/*', target]); -} + public async cdkDestroy(stackNames: string | string[], options: CdkCliOptions = {}) { + stackNames = typeof stackNames === 'string' ? 
[stackNames] : stackNames; -/** - * Prepare the app fixture - * - * If this is done in the main test script, it will be skipped - * in the subprocess scripts since the app fixture can just be reused. - */ -export async function prepareAppFixture() { - await cloneDirectory(path.join(__dirname, 'app'), INTEG_TEST_DIR); - - await shell(['npm', 'install', - '@aws-cdk/core', - '@aws-cdk/aws-sns', - '@aws-cdk/aws-iam', - '@aws-cdk/aws-lambda', - '@aws-cdk/aws-ssm', - '@aws-cdk/aws-ecr-assets', - '@aws-cdk/aws-cloudformation', - '@aws-cdk/aws-ec2'], { - cwd: INTEG_TEST_DIR, - }); + return this.cdk(['destroy', + '-f', // We never want a prompt in an unattended test + ...(options.options ?? []), + ...this.fullStackName(stackNames)], options); + } + + public async cdk(args: string[], options: CdkCliOptions = {}) { + return await this.shell(['cdk', ...args], { + ...options, + modEnv: { + AWS_REGION: this.aws.region, + AWS_DEFAULT_REGION: this.aws.region, + STACK_NAME_PREFIX: this.stackNamePrefix, + ...options.modEnv, + }, + }); + } + + public fullStackName(stackName: string): string; + public fullStackName(stackNames: string[]): string[]; + public fullStackName(stackNames: string | string[]): string | string[] { + if (typeof stackNames === 'string') { + return `${this.stackNamePrefix}-${stackNames}`; + } else { + return stackNames.map(s => `${this.stackNamePrefix}-${s}`); + } + } + + /** + * Append this to the list of buckets to potentially delete + * + * At the end of a test, we clean up buckets that may not have gotten destroyed + * (for whatever reason). 
+ */ + public rememberToDeleteBucket(bucketName: string) { + this.bucketsToDelete.push(bucketName); + } + + /** + * Cleanup leftover stacks and buckets + */ + public async dispose(success: boolean) { + const stacksToDelete = await this.deleteableStacks(this.stackNamePrefix); + + // Bootstrap stacks have buckets that need to be cleaned + const bucketNames = stacksToDelete.map(stack => outputFromStack('BucketName', stack)).filter(defined); + await Promise.all(bucketNames.map(b => this.aws.emptyBucket(b))); + + // Bootstrap stacks have ECR repositories with images which should be deleted + const imageRepositoryNames = stacksToDelete.map(stack => outputFromStack('ImageRepositoryName', stack)).filter(defined); + await Promise.all(imageRepositoryNames.map(r => this.aws.deleteImageRepository(r))); + + await this.aws.deleteStacks(...stacksToDelete.map(s => s.StackName)); + + // We might have leaked some buckets by upgrading the bootstrap stack. Be + // sure to clean everything. + for (const bucket of this.bucketsToDelete) { + await this.aws.deleteBucket(bucket); + } + + // If the tests completed successfully, happily delete the fixture + // (otherwise leave it for humans to inspect) + if (success) { + rimraf(this.integTestDir); + } + } + + /** + * Return the stacks starting with our testing prefix that should be deleted + */ + private async deleteableStacks(prefix: string): Promise { + const statusFilter = [ + 'CREATE_IN_PROGRESS', 'CREATE_FAILED', 'CREATE_COMPLETE', + 'ROLLBACK_IN_PROGRESS', 'ROLLBACK_FAILED', 'ROLLBACK_COMPLETE', + 'DELETE_FAILED', + 'UPDATE_IN_PROGRESS', 'UPDATE_COMPLETE_CLEANUP_IN_PROGRESS', + 'UPDATE_COMPLETE', 'UPDATE_ROLLBACK_IN_PROGRESS', + 'UPDATE_ROLLBACK_FAILED', + 'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS', + 'UPDATE_ROLLBACK_COMPLETE', 'REVIEW_IN_PROGRESS', + 'IMPORT_IN_PROGRESS', 'IMPORT_COMPLETE', + 'IMPORT_ROLLBACK_IN_PROGRESS', 'IMPORT_ROLLBACK_FAILED', + 'IMPORT_ROLLBACK_COMPLETE', + ]; + + const response = await 
this.aws.cloudFormation('describeStacks', {}); + + return (response.Stacks ?? []) + .filter(s => s.StackName.startsWith(prefix)) + .filter(s => statusFilter.includes(s.StackStatus)) + .filter(s => s.RootId === undefined); // Only delete parent stacks. Nested stacks are deleted in the process + } } /** - * Return the stacks starting with our testing prefix that should be deleted + * Perform a one-time quick sanity check that the AWS clients has properly configured credentials + * + * If we don't do this, calls are going to fail and they'll be retried and everything will take + * forever before the user notices a simple misconfiguration. + * + * We can't check for the presence of environment variables since credentials could come from + * anywhere, so do simple account retrieval. + * + * Only do it once per process. */ -export async function deleteableStacks(prefix: string): Promise { - const statusFilter = [ - 'CREATE_IN_PROGRESS', 'CREATE_FAILED', 'CREATE_COMPLETE', - 'ROLLBACK_IN_PROGRESS', 'ROLLBACK_FAILED', 'ROLLBACK_COMPLETE', - 'DELETE_FAILED', - 'UPDATE_IN_PROGRESS', 'UPDATE_COMPLETE_CLEANUP_IN_PROGRESS', - 'UPDATE_COMPLETE', 'UPDATE_ROLLBACK_IN_PROGRESS', - 'UPDATE_ROLLBACK_FAILED', - 'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS', - 'UPDATE_ROLLBACK_COMPLETE', 'REVIEW_IN_PROGRESS', - 'IMPORT_IN_PROGRESS', 'IMPORT_COMPLETE', - 'IMPORT_ROLLBACK_IN_PROGRESS', 'IMPORT_ROLLBACK_FAILED', - 'IMPORT_ROLLBACK_COMPLETE', - ]; - - const response = await cloudFormation('describeStacks', {}); - - return (response.Stacks ?? []) - .filter(s => s.StackName.startsWith(prefix)) - .filter(s => statusFilter.includes(s.StackStatus)) - .filter(s => s.RootId === undefined); // Only delete parent stacks. 
Nested stacks are deleted in the process +async function sanityCheck(aws: AwsClients) { + if (sanityChecked === undefined) { + try { + await aws.account(); + sanityChecked = true; + } catch (e) { + sanityChecked = false; + throw new Error(`AWS credentials probably not configured, got error: ${e.message}`); + } + } + if (!sanityChecked) { + throw new Error('AWS credentials probably not configured, see previous error'); + } } +let sanityChecked: boolean | undefined; /** - * Cleanup leftover stacks and buckets + * Make sure that the given environment is bootstrapped + * + * Since we go striping across regions, it's going to suck doing this + * by hand so let's just mass-automate it. */ -export async function cleanup(): Promise { - const stacksToDelete = await deleteableStacks(STACK_NAME_PREFIX); - - // Bootstrap stacks have buckets that need to be cleaned - const bucketNames = stacksToDelete.map(stack => outputFromStack('BucketName', stack)).filter(defined); - await Promise.all(bucketNames.map(emptyBucket)); - - // Bootstrap stacks have ECR repositories with images which should be deleted - const imageRepositoryNames = stacksToDelete.map(stack => outputFromStack('ImageRepositoryName', stack)).filter(defined); - await Promise.all(imageRepositoryNames.map(deleteImageRepository)); - - await deleteStacks(...stacksToDelete.map(s => s.StackName)); - - // We might have leaked some buckets by upgrading the bootstrap stack. Be - // sure to clean everything. 
- for (const bucket of bucketsToDelete) { - await deleteBucket(bucket); +async function ensureBootstrapped(fixture: TestFixture) { + // Old-style bootstrap stack with default name + if (await fixture.aws.stackStatus('CDKToolkit') === undefined) { + await fixture.cdk(['bootstrap', `aws://${await fixture.aws.account()}/${fixture.aws.region}`]); } - bucketsToDelete = []; } /** @@ -179,7 +315,7 @@ export async function shell(command: string[], options: ShellOptions = {}): Prom throw new Error('Use either env or modEnv but not both'); } - log(`💻 ${command.join(' ')}`); + options.output?.write(`💻 ${command.join(' ')}\n`); const env = options.env ?? (options.modEnv ? { ...process.env, ...options.modEnv } : undefined); @@ -196,12 +332,12 @@ export async function shell(command: string[], options: ShellOptions = {}): Prom const stderr = new Array(); child.stdout!.on('data', chunk => { - writeOutput('stdout', chunk); + options.output?.write(chunk); stdout.push(chunk); }); child.stderr!.on('data', chunk => { - writeOutput('stderr', chunk); + options.output?.write(chunk); if (options.captureStderr ?? true) { stderr.push(chunk); } @@ -213,24 +349,38 @@ export async function shell(command: string[], options: ShellOptions = {}): Prom if (code === 0 || options.allowErrExit) { resolve((Buffer.concat(stdout).toString('utf-8') + Buffer.concat(stderr).toString('utf-8')).trim()); } else { - reject(new Error(`'${command.join(' ')}' exited with error code ${code}: ${Buffer.concat(stderr).toString('utf-8').trim()}`)); + reject(new Error(`'${command.join(' ')}' exited with error code ${code}`)); } }); }); } -let bucketsToDelete = new Array(); +function defined(x: A): x is NonNullable { + return x !== undefined; +} /** - * Append this to the list of buckets to potentially delete - * - * At the end of a test, we clean up buckets that may not have gotten destroyed - * (for whatever reason). 
+ * rm -rf reimplementation, don't want to depend on an NPM package for this */ -export function rememberToDeleteBucket(bucketName: string) { - bucketsToDelete.push(bucketName); +export function rimraf(fsPath: string) { + try { + const isDir = fs.lstatSync(fsPath).isDirectory(); + + if (isDir) { + for (const file of fs.readdirSync(fsPath)) { + rimraf(path.join(fsPath, file)); + } + fs.rmdirSync(fsPath); + } else { + fs.unlinkSync(fsPath); + } + } catch (e) { + // We will survive ENOENT + if (e.code !== 'ENOENT') { throw e; } + } } -function defined(x: A): x is NonNullable { - return x !== undefined; +export function randomString() { + // Crazy + return Math.random().toString(36).replace(/[^a-z0-9]+/g, ''); } \ No newline at end of file diff --git a/packages/aws-cdk/test/integ/cli/cli.integtest.ts b/packages/aws-cdk/test/integ/cli/cli.integtest.ts index e030f27799dc4..d203c0f66e605 100644 --- a/packages/aws-cdk/test/integ/cli/cli.integtest.ts +++ b/packages/aws-cdk/test/integ/cli/cli.integtest.ts @@ -1,135 +1,120 @@ import { promises as fs } from 'fs'; import * as os from 'os'; import * as path from 'path'; -import { cloudFormation, iam, lambda, retry, sleep, sns, sts, testEnv } from './aws-helpers'; -import { - cdk, cdkDeploy, cdkDestroy, cleanup, cloneDirectory, fullStackName, - INTEG_TEST_DIR, log, prepareAppFixture, shell, STACK_NAME_PREFIX, -} from './cdk-helpers'; +import { retry, sleep } from './aws-helpers'; +import { cloneDirectory, shell, withDefaultFixture } from './cdk-helpers'; import { integTest } from './test-helpers'; jest.setTimeout(600 * 1000); -beforeAll(async () => { - await prepareAppFixture(); -}); +integTest('VPC Lookup', withDefaultFixture(async (fixture) => { + fixture.log('Making sure we are clean before starting.'); + await fixture.cdkDestroy('define-vpc', { modEnv: { ENABLE_VPC_TESTING: 'DEFINE' } }); -beforeEach(async () => { - await cleanup(); -}); + fixture.log('Setting up: creating a VPC with known tags'); + await 
fixture.cdkDeploy('define-vpc', { modEnv: { ENABLE_VPC_TESTING: 'DEFINE' } }); + fixture.log('Setup complete!'); -afterEach(async () => { - await cleanup(); -}); + fixture.log('Verifying we can now import that VPC'); + await fixture.cdkDeploy('import-vpc', { modEnv: { ENABLE_VPC_TESTING: 'IMPORT' } }); +})); -integTest('VPC Lookup', async () => { - log('Making sure we are clean before starting.'); - await cdkDestroy('define-vpc', { modEnv: { ENABLE_VPC_TESTING: 'DEFINE' } }); - - log('Setting up: creating a VPC with known tags'); - await cdkDeploy('define-vpc', { modEnv: { ENABLE_VPC_TESTING: 'DEFINE' } }); - log('Setup complete!'); - - log('Verifying we can now import that VPC'); - await cdkDeploy('import-vpc', { modEnv: { ENABLE_VPC_TESTING: 'IMPORT' } }); -}); - -integTest('Two ways of shoing the version', async () => { - const version1 = await cdk(['version']); - const version2 = await cdk(['--version']); +integTest('Two ways of shoing the version', withDefaultFixture(async (fixture) => { + const version1 = await fixture.cdk(['version']); + const version2 = await fixture.cdk(['--version']); expect(version1).toEqual(version2); -}); +})); -integTest('Termination protection', async () => { +integTest('Termination protection', withDefaultFixture(async (fixture) => { const stackName = 'termination-protection'; - await cdkDeploy(stackName); + await fixture.cdkDeploy(stackName); // Try a destroy that should fail - await expect(cdkDestroy(stackName)).rejects.toThrow('exited with error'); + await expect(fixture.cdkDestroy(stackName)).rejects.toThrow('exited with error'); // Can update termination protection even though the change set doesn't contain changes - await cdkDeploy(stackName, { modEnv: { TERMINATION_PROTECTION: 'FALSE' } }); - await cdkDestroy(stackName); -}); + await fixture.cdkDeploy(stackName, { modEnv: { TERMINATION_PROTECTION: 'FALSE' } }); + await fixture.cdkDestroy(stackName); +})); -integTest('cdk synth', async () => { - await expect(cdk(['synth', 
fullStackName('test-1')])).resolves.toEqual( +integTest('cdk synth', withDefaultFixture(async (fixture) => { + await expect(fixture.cdk(['synth', fixture.fullStackName('test-1')])).resolves.toEqual( `Resources: topic69831491: Type: AWS::SNS::Topic Metadata: - aws:cdk:path: ${STACK_NAME_PREFIX}-test-1/topic/Resource`); + aws:cdk:path: ${fixture.stackNamePrefix}-test-1/topic/Resource`); - await expect(cdk(['synth', fullStackName('test-2')])).resolves.toEqual( + await expect(fixture.cdk(['synth', fixture.fullStackName('test-2')])).resolves.toEqual( `Resources: topic152D84A37: Type: AWS::SNS::Topic Metadata: - aws:cdk:path: ${STACK_NAME_PREFIX}-test-2/topic1/Resource + aws:cdk:path: ${fixture.stackNamePrefix}-test-2/topic1/Resource topic2A4FB547F: Type: AWS::SNS::Topic Metadata: - aws:cdk:path: ${STACK_NAME_PREFIX}-test-2/topic2/Resource`); -}); + aws:cdk:path: ${fixture.stackNamePrefix}-test-2/topic2/Resource`); +})); -integTest('ssm parameter provider error', async () => { - await expect(cdk(['synth', - fullStackName('missing-ssm-parameter'), +integTest('ssm parameter provider error', withDefaultFixture(async (fixture) => { + await expect(fixture.cdk(['synth', + fixture.fullStackName('missing-ssm-parameter'), '-c', 'test:ssm-parameter-name=/does/not/exist'], { allowErrExit: true, })).resolves.toContain('SSM parameter not available in account'); -}); +})); -integTest('automatic ordering', async () => { +integTest('automatic ordering', withDefaultFixture(async (fixture) => { // Deploy the consuming stack which will include the producing stack - await cdkDeploy('order-consuming'); + await fixture.cdkDeploy('order-consuming'); // Destroy the providing stack which will include the consuming stack - await cdkDestroy('order-providing'); -}); + await fixture.cdkDestroy('order-providing'); +})); -integTest('context setting', async () => { - await fs.writeFile(path.join(INTEG_TEST_DIR, 'cdk.context.json'), JSON.stringify({ +integTest('context setting', withDefaultFixture(async 
(fixture) => { + await fs.writeFile(path.join(fixture.integTestDir, 'cdk.context.json'), JSON.stringify({ contextkey: 'this is the context value', })); try { - await expect(cdk(['context'])).resolves.toContain('this is the context value'); + await expect(fixture.cdk(['context'])).resolves.toContain('this is the context value'); // Test that deleting the contextkey works - await cdk(['context', '--reset', 'contextkey']); - await expect(cdk(['context'])).resolves.not.toContain('this is the context value'); + await fixture.cdk(['context', '--reset', 'contextkey']); + await expect(fixture.cdk(['context'])).resolves.not.toContain('this is the context value'); // Test that forced delete of the context key does not throw - await cdk(['context', '-f', '--reset', 'contextkey']); + await fixture.cdk(['context', '-f', '--reset', 'contextkey']); } finally { - await fs.unlink(path.join(INTEG_TEST_DIR, 'cdk.context.json')); + await fs.unlink(path.join(fixture.integTestDir, 'cdk.context.json')); } -}); +})); -integTest('deploy', async () => { - const stackArn = await cdkDeploy('test-2', { captureStderr: false }); +integTest('deploy', withDefaultFixture(async (fixture) => { + const stackArn = await fixture.cdkDeploy('test-2', { captureStderr: false }); // verify the number of resources in the stack - const response = await cloudFormation('describeStackResources', { + const response = await fixture.aws.cloudFormation('describeStackResources', { StackName: stackArn, }); expect(response.StackResources?.length).toEqual(2); -}); +})); -integTest('deploy all', async () => { - const arns = await cdkDeploy('test-*', { captureStderr: false }); +integTest('deploy all', withDefaultFixture(async (fixture) => { + const arns = await fixture.cdkDeploy('test-*', { captureStderr: false }); // verify that we only deployed a single stack (there's a single ARN in the output) expect(arns.split('\n').length).toEqual(2); -}); +})); -integTest('nested stack with parameters', async () => { -// 
STACK_NAME_PREFIX is used in MyTopicParam to allow multiple instances -// of this test to run in parallel, othewise they will attempt to create the same SNS topic. - const stackArn = await cdkDeploy('with-nested-stack-using-parameters', { - options: ['--parameters', 'MyTopicParam=${STACK_NAME_PREFIX}ThereIsNoSpoon'], +integTest('nested stack with parameters', withDefaultFixture(async (fixture) => { + // STACK_NAME_PREFIX is used in MyTopicParam to allow multiple instances + // of this test to run in parallel, othewise they will attempt to create the same SNS topic. + const stackArn = await fixture.cdkDeploy('with-nested-stack-using-parameters', { + options: ['--parameters', `MyTopicParam=${fixture.stackNamePrefix}ThereIsNoSpoon`], captureStderr: false, }); @@ -137,107 +122,107 @@ integTest('nested stack with parameters', async () => { expect(stackArn.split('\n').length).toEqual(1); // verify the number of resources in the stack - const response = await cloudFormation('describeStackResources', { + const response = await fixture.aws.cloudFormation('describeStackResources', { StackName: stackArn, }); expect(response.StackResources?.length).toEqual(1); -}); +})); -integTest('deploy without execute', async () => { - const stackArn = await cdkDeploy('test-2', { +integTest('deploy without execute', withDefaultFixture(async (fixture) => { + const stackArn = await fixture.cdkDeploy('test-2', { options: ['--no-execute'], captureStderr: false, }); // verify that we only deployed a single stack (there's a single ARN in the output) expect(stackArn.split('\n').length).toEqual(1); - const response = await cloudFormation('describeStacks', { + const response = await fixture.aws.cloudFormation('describeStacks', { StackName: stackArn, }); expect(response.Stacks?.[0].StackStatus).toEqual('REVIEW_IN_PROGRESS'); -}); +})); -integTest('security related changes without a CLI are expected to fail', async () => { +integTest('security related changes without a CLI are expected to fail', 
withDefaultFixture(async (fixture) => { // redirect /dev/null to stdin, which means there will not be tty attached // since this stack includes security-related changes, the deployment should // immediately fail because we can't confirm the changes const stackName = 'iam-test'; - await expect(cdkDeploy(stackName, { + await expect(fixture.cdkDeploy(stackName, { options: ['<', '/dev/null'], // H4x, this only works because I happen to know we pass shell: true. neverRequireApproval: false, })).rejects.toThrow('exited with error'); // Ensure stack was not deployed - await expect(cloudFormation('describeStacks', { - StackName: fullStackName(stackName), + await expect(fixture.aws.cloudFormation('describeStacks', { + StackName: fixture.fullStackName(stackName), })).rejects.toThrow('does not exist'); -}); +})); -integTest('deploy wildcard with outputs', async () => { - const outputsFile = path.join(INTEG_TEST_DIR, 'outputs', 'outputs.json'); +integTest('deploy wildcard with outputs', withDefaultFixture(async (fixture) => { + const outputsFile = path.join(fixture.integTestDir, 'outputs', 'outputs.json'); await fs.mkdir(path.dirname(outputsFile), { recursive: true }); - await cdkDeploy(['outputs-test-*'], { + await fixture.cdkDeploy(['outputs-test-*'], { options: ['--outputs-file', outputsFile], }); const outputs = JSON.parse((await fs.readFile(outputsFile, { encoding: 'utf-8' })).toString()); expect(outputs).toEqual({ - [`${STACK_NAME_PREFIX}-outputs-test-1`]: { - TopicName: `${STACK_NAME_PREFIX}-outputs-test-1MyTopic`, + [`${fixture.stackNamePrefix}-outputs-test-1`]: { + TopicName: `${fixture.stackNamePrefix}-outputs-test-1MyTopic`, }, - [`${STACK_NAME_PREFIX}-outputs-test-2`]: { - TopicName: `${STACK_NAME_PREFIX}-outputs-test-2MyOtherTopic`, + [`${fixture.stackNamePrefix}-outputs-test-2`]: { + TopicName: `${fixture.stackNamePrefix}-outputs-test-2MyOtherTopic`, }, }); -}); +})); -integTest('deploy with parameters', async () => { - const stackArn = await 
cdkDeploy('param-test-1', { +integTest('deploy with parameters', withDefaultFixture(async (fixture) => { + const stackArn = await fixture.cdkDeploy('param-test-1', { options: [ - '--parameters', `TopicNameParam=${STACK_NAME_PREFIX}bazinga`, + '--parameters', `TopicNameParam=${fixture.stackNamePrefix}bazinga`, ], captureStderr: false, }); - const response = await cloudFormation('describeStacks', { + const response = await fixture.aws.cloudFormation('describeStacks', { StackName: stackArn, }); expect(response.Stacks?.[0].Parameters).toEqual([ { ParameterKey: 'TopicNameParam', - ParameterValue: `${STACK_NAME_PREFIX}bazinga`, + ParameterValue: `${fixture.stackNamePrefix}bazinga`, }, ]); -}); +})); -integTest('update to stack in ROLLBACK_COMPLETE state will delete stack and create a new one', async () => { +integTest('update to stack in ROLLBACK_COMPLETE state will delete stack and create a new one', withDefaultFixture(async (fixture) => { // GIVEN - await expect(cdkDeploy('param-test-1', { + await expect(fixture.cdkDeploy('param-test-1', { options: [ - '--parameters', `TopicNameParam=${STACK_NAME_PREFIX}@aww`, + '--parameters', `TopicNameParam=${fixture.stackNamePrefix}@aww`, ], captureStderr: false, })).rejects.toThrow('exited with error'); - const response = await cloudFormation('describeStacks', { - StackName: fullStackName('param-test-1'), + const response = await fixture.aws.cloudFormation('describeStacks', { + StackName: fixture.fullStackName('param-test-1'), }); const stackArn = response.Stacks?.[0].StackId; expect(response.Stacks?.[0].StackStatus).toEqual('ROLLBACK_COMPLETE'); // WHEN - const newStackArn = await cdkDeploy('param-test-1', { + const newStackArn = await fixture.cdkDeploy('param-test-1', { options: [ - '--parameters', `TopicNameParam=${STACK_NAME_PREFIX}allgood`, + '--parameters', `TopicNameParam=${fixture.stackNamePrefix}allgood`, ], captureStderr: false, }); - const newStackResponse = await cloudFormation('describeStacks', { + const 
newStackResponse = await fixture.aws.cloudFormation('describeStacks', { StackName: newStackArn, }); @@ -247,49 +232,49 @@ integTest('update to stack in ROLLBACK_COMPLETE state will delete stack and crea expect(newStackResponse.Stacks?.[0].Parameters).toEqual([ { ParameterKey: 'TopicNameParam', - ParameterValue: `${STACK_NAME_PREFIX}allgood`, + ParameterValue: `${fixture.stackNamePrefix}allgood`, }, ]); -}); +})); -integTest('stack in UPDATE_ROLLBACK_COMPLETE state can be updated', async () => { +integTest('stack in UPDATE_ROLLBACK_COMPLETE state can be updated', withDefaultFixture(async (fixture) => { // GIVEN - const stackArn = await cdkDeploy('param-test-1', { + const stackArn = await fixture.cdkDeploy('param-test-1', { options: [ - '--parameters', `TopicNameParam=${STACK_NAME_PREFIX}nice`, + '--parameters', `TopicNameParam=${fixture.stackNamePrefix}nice`, ], captureStderr: false, }); - let response = await cloudFormation('describeStacks', { + let response = await fixture.aws.cloudFormation('describeStacks', { StackName: stackArn, }); expect(response.Stacks?.[0].StackStatus).toEqual('CREATE_COMPLETE'); // bad parameter name with @ will put stack into UPDATE_ROLLBACK_COMPLETE - await expect(cdkDeploy('param-test-1', { + await expect(fixture.cdkDeploy('param-test-1', { options: [ - '--parameters', `TopicNameParam=${STACK_NAME_PREFIX}@aww`, + '--parameters', `TopicNameParam=${fixture.stackNamePrefix}@aww`, ], captureStderr: false, })).rejects.toThrow('exited with error');; - response = await cloudFormation('describeStacks', { + response = await fixture.aws.cloudFormation('describeStacks', { StackName: stackArn, }); expect(response.Stacks?.[0].StackStatus).toEqual('UPDATE_ROLLBACK_COMPLETE'); // WHEN - await cdkDeploy('param-test-1', { + await fixture.cdkDeploy('param-test-1', { options: [ - '--parameters', `TopicNameParam=${STACK_NAME_PREFIX}allgood`, + '--parameters', `TopicNameParam=${fixture.stackNamePrefix}allgood`, ], captureStderr: false, }); - response = 
await cloudFormation('describeStacks', { + response = await fixture.aws.cloudFormation('describeStacks', { StackName: stackArn, }); @@ -298,27 +283,27 @@ integTest('stack in UPDATE_ROLLBACK_COMPLETE state can be updated', async () => expect(response.Stacks?.[0].Parameters).toEqual([ { ParameterKey: 'TopicNameParam', - ParameterValue: `${STACK_NAME_PREFIX}allgood`, + ParameterValue: `${fixture.stackNamePrefix}allgood`, }, ]); -}); +})); -integTest('deploy with wildcard and parameters', async () => { - await cdkDeploy('param-test-*', { +integTest('deploy with wildcard and parameters', withDefaultFixture(async (fixture) => { + await fixture.cdkDeploy('param-test-*', { options: [ - '--parameters', `${STACK_NAME_PREFIX}-param-test-1:TopicNameParam=${STACK_NAME_PREFIX}bazinga`, - '--parameters', `${STACK_NAME_PREFIX}-param-test-2:OtherTopicNameParam=${STACK_NAME_PREFIX}ThatsMySpot`, - '--parameters', `${STACK_NAME_PREFIX}-param-test-3:DisplayNameParam=${STACK_NAME_PREFIX}HeyThere`, - '--parameters', `${STACK_NAME_PREFIX}-param-test-3:OtherDisplayNameParam=${STACK_NAME_PREFIX}AnotherOne`, + '--parameters', `${fixture.stackNamePrefix}-param-test-1:TopicNameParam=${fixture.stackNamePrefix}bazinga`, + '--parameters', `${fixture.stackNamePrefix}-param-test-2:OtherTopicNameParam=${fixture.stackNamePrefix}ThatsMySpot`, + '--parameters', `${fixture.stackNamePrefix}-param-test-3:DisplayNameParam=${fixture.stackNamePrefix}HeyThere`, + '--parameters', `${fixture.stackNamePrefix}-param-test-3:OtherDisplayNameParam=${fixture.stackNamePrefix}AnotherOne`, ], }); -}); +})); -integTest('deploy with parameters multi', async () => { - const paramVal1 = `${STACK_NAME_PREFIX}bazinga`; - const paramVal2 = `${STACK_NAME_PREFIX}=jagshemash`; +integTest('deploy with parameters multi', withDefaultFixture(async (fixture) => { + const paramVal1 = `${fixture.stackNamePrefix}bazinga`; + const paramVal2 = `${fixture.stackNamePrefix}=jagshemash`; - const stackArn = await cdkDeploy('param-test-3', { + 
const stackArn = await fixture.cdkDeploy('param-test-3', { options: [ '--parameters', `DisplayNameParam=${paramVal1}`, '--parameters', `OtherDisplayNameParam=${paramVal2}`, @@ -326,7 +311,7 @@ integTest('deploy with parameters multi', async () => { captureStderr: false, }); - const response = await cloudFormation('describeStacks', { + const response = await fixture.aws.cloudFormation('describeStacks', { StackName: stackArn, }); @@ -340,36 +325,36 @@ integTest('deploy with parameters multi', async () => { ParameterValue: paramVal2, }, ]); -}); +})); -integTest('deploy with notification ARN', async () => { - const topicName = `${STACK_NAME_PREFIX}-test-topic`; +integTest('deploy with notification ARN', withDefaultFixture(async (fixture) => { + const topicName = `${fixture.stackNamePrefix}-test-topic`; - const response = await sns('createTopic', { Name: topicName }); + const response = await fixture.aws.sns('createTopic', { Name: topicName }); const topicArn = response.TopicArn!; try { - await cdkDeploy('test-2', { + await fixture.cdkDeploy('test-2', { options: ['--notification-arns', topicArn], }); // verify that the stack we deployed has our notification ARN - const describeResponse = await cloudFormation('describeStacks', { - StackName: fullStackName('test-2'), + const describeResponse = await fixture.aws.cloudFormation('describeStacks', { + StackName: fixture.fullStackName('test-2'), }); expect(describeResponse.Stacks?.[0].NotificationARNs).toEqual([topicArn]); } finally { - await sns('deleteTopic', { + await fixture.aws.sns('deleteTopic', { TopicArn: topicArn, }); } -}); +})); -integTest('deploy with role', async () => { - const roleName = `${STACK_NAME_PREFIX}-test-role`; +integTest('deploy with role', withDefaultFixture(async (fixture) => { + const roleName = `${fixture.stackNamePrefix}-test-role`; await deleteRole(); - const createResponse = await iam('createRole', { + const createResponse = await fixture.aws.iam('createRole', { RoleName: roleName, 
AssumeRolePolicyDocument: JSON.stringify({ Version: '2012-10-17', @@ -379,14 +364,14 @@ integTest('deploy with role', async () => { Effect: 'Allow', }, { Action: 'sts:AssumeRole', - Principal: { AWS: (await sts('getCallerIdentity', {})).Arn }, + Principal: { AWS: (await fixture.aws.sts('getCallerIdentity', {})).Arn }, Effect: 'Allow', }], }), }); const roleArn = createResponse.Role.Arn; try { - await iam('putRolePolicy', { + await fixture.aws.iam('putRolePolicy', { RoleName: roleName, PolicyName: 'DefaultPolicy', PolicyDocument: JSON.stringify({ @@ -399,8 +384,8 @@ integTest('deploy with role', async () => { }), }); - await retry('Trying to assume fresh role', retry.forSeconds(300), async () => { - await sts('assumeRole', { + await retry(fixture.output, 'Trying to assume fresh role', retry.forSeconds(300), async () => { + await fixture.aws.sts('assumeRole', { RoleArn: roleArn, RoleSessionName: 'testing', }); @@ -411,7 +396,7 @@ integTest('deploy with role', async () => { // that doesn't have it yet. await sleep(5000); - await cdkDeploy('test-2', { + await fixture.cdkDeploy('test-2', { options: ['--role-arn', roleArn], }); @@ -419,7 +404,7 @@ integTest('deploy with role', async () => { // // Since roles are sticky, if we delete the role before the stack, subsequent DeleteStack // operations will fail when CloudFormation tries to assume the role that's already gone. 
- await cdkDestroy('test-2'); + await fixture.cdkDestroy('test-2'); } finally { await deleteRole(); @@ -427,66 +412,66 @@ integTest('deploy with role', async () => { async function deleteRole() { try { - for (const policyName of (await iam('listRolePolicies', { RoleName: roleName })).PolicyNames) { - await iam('deleteRolePolicy', { + for (const policyName of (await fixture.aws.iam('listRolePolicies', { RoleName: roleName })).PolicyNames) { + await fixture.aws.iam('deleteRolePolicy', { RoleName: roleName, PolicyName: policyName, }); } - await iam('deleteRole', { RoleName: roleName }); + await fixture.aws.iam('deleteRole', { RoleName: roleName }); } catch (e) { if (e.message.indexOf('cannot be found') > -1) { return; } throw e; } } -}); +})); -integTest('cdk diff', async () => { - const diff1 = await cdk(['diff', fullStackName('test-1')]); +integTest('cdk diff', withDefaultFixture(async (fixture) => { + const diff1 = await fixture.cdk(['diff', fixture.fullStackName('test-1')]); expect(diff1).toContain('AWS::SNS::Topic'); - const diff2 = await cdk(['diff', fullStackName('test-2')]); + const diff2 = await fixture.cdk(['diff', fixture.fullStackName('test-2')]); expect(diff2).toContain('AWS::SNS::Topic'); // We can make it fail by passing --fail - await expect(cdk(['diff', '--fail', fullStackName('test-1')])) + await expect(fixture.cdk(['diff', '--fail', fixture.fullStackName('test-1')])) .rejects.toThrow('exited with error'); -}); +})); -integTest('cdk diff --fail on multiple stacks exits with error if any of the stacks contains a diff', async () => { +integTest('cdk diff --fail on multiple stacks exits with error if any of the stacks contains a diff', withDefaultFixture(async (fixture) => { // GIVEN - const diff1 = await cdk(['diff', fullStackName('test-1')]); + const diff1 = await fixture.cdk(['diff', fixture.fullStackName('test-1')]); expect(diff1).toContain('AWS::SNS::Topic'); - await cdkDeploy('test-2'); - const diff2 = await cdk(['diff', fullStackName('test-2')]); 
+ await fixture.cdkDeploy('test-2'); + const diff2 = await fixture.cdk(['diff', fixture.fullStackName('test-2')]); expect(diff2).toContain('There were no differences'); // WHEN / THEN - await expect(cdk(['diff', '--fail', fullStackName('test-1'), fullStackName('test-2')])).rejects.toThrow('exited with error'); -}); + await expect(fixture.cdk(['diff', '--fail', fixture.fullStackName('test-1'), fixture.fullStackName('test-2')])).rejects.toThrow('exited with error'); +})); -integTest('cdk diff --fail with multiple stack exits with if any of the stacks contains a diff', async () => { +integTest('cdk diff --fail with multiple stack exits with if any of the stacks contains a diff', withDefaultFixture(async (fixture) => { // GIVEN - await cdkDeploy('test-1'); - const diff1 = await cdk(['diff', fullStackName('test-1')]); + await fixture.cdkDeploy('test-1'); + const diff1 = await fixture.cdk(['diff', fixture.fullStackName('test-1')]); expect(diff1).toContain('There were no differences'); - const diff2 = await cdk(['diff', fullStackName('test-2')]); + const diff2 = await fixture.cdk(['diff', fixture.fullStackName('test-2')]); expect(diff2).toContain('AWS::SNS::Topic'); // WHEN / THEN - await expect(cdk(['diff', '--fail', fullStackName('test-1'), fullStackName('test-2')])).rejects.toThrow('exited with error'); -}); + await expect(fixture.cdk(['diff', '--fail', fixture.fullStackName('test-1'), fixture.fullStackName('test-2')])).rejects.toThrow('exited with error'); +})); -integTest('deploy stack with docker asset', async () => { - await cdkDeploy('docker'); -}); +integTest('deploy stack with docker asset', withDefaultFixture(async (fixture) => { + await fixture.cdkDeploy('docker'); +})); -integTest('deploy and test stack with lambda asset', async () => { - const stackArn = await cdkDeploy('lambda', { captureStderr: false }); +integTest('deploy and test stack with lambda asset', withDefaultFixture(async (fixture) => { + const stackArn = await fixture.cdkDeploy('lambda', { 
captureStderr: false }); - const response = await cloudFormation('describeStacks', { + const response = await fixture.aws.cloudFormation('describeStacks', { StackName: stackArn, }); const lambdaArn = response.Stacks?.[0].Outputs?.[0].OutputValue; @@ -494,15 +479,15 @@ integTest('deploy and test stack with lambda asset', async () => { throw new Error('Stack did not have expected Lambda ARN output'); } - const output = await lambda('invoke', { + const output = await fixture.aws.lambda('invoke', { FunctionName: lambdaArn, }); expect(JSON.stringify(output.Payload)).toContain('dear asset'); -}); +})); -integTest('cdk ls', async () => { - const listing = await cdk(['ls'], { captureStderr: false }); +integTest('cdk ls', withDefaultFixture(async (fixture) => { + const listing = await fixture.cdk(['ls'], { captureStderr: false }); const expectedStacks = [ 'conditional-resource', @@ -527,30 +512,30 @@ integTest('cdk ls', async () => { ]; for (const stack of expectedStacks) { - expect(listing).toContain(fullStackName(stack)); + expect(listing).toContain(fixture.fullStackName(stack)); } -}); +})); -integTest('deploy stack without resource', async () => { +integTest('deploy stack without resource', withDefaultFixture(async (fixture) => { // Deploy the stack without resources - await cdkDeploy('conditional-resource', { modEnv: { NO_RESOURCE: 'TRUE' } }); + await fixture.cdkDeploy('conditional-resource', { modEnv: { NO_RESOURCE: 'TRUE' } }); // This should have succeeded but not deployed the stack. 
- await expect(cloudFormation('describeStacks', { StackName: fullStackName('conditional-resource') })) + await expect(fixture.aws.cloudFormation('describeStacks', { StackName: fixture.fullStackName('conditional-resource') })) .rejects.toThrow('conditional-resource does not exist'); // Deploy the stack with resources - await cdkDeploy('conditional-resource'); + await fixture.cdkDeploy('conditional-resource'); // Then again WITHOUT resources (this should destroy the stack) - await cdkDeploy('conditional-resource', { modEnv: { NO_RESOURCE: 'TRUE' } }); + await fixture.cdkDeploy('conditional-resource', { modEnv: { NO_RESOURCE: 'TRUE' } }); - await expect(cloudFormation('describeStacks', { StackName: fullStackName('conditional-resource') })) + await expect(fixture.aws.cloudFormation('describeStacks', { StackName: fixture.fullStackName('conditional-resource') })) .rejects.toThrow('conditional-resource does not exist'); -}); +})); -integTest('IAM diff', async () => { - const output = await cdk(['diff', fullStackName('iam-test')]); +integTest('IAM diff', withDefaultFixture(async (fixture) => { + const output = await fixture.cdk(['diff', fixture.fullStackName('iam-test')]); // Roughly check for a table like this: // @@ -563,50 +548,50 @@ integTest('IAM diff', async () => { expect(output).toContain('${SomeRole.Arn}'); expect(output).toContain('sts:AssumeRole'); expect(output).toContain('ec2.amazonaws.com'); -}); +})); -integTest('fast deploy', async () => { +integTest('fast deploy', withDefaultFixture(async (fixture) => { // we are using a stack with a nested stack because CFN will always attempt to // update a nested stack, which will allow us to verify that updates are actually // skipped unless --force is specified. 
- const stackArn = await cdkDeploy('with-nested-stack', { captureStderr: false }); + const stackArn = await fixture.cdkDeploy('with-nested-stack', { captureStderr: false }); const changeSet1 = await getLatestChangeSet(); // Deploy the same stack again, there should be no new change set created - await cdkDeploy('with-nested-stack'); + await fixture.cdkDeploy('with-nested-stack'); const changeSet2 = await getLatestChangeSet(); expect(changeSet2.ChangeSetId).toEqual(changeSet1.ChangeSetId); // Deploy the stack again with --force, now we should create a changeset - await cdkDeploy('with-nested-stack', { options: ['--force'] }); + await fixture.cdkDeploy('with-nested-stack', { options: ['--force'] }); const changeSet3 = await getLatestChangeSet(); expect(changeSet3.ChangeSetId).not.toEqual(changeSet2.ChangeSetId); // Deploy the stack again with tags, expected to create a new changeset // even though the resources didn't change. - await cdkDeploy('with-nested-stack', { options: ['--tags', 'key=value'] }); + await fixture.cdkDeploy('with-nested-stack', { options: ['--tags', 'key=value'] }); const changeSet4 = await getLatestChangeSet(); expect(changeSet4.ChangeSetId).not.toEqual(changeSet3.ChangeSetId); async function getLatestChangeSet() { - const response = await cloudFormation('describeStacks', { StackName: stackArn }); + const response = await fixture.aws.cloudFormation('describeStacks', { StackName: stackArn }); if (!response.Stacks?.[0]) { throw new Error('Did not get a ChangeSet at all'); } - log(`Found Change Set ${response.Stacks?.[0].ChangeSetId}`); + fixture.log(`Found Change Set ${response.Stacks?.[0].ChangeSetId}`); return response.Stacks?.[0]; } -}); +})); -integTest('failed deploy does not hang', async () => { +integTest('failed deploy does not hang', withDefaultFixture(async (fixture) => { // this will hang if we introduce https://github.com/aws/aws-cdk/issues/6403 again. 
- await expect(cdkDeploy('failed')).rejects.toThrow('exited with error'); -}); + await expect(fixture.cdkDeploy('failed')).rejects.toThrow('exited with error'); +})); -integTest('can still load old assemblies', async () => { +integTest('can still load old assemblies', withDefaultFixture(async (fixture) => { const cxAsmDir = path.join(os.tmpdir(), 'cdk-integ-cx'); const testAssembliesDirectory = path.join(__dirname, 'cloud-assemblies'); for (const asmdir of await listChildDirs(testAssembliesDirectory)) { - log(`ASSEMBLY ${asmdir}`); + fixture.log(`ASSEMBLY ${asmdir}`); await cloneDirectory(asmdir, cxAsmDir); // Some files in the asm directory that have a .js extension are @@ -616,15 +601,16 @@ integTest('can still load old assemblies', async () => { const targetName = template.replace(/.js$/, ''); await shell([process.execPath, template, '>', targetName], { cwd: cxAsmDir, + output: fixture.output, modEnv: { - TEST_ACCOUNT: (await testEnv()).account, - TEST_REGION: (await testEnv()).region, + TEST_ACCOUNT: await fixture.aws.account(), + TEST_REGION: fixture.aws.region, }, }); } // Use this directory as a Cloud Assembly - const output = await cdk([ + const output = await fixture.cdk([ '--app', cxAsmDir, '-v', 'synth', @@ -635,49 +621,66 @@ integTest('can still load old assemblies', async () => { // provider fails, we inspect the logs here. expect(output).not.toContain('$providerError'); } -}); - -integTest('generating and loading assembly', async () => { - const asmOutputDir = path.join(os.tmpdir(), 'cdk-integ-asm'); - await shell(['rm', '-rf', asmOutputDir]); +})); - // Make sure our fixture directory is clean - await prepareAppFixture(); +integTest('generating and loading assembly', withDefaultFixture(async (fixture) => { + const asmOutputDir = `${fixture.integTestDir}-cdk-integ-asm`; + await fixture.shell(['rm', '-rf', asmOutputDir]); // Synthesize a Cloud Assembly tothe default directory (cdk.out) and a specific directory. 
- await cdk(['synth']); - await cdk(['synth', '--output', asmOutputDir]); + await fixture.cdk(['synth']); + await fixture.cdk(['synth', '--output', asmOutputDir]); // cdk.out in the current directory and the indicated --output should be the same - await shell(['diff', 'cdk.out', asmOutputDir], { - cwd: INTEG_TEST_DIR, - }); + await fixture.shell(['diff', 'cdk.out', asmOutputDir]); // Check that we can 'ls' the synthesized asm. // Change to some random directory to make sure we're not accidentally loading cdk.json - const list = await cdk(['--app', asmOutputDir, 'ls'], { cwd: os.tmpdir() }); + const list = await fixture.cdk(['--app', asmOutputDir, 'ls'], { cwd: os.tmpdir() }); // Same stacks we know are in the app - expect(list).toContain(`${STACK_NAME_PREFIX}-lambda`); - expect(list).toContain(`${STACK_NAME_PREFIX}-test-1`); - expect(list).toContain(`${STACK_NAME_PREFIX}-test-2`); + expect(list).toContain(`${fixture.stackNamePrefix}-lambda`); + expect(list).toContain(`${fixture.stackNamePrefix}-test-1`); + expect(list).toContain(`${fixture.stackNamePrefix}-test-2`); // Check that we can use '.' and just synth ,the generated asm - const stackTemplate = await cdk(['--app', '.', 'synth', fullStackName('test-2')], { + const stackTemplate = await fixture.cdk(['--app', '.', 'synth', fixture.fullStackName('test-2')], { cwd: asmOutputDir, }); expect(stackTemplate).toContain('topic152D84A37'); // Deploy a Lambda from the copied asm - await cdkDeploy('lambda', { options: ['-a', '.'], cwd: asmOutputDir }); + await fixture.cdkDeploy('lambda', { options: ['-a', '.'], cwd: asmOutputDir }); - // Remove the original custom docker file that was used during synth. + // Remove (rename) the original custom docker file that was used during synth. // this verifies that the assemly has a copy of it and that the manifest uses // relative paths to reference to it. 
- await fs.unlink(path.join(INTEG_TEST_DIR, 'docker', 'Dockerfile.Custom')); + const customDockerFile = path.join(fixture.integTestDir, 'docker', 'Dockerfile.Custom'); + await fs.rename(customDockerFile, `${customDockerFile}~`); + try { + + // deploy a docker image with custom file without synth (uses assets) + await fixture.cdkDeploy('docker-with-custom-file', { options: ['-a', '.'], cwd: asmOutputDir }); + + } finally { + // Rename back to restore fixture to original state + await fs.rename(`${customDockerFile}~`, customDockerFile); + } +})); + +integTest('templates on disk contain metadata resource, also in nested assemblies', withDefaultFixture(async (fixture) => { + // Synth first, and switch on version reporting because cdk.json is disabling it + await fixture.cdk(['synth', '--version-reporting=true']); + + // Load template from disk from root assembly + const templateContents = await fixture.shell(['cat', 'cdk.out/*-lambda.template.json']); + + expect(JSON.parse(templateContents).Resources.CDKMetadata).toBeTruthy(); + + // Load template from nested assembly + const nestedTemplateContents = await fixture.shell(['cat', 'cdk.out/assembly-*-stage/*-stage-StackInStage.template.json']); - // deploy a docker image with custom file without synth (uses assets) - await cdkDeploy('docker-with-custom-file', { options: ['-a', '.'], cwd: asmOutputDir }); -}); + expect(JSON.parse(nestedTemplateContents).Resources.CDKMetadata).toBeTruthy(); +})); async function listChildren(parent: string, pred: (x: string) => Promise) { const ret = new Array(); diff --git a/packages/aws-cdk/test/integ/cli/corking.ts b/packages/aws-cdk/test/integ/cli/corking.ts index 3b9dbc3e7c206..c3970586ead03 100644 --- a/packages/aws-cdk/test/integ/cli/corking.ts +++ b/packages/aws-cdk/test/integ/cli/corking.ts @@ -1,45 +1,28 @@ /** * Routines for corking stdout and stderr */ +import * as stream from 'stream'; -let _corkShellOutput = false; -const _corked = { - stdout: new Array(), - stderr: new 
Array(), -}; +export class MemoryStream extends stream.Writable { + private parts = new Array(); -function cleanStreams() { - _corked.stdout.splice(0, _corked.stdout.length); - _corked.stderr.splice(0, _corked.stderr.length); -} - -export function corkShellOutput() { - _corkShellOutput = true; - cleanStreams(); -} - -export function writeOutput(stream: 'stdout' | 'stderr', content: Buffer) { - if (_corkShellOutput) { - _corked[stream].push(content); - } else { - process[stream].write(content); + public _write(chunk: Buffer, _encoding: string, callback: (error?: Error | null) => void): void { + this.parts.push(chunk); + callback(); } -} -async function writeAndFlush(stream: 'stdout' | 'stderr', content: Buffer) { - const flushed = process[stream].write(content); - if (!flushed) { - return new Promise(ok => process[stream].once('drain', ok)); + public buffer() { + return Buffer.concat(this.parts); } -} -export function uncorkShellOutput() { - _corkShellOutput = false; -} + public clear() { + this.parts.splice(0, this.parts.length); + } -export async function flushCorkedOutput() { - await writeAndFlush('stdout', Buffer.concat(_corked.stdout)); - await writeAndFlush('stderr', Buffer.concat(_corked.stderr)); - cleanStreams(); + public async flushTo(strm: NodeJS.WritableStream) { + const flushed = strm.write(this.buffer()); + if (!flushed) { + return new Promise(ok => strm.once('drain', ok)); + } + } } - diff --git a/packages/aws-cdk/test/integ/cli/resource-pool.test.ts b/packages/aws-cdk/test/integ/cli/resource-pool.test.ts new file mode 100644 index 0000000000000..edae1a1ed170a --- /dev/null +++ b/packages/aws-cdk/test/integ/cli/resource-pool.test.ts @@ -0,0 +1,47 @@ +import { ResourcePool } from './resource-pool'; + +test('take and dispose', async () => { + const pool = new ResourcePool(['a']); + + const take1 = pool.take(); + const take2 = pool.take(); + + let released = false; + + const lease1 = await take1; + // awaiting 'take2' would now block but we add an async 
+ // handler to it to flip a boolean to see when it gets activated. + void(take2.then(() => released = true)); + + expect(lease1.value).toEqual('a'); + await waitTick(); + expect(released).toEqual(false); + + lease1.dispose(); + await waitTick(); // This works because setImmediate is scheduled in LIFO order + expect(released).toEqual(true); +}); + +test('double dispose throws', async () => { + const pool = new ResourcePool(['a']); + const lease = await pool.take(); + + lease.dispose(); + expect(() => lease.dispose()).toThrow(); +}); + +test('more consumers than values', async () => { + const pool = new ResourcePool(['a', 'b']); + + const values = await Promise.all([ + pool.using(x => x), + pool.using(x => x), + pool.using(x => x), + ]); + + expect(values).toEqual(['a', 'b', 'a']); +}); + +function waitTick() { + return new Promise(setImmediate); +} \ No newline at end of file diff --git a/packages/aws-cdk/test/integ/cli/resource-pool.ts b/packages/aws-cdk/test/integ/cli/resource-pool.ts new file mode 100644 index 0000000000000..444a0bc193edd --- /dev/null +++ b/packages/aws-cdk/test/integ/cli/resource-pool.ts @@ -0,0 +1,95 @@ +/** + * A class that holds a pool of resources and gives them out and returns them on-demand + * + * The resources will be given out front to back, when they are returned + * the most recently returned version will be given out again (for best + * cache coherency). + * + * If there are multiple consumers waiting for a resource, consumers are serviced + * in FIFO order for most fairness. + */ +export class ResourcePool { + private readonly resources: A[]; + private readonly waiters: Array<(x: A) => void> = []; + + constructor(resources: A[]) { + if (resources.length === 0) { + throw new Error('Must have at least one resource in the pool'); + } + this.resources = [...resources]; + } + + /** + * Take one value from the resource pool + * + * If no such value is currently available, wait until it is. 
+ */ + public take(): Promise> { + const next = this.resources.shift(); + if (next !== undefined) { + return Promise.resolve(this.makeLease(next)); + } else { + return new Promise(ok => { + this.waiters.push((resource) => ok(this.makeLease(resource))); + }); + } + } + + /** + * Execute a block using a single resource from the pool + */ + public async using(block: (x: A) => B | Promise): Promise { + const lease = await this.take(); + try { + return await block(lease.value); + } finally { + lease.dispose(); + } + } + + private makeLease(value: A): ILease { + let disposed = false; + return { + value, + dispose: () => { + if (disposed) { + throw new Error('Calling dispose() on an already-disposed lease.'); + } + disposed = true; + this.returnValue(value); + }, + }; + } + + /** + * When a value is returned: + * + * - If someone's waiting for it, give it to them + * - Otherwise put it back into the pool + */ + private returnValue(value: A) { + const nextWaiter = this.waiters.shift(); + if (nextWaiter !== undefined) { + // Execute in the next tick, otherwise the call stack is going to get very + // confusing. 
+ setImmediate(() => nextWaiter(value)); + } else { + this.resources.unshift(value); + } + } +} + +/** + * A single value taken from the pool + */ +export interface ILease { + /** + * The value obtained by the lease + */ + readonly value: A; + + /** + * Return the leased value to the pool + */ + dispose(): void; +} \ No newline at end of file diff --git a/packages/aws-cdk/test/integ/cli/test-helpers.ts b/packages/aws-cdk/test/integ/cli/test-helpers.ts index a8fbc0eb5d04d..549c7a3b747d7 100644 --- a/packages/aws-cdk/test/integ/cli/test-helpers.ts +++ b/packages/aws-cdk/test/integ/cli/test-helpers.ts @@ -1,28 +1,40 @@ import * as fs from 'fs'; import * as path from 'path'; -import { corkShellOutput, uncorkShellOutput, flushCorkedOutput } from './corking'; +import { MemoryStream } from './corking'; const SKIP_TESTS = fs.readFileSync(path.join(__dirname, 'skip-tests.txt'), { encoding: 'utf-8' }).split('\n'); +export type TestContext = { readonly output: NodeJS.WritableStream; }; + /** * A wrapper for jest's 'test' which takes regression-disabled tests into account and prints a banner */ -export function integTest(name: string, callback: () => A | Promise) { - const runner = shouldSkip(name) ? test.skip : test; +export function integTest(name: string, + callback: (context: TestContext) => Promise) { + + // Integ tests can run concurrently, and are responsible for blocking themselves if they cannot. + const runner = shouldSkip(name) ? 
test.skip : test.concurrent; runner(name, async () => { - process.stdout.write('================================================================\n'); - process.stdout.write(`${name}\n`); - process.stdout.write('================================================================\n'); + const output = new MemoryStream(); + + output.write('================================================================\n'); + output.write(`${name}\n`); + output.write('================================================================\n'); + let success = true; try { - corkShellOutput(); - return await callback(); + return await callback({ output }); } catch (e) { - await flushCorkedOutput(); + await output.flushTo(process.stderr); + process.stderr.write(`❌ ${e.toString()}\n`); + success = false; throw e; } finally { - uncorkShellOutput(); + if (success) { + // Show people there's progress + process.stderr.write('✅'); + } } }); } diff --git a/packages/aws-cdk/test/integ/cli/test.sh b/packages/aws-cdk/test/integ/cli/test.sh index 42da413a46f40..42b4994b72ce9 100755 --- a/packages/aws-cdk/test/integ/cli/test.sh +++ b/packages/aws-cdk/test/integ/cli/test.sh @@ -23,4 +23,7 @@ if ! npx --no-install jest --version; then npm install --prefix . jest jest-junit aws-sdk fi -npx jest --runInBand --verbose "$@" \ No newline at end of file +# This must --runInBand because parallelism is arranged for inside the tests +# themselves and they must run in the same process in order to coordinate to +# make sure no 2 tests use the same region at the same time. 
+npx jest --runInBand --verbose "$@" diff --git a/packages/aws-cdk/test/integ/test-cli-regression-against-current-code.sh b/packages/aws-cdk/test/integ/test-cli-regression-against-current-code.sh index 1a62b61d04b33..dc62bab8f698c 100755 --- a/packages/aws-cdk/test/integ/test-cli-regression-against-current-code.sh +++ b/packages/aws-cdk/test/integ/test-cli-regression-against-current-code.sh @@ -89,4 +89,5 @@ fi echo "Running integration tests of version ${VERSION_UNDER_TEST} from ${integ_under_test}" set -x + VERSION_UNDER_TEST=${VERSION_UNDER_TEST} ${integ_under_test}/test.sh "$@" diff --git a/packages/aws-cdk/test/util.ts b/packages/aws-cdk/test/util.ts index f21f3c4abd8d3..f21c0bb2dd78e 100644 --- a/packages/aws-cdk/test/util.ts +++ b/packages/aws-cdk/test/util.ts @@ -159,4 +159,29 @@ export async function withMockedClassSingleton(obj: A, key: K, block: (fn: jest.Mocked[K]) => B): B { + const original = obj[key]; + const mockFn = jest.fn(); + (obj as any)[key] = mockFn; + + let ret; + try { + ret = block(mockFn as any); + } catch (e) { + obj[key] = original; + throw e; + } + + if (!isPromise(ret)) { + obj[key] = original; + return ret; + } + + return ret.finally(() => { obj[key] = original; }) as any; +} + +function isPromise(object: any): object is Promise { + return Promise.resolve(object) === object; } \ No newline at end of file diff --git a/packages/monocdk-experiment/package.json b/packages/monocdk-experiment/package.json index e4eb7cf36e3cc..cad3ccd08550c 100644 --- a/packages/monocdk-experiment/package.json +++ b/packages/monocdk-experiment/package.json @@ -12,7 +12,7 @@ "stability": "experimental", "maturity": "developer-preview", "scripts": { - "gen": "npx ts-node build-tools/gen.ts", + "gen": "ubergen", "build": "cdk-build", "lint": "cdk-lint", "test": "echo done", @@ -255,7 +255,8 @@ "fs-extra": "^9.0.1", "pkglint": "0.0.0", "ts-node": "^8.10.2", - "typescript": "~3.8.3" + "typescript": "~3.8.3", + "ubergen": "0.0.0" }, "peerDependencies": { 
"constructs": "^3.0.4" diff --git a/tools/cfn2ts/lib/augmentation-generator.ts b/tools/cfn2ts/lib/augmentation-generator.ts index 0ac3540a103d8..f23462b02b188 100644 --- a/tools/cfn2ts/lib/augmentation-generator.ts +++ b/tools/cfn2ts/lib/augmentation-generator.ts @@ -17,12 +17,16 @@ export class AugmentationGenerator { } public emitCode(): boolean { + let importedCloudWatch = false; let hadAugmentations = false; for (const resourceTypeName of Object.keys(this.spec.ResourceTypes).sort()) { const aug = resourceAugmentation(resourceTypeName); if (aug.metrics) { - this.code.line("import * as cloudwatch from '@aws-cdk/aws-cloudwatch';"); + if (!importedCloudWatch) { + this.code.line("import * as cloudwatch from '@aws-cdk/aws-cloudwatch';"); + importedCloudWatch = true; + } this.emitMetricAugmentations(resourceTypeName, aug.metrics, aug.options); hadAugmentations = true; } @@ -46,11 +50,16 @@ export class AugmentationGenerator { const classFile = `./${(options && options.classFile) || `${kebabL2ClassName}-base`}`; const className = (options && options.class) || l2ClassName + 'Base'; + const interfaceFile = (options && options.interfaceFile) ? 
`./${options.interfaceFile}` : classFile; const interfaceName = (options && options.interface) || 'I' + l2ClassName; this.code.line(`import { ${className} } from "${classFile}";`); - this.code.openBlock(`declare module "${classFile}"`); + if (classFile === interfaceFile) { + this.code.openBlock(`declare module "${classFile}"`); + } else { + this.code.openBlock(`declare module "${interfaceFile}"`); + } // Add to the interface this.code.openBlock(`interface ${interfaceName}`); @@ -60,6 +69,11 @@ export class AugmentationGenerator { } this.code.closeBlock(); + if (classFile !== interfaceFile) { + this.code.closeBlock(); + this.code.openBlock(`declare module "${classFile}"`); + } + // Add declaration to the base class (implementation added below) this.code.openBlock(`interface ${className}`); this.emitMetricFunctionDeclaration(cfnName); diff --git a/tools/pkglint/lib/rules.ts b/tools/pkglint/lib/rules.ts index cf89fed209354..709ed2953dd71 100644 --- a/tools/pkglint/lib/rules.ts +++ b/tools/pkglint/lib/rules.ts @@ -482,7 +482,7 @@ export class JSIIProjectReferences extends ValidationRule { this.name, pkg, 'jsii.projectReferences', - pkg.json.name !== 'monocdk-experiment', + pkg.json.name !== 'monocdk-experiment' && pkg.json.name !== 'aws-cdk-lib', ); } } diff --git a/tools/ubergen/.eslintrc.js b/tools/ubergen/.eslintrc.js new file mode 100644 index 0000000000000..61dd8dd001f63 --- /dev/null +++ b/tools/ubergen/.eslintrc.js @@ -0,0 +1,3 @@ +const baseConfig = require('cdk-build-tools/config/eslintrc'); +baseConfig.parserOptions.project = __dirname + '/tsconfig.json'; +module.exports = baseConfig; diff --git a/tools/ubergen/.gitignore b/tools/ubergen/.gitignore new file mode 100644 index 0000000000000..bbaee6515fdd0 --- /dev/null +++ b/tools/ubergen/.gitignore @@ -0,0 +1,6 @@ +*.snk +.LAST_BUILD +junit.xml +*.js +*.d.ts +!.eslintrc.js diff --git a/tools/ubergen/.npmignore b/tools/ubergen/.npmignore new file mode 100644 index 0000000000000..3ea8254615ff6 --- /dev/null +++ 
b/tools/ubergen/.npmignore @@ -0,0 +1,12 @@ +# Don't include original .ts files when doing `npm pack` +*.ts +!*.d.ts +coverage +.nyc_output +*.tgz + +*.snk +.eslintrc.js +junit.xml +.LAST_BUILD +**/cdk.out \ No newline at end of file diff --git a/tools/ubergen/LICENSE b/tools/ubergen/LICENSE new file mode 100644 index 0000000000000..b71ec1688783a --- /dev/null +++ b/tools/ubergen/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018-2020 Amazon.com, Inc. 
or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/tools/ubergen/NOTICE b/tools/ubergen/NOTICE new file mode 100644 index 0000000000000..bfccac9a7f69c --- /dev/null +++ b/tools/ubergen/NOTICE @@ -0,0 +1,2 @@ +AWS Cloud Development Kit (AWS CDK) +Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. diff --git a/tools/ubergen/README.md b/tools/ubergen/README.md new file mode 100644 index 0000000000000..36b6249ae245c --- /dev/null +++ b/tools/ubergen/README.md @@ -0,0 +1,7 @@ +# ubergen + +Aggregates all individual CDK construct library modules into a single +uber package. + +This is used in the CDK to create a monolithic package that is then +published for customers to consume. 
\ No newline at end of file diff --git a/tools/ubergen/bin/ubergen b/tools/ubergen/bin/ubergen new file mode 100755 index 0000000000000..dd62f3f249e59 --- /dev/null +++ b/tools/ubergen/bin/ubergen @@ -0,0 +1,2 @@ +#!/usr/bin/env node +require('./ubergen.js'); \ No newline at end of file diff --git a/packages/monocdk-experiment/build-tools/gen.ts b/tools/ubergen/bin/ubergen.ts similarity index 94% rename from packages/monocdk-experiment/build-tools/gen.ts rename to tools/ubergen/bin/ubergen.ts index 3cc90564f320f..7f654bdafb97d 100644 --- a/packages/monocdk-experiment/build-tools/gen.ts +++ b/tools/ubergen/bin/ubergen.ts @@ -1,10 +1,10 @@ import * as console from 'console'; -import * as fs from 'fs-extra'; import * as path from 'path'; import * as process from 'process'; +import * as fs from 'fs-extra'; import * as ts from 'typescript'; -const LIB_ROOT = path.resolve(__dirname, '..', 'lib'); +const LIB_ROOT = path.resolve(process.cwd(), 'lib'); async function main() { const libraries = await findLibrariesToPackage(); @@ -59,7 +59,7 @@ async function findLibrariesToPackage(): Promise { const result = new Array(); - const librariesRoot = path.resolve(__dirname, '..', '..', '@aws-cdk'); + const librariesRoot = path.resolve(process.cwd(), '..', '..', 'packages', '@aws-cdk'); for (const dir of await fs.readdir(librariesRoot)) { const packageJson = await fs.readJson(path.resolve(librariesRoot, dir, 'package.json')); @@ -88,7 +88,7 @@ async function findLibrariesToPackage(): Promise { async function verifyDependencies(libraries: readonly LibraryReference[]): Promise { console.log('🧐 Verifying dependencies are complete...'); - const packageJsonPath = path.resolve(__dirname, '..', 'package.json'); + const packageJsonPath = path.resolve(process.cwd(), 'package.json'); const packageJson = await fs.readJson(packageJsonPath); let changed = false; @@ -122,9 +122,9 @@ async function verifyDependencies(libraries: readonly LibraryReference[]): Promi }); } - const workspacePath = 
path.resolve(__dirname, '..', '..', '..', 'package.json'); + const workspacePath = path.resolve(process.cwd(), '..', '..', 'package.json'); const workspace = await fs.readJson(workspacePath); - let workspaceChanged = false + let workspaceChanged = false; const spuriousBundledDeps = new Set(packageJson.bundledDependencies ?? []); for (const [name, version] of Object.entries(toBundle)) { @@ -136,7 +136,7 @@ async function verifyDependencies(libraries: readonly LibraryReference[]): Promi workspace.workspaces.nohoist = Array.from(new Set([ ...workspace.workspaces.nohoist ?? [], nohoist, - `${nohoist}/**` + `${nohoist}/**`, ])).sort(); workspaceChanged = true; } @@ -273,8 +273,8 @@ async function copyOrTransformFiles(from: string, to: string, libraries: readonl const promises = (await fs.readdir(from)).map(async name => { if (shouldIgnoreFile(name)) { return; } - if (name.endsWith(".d.ts") || name.endsWith(".js")) { - if (await fs.pathExists(path.join(from, name.replace(/\.(d\.ts|js)$/, ".ts")))) { + if (name.endsWith('.d.ts') || name.endsWith('.js')) { + if (await fs.pathExists(path.join(from, name.replace(/\.(d\.ts|js)$/, '.ts')))) { // We won't copy .d.ts and .js files with a corresponding .ts file return; } @@ -360,13 +360,13 @@ async function rewriteImports(fromFile: string, targetDir: string, libraries: re const sourceLibrary = libraries.find( lib => moduleSpecifier === lib.packageJson.name || - moduleSpecifier.startsWith(`${lib.packageJson.name}/`) + moduleSpecifier.startsWith(`${lib.packageJson.name}/`), ); if (sourceLibrary == null) { return undefined; } const importedFile = moduleSpecifier === sourceLibrary.packageJson.name - ? path.join(LIB_ROOT, sourceLibrary.shortName) - : path.join(LIB_ROOT, sourceLibrary.shortName, moduleSpecifier.substr(sourceLibrary.packageJson.name.length + 1)); + ? 
path.join(LIB_ROOT, sourceLibrary.shortName) + : path.join(LIB_ROOT, sourceLibrary.shortName, moduleSpecifier.substr(sourceLibrary.packageJson.name.length + 1)); return ts.createStringLiteral( path.relative(targetDir, importedFile), ); diff --git a/tools/ubergen/package.json b/tools/ubergen/package.json new file mode 100644 index 0000000000000..8e2e2e0eaa5a9 --- /dev/null +++ b/tools/ubergen/package.json @@ -0,0 +1,45 @@ +{ + "name": "ubergen", + "private": true, + "version": "0.0.0", + "description": "Generate an uber CDK package from all individual CDK construct libraries", + "repository": { + "type": "git", + "url": "https://github.com/aws/aws-cdk.git", + "directory": "tools/ubergen" + }, + "bin": { + "ubergen": "bin/ubergen" + }, + "scripts": { + "build": "tsc -b && chmod +x bin/ubergen && eslint . --ext=.ts", + "watch": "tsc -b -w", + "pkglint": "pkglint -f", + "test": "echo success", + "build+test+package": "npm run build+test", + "build+test": "npm run build && npm test" + }, + "author": { + "name": "Amazon Web Services", + "url": "https://aws.amazon.com", + "organization": true + }, + "license": "Apache-2.0", + "devDependencies": { + "@types/fs-extra": "^8.1.1", + "cdk-build-tools": "0.0.0", + "pkglint": "0.0.0" + }, + "dependencies": { + "fs-extra": "^9.0.1", + "typescript": "~3.9.7" + }, + "keywords": [ + "aws", + "cdk" + ], + "homepage": "https://github.com/aws/aws-cdk", + "engines": { + "node": ">= 10.13.0 <13 || >=13.7.0" + } +} diff --git a/tools/ubergen/tsconfig.json b/tools/ubergen/tsconfig.json new file mode 100644 index 0000000000000..14499cd2abfaf --- /dev/null +++ b/tools/ubergen/tsconfig.json @@ -0,0 +1,20 @@ +{ + "compilerOptions": { + "target": "ES2018", + "module": "commonjs", + "lib": ["es2018"], + "strict": true, + "alwaysStrict": true, + "declaration": true, + "inlineSourceMap": true, + "inlineSources": true, + "noUnusedLocals": true, + "noUnusedParameters": true, + "noImplicitReturns": true, + "noFallthroughCasesInSwitch": true, + 
"resolveJsonModule": true, + "composite": true, + "incremental": true + }, + "include": ["**/*.ts"] +}