"Secret2",
secretProps(aurora2, `${DB_CLUSTER_ID}2`)
);
secret.addDependsOn(aurora);
secret2.addDependsOn(aurora2);
new CfnOutput(this, "AASASecretArn", {
value: secret.ref
});
new CfnOutput(this, "AASASecretArn2", {
value: secret2.ref
});
// TEST USER
const user = new User(this, "TestUser");
const policy = new Policy(this, "TestUserPolicy", {
statements: [
new PolicyStatement({
actions: ["rds-data:*"],
resources: [
`arn:aws:rds:${this.region}:${this.account}:cluster:${DB_CLUSTER_ID}*`,
`arn:aws:rds:${this.region}:${this.account}:cluster:${DB_CLUSTER_ID}2*`
]
}),
new PolicyStatement({
actions: ["secretsmanager:*"],
resources: [`${secret.ref}*`, `${secret2.ref}*`]
})
]
});
user.attachInlinePolicy(policy);
const key = new CfnAccessKey(this, "TestUserKey", {
  userName: user.userName
});
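// Illustrative follow-up (not from the excerpt): surface the test user's key pair
// as stack outputs so the Data API clusters can be exercised from a local client.
// Output names are assumptions; only ever do this for a throwaway test user.
new CfnOutput(this, "TestUserAccessKeyId", { value: key.ref });
new CfnOutput(this, "TestUserSecretAccessKey", { value: key.attrSecretAccessKey });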
// CodeCommitSourceAction.bound(): set up change detection, grant the action's role
// access to the repository, and return the action configuration.
protected bound(_scope: Construct, stage: codepipeline.IStage, options: codepipeline.ActionBindOptions):
codepipeline.ActionConfig {
const createEvent = this.props.trigger === undefined ||
this.props.trigger === CodeCommitTrigger.EVENTS;
if (createEvent) {
this.props.repository.onCommit(stage.pipeline.node.uniqueId + 'EventRule', {
target: new targets.CodePipeline(stage.pipeline),
branches: [this.branch],
});
}
// the Action will write the contents of the Git repository to the Bucket,
// so its Role needs write permissions to the Pipeline Bucket
options.bucket.grantReadWrite(options.role);
// https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-permissions-reference.html#aa-acp
options.role.addToPolicy(new iam.PolicyStatement({
resources: [this.props.repository.repositoryArn],
actions: [
'codecommit:GetBranch',
'codecommit:GetCommit',
'codecommit:UploadArchive',
'codecommit:GetUploadArchiveStatus',
'codecommit:CancelUploadArchive',
],
}));
return {
configuration: {
RepositoryName: this.props.repository.repositoryName,
BranchName: this.branch,
PollForSourceChanges: this.props.trigger === CodeCommitTrigger.POLL,
},
};
}
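// Hedged usage sketch (names such as `pipeline` and `repo` are illustrative, not
// from the excerpt): attaching the CodeCommit source action to a pipeline stage.
const sourceOutput = new codepipeline.Artifact();
pipeline.addStage({
  stageName: 'Source',
  actions: [
    new codepipeline_actions.CodeCommitSourceAction({
      actionName: 'CodeCommit',
      repository: repo,
      output: sourceOutput,
      trigger: codepipeline_actions.CodeCommitTrigger.EVENTS,
    }),
  ],
});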
private makePolicyStatements(task: sfn.Task): iam.PolicyStatement[] {
const stack = cdk.Stack.of(task);
// https://docs.aws.amazon.com/step-functions/latest/dg/ecs-iam.html
const policyStatements = [
new iam.PolicyStatement({
actions: ['ecs:RunTask'],
resources: [this.props.taskDefinition.taskDefinitionArn],
}),
new iam.PolicyStatement({
actions: ['ecs:StopTask', 'ecs:DescribeTasks'],
resources: ['*'],
}),
new iam.PolicyStatement({
actions: ['iam:PassRole'],
resources: cdk.Lazy.listValue({ produce: () => this.taskExecutionRoles().map(r => r.roleArn) })
}),
];
if (this.integrationPattern === sfn.ServiceIntegrationPattern.SYNC) {
policyStatements.push(new iam.PolicyStatement({
actions: ["events:PutTargets", "events:PutRule", "events:DescribeRule"],
resources: [stack.formatArn({
service: 'events',
resource: 'rule',
resourceName: 'StepFunctionsGetEventsForECSTaskRule'
})]
}));
}
return policyStatements;
}
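// Sketch of the helper referenced by the iam:PassRole statement above (its body is
// not part of the excerpt, so this is an assumed shape): pass-role must cover the
// task role and, when one exists, the execution role of the task definition.
private taskExecutionRoles(): iam.IRole[] {
  const roles: iam.IRole[] = [this.props.taskDefinition.taskRole];
  if (this.props.taskDefinition.executionRole) {
    roles.push(this.props.taskDefinition.executionRole);
  }
  return roles;
}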
if (!options.canContainersAccessInstanceRole) {
// Deny containers access to instance metadata service
// Source: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/instance_IAM_role.html
autoScalingGroup.addUserData('sudo iptables --insert FORWARD 1 --in-interface docker+ --destination 169.254.169.254/32 --jump DROP');
autoScalingGroup.addUserData('sudo service iptables save');
// The following is only for AwsVpc networking mode, but doesn't hurt for the other modes.
autoScalingGroup.addUserData('echo ECS_AWSVPC_BLOCK_IMDS=true >> /etc/ecs/ecs.config');
}
if (autoScalingGroup.spotPrice && options.spotInstanceDraining) {
autoScalingGroup.addUserData('echo ECS_ENABLE_SPOT_INSTANCE_DRAINING=true >> /etc/ecs/ecs.config');
}
// ECS instances must be able to do these things
// Source: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/instance_IAM_role.html
autoScalingGroup.addToRolePolicy(new iam.PolicyStatement({
actions: [
"ecs:CreateCluster",
"ecs:DeregisterContainerInstance",
"ecs:DiscoverPollEndpoint",
"ecs:Poll",
"ecs:RegisterContainerInstance",
"ecs:StartTelemetrySession",
"ecs:Submit*",
"ecr:GetAuthorizationToken",
"logs:CreateLogStream",
"logs:PutLogEvents"
],
resources: ['*']
}));
// bound() of a custom pipeline action (parameter names below are inferred from how
// they are used in the body; only `scope` and `options` are referenced).
protected bound(scope: Construct, _stage: IStage, options: ActionBindOptions): ActionConfig {
const { parameterName, regExp, logParameter = false } = this.props;
const checkParameterFunction = new Function(
scope,
'CheckParameterFunction',
{
runtime: Runtime.PYTHON_3_7,
code: Code.fromAsset(`${LAMBDA_PATH}/check-parameter`),
handler: 'check_parameter.lambda_handler',
},
);
// allow pipeline to list functions
options.role.addToPolicy(
new PolicyStatement({
actions: ['lambda:ListFunctions'],
resources: ['*'],
}),
);
// allow pipeline to invoke this lambda function
options.role.addToPolicy(
new PolicyStatement({
actions: ['lambda:InvokeFunction'],
resources: [checkParameterFunction.functionArn],
}),
);
// allow lambda to put job results for this pipeline
// CodePipeline requires this to be granted to '*'
// (the Pipeline ARN will not be enough)
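// Sketch of the grant the comment above describes (assumed to go on the checker
// Lambda's execution role; the job-result APIs accept only a '*' resource).
checkParameterFunction.addToRolePolicy(
  new PolicyStatement({
    actions: ['codepipeline:PutJobSuccessResult', 'codepipeline:PutJobFailureResult'],
    resources: ['*'],
  }),
);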
taskDefinition.addContainer('cloudmapper-container', {
image: ecs.ContainerImage.fromAsset('./resources'),
memoryLimitMiB: 512,
cpu: 256,
environment: {
S3_BUCKET: config['s3_bucket'],
MINIMUM_ALERT_SEVERITY: config['minimum_alert_severity']
},
logging: new ecs.AwsLogDriver({
streamPrefix: 'cloudmapper',
logRetention: logs.RetentionDays.TWO_WEEKS
})
});
// Grant the ability to assume the IAM role in any account
taskDefinition.addToTaskRolePolicy(new iam.PolicyStatement({
resources: ["arn:aws:iam::*:role/"+config['iam_role']],
actions: ['sts:AssumeRole']
}));
// Grant the ability to read and write the files from the S3 bucket
taskDefinition.addToTaskRolePolicy(new iam.PolicyStatement({
resources: ["arn:aws:s3:::"+config['s3_bucket']],
actions: ['s3:ListBucket']
}));
taskDefinition.addToTaskRolePolicy(new iam.PolicyStatement({
resources: ["arn:aws:s3:::"+config['s3_bucket']+"/*"],
actions: ['s3:GetObject','s3:PutObject', 's3:DeleteObject']
}));
// Grant the ability to describe the ECS container instances and tasks the job runs on
taskDefinition.addToTaskRolePolicy(new iam.PolicyStatement({
actions: ['ecs:DescribeContainerInstances', 'ecs:DescribeTasks'],
resources: ['*'],
}));
// Restrict to the ECS Cluster
fn.addToRolePolicy(new iam.PolicyStatement({
actions: [
'ecs:ListContainerInstances',
'ecs:SubmitContainerStateChange',
'ecs:SubmitTaskStateChange'
],
resources: [props.cluster.clusterArn]
}));
// Restrict the container-instance operations to the ECS Cluster
fn.addToRolePolicy(new iam.PolicyStatement({
actions: [
'ecs:UpdateContainerInstancesState',
'ecs:ListTasks'
],
conditions: {
ArnEquals: {'ecs:cluster': props.cluster.clusterArn}
},
resources: ['*']
}));
}
}
// Create Lambda to forward alarms
const alarm_forwarder = new lambda.Function(this, "alarm_forwarder", {
runtime: lambda.Runtime.PYTHON_3_7,
code: lambda.Code.asset("resources/alarm_forwarder"),
handler: "main.handler",
description: "Forwards alarms from the local SNS to another",
logRetention: logs.RetentionDays.TWO_WEEKS,
timeout: cdk.Duration.seconds(30),
memorySize: 128,
environment: {
"ALARM_SNS": config['alarm_sns_arn']
},
});
// Add priv to publish the events so the alarms can be forwarded
alarm_forwarder.addToRolePolicy(new iam.PolicyStatement({
resources: [config['alarm_sns_arn']],
actions: ['sns:Publish']
}));
// Connect the SNS to the Lambda
sns_topic.addSubscription(new sns_subscription.LambdaSubscription(alarm_forwarder));
}
}
const itemsTable = new Table(this, 'ItemsTable', {
  tableName: tableName,
partitionKey: {
name: `${tableName}Id`,
type: AttributeType.STRING
},
billingMode: BillingMode.PAY_PER_REQUEST,
stream: StreamViewType.NEW_IMAGE,
// The default removal policy is RETAIN, which means that cdk destroy will not attempt to delete
// the new table, and it will remain in your account until manually deleted. By setting the policy to
// DESTROY, cdk destroy will delete the table (even if it has data in it)
removalPolicy: cdk.RemovalPolicy.DESTROY, // NOT recommended for production code
});
const itemsTableRole = new Role(this, 'ItemsDynamoDBRole', {
assumedBy: new ServicePrincipal('appsync.amazonaws.com')
});
itemsTableRole.addManagedPolicy(ManagedPolicy.fromAwsManagedPolicyName('AmazonDynamoDBFullAccess'));
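// Hedged sketch of the AppSync API the data source below attaches to (its creation
// is not part of this excerpt); an API-key-authenticated CfnGraphQLApi is assumed.
const itemsGraphQLApi = new CfnGraphQLApi(this, 'ItemsApi', {
  name: 'items-api',
  authenticationType: 'API_KEY'
});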
const dataSource = new CfnDataSource(this, 'ItemsDataSource', {
apiId: itemsGraphQLApi.attrApiId,
name: 'ItemsDynamoDataSource',
type: 'AMAZON_DYNAMODB',
dynamoDbConfig: {
tableName: itemsTable.tableName,
awsRegion: this.region
},
serviceRoleArn: itemsTableRole.roleArn
});
const getOneResolver = new CfnResolver(this, 'GetOneQueryResolver', {