fleet/infrastructure/sandbox/JITProvisioner/deprovisioner.tf

272 lines
6.8 KiB
Terraform
Raw Normal View History

Fleet Sandbox (#5079) * Add code for the shared infra part of the demo environment * Checkin * checkin * Checkin for pre-provisioner, got terraform working * Checkin with the pre-deployer working, now blocked by helm chart * Add interface for helm * Add some initial code for the JIT Provisioner lambda Lots of code taken from https://gitlab.com/hmajid2301/articles/-/tree/master/41.%20Create%20a%20webapp%20with%20fizz * Update helm chart to work with shared infra (#5621) * Update helm chart to work with shared infra * Update helm chart README to reflect changes. * Checkin * Checkin * Checkin, Pre-provisioner actually works * PreProvisioner is now complete * Make changes to the JIT provisioner based off of actually learning how to do stuff * checkin * Check in, broken currently * Add all code except provisioning and emailing user * Checkin * Checkin, fixed kubernetes * Checkin * Forgot a file * Finish jit provisioner, need to test now * Checkin, switching to nginx ingress * Fleets are now actually accessible * JITProvisioner now returns working fleet instances * Deprovisioner code done, just need a few bugs fixed * Fix the deprovisioner so it works now and re-ip * fixup * Finished testing the deprovisioner * Added monitoring and fixed some bugs * Add stuff for #6548 * fixed per luke's suggestion * Fix for inactive task definition arns * move everything to the prod account * Bump fleet version and fix a couple of bugs * Fix a couple of bugs * Lots of security fixes and a few bug fixes * Rename demo to sandbox to match product's naming * Revert "Update helm chart to work with shared infra (#5621)" This reverts commit 610bbd1c00338620f6cc65fe2aff86139551f465. Co-authored-by: Robert Fairburn <8029478+rfairburn@users.noreply.github.com>
2022-07-19 18:56:53 +00:00
# Trust policy allowing the AWS Step Functions service to assume the
# state machine's execution role (aws_iam_role.sfn below).
data "aws_iam_policy_document" "sfn-assume-role" {
  statement {
    effect  = "Allow" # the data source default, stated explicitly for clarity
    actions = ["sts:AssumeRole"]

    principals {
      type        = "Service"
      identifiers = ["states.amazonaws.com"]
    }
  }
}
# Bind the state machine's permissions policy to its execution role.
resource "aws_iam_role_policy_attachment" "sfn" {
  role       = aws_iam_role.sfn.id
  policy_arn = aws_iam_policy.sfn.arn
}

# Customer-managed policy carrying the state machine's runtime permissions.
resource "aws_iam_policy" "sfn" {
  name   = "${local.full_name}-sfn"
  policy = data.aws_iam_policy_document.sfn.json
}
# Permissions for the Step Functions state machine:
#   1. run the deprovisioner ECS task (any revision) on the sandbox cluster,
#   2. pass the deprovisioner IAM role to ECS,
#   3. manage the EventBridge rule Step Functions creates to track
#      completion of a runTask.sync invocation.
data "aws_iam_policy_document" "sfn" {
  statement {
    actions = ["ecs:RunTask"]
    # Cover both the revision-qualified (":*") and unqualified task definition
    # ARNs so the policy keeps working as new revisions are registered.
    resources = [
      replace(aws_ecs_task_definition.deprovisioner.arn, "/:\\d+$/", ":*"),
      replace(aws_ecs_task_definition.deprovisioner.arn, "/:\\d+$/", ""),
    ]
    condition {
      test     = "ArnLike"
      variable = "ecs:cluster"
      values   = [var.ecs_cluster.arn]
    }
  }
  statement {
    actions = ["iam:PassRole"]
    # Scoped to the deprovisioner role (used as both the task role and the
    # execution role) instead of "*", so this policy cannot be used to pass
    # unrelated roles to ECS.
    resources = [aws_iam_role.deprovisioner.arn]
    condition {
      test     = "StringLike"
      variable = "iam:PassedToService"
      values   = ["ecs-tasks.amazonaws.com"]
    }
  }
  statement {
    # Required by the ".sync" ECS integration: Step Functions creates a managed
    # EventBridge rule to be notified when the task stops. These actions do not
    # support useful resource-level scoping here.
    actions   = ["events:PutTargets", "events:PutRule", "events:DescribeRule"]
    resources = ["*"]
  }
}
# Execution role assumed by the Step Functions state machine.
resource "aws_iam_role" "sfn" {
  name               = "${local.full_name}-sfn"
  assume_role_policy = data.aws_iam_policy_document.sfn-assume-role.json
}
# Bind the deprovisioner permissions policy to the deprovisioner role.
resource "aws_iam_role_policy_attachment" "deprovisioner" {
  role       = aws_iam_role.deprovisioner.id
  policy_arn = aws_iam_policy.deprovisioner.arn
}

# Customer-managed policy for the deprovisioner ECS task.
resource "aws_iam_policy" "deprovisioner" {
  name   = "${local.full_name}-deprovisioner"
  policy = data.aws_iam_policy_document.deprovisioner.json
}
# Permissions for the deprovisioner ECS task (the same role is used as both the
# task role and the execution role, see aws_ecs_task_definition.deprovisioner).
data "aws_iam_policy_document" "deprovisioner" {
  # Account-level DynamoDB discovery calls; these actions do not support
  # resource-level scoping, hence the "*" resource.
  statement {
    actions = [
      "dynamodb:List*",
      "dynamodb:DescribeReservedCapacity*",
      "dynamodb:DescribeLimits",
      "dynamodb:DescribeTimeToLive"
    ]
    resources = ["*"]
  }
  # Full read/write access, scoped to the sandbox instance-tracking table.
  statement {
    actions = [
      "dynamodb:BatchGet*",
      "dynamodb:DescribeStream",
      "dynamodb:DescribeTable",
      "dynamodb:Get*",
      "dynamodb:Query",
      "dynamodb:Scan",
      "dynamodb:BatchWrite*",
      "dynamodb:CreateTable",
      "dynamodb:Delete*",
      "dynamodb:Update*",
      "dynamodb:PutItem"
    ]
    resources = [var.dynamodb_table.arn]
  }
  # Encrypt/decrypt via the module's ECR key and the externally supplied key.
  statement {
    actions = [ #tfsec:ignore:aws-iam-no-policy-wildcards
      "kms:Encrypt*",
      "kms:Decrypt*",
      "kms:ReEncrypt*",
      "kms:GenerateDataKey*",
      "kms:Describe*"
    ]
    resources = [aws_kms_key.ecr.arn, var.kms_key.arn]
  }
  # SECURITY NOTE(review): this statement grants full administrator access
  # ("*" on "*") to the deprovisioner task. Presumably required because the
  # container runs `terraform destroy` against arbitrary sandbox resources,
  # but it should be narrowed to the services that Terraform run actually
  # touches — confirm what the deprovisioner manages and scope accordingly.
  statement {
    actions = ["*"]
    resources = ["*"]
  }
}
# Trust policy allowing ECS tasks to assume the deprovisioner role.
data "aws_iam_policy_document" "deprovisioner-assume-role" {
  statement {
    effect  = "Allow"
    actions = ["sts:AssumeRole"]

    principals {
      type        = "Service"
      identifiers = ["ecs-tasks.amazonaws.com"]
    }
  }
}

# Role used as both the task role and the execution role of the
# deprovisioner ECS task.
resource "aws_iam_role" "deprovisioner" {
  name               = "${local.full_name}-deprovisioner"
  assume_role_policy = data.aws_iam_policy_document.deprovisioner-assume-role.json
}
# Fargate task definition for the deprovisioner container, which tears down a
# single sandbox instance. The deprovisioner IAM role serves as both the task
# role and the execution role.
#
# Fixes: removed the spurious "networkMode" key from the container definition
# (it is not a valid containerDefinition field — network mode is a task-level
# setting, already declared below) and the needless concat() around a single
# literal environment list.
resource "aws_ecs_task_definition" "deprovisioner" {
  family                   = "${local.full_name}-deprovisioner"
  network_mode             = "awsvpc"
  requires_compatibilities = ["FARGATE"]
  execution_role_arn       = aws_iam_role.deprovisioner.arn
  task_role_arn            = aws_iam_role.deprovisioner.arn
  cpu                      = 1024
  memory                   = 4096

  container_definitions = jsonencode([
    {
      name        = "${var.prefix}-deprovisioner"
      image       = docker_registry_image.deprovisioner.name
      mountPoints = []
      volumesFrom = []
      essential   = true
      logConfiguration = {
        logDriver = "awslogs"
        options = {
          awslogs-group         = aws_cloudwatch_log_group.main.name
          awslogs-region        = data.aws_region.current.name
          awslogs-stream-prefix = "${local.full_name}-deprovisioner"
        }
      }
      # TF_VAR_* values are consumed by the Terraform run inside the container.
      environment = [
        {
          name  = "TF_VAR_mysql_secret"
          value = var.mysql_secret.id
        },
        {
          name  = "TF_VAR_eks_cluster"
          value = var.eks_cluster.eks_cluster_id
        },
      ]
    }
  ])

  # Register the new revision before destroying the old one so the state
  # machine never references a missing task definition.
  lifecycle {
    create_before_destroy = true
  }
}
# Security group for the deprovisioner task: no ingress rules (nothing
# connects to the task) and unrestricted egress over IPv4 and IPv6.
resource "aws_security_group" "deprovisioner" {
  name        = "${local.full_name}-deprovisioner"
  description = "security group for ${local.full_name}-deprovisioner"
  vpc_id      = var.vpc.vpc_id

  egress {
    description      = "egress to all"
    from_port        = 0
    to_port          = 0
    protocol         = "-1"
    cidr_blocks      = ["0.0.0.0/0"]
    ipv6_cidr_blocks = ["::/0"]
  }
}
# State machine driving sandbox teardown. An execution is started with
# {"waitTime": <seconds>, "instanceID": <id>}: it waits for the instance's
# lifetime to elapse, then runs the deprovisioner ECS task for that instance.
resource "aws_sfn_state_machine" "main" {
  name     = var.prefix
  role_arn = aws_iam_role.sfn.arn
  # Raw Amazon States Language JSON. "runTask.sync" makes Step Functions block
  # until the ECS task stops; the unqualified task definition ARN (revision
  # suffix stripped) always resolves to the latest active revision.
  definition = <<EOF
{
  "Comment": "Controls the lifecycle of a Fleet demo environment",
  "StartAt": "Wait",
  "States": {
    "Wait": {
      "Type": "Wait",
      "SecondsPath": "$.waitTime",
      "Next": "Deprovisioner"
    },
    "Deprovisioner": {
      "Type": "Task",
      "Resource": "arn:aws:states:::ecs:runTask.sync",
      "Parameters": {
        "LaunchType": "FARGATE",
        "NetworkConfiguration": {
          "AwsvpcConfiguration": {
            "Subnets": ${jsonencode(var.vpc.private_subnets)},
            "SecurityGroups": ["${aws_security_group.deprovisioner.id}"],
            "AssignPublicIp": "DISABLED"
          }
        },
        "Cluster": "${var.ecs_cluster.arn}",
        "TaskDefinition": "${replace(aws_ecs_task_definition.deprovisioner.arn, "/:\\d+$/", "")}",
        "Overrides": {
          "ContainerOverrides": [
            {
              "Name": "${var.prefix}-deprovisioner",
              "Environment": [
                {
                  "Name": "INSTANCE_ID",
                  "Value.$": "$.instanceID"
                }
              ]
            }
          ]
        }
      },
      "End": true
    }
  }
}
EOF
}
# Expose the state machine so callers (the JIT provisioner) can start
# teardown executions for newly created sandbox instances.
output "deprovisioner" {
  value = aws_sfn_state_machine.main
}

# UUID that is regenerated only when the archived deprovisioner sources
# change; used to tag the container image below.
resource "random_uuid" "deprovisioner" {
  keepers = {
    lambda = data.archive_file.deprovisioner.output_sha
  }
}
# Render the Terraform backend configuration the deprovisioner container uses
# to locate each sandbox instance's remote state.
resource "local_file" "backend-config" {
  content = templatefile("${path.module}/deprovisioner/backend-template.conf",
    {
      remote_state = var.remote_state
  })
  filename = "${path.module}/deprovisioner/deploy_terraform/backend.conf"
}

# Zip of the deprovisioner source directory; its hash feeds the random_uuid
# keeper so the image tag changes whenever the sources do.
data "archive_file" "deprovisioner" {
  type        = "zip"
  output_path = "${path.module}/.deprovisioner.zip"
  source_dir  = "${path.module}/deprovisioner"
}
# Build and push the deprovisioner image to ECR. The tag combines the current
# git branch with a UUID keyed on the source archive hash, so a fresh image is
# pushed whenever the deprovisioner sources change.
#
# Fix: removed stray non-HCL text (a pasted commit message) that had been
# injected inside the build block and broke the file's syntax.
resource "docker_registry_image" "deprovisioner" {
  name          = "${aws_ecr_repository.main.repository_url}:${data.git_repository.main.branch}-${random_uuid.deprovisioner.result}"
  keep_remotely = true

  build {
    context     = "${path.module}/deprovisioner/"
    pull_parent = true
    platform    = "linux/amd64"
  }

  # The rendered backend config must exist on disk before the build copies the
  # deprovisioner directory into the image.
  depends_on = [
    local_file.backend-config
  ]
}
# Expose the task role so callers can attach additional permissions to it.
output "deprovisioner_role" {
  value = aws_iam_role.deprovisioner
}