Added support for multiple loadtest environments (#5526)

This commit is contained in:
Zachary Winnerman 2022-05-03 10:51:11 -04:00 committed by GitHub
parent 25ce199f34
commit e7b9f41097
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
41 changed files with 570 additions and 420 deletions

View File

@ -0,0 +1 @@
.external_modules

View File

@ -0,0 +1,70 @@
# Routes requests for <workspace>.loadtest.fleetdm.com on the shared public
# ALB listener to this environment's target group. Each Terraform workspace
# adds its own host-header rule, so many loadtest environments share one ALB.
# NOTE(review): no explicit `priority` is set, so AWS assigns the next free
# priority at apply time — rule ordering across workspaces is arbitrary.
resource "aws_lb_listener_rule" "main" {
listener_arn = data.terraform_remote_state.shared.outputs.alb-listener.arn
action {
type = "forward"
target_group_arn = aws_alb_target_group.main.arn
}
condition {
host_header {
values = ["${terraform.workspace}.loadtest.fleetdm.com"]
}
}
}
# Same host-based routing as "main", but on the shared *internal* ALB
# listener, forwarding to this workspace's internal target group. Used by
# in-VPC clients (e.g. simulated osquery agents) that reach Fleet privately.
resource "aws_lb_listener_rule" "internal" {
listener_arn = data.terraform_remote_state.shared.outputs.alb-listener-internal.arn
action {
type = "forward"
target_group_arn = aws_alb_target_group.internal.arn
}
condition {
host_header {
values = ["${terraform.workspace}.loadtest.fleetdm.com"]
}
}
}
# Per-workspace target group behind the shared internal listener. Fleet
# tasks (Fargate, hence target_type "ip") register here on port 8080.
# NOTE(review): ALB target group names are limited to 32 characters;
# "${local.prefix}-internal" could exceed that for long workspace names —
# confirm workspace names stay short.
resource "aws_alb_target_group" "internal" {
name = "${local.prefix}-internal"
protocol = "HTTP"
target_type = "ip"
port = "8080"
vpc_id = data.terraform_remote_state.shared.outputs.vpc.vpc_id
deregistration_delay = 30
load_balancing_algorithm_type = "least_outstanding_requests"
# Health check against Fleet's /healthz endpoint: a target is removed after
# 5 consecutive failures (15s interval, ~75s) and restored after 5 passes.
health_check {
path = "/healthz"
matcher = "200"
timeout = 10
interval = 15
healthy_threshold = 5
unhealthy_threshold = 5
}
}
# Per-workspace target group behind the shared public listener; mirrors the
# "internal" target group (same port, VPC, and health-check settings) but is
# the destination for public traffic matched by aws_lb_listener_rule.main.
resource "aws_alb_target_group" "main" {
name = local.prefix
protocol = "HTTP"
target_type = "ip"
port = "8080"
vpc_id = data.terraform_remote_state.shared.outputs.vpc.vpc_id
deregistration_delay = 30
load_balancing_algorithm_type = "least_outstanding_requests"
# Same /healthz policy as the internal target group.
health_check {
path = "/healthz"
matcher = "200"
timeout = 10
interval = 15
healthy_threshold = 5
unhealthy_threshold = 5
}
}

View File

@ -1,8 +0,0 @@
bucket = "fleet-terraform-state20220408141538466600000002"
key = "frontend-loadtesting/loadtesting/terraform.tfstate" # This should be set to account_alias/unique_key/terraform.tfstate
workspace_key_prefix = "frontend-loadtesting" # This should be set to the account alias
region = "us-east-2"
encrypt = true
kms_key_id = "9f98a443-ffd7-4dbe-a9c3-37df89b2e42a"
dynamodb_table = "tf-remote-state-lock"
role_arn = "arn:aws:iam::353365949058:role/terraform-frontend-loadtesting"

View File

@ -1,8 +0,0 @@
bucket = "fleet-terraform-state20220408141538466600000002"
key = "loadtesting/loadtesting/terraform.tfstate" # This should be set to account_alias/unique_key/terraform.tfstate
workspace_key_prefix = "loadtesting" # This should be set to the account alias
region = "us-east-2"
encrypt = true
kms_key_id = "9f98a443-ffd7-4dbe-a9c3-37df89b2e42a"
dynamodb_table = "tf-remote-state-lock"
role_arn = "arn:aws:iam::353365949058:role/terraform-loadtesting"

View File

@ -4,22 +4,8 @@ resource "aws_kms_key" "main" {
enable_key_rotation = true
}
resource "aws_ecr_repository" "prometheus-to-cloudwatch" {
name = "prometheus-to-cloudwatch"
image_tag_mutability = "IMMUTABLE"
image_scanning_configuration {
scan_on_push = true
}
encryption_configuration {
encryption_type = "KMS"
kms_key = aws_kms_key.main.arn
}
}
resource "aws_ecr_repository" "fleet" {
name = "fleet"
name = local.prefix
image_tag_mutability = "IMMUTABLE"
image_scanning_configuration {

View File

@ -47,7 +47,7 @@ data "aws_iam_policy_document" "fleet" {
"kms:GenerateDataKey*",
"kms:Describe*"
]
resources = [aws_kms_key.main.arn]
resources = [aws_kms_key.main.arn, data.terraform_remote_state.shared.outputs.ecr-kms.arn]
}
}
@ -63,7 +63,7 @@ data "aws_iam_policy_document" "assume_role" {
}
resource "aws_iam_role" "main" {
name = "fleetdm-role"
name = "${local.prefix}-role"
assume_role_policy = data.aws_iam_policy_document.assume_role.json
}
@ -73,7 +73,7 @@ resource "aws_iam_role_policy_attachment" "role_attachment" {
}
resource "aws_iam_policy" "main" {
name = "fleet-iam-policy"
name = "${local.prefix}-iam-policy"
policy = data.aws_iam_policy_document.fleet.json
}

View File

@ -1,88 +1,9 @@
# Security group for the public internet facing load balancer
resource "aws_security_group" "lb" {
name = "${local.prefix} load balancer"
description = "${local.prefix} Load balancer security group"
vpc_id = module.vpc.vpc_id
}
# Allow traffic from public internet
resource "aws_security_group_rule" "lb-ingress" {
description = "${local.prefix}: allow traffic from public internet"
type = "ingress"
from_port = "443"
to_port = "443"
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"] #tfsec:ignore:aws-vpc-no-public-ingress-sgr
security_group_id = aws_security_group.lb.id
}
resource "aws_security_group_rule" "lb-http-ingress" {
description = "${local.prefix}: allow traffic from public internet"
type = "ingress"
from_port = "80"
to_port = "80"
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"] #tfsec:ignore:aws-vpc-no-public-ingress-sgr
security_group_id = aws_security_group.lb.id
}
resource "aws_security_group_rule" "lb-es" {
description = "${local.prefix}: allow traffic from public internet"
type = "ingress"
from_port = "9200"
to_port = "9200"
protocol = "tcp"
cidr_blocks = ["10.0.0.0/8"]
security_group_id = aws_security_group.lb.id
}
resource "aws_security_group_rule" "lb-es-apm" {
description = "${local.prefix}: allow traffic from public internet"
type = "ingress"
from_port = "8200"
to_port = "8200"
protocol = "tcp"
cidr_blocks = concat(["10.0.0.0/8"], [for ip in module.vpc.nat_public_ips : "${ip}/32"])
security_group_id = aws_security_group.lb.id
}
resource "aws_security_group_rule" "lb-kibana" {
description = "${local.prefix}: allow traffic from public internet"
type = "ingress"
from_port = "5601"
to_port = "5601"
protocol = "tcp"
cidr_blocks = ["10.0.0.0/8"]
security_group_id = aws_security_group.lb.id
}
# Allow outbound traffic
resource "aws_security_group_rule" "lb-egress" {
description = "${local.prefix}: allow all outbound traffic"
type = "egress"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"] #tfsec:ignore:aws-vpc-no-public-egress-sgr
security_group_id = aws_security_group.lb.id
}
# Security group for the backends that run the application.
# Allows traffic from the load balancer
resource "aws_security_group" "backend" {
name = "${local.prefix} backend"
description = "${local.prefix} Backend security group"
vpc_id = module.vpc.vpc_id
vpc_id = data.terraform_remote_state.shared.outputs.vpc.vpc_id
}
@ -94,7 +15,7 @@ resource "aws_security_group_rule" "backend-ingress" {
from_port = "8080"
to_port = "8080"
protocol = "tcp"
source_security_group_id = aws_security_group.lb.id
source_security_group_id = data.terraform_remote_state.shared.outputs.alb_security_group.id
security_group_id = aws_security_group.backend.id
}

View File

@ -1,107 +1,3 @@
resource "aws_alb" "main" {
name = "fleetdm"
internal = false #tfsec:ignore:aws-elb-alb-not-public
security_groups = [aws_security_group.lb.id, aws_security_group.backend.id]
subnets = module.vpc.public_subnets
idle_timeout = 600
drop_invalid_header_fields = true
#checkov:skip=CKV_AWS_150:don't like it
}
resource "aws_alb" "internal" {
name = "fleetdm-internal"
internal = true
security_groups = [aws_security_group.lb.id, aws_security_group.backend.id]
subnets = module.vpc.private_subnets
idle_timeout = 600
drop_invalid_header_fields = true
#checkov:skip=CKV_AWS_150:don't like it
}
resource "aws_alb_listener" "https-fleetdm-internal" {
load_balancer_arn = aws_alb.internal.arn
port = 80
protocol = "HTTP" #tfsec:ignore:aws-elb-http-not-used
default_action {
target_group_arn = aws_alb_target_group.internal.arn
type = "forward"
}
}
resource "aws_alb_target_group" "internal" {
name = "fleetdm-internal"
protocol = "HTTP"
target_type = "ip"
port = "8080"
vpc_id = module.vpc.vpc_id
deregistration_delay = 30
load_balancing_algorithm_type = "least_outstanding_requests"
health_check {
path = "/healthz"
matcher = "200"
timeout = 10
interval = 15
healthy_threshold = 5
unhealthy_threshold = 5
}
depends_on = [aws_alb.main]
}
resource "aws_alb_target_group" "main" {
name = "fleetdm"
protocol = "HTTP"
target_type = "ip"
port = "8080"
vpc_id = module.vpc.vpc_id
deregistration_delay = 30
load_balancing_algorithm_type = "least_outstanding_requests"
health_check {
path = "/healthz"
matcher = "200"
timeout = 10
interval = 15
healthy_threshold = 5
unhealthy_threshold = 5
}
depends_on = [aws_alb.main]
}
resource "aws_alb_listener" "https-fleetdm" {
load_balancer_arn = aws_alb.main.arn
port = 443
protocol = "HTTPS"
ssl_policy = "ELBSecurityPolicy-FS-1-2-Res-2019-08"
certificate_arn = aws_acm_certificate_validation.dogfood_fleetdm_com.certificate_arn
default_action {
target_group_arn = aws_alb_target_group.main.arn
type = "forward"
}
}
resource "aws_alb_listener" "http" {
load_balancer_arn = aws_alb.main.arn
port = "80"
protocol = "HTTP"
default_action {
type = "redirect"
redirect {
port = "443"
protocol = "HTTPS"
status_code = "HTTP_301"
}
}
}
resource "aws_ecs_cluster" "fleet" {
name = "${local.prefix}-backend"
@ -134,26 +30,22 @@ resource "aws_ecs_service" "fleet" {
}
network_configuration {
subnets = module.vpc.private_subnets
subnets = data.terraform_remote_state.shared.outputs.vpc.private_subnets
security_groups = [aws_security_group.backend.id]
}
depends_on = [aws_alb_listener.http, aws_alb_listener.https-fleetdm]
}
resource "aws_cloudwatch_log_group" "backend" { #tfsec:ignore:aws-cloudwatch-log-group-customer-key
name = "fleetdm"
name = local.prefix
retention_in_days = 1
}
data "aws_region" "current" {}
data "aws_secretsmanager_secret" "license" {
name = "/fleet/license"
}
resource "aws_ecs_task_definition" "backend" {
family = "fleet"
family = local.prefix
network_mode = "awsvpc"
requires_compatibilities = ["FARGATE"]
execution_role_arn = aws_iam_role.main.arn
@ -164,7 +56,7 @@ resource "aws_ecs_task_definition" "backend" {
[
{
name = "prometheus-exporter"
image = "917007347864.dkr.ecr.us-east-2.amazonaws.com/prometheus-to-cloudwatch:latest"
image = "${data.terraform_remote_state.shared.outputs.ecr.repository_url}:latest"
essential = false
logConfiguration = {
logDriver = "awslogs"
@ -314,7 +206,7 @@ resource "aws_ecs_task_definition" "backend" {
resource "aws_ecs_task_definition" "migration" {
family = "fleet-migrate"
family = "${local.prefix}-migrate"
network_mode = "awsvpc"
requires_compatibilities = ["FARGATE"]
execution_role_arn = aws_iam_role.main.arn
@ -396,7 +288,7 @@ resource "aws_appautoscaling_target" "ecs_target" {
}
resource "aws_appautoscaling_policy" "ecs_policy_memory" {
name = "fleet-memory-autoscaling"
name = "${local.prefix}-memory-autoscaling"
policy_type = "TargetTrackingScaling"
resource_id = aws_appautoscaling_target.ecs_target.resource_id
scalable_dimension = aws_appautoscaling_target.ecs_target.scalable_dimension
@ -411,7 +303,7 @@ resource "aws_appautoscaling_policy" "ecs_policy_memory" {
}
resource "aws_appautoscaling_policy" "ecs_policy_cpu" {
name = "fleet-cpu-autoscaling"
name = "${local.prefix}-cpu-autoscaling"
policy_type = "TargetTrackingScaling"
resource_id = aws_appautoscaling_target.ecs_target.resource_id
scalable_dimension = aws_appautoscaling_target.ecs_target.scalable_dimension
@ -425,23 +317,3 @@ resource "aws_appautoscaling_policy" "ecs_policy_cpu" {
target_value = 90
}
}
output "fleet_migration_revision" {
value = aws_ecs_task_definition.migration.revision
}
output "fleet_migration_subnets" {
value = jsonencode(aws_ecs_service.fleet.network_configuration[0].subnets)
}
output "fleet_migration_security_groups" {
value = jsonencode(aws_ecs_service.fleet.network_configuration[0].security_groups)
}
output "fleet_ecs_cluster_arn" {
value = aws_ecs_cluster.fleet.arn
}
output "fleet_ecs_cluster_id" {
value = aws_ecs_cluster.fleet.id
}

View File

@ -1,5 +1,5 @@
resource "aws_s3_bucket" "osquery-results" { #tfsec:ignore:aws-s3-encryption-customer-key tfsec:ignore:aws-s3-enable-bucket-logging tfsec:ignore:aws-s3-enable-versioning
bucket = "fleet-loadtest-osquery-logs-archive"
bucket = "${local.prefix}-loadtest-osquery-logs-archive"
acl = "private"
lifecycle_rule {
@ -31,7 +31,7 @@ resource "aws_s3_bucket_public_access_block" "osquery-results" {
}
resource "aws_s3_bucket" "osquery-status" { #tfsec:ignore:aws-s3-encryption-customer-key tfsec:ignore:aws-s3-enable-bucket-logging tfsec:ignore:aws-s3-enable-versioning
bucket = "fleet-loadtest-osquery-status-archive"
bucket = "${local.prefix}-loadtest-osquery-status-archive"
acl = "private"
lifecycle_rule {
@ -90,12 +90,12 @@ data "aws_iam_policy_document" "osquery_status_policy_doc" {
}
resource "aws_iam_policy" "firehose-results" {
name = "osquery_results_firehose_policy"
name = "${local.prefix}-osquery_results_firehose_policy"
policy = data.aws_iam_policy_document.osquery_results_policy_doc.json
}
resource "aws_iam_policy" "firehose-status" {
name = "osquery_status_firehose_policy"
name = "${local.prefix}-osquery_status_firehose_policy"
policy = data.aws_iam_policy_document.osquery_status_policy_doc.json
}
@ -129,7 +129,7 @@ data "aws_iam_policy_document" "osquery_firehose_assume_role" {
}
resource "aws_kinesis_firehose_delivery_stream" "osquery_results" {
name = "osquery_results"
name = "${local.prefix}-osquery_results"
destination = "s3"
s3_configuration {
@ -139,7 +139,7 @@ resource "aws_kinesis_firehose_delivery_stream" "osquery_results" {
}
resource "aws_kinesis_firehose_delivery_stream" "osquery_status" {
name = "osquery_status"
name = "${local.prefix}-osquery_status"
destination = "s3"
s3_configuration {

View File

@ -0,0 +1,69 @@
# Entrypoint for a per-workspace loadtest environment: AWS/docker/git
# providers, pinned provider versions, the S3 remote-state backend, and a
# reference to the shared (workspace-independent) infrastructure state.
provider "aws" {
region = "us-east-2"
default_tags {
tags = {
environment = "loadtest"
terraform = "https://github.com/fleetdm/fleet/tree/main/tools/terraform"
# NOTE(review): tagged state = "local" while the s3 backend below is in
# use — confirm whether this tag value is stale.
state = "local"
}
}
}
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 3.74.0"
}
# docker provider is used to build/push images to ECR.
docker = {
source = "kreuzwerker/docker"
version = "~> 2.16.0"
}
git = {
source = "paultyng/git"
version = "~> 0.1.0"
}
}
# Workspace-aware remote state: each `terraform workspace` gets its own
# state object under the "loadtesting" prefix in this bucket.
backend "s3" {
bucket = "fleet-terraform-state20220408141538466600000002"
key = "loadtesting/loadtesting/terraform.tfstate" # This should be set to account_alias/unique_key/terraform.tfstate
workspace_key_prefix = "loadtesting" # This should be set to the account alias
region = "us-east-2"
encrypt = true
kms_key_id = "9f98a443-ffd7-4dbe-a9c3-37df89b2e42a"
dynamodb_table = "tf-remote-state-lock"
role_arn = "arn:aws:iam::353365949058:role/terraform-loadtesting"
}
}
data "aws_caller_identity" "current" {}
data "aws_region" "current" {}
# Authenticate the docker provider against this account's ECR registry.
# NOTE(review): data.aws_ecr_authorization_token.token is presumably declared
# in another file of this module — confirm.
provider "docker" {
# Configuration options
registry_auth {
address = "${data.aws_caller_identity.current.account_id}.dkr.ecr.us-east-2.amazonaws.com"
username = data.aws_ecr_authorization_token.token.user_name
password = data.aws_ecr_authorization_token.token.password
}
}
provider "git" {}
data "git_repository" "tf" {
path = "${path.module}/../../../"
}
# Shared infrastructure (VPC, ALBs, ECR, enroll secret) provisioned once by
# the companion "shared" module; its outputs are consumed throughout this
# environment's resources.
data "terraform_remote_state" "shared" {
backend = "s3"
config = {
bucket = "fleet-terraform-state20220408141538466600000002"
key = "loadtesting/loadtesting/shared/terraform.tfstate" # This should be set to account_alias/unique_key/terraform.tfstate
workspace_key_prefix = "loadtesting" # This should be set to the account alias
region = "us-east-2"
encrypt = true
kms_key_id = "9f98a443-ffd7-4dbe-a9c3-37df89b2e42a"
dynamodb_table = "tf-remote-state-lock"
role_arn = "arn:aws:iam::353365949058:role/terraform-loadtesting"
}
}

View File

@ -8,13 +8,13 @@ resource "aws_ecs_service" "loadtest" {
deployment_maximum_percent = 200
network_configuration {
subnets = module.vpc.private_subnets
subnets = data.terraform_remote_state.shared.outputs.vpc.private_subnets
security_groups = [aws_security_group.backend.id]
}
}
resource "aws_ecs_task_definition" "loadtest" {
family = "fleet-loadtest"
family = "${local.prefix}-loadtest"
network_mode = "awsvpc"
requires_compatibilities = ["FARGATE"]
execution_role_arn = aws_iam_role.main.arn
@ -52,7 +52,7 @@ resource "aws_ecs_task_definition" "loadtest" {
"go", "run", "/go/fleet/cmd/osquery-perf/agent.go",
"-enroll_secret", data.aws_secretsmanager_secret_version.enroll_secret.secret_string,
"-host_count", "5000",
"-server_url", "http://${aws_alb.internal.dns_name}",
"-server_url", "http://${data.terraform_remote_state.shared.outputs.alb-internal.dns_name}",
"-node_key_file", "nodekeys",
"--policy_pass_prob", "0.5",
"--start_period", "5m",
@ -65,14 +65,5 @@ resource "aws_ecs_task_definition" "loadtest" {
}
data "aws_secretsmanager_secret_version" "enroll_secret" {
secret_id = aws_secretsmanager_secret.enroll_secret.id
}
resource "aws_secretsmanager_secret" "enroll_secret" {
name = "/fleet/loadtest/enroll/${random_pet.enroll_secret_postfix.id}"
kms_key_id = aws_kms_key.main.id
}
resource "random_pet" "enroll_secret_postfix" {
length = 1
secret_id = data.terraform_remote_state.shared.outputs.enroll_secret.id
}

View File

@ -1,8 +1,6 @@
locals {
name = "fleetdm"
prefix = "fleet"
domain_fleetdm = "loadtest.fleetdm.com"
domain_fleetctl = "loadtest.fleetctl.com"
name = "fleetdm-${terraform.workspace}"
prefix = "fleet-${terraform.workspace}"
additional_env_vars = [for k, v in merge({
"FLEET_VULNERABILITIES_DATABASES_PATH" : "/home/fleet"
"FLEET_OSQUERY_ENABLE_ASYNC_HOST_PROCESSING" : "false"
@ -11,7 +9,7 @@ locals {
"FLEET_LOGGING_TRACING_TYPE" : "elasticapm"
"ELASTIC_APM_SERVER_URL" : "https://loadtest.fleetdm.com:8200"
"ELASTIC_APM_SERVICE_NAME" : "fleet"
"ELASTIC_APM_ENVIRONMENT" : "loadtest"
"ELASTIC_APM_ENVIRONMENT" : "${terraform.workspace}"
"ELASTIC_APM_TRANSACTION_SAMPLE_RATE" : "0.004"
"ELASTIC_APM_SERVICE_VERSION" : "${var.tag}-${split(":", data.docker_registry_image.dockerhub.sha256_digest)[1]}"
}, var.fleet_config) : { name = k, value = v }]

View File

@ -1,49 +0,0 @@
variable "region" {
default = "us-east-2"
}
provider "aws" {
region = var.region
default_tags {
tags = {
environment = "loadtest"
terraform = "https://github.com/fleetdm/fleet/tree/main/tools/terraform"
state = "local"
}
}
}
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 3.74.0"
}
docker = {
source = "kreuzwerker/docker"
version = "~> 2.16.0"
}
git = {
source = "paultyng/git"
version = "~> 0.1.0"
}
}
backend "s3" {}
}
data "aws_caller_identity" "current" {}
provider "docker" {
# Configuration options
registry_auth {
address = "${data.aws_caller_identity.current.account_id}.dkr.ecr.us-east-2.amazonaws.com"
username = data.aws_ecr_authorization_token.token.user_name
password = data.aws_ecr_authorization_token.token.password
}
}
provider "git" {}
data "git_repository" "tf" {
path = "${path.module}/../../../"
}

View File

@ -1,15 +1,23 @@
output "nameservers_fleetctl" {
value = aws_route53_zone.dogfood_fleetctl_com.name_servers
}
output "nameservers_fleetdm" {
value = aws_route53_zone.dogfood_fleetdm_com.name_servers
}
output "backend_security_group" {
value = aws_security_group.backend.arn
}
output "private_subnets" {
value = module.vpc.private_subnet_arns
}
value = data.terraform_remote_state.shared.outputs.vpc.private_subnet_arns
}
output "fleet_migration_revision" {
value = aws_ecs_task_definition.migration.revision
}
output "fleet_migration_subnets" {
value = jsonencode(aws_ecs_service.fleet.network_configuration[0].subnets)
}
output "fleet_migration_security_groups" {
value = jsonencode(aws_ecs_service.fleet.network_configuration[0].security_groups)
}
output "fleet_ecs_cluster_arn" {
value = aws_ecs_cluster.fleet.arn
}
output "fleet_ecs_cluster_id" {
value = aws_ecs_cluster.fleet.id
}

View File

@ -1,62 +0,0 @@
resource "aws_route53_zone" "dogfood_fleetctl_com" {
name = local.domain_fleetctl
}
resource "aws_route53_zone" "dogfood_fleetdm_com" {
name = local.domain_fleetdm
}
resource "aws_route53_record" "dogfood_fleetctl_com" {
zone_id = aws_route53_zone.dogfood_fleetctl_com.zone_id
name = local.domain_fleetctl
type = "A"
alias {
name = aws_alb.main.dns_name
zone_id = aws_alb.main.zone_id
evaluate_target_health = false
}
}
resource "aws_route53_record" "dogfood_fleetdm_com" {
zone_id = aws_route53_zone.dogfood_fleetdm_com.zone_id
name = local.domain_fleetdm
type = "A"
alias {
name = aws_alb.main.dns_name
zone_id = aws_alb.main.zone_id
evaluate_target_health = false
}
}
resource "aws_acm_certificate" "dogfood_fleetdm_com" {
domain_name = local.domain_fleetdm
validation_method = "DNS"
lifecycle {
create_before_destroy = true
}
}
resource "aws_route53_record" "dogfood_fleetdm_com_validation" {
for_each = {
for dvo in aws_acm_certificate.dogfood_fleetdm_com.domain_validation_options : dvo.domain_name => {
name = dvo.resource_record_name
record = dvo.resource_record_value
type = dvo.resource_record_type
}
}
allow_overwrite = true
name = each.value.name
records = [each.value.record]
ttl = 60
type = each.value.type
zone_id = aws_route53_zone.dogfood_fleetdm_com.zone_id
}
resource "aws_acm_certificate_validation" "dogfood_fleetdm_com" {
certificate_arn = aws_acm_certificate.dogfood_fleetdm_com.arn
validation_record_fqdns = [for record in aws_route53_record.dogfood_fleetdm_com_validation : record.fqdn]
}

View File

@ -21,11 +21,11 @@ module "aurora_mysql" { #tfsec:ignore:aws-rds-enable-performance-insights-encryp
source = "terraform-aws-modules/rds-aurora/aws"
version = "5.3.0"
name = "${local.name}-mysql-iam"
name = "${local.name}-mysql"
engine = "aurora-mysql"
engine_version = "5.7.mysql_aurora.2.10.0"
instance_type = "db.r6g.large"
instance_type_replica = "db.r6g.large"
instance_type = "db.r6g.4xlarge"
instance_type_replica = "db.r6g.4xlarge"
iam_database_authentication_enabled = true
storage_encrypted = true
@ -37,11 +37,11 @@ module "aurora_mysql" { #tfsec:ignore:aws-rds-enable-performance-insights-encryp
performance_insights_enabled = true
enabled_cloudwatch_logs_exports = ["slowquery"]
vpc_id = module.vpc.vpc_id
vpc_id = data.terraform_remote_state.shared.outputs.vpc.vpc_id
vpc_security_group_ids = [aws_security_group.backend.id]
subnets = module.vpc.database_subnets
subnets = data.terraform_remote_state.shared.outputs.vpc.database_subnets
create_security_group = true
allowed_cidr_blocks = module.vpc.private_subnets_cidr_blocks
allowed_cidr_blocks = data.terraform_remote_state.shared.outputs.vpc.private_subnets_cidr_blocks
replica_count = var.scale_down ? 0 : 1
replica_scale_enabled = true
@ -50,7 +50,7 @@ module "aurora_mysql" { #tfsec:ignore:aws-rds-enable-performance-insights-encryp
snapshot_identifier = "arn:aws:rds:us-east-2:917007347864:cluster-snapshot:cleaned"
monitoring_interval = 60
iam_role_name = "${local.name}-rds-enhanced-monitoring"
iam_role_name = "${local.name}-rds"
iam_role_use_name_prefix = true
iam_role_description = "${local.name} RDS enhanced monitoring IAM role"
iam_role_path = "/autoscaling/"

View File

@ -10,7 +10,7 @@ If you require changes beyond what's described here, contact @zwinnerman-fleetdm.
### Running migrations
After applying terraform with the commands above:
`aws ecs run-task --region us-east-2 --cluster fleet-backend --task-definition fleet-migrate:"$(terraform output -raw fleet_migration_revision)" --launch-type FARGATE --network-configuration "awsvpcConfiguration={subnets="$(terraform output -raw fleet_migration_subnets)",securityGroups="$(terraform output -raw fleet_migration_security_groups)"}"`
`aws ecs run-task --region us-east-2 --cluster fleet-"$(terraform workspace show)"-backend --task-definition fleet-"$(terraform workspace show)"-migrate:"$(terraform output -raw fleet_migration_revision)" --launch-type FARGATE --network-configuration "awsvpcConfiguration={subnets="$(terraform output -raw fleet_migration_subnets)",securityGroups="$(terraform output -raw fleet_migration_security_groups)"}"`
### Running a loadtest
We run simulated hosts in containers of 5,000 at a time. Once the infrastructure is running, you can run the following command:

View File

@ -2,9 +2,9 @@ resource "aws_elasticache_replication_group" "default" {
availability_zones = ["us-east-2a", "us-east-2b", "us-east-2c"]
engine = "redis"
parameter_group_name = aws_elasticache_parameter_group.default.id
subnet_group_name = module.vpc.elasticache_subnet_group_name
subnet_group_name = data.terraform_remote_state.shared.outputs.vpc.elasticache_subnet_group_name
security_group_ids = [aws_security_group.redis.id, aws_security_group.backend.id]
replication_group_id = "fleetdm-redis"
replication_group_id = "${local.prefix}-redis"
number_cache_clusters = 3
node_type = "cache.m6g.large"
engine_version = "5.0.6"
@ -14,12 +14,12 @@ resource "aws_elasticache_replication_group" "default" {
at_rest_encryption_enabled = false #tfsec:ignore:aws-elasticache-enable-at-rest-encryption
transit_encryption_enabled = false #tfsec:ignore:aws-elasticache-enable-in-transit-encryption
apply_immediately = true
replication_group_description = "fleetdm-redis"
replication_group_description = "${local.prefix}-redis"
}
resource "aws_elasticache_parameter_group" "default" { #tfsec:ignore:aws-vpc-add-description-to-security-group-rule
name = "fleetdm-redis-foobar"
name = "${local.prefix}-redis"
family = "redis5.0"
parameter {
@ -34,7 +34,7 @@ resource "aws_elasticache_parameter_group" "default" { #tfsec:ignore:aws-vpc-add
resource "aws_security_group" "redis" { #tfsec:ignore:aws-cloudwatch-log-group-customer-key tfsec:ignore:aws-vpc-add-description-to-security-group
name = local.security_group_name
vpc_id = module.vpc.vpc_id
vpc_id = data.terraform_remote_state.shared.outputs.vpc.vpc_id
}
locals {
@ -46,7 +46,7 @@ resource "aws_security_group_rule" "ingress" { #tfsec:ignore:aws-vpc-add-descrip
from_port = "6379"
to_port = "6379"
protocol = "tcp"
cidr_blocks = module.vpc.private_subnets_cidr_blocks
cidr_blocks = data.terraform_remote_state.shared.outputs.vpc.private_subnets_cidr_blocks
security_group_id = aws_security_group.redis.id
}

View File

@ -0,0 +1,147 @@
# Shared, internet-facing ALB for all loadtest workspaces. Per-workspace
# listener rules (defined in each environment) route host headers to the
# right target group.
resource "aws_alb" "main" {
name = "fleetdm"
internal = false #tfsec:ignore:aws-elb-alb-not-public
security_groups = [aws_security_group.lb.id]
subnets = module.vpc.public_subnets
# Long idle timeout to keep slow loadtest connections alive.
idle_timeout = 600
drop_invalid_header_fields = true
#checkov:skip=CKV_AWS_150:don't like it
}
# Shared public HTTPS listener. Per-workspace aws_lb_listener_rule resources
# forward matching host headers to their environment; any request matching
# no rule falls through to this fixed 404 response.
resource "aws_alb_listener" "https-fleetdm" {
  load_balancer_arn = aws_alb.main.arn
  port              = 443
  protocol          = "HTTPS"
  ssl_policy        = "ELBSecurityPolicy-FS-1-2-Res-2019-08"
  certificate_arn   = aws_acm_certificate_validation.wildcard.certificate_arn
  default_action {
    type = "fixed-response"
    fixed_response {
      content_type = "text/plain"
      # Fixed: the hosted zone is loadtest.fleetdm.com (not loadtesting.*),
      # so point users at a hostname that actually resolves.
      message_body = "moved to subdomains, try https://default.loadtest.fleetdm.com"
      status_code  = "404"
    }
  }
}
# Plain-HTTP listener that permanently redirects all traffic to HTTPS.
resource "aws_alb_listener" "http" {
load_balancer_arn = aws_alb.main.arn
port = "80"
protocol = "HTTP"
default_action {
type = "redirect"
redirect {
port = "443"
protocol = "HTTPS"
status_code = "HTTP_301"
}
}
}
# Shared internal ALB (private subnets) used by in-VPC clients such as the
# loadtest agents; per-workspace rules route host headers to environments.
resource "aws_alb" "internal" {
name = "fleetdm-internal"
internal = true
security_groups = [aws_security_group.lb.id]
subnets = module.vpc.private_subnets
idle_timeout = 600
drop_invalid_header_fields = true
#checkov:skip=CKV_AWS_150:don't like it
}
# Internal listener (plain HTTP on 80, VPC-only traffic). Like the public
# HTTPS listener, it serves a fixed 404 when no workspace rule matches.
resource "aws_alb_listener" "https-fleetdm-internal" {
  load_balancer_arn = aws_alb.internal.arn
  port              = 80
  protocol          = "HTTP" #tfsec:ignore:aws-elb-http-not-used
  default_action {
    type = "fixed-response"
    fixed_response {
      content_type = "text/plain"
      # Fixed: the hosted zone is loadtest.fleetdm.com (not loadtesting.*),
      # so point users at a hostname that actually resolves.
      message_body = "moved to subdomains, try https://default.loadtest.fleetdm.com"
      status_code  = "404"
    }
  }
}
# Security group for the public internet facing load balancer
resource "aws_security_group" "lb" {
name = "${local.prefix} load balancer"
description = "${local.prefix} Load balancer security group"
vpc_id = module.vpc.vpc_id
}
# Allow HTTPS (443) traffic from the public internet to the load balancer.
resource "aws_security_group_rule" "lb-ingress" {
description = "${local.prefix}: allow traffic from public internet"
type = "ingress"
from_port = "443"
to_port = "443"
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"] #tfsec:ignore:aws-vpc-no-public-ingress-sgr
security_group_id = aws_security_group.lb.id
}
# Allow HTTP (80) from the public internet — served by the redirect-to-HTTPS
# listener on the ALB.
resource "aws_security_group_rule" "lb-http-ingress" {
description = "${local.prefix}: allow traffic from public internet"
type = "ingress"
from_port = "80"
to_port = "80"
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"] #tfsec:ignore:aws-vpc-no-public-ingress-sgr
security_group_id = aws_security_group.lb.id
}
# Allow Elasticsearch (9200) from inside the private network only.
resource "aws_security_group_rule" "lb-es" {
  # Fixed: the description previously claimed public-internet access, but the
  # CIDR restricts this rule to RFC1918 10.0.0.0/8.
  description       = "${local.prefix}: allow Elasticsearch traffic from internal network"
  type              = "ingress"
  from_port         = "9200"
  to_port           = "9200"
  protocol          = "tcp"
  cidr_blocks       = ["10.0.0.0/8"]
  security_group_id = aws_security_group.lb.id
}
# Allow Elastic APM (8200) from the private network plus the VPC's NAT
# egress IPs (so traffic NATed out and back in is still accepted).
resource "aws_security_group_rule" "lb-es-apm" {
  # Fixed: the description previously claimed public-internet access, but the
  # CIDRs are limited to 10.0.0.0/8 and this VPC's NAT public IPs.
  description       = "${local.prefix}: allow Elastic APM traffic from internal network and NAT IPs"
  type              = "ingress"
  from_port         = "8200"
  to_port           = "8200"
  protocol          = "tcp"
  cidr_blocks       = concat(["10.0.0.0/8"], [for ip in module.vpc.nat_public_ips : "${ip}/32"])
  security_group_id = aws_security_group.lb.id
}
# Allow Kibana (5601) from inside the private network only.
resource "aws_security_group_rule" "lb-kibana" {
  # Fixed: the description previously claimed public-internet access, but the
  # CIDR restricts this rule to RFC1918 10.0.0.0/8.
  description       = "${local.prefix}: allow Kibana traffic from internal network"
  type              = "ingress"
  from_port         = "5601"
  to_port           = "5601"
  protocol          = "tcp"
  cidr_blocks       = ["10.0.0.0/8"]
  security_group_id = aws_security_group.lb.id
}
# Allow all outbound traffic from the load balancer security group.
resource "aws_security_group_rule" "lb-egress" {
description = "${local.prefix}: allow all outbound traffic"
type = "egress"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"] #tfsec:ignore:aws-vpc-no-public-egress-sgr
security_group_id = aws_security_group.lb.id
}

View File

@ -0,0 +1,19 @@
# Shared KMS key (with rotation) used to encrypt the ECR repository below.
# random_pet.main keeps the description unique across recreations.
resource "aws_kms_key" "main" {
description = "${local.prefix}-${random_pet.main.id}"
deletion_window_in_days = 10
enable_key_rotation = true
}
# Shared ECR repo for the prometheus-to-cloudwatch sidecar image, exported
# to the per-workspace environments via this module's outputs. Tags are
# immutable and images are scanned on push; encrypted with the KMS key above.
resource "aws_ecr_repository" "prometheus-to-cloudwatch" {
name = "prometheus-to-cloudwatch"
image_tag_mutability = "IMMUTABLE"
image_scanning_configuration {
scan_on_push = true
}
encryption_configuration {
encryption_type = "KMS"
kms_key = aws_kms_key.main.arn
}
}

View File

@ -69,11 +69,11 @@ resource "aws_security_group_rule" "es-egress" {
resource "aws_autoscaling_group" "elasticstack" {
name = "${local.prefix}-elasticstack"
max_size = var.scale_down ? 0 : 1
min_size = var.scale_down ? 0 : 1
max_size = 1
min_size = 1
health_check_grace_period = 3000
health_check_type = "ELB"
desired_capacity = var.scale_down ? 0 : 1
desired_capacity = 1
force_delete = true
vpc_zone_identifier = module.vpc.private_subnets
target_group_arns = [aws_lb_target_group.elasticsearch.arn, aws_lb_target_group.elasticapm.arn, aws_lb_target_group.kibana.arn]
@ -104,7 +104,7 @@ resource "aws_autoscaling_group" "elasticstack" {
tag {
key = "ansible_playbook_path"
value = "tools/loadtesting/terraform/elasticsearch_ansible"
value = "tools/loadtesting/terraform/shared/elasticsearch_ansible"
propagate_at_launch = true
}
@ -213,7 +213,7 @@ resource "aws_alb_listener" "elasticsearch" {
port = 9200
protocol = "HTTPS"
ssl_policy = "ELBSecurityPolicy-FS-1-2-Res-2019-08"
certificate_arn = aws_acm_certificate_validation.dogfood_fleetdm_com.certificate_arn
certificate_arn = aws_acm_certificate_validation.fleetdm_com.certificate_arn
default_action {
target_group_arn = aws_lb_target_group.elasticsearch.arn
@ -236,7 +236,7 @@ resource "aws_alb_listener" "elasticapm" {
port = 8200
protocol = "HTTPS"
ssl_policy = "ELBSecurityPolicy-FS-1-2-Res-2019-08"
certificate_arn = aws_acm_certificate_validation.dogfood_fleetdm_com.certificate_arn
certificate_arn = aws_acm_certificate_validation.fleetdm_com.certificate_arn
default_action {
target_group_arn = aws_lb_target_group.elasticapm.arn
@ -256,7 +256,7 @@ resource "aws_alb_listener" "kibana" {
port = 5601
protocol = "HTTPS"
ssl_policy = "ELBSecurityPolicy-FS-1-2-Res-2019-08"
certificate_arn = aws_acm_certificate_validation.dogfood_fleetdm_com.certificate_arn
certificate_arn = aws_acm_certificate_validation.fleetdm_com.certificate_arn
default_action {
target_group_arn = aws_lb_target_group.kibana.arn

View File

@ -0,0 +1,4 @@
# Shared osquery enroll secret, consumed by the per-workspace loadtest
# environments through remote state. The random_pet suffix avoids name
# collisions with previously-deleted secrets pending their recovery window.
resource "aws_secretsmanager_secret" "enroll_secret" {
name = "/fleet/loadtest/enroll/${random_pet.main.id}"
kms_key_id = aws_kms_key.main.id
}

View File

@ -0,0 +1,47 @@
# Entrypoint for the *shared* loadtest infrastructure (VPC, ALBs, ECR, DNS,
# enroll secret) that all workspace environments consume via remote state.
provider "aws" {
region = "us-east-2"
default_tags {
tags = {
environment = "loadtest"
terraform = "https://github.com/fleetdm/fleet/tree/main/tools/terraform/shared"
# NOTE(review): tagged state = "local" while the s3 backend below is in
# use — confirm whether this tag value is stale.
state = "local"
}
}
}
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 3.74.0"
}
docker = {
source = "kreuzwerker/docker"
version = "~> 2.16.0"
}
git = {
source = "paultyng/git"
version = "~> 0.1.0"
}
}
# Single shared state under .../shared/terraform.tfstate — this module is
# applied once, not per workspace.
backend "s3" {
bucket = "fleet-terraform-state20220408141538466600000002"
key = "loadtesting/loadtesting/shared/terraform.tfstate" # This should be set to account_alias/unique_key/terraform.tfstate
workspace_key_prefix = "loadtesting" # This should be set to the account alias
region = "us-east-2"
encrypt = true
kms_key_id = "9f98a443-ffd7-4dbe-a9c3-37df89b2e42a"
dynamodb_table = "tf-remote-state-lock"
role_arn = "arn:aws:iam::353365949058:role/terraform-loadtesting"
}
}
data "aws_caller_identity" "current" {}
data "git_repository" "tf" {
path = "${path.module}/../../../../"
}
# Random single-word suffix used to build unique names (KMS description,
# secret path) within this module.
resource "random_pet" "main" {
length = 1
}

View File

@ -0,0 +1,3 @@
locals {
# Naming prefix for the shared (workspace-independent) resources.
# NOTE(review): per-workspace environments use "fleet-${terraform.workspace}"
# while shared resources use plain "fleet" — intentional, since this module
# is applied once, but confirm if names must stay distinct from workspaces.
prefix = "fleet"
}

View File

@ -0,0 +1,35 @@
# Outputs consumed by the per-workspace environments via
# `data.terraform_remote_state.shared`. Whole resources/modules are exported
# so consumers can reference any attribute (arn, id, dns_name, ...).
output "alb_security_group" {
value = aws_security_group.lb
}
output "alb" {
value = aws_alb.main
}
output "alb-internal" {
value = aws_alb.internal
}
# Public and internal listeners that workspaces attach their
# aws_lb_listener_rule resources to.
output "alb-listener" {
value = aws_alb_listener.https-fleetdm
}
output "alb-listener-internal" {
value = aws_alb_listener.https-fleetdm-internal
}
output "vpc" {
value = module.vpc
}
# Shared prometheus-to-cloudwatch ECR repository and the KMS key that
# encrypts it (workspaces need the key ARN for ECR pull permissions).
output "ecr" {
value = aws_ecr_repository.prometheus-to-cloudwatch
}
output "ecr-kms" {
value = aws_kms_key.main
}
output "enroll_secret" {
value = aws_secretsmanager_secret.enroll_secret
}

View File

@ -0,0 +1,106 @@
# Public hosted zones for the loadtest environments. fleetdm_com carries the
# per-workspace subdomains (via the wildcard record below).
resource "aws_route53_zone" "fleetctl_com" {
name = "loadtest.fleetctl.com"
}
resource "aws_route53_zone" "fleetdm_com" {
name = "loadtest.fleetdm.com"
}
# Apex A records for both zones, aliased to the shared public ALB.
resource "aws_route53_record" "fleetctl_com" {
zone_id = aws_route53_zone.fleetctl_com.zone_id
name = aws_route53_zone.fleetctl_com.name
type = "A"
alias {
name = aws_alb.main.dns_name
zone_id = aws_alb.main.zone_id
evaluate_target_health = false
}
}
resource "aws_route53_record" "fleetdm_com" {
zone_id = aws_route53_zone.fleetdm_com.zone_id
name = aws_route53_zone.fleetdm_com.name
type = "A"
alias {
name = aws_alb.main.dns_name
zone_id = aws_alb.main.zone_id
evaluate_target_health = false
}
}
# Wildcard record covering every workspace subdomain
# (<workspace>.loadtest.fleetdm.com); host-based listener rules on the ALB
# then route each subdomain to its environment.
resource "aws_route53_record" "wildcard" {
zone_id = aws_route53_zone.fleetdm_com.zone_id
name = "*.${aws_route53_zone.fleetdm_com.name}"
type = "A"
alias {
name = aws_alb.main.dns_name
zone_id = aws_alb.main.zone_id
evaluate_target_health = false
}
}
# DNS-validated wildcard certificate for *.loadtest.fleetdm.com with the
# zone apex as a SAN; used by the shared public HTTPS listener.
# create_before_destroy lets ACM rotate the cert without downtime.
resource "aws_acm_certificate" "wildcard" {
domain_name = aws_route53_record.wildcard.name
subject_alternative_names = [aws_route53_record.fleetdm_com.name]
validation_method = "DNS"
lifecycle {
create_before_destroy = true
}
}
# One validation CNAME per domain_validation_option, keyed by domain name.
resource "aws_route53_record" "wildcard_validation" {
for_each = {
for dvo in aws_acm_certificate.wildcard.domain_validation_options : dvo.domain_name => {
name = dvo.resource_record_name
record = dvo.resource_record_value
type = dvo.resource_record_type
}
}
allow_overwrite = true
name = each.value.name
records = [each.value.record]
ttl = 60
type = each.value.type
zone_id = aws_route53_zone.fleetdm_com.zone_id
}
# Blocks until ACM has seen all validation records and issued the cert.
resource "aws_acm_certificate_validation" "wildcard" {
certificate_arn = aws_acm_certificate.wildcard.arn
validation_record_fqdns = [for record in aws_route53_record.wildcard_validation : record.fqdn]
}
# Separate DNS-validated certificate for the bare loadtest.fleetdm.com apex.
# NOTE(review): presumably consumed by listeners referencing
# aws_acm_certificate_validation.fleetdm_com elsewhere (e.g. the Elastic
# stack listeners) — confirm before removing.
resource "aws_acm_certificate" "fleetdm_com" {
domain_name = aws_route53_record.fleetdm_com.name
validation_method = "DNS"
lifecycle {
create_before_destroy = true
}
}
# Validation CNAMEs for the apex certificate, same pattern as the wildcard.
resource "aws_route53_record" "fleetdm_com_validation" {
for_each = {
for dvo in aws_acm_certificate.fleetdm_com.domain_validation_options : dvo.domain_name => {
name = dvo.resource_record_name
record = dvo.resource_record_value
type = dvo.resource_record_type
}
}
allow_overwrite = true
name = each.value.name
records = [each.value.record]
ttl = 60
type = each.value.type
zone_id = aws_route53_zone.fleetdm_com.zone_id
}
# Blocks until the apex certificate is issued.
resource "aws_acm_certificate_validation" "fleetdm_com" {
certificate_arn = aws_acm_certificate.fleetdm_com.arn
validation_record_fqdns = [for record in aws_route53_record.fleetdm_com_validation : record.fqdn]
}