terraform reference arch (#1761)

* terraform initial architecture
* added ecs autoscaling and https alb listener
* add r53 hosted zone, dns cert verification, http -> https redirect
* fleet dogfood env dogfood.fleetdm.com now configured, added license key, added readreplica settings, enabled vuln processing
* add comment about using RDS serverless option
This commit is contained in:
Benjamin Edwards 2021-09-21 14:19:19 -04:00 committed by GitHub
parent 1f324339f8
commit fd4c90eddf
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
14 changed files with 905 additions and 1 deletions

8
.gitignore vendored
View File

@ -50,5 +50,11 @@ backup.sql.gz
# committing a package-lock.json. Fleet app uses Yarn with yarn.lock.
package-lock.json
# infra
.terraform
.terraform.tfstate*
.terraform.lock*
terraform.tfstate*
# generated installers
orbit-osquery*
orbit-osquery*

View File

@ -0,0 +1 @@
1.0.4

View File

@ -0,0 +1,54 @@
# Runtime permissions for the Fleet application: publish CloudWatch metrics,
# read its secrets, and ship osquery logs to the Firehose delivery stream.
data "aws_iam_policy_document" "fleet" {
  statement {
    effect = "Allow"
    # cloudwatch:PutMetricData does not support resource-level scoping,
    # so "*" is the narrowest resource possible for this action.
    actions   = ["cloudwatch:PutMetricData"]
    resources = ["*"]
  }
  statement {
    effect  = "Allow"
    actions = ["secretsmanager:GetSecretValue"]
    # DB master password (managed in rds.tf) and the Fleet license key
    # (a pre-existing secret looked up in ecs.tf).
    resources = [aws_secretsmanager_secret.database_password_secret.arn, data.aws_secretsmanager_secret.license.arn]
  }
  statement {
    effect = "Allow"
    actions = [
      "firehose:DescribeDeliveryStream",
      "firehose:PutRecord",
      "firehose:PutRecordBatch",
    ]
    resources = [aws_kinesis_firehose_delivery_stream.osquery_logs.arn]
  }
}
# Trust policy allowing the ECS service and ECS tasks to assume the role below.
data "aws_iam_policy_document" "assume_role" {
  statement {
    effect  = "Allow"
    actions = ["sts:AssumeRole"]
    principals {
      identifiers = ["ecs.amazonaws.com", "ecs-tasks.amazonaws.com"]
      type        = "Service"
    }
  }
}
# Single role used both as the ECS task execution role (image pull, log
# delivery, secret injection) and as the task role (the app's own AWS calls).
# NOTE(review): separate execution/task roles would be tighter least-privilege.
resource "aws_iam_role" "main" {
  name               = "fleetdm-role"
  assume_role_policy = data.aws_iam_policy_document.assume_role.json
}

# AWS-managed policy the ECS agent needs when acting as the execution role.
resource "aws_iam_role_policy_attachment" "role_attachment" {
  policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy"
  role       = aws_iam_role.main.name
}

# Customer-managed policy carrying the application permissions defined above.
resource "aws_iam_policy" "main" {
  name   = "fleet-iam-policy"
  policy = data.aws_iam_policy_document.fleet.json
}

resource "aws_iam_role_policy_attachment" "attachment" {
  policy_arn = aws_iam_policy.main.arn
  role       = aws_iam_role.main.name
}

View File

@ -0,0 +1,78 @@
# Security group for the public internet facing load balancer
resource "aws_security_group" "lb" {
  name        = "${var.prefix} load balancer"
  description = "${var.prefix} Load balancer security group"
  vpc_id      = module.vpc.vpc_id
}

# Allow HTTPS (443) from the public internet
resource "aws_security_group_rule" "lb-ingress" {
  description       = "${var.prefix}: allow traffic from public internet"
  type              = "ingress"
  from_port         = "443"
  to_port           = "443"
  protocol          = "tcp"
  cidr_blocks       = ["0.0.0.0/0"]
  security_group_id = aws_security_group.lb.id
}

# Allow HTTP (80) from the public internet; the HTTP listener in ecs.tf
# only issues a 301 redirect to HTTPS.
resource "aws_security_group_rule" "lb-http-ingress" {
  description       = "${var.prefix}: allow traffic from public internet"
  type              = "ingress"
  from_port         = "80"
  to_port           = "80"
  protocol          = "tcp"
  cidr_blocks       = ["0.0.0.0/0"]
  security_group_id = aws_security_group.lb.id
}

# Allow all outbound traffic from the load balancer
resource "aws_security_group_rule" "lb-egress" {
  description       = "${var.prefix}: allow all outbound traffic"
  type              = "egress"
  from_port         = 0
  to_port           = 0
  protocol          = "-1"
  cidr_blocks       = ["0.0.0.0/0"]
  security_group_id = aws_security_group.lb.id
}
# Security group for the backends that run the application.
# Allows traffic from the load balancer
resource "aws_security_group" "backend" {
  name        = "${var.prefix} backend"
  description = "${var.prefix} Backend security group"
  vpc_id      = module.vpc.vpc_id
}

# Allow traffic from the load balancer to the backends on the Fleet
# container port (8080); scoped to the LB security group, not a CIDR.
resource "aws_security_group_rule" "backend-ingress" {
  description              = "${var.prefix}: allow traffic from load balancer"
  type                     = "ingress"
  from_port                = "8080"
  to_port                  = "8080"
  protocol                 = "tcp"
  source_security_group_id = aws_security_group.lb.id
  security_group_id        = aws_security_group.backend.id
}

# Allow all outbound traffic from the backends (image pulls, MySQL, Redis,
# AWS APIs via the NAT gateway).
resource "aws_security_group_rule" "backend-egress" {
  description       = "${var.prefix}: allow all outbound traffic"
  type              = "egress"
  from_port         = 0
  to_port           = 0
  protocol          = "-1"
  cidr_blocks       = ["0.0.0.0/0"]
  security_group_id = aws_security_group.backend.id
}

328
tools/terraform/ecs.tf Normal file
View File

@ -0,0 +1,328 @@
//resource "aws_route53_record" "record" {
// name = "fleetdm"
// type = "A"
// zone_id = "Z046188311R47QSK245X"
// alias {
// evaluate_target_health = false
// name = aws_alb.main.dns_name
// zone_id = aws_alb.main.zone_id
// }
//}
# Internet-facing ALB in the public subnets; terminates TLS (see listeners).
resource "aws_alb" "main" {
  name     = "fleetdm"
  internal = false
  # NOTE(review): the backend SG is attached here in addition to the LB SG;
  # the LB SG alone appears sufficient for listener traffic — confirm before
  # removing.
  security_groups = [aws_security_group.lb.id, aws_security_group.backend.id]
  subnets         = module.vpc.public_subnets
}

# Target group the ECS service registers task ENIs into (target_type "ip").
resource "aws_alb_target_group" "main" {
  name                          = "fleetdm"
  protocol                      = "HTTP"
  target_type                   = "ip"
  port                          = "8080"
  vpc_id                        = module.vpc.vpc_id
  deregistration_delay          = 30
  load_balancing_algorithm_type = "least_outstanding_requests"

  # Fleet's built-in health endpoint.
  health_check {
    path                = "/healthz"
    matcher             = "200"
    timeout             = 10
    interval            = 15
    healthy_threshold   = 5
    unhealthy_threshold = 5
  }

  # Ensure the ALB exists before the target group is created.
  depends_on = [aws_alb.main]
}
# HTTPS listener: terminates TLS at the ALB with the DNS-validated ACM
# certificate from r53.tf, then forwards plain HTTP to the Fleet tasks
# (which run with FLEET_SERVER_TLS=false, see the task definition).
resource "aws_alb_listener" "https-fleetdm" {
  load_balancer_arn = aws_alb.main.arn
  port              = 443
  protocol          = "HTTPS"
  # Forward-secrecy policy, TLS 1.2 only.
  ssl_policy      = "ELBSecurityPolicy-FS-1-2-Res-2019-08"
  certificate_arn = aws_acm_certificate_validation.dogfood_fleetdm_com.certificate_arn
  default_action {
    target_group_arn = aws_alb_target_group.main.arn
    type             = "forward"
  }
}

# HTTP listener: permanent (301) redirect of all port-80 traffic to HTTPS.
resource "aws_alb_listener" "http" {
  load_balancer_arn = aws_alb.main.arn
  port              = "80"
  protocol          = "HTTP"
  default_action {
    type = "redirect"
    redirect {
      port        = "443"
      protocol    = "HTTPS"
      status_code = "HTTP_301"
    }
  }
}
# ECS cluster hosting the Fleet service, with Container Insights metrics on.
resource "aws_ecs_cluster" "fleet" {
  name = "${var.prefix}-backend"
  setting {
    name  = "containerInsights"
    value = "enabled"
  }
}
# Fargate service running the Fleet backend behind the ALB.
resource "aws_ecs_service" "fleet" {
  name                               = "fleet"
  launch_type                        = "FARGATE"
  cluster                            = aws_ecs_cluster.fleet.id
  task_definition                    = aws_ecs_task_definition.backend.arn
  desired_count                      = 1
  deployment_minimum_healthy_percent = 100
  deployment_maximum_percent         = 200
  health_check_grace_period_seconds  = 30

  # Application Auto Scaling (aws_appautoscaling_target.ecs_target, 1-5
  # tasks) manages the live task count. Without ignore_changes, every
  # `terraform apply` would reset the scaled-out service back to
  # desired_count = 1; desired_count above is only the initial value.
  lifecycle {
    ignore_changes = [desired_count]
  }

  load_balancer {
    target_group_arn = aws_alb_target_group.main.arn
    container_name   = "fleet"
    container_port   = 8080
  }

  # Tasks run in the private subnets; only the ALB can reach them (backend SG).
  network_configuration {
    subnets         = module.vpc.private_subnets
    security_groups = [aws_security_group.backend.id]
  }

  # Listeners must exist before ECS can register targets with the ALB.
  depends_on = [aws_alb_listener.http, aws_alb_listener.https-fleetdm]
}
# CloudWatch log group receiving container logs via the awslogs driver.
# NOTE(review): 1-day retention is very short — raise for production debugging.
resource "aws_cloudwatch_log_group" "backend" {
  name              = "fleetdm"
  retention_in_days = 1
}

data "aws_region" "current" {}

# Pre-existing secret holding the Fleet license key; created out-of-band,
# not managed by this configuration.
data "aws_secretsmanager_secret" "license" {
  name = "/fleet/license"
}
# Fleet application task definition (Fargate).
# NOTE(review): the image tag is unpinned ("fleetdm/fleet"), so each
# deployment pulls whatever the default tag currently points at — consider
# pinning a version.
resource "aws_ecs_task_definition" "backend" {
  family                   = "fleet"
  network_mode             = "awsvpc"
  requires_compatibilities = ["FARGATE"]
  # Same role serves as execution role (pull image, write logs, inject
  # secrets) and task role (the app's own AWS calls) — see iam.tf.
  execution_role_arn = aws_iam_role.main.arn
  task_role_arn      = aws_iam_role.main.arn
  cpu                = 512
  memory             = 4096
  container_definitions = jsonencode(
    [
      {
        name        = "fleet"
        image       = "fleetdm/fleet"
        cpu         = 512
        memory      = 4096
        mountPoints = []
        volumesFrom = []
        essential   = true
        portMappings = [
          {
            # This port is the same that the contained application also uses
            containerPort = 8080
            protocol      = "tcp"
          }
        ]
        networkMode = "awsvpc"
        logConfiguration = {
          logDriver = "awslogs"
          options = {
            awslogs-group         = aws_cloudwatch_log_group.backend.name
            awslogs-region        = data.aws_region.current.name
            awslogs-stream-prefix = "fleet"
          }
        },
        # Injected as env vars by the ECS agent via the execution role;
        # only ARNs appear in the task definition.
        secrets = [
          {
            name      = "FLEET_MYSQL_PASSWORD"
            valueFrom = aws_secretsmanager_secret.database_password_secret.arn
          },
          {
            # The Aurora reader endpoint uses the same master credentials.
            name      = "FLEET_MYSQL_READ_REPLICA_PASSWORD"
            valueFrom = aws_secretsmanager_secret.database_password_secret.arn
          },
          {
            name      = "FLEET_LICENSE_KEY"
            valueFrom = data.aws_secretsmanager_secret.license.arn
          }
        ]
        environment = [
          {
            name  = "FLEET_MYSQL_USERNAME"
            value = "fleet"
          },
          {
            name  = "FLEET_MYSQL_DATABASE"
            value = "fleet"
          },
          {
            # Aurora writer endpoint (rds.tf).
            name  = "FLEET_MYSQL_ADDRESS"
            value = "${module.aurora_mysql.rds_cluster_endpoint}:3306"
          },
          {
            name  = "FLEET_MYSQL_READ_REPLICA_USERNAME"
            value = "fleet"
          },
          {
            name  = "FLEET_MYSQL_READ_REPLICA_DATABASE"
            value = "fleet"
          },
          {
            # Aurora reader endpoint for read-replica traffic.
            name  = "FLEET_MYSQL_READ_REPLICA_ADDRESS"
            value = "${module.aurora_mysql.rds_cluster_reader_endpoint}:3306"
          },
          {
            name  = "FLEET_REDIS_ADDRESS"
            value = "${aws_elasticache_replication_group.default.primary_endpoint_address}:6379"
          },
          {
            # Status and result logs share one delivery stream (firehose.tf).
            name  = "FLEET_FIREHOSE_STATUS_STREAM"
            value = aws_kinesis_firehose_delivery_stream.osquery_logs.name
          },
          {
            name  = "FLEET_FIREHOSE_RESULT_STREAM"
            value = aws_kinesis_firehose_delivery_stream.osquery_logs.name
          },
          {
            name  = "FLEET_FIREHOSE_REGION"
            value = data.aws_region.current.name
          },
          {
            name  = "FLEET_OSQUERY_STATUS_LOG_PLUGIN"
            value = "firehose"
          },
          {
            name  = "FLEET_OSQUERY_RESULT_LOG_PLUGIN"
            value = "firehose"
          },
          {
            # TLS is terminated at the ALB; the container serves plain HTTP.
            name  = "FLEET_SERVER_TLS"
            value = "false"
          },
          {
            name  = "FLEET_BETA_SOFTWARE_INVENTORY"
            value = "1"
          },
          {
            name  = "FLEET_VULNERABILITIES_DATABASES_PATH"
            value = "/home/fleet"
          }
        ]
      }
  ])
}
# One-shot task definition that runs DB migrations ("fleet prepare db").
# Not run by a service — launched manually after apply; see readme.md.
resource "aws_ecs_task_definition" "migration" {
  family                   = "fleet-migrate"
  network_mode             = "awsvpc"
  requires_compatibilities = ["FARGATE"]
  execution_role_arn       = aws_iam_role.main.arn
  task_role_arn            = aws_iam_role.main.arn
  cpu                      = 256
  memory                   = 512
  container_definitions = jsonencode(
    [
      {
        name        = "fleet-prepare-db"
        image       = "fleetdm/fleet"
        cpu         = 256
        memory      = 512
        mountPoints = []
        volumesFrom = []
        essential   = true
        portMappings = [
          {
            # This port is the same that the contained application also uses
            containerPort = 8080
            protocol      = "tcp"
          }
        ]
        networkMode = "awsvpc"
        logConfiguration = {
          logDriver = "awslogs"
          options = {
            awslogs-group         = aws_cloudwatch_log_group.backend.name
            awslogs-region        = data.aws_region.current.name
            awslogs-stream-prefix = "fleet"
          }
        },
        # Overrides the image entrypoint to run migrations instead of serving.
        command = ["fleet", "prepare", "db"]
        secrets = [
          {
            name      = "FLEET_MYSQL_PASSWORD"
            valueFrom = aws_secretsmanager_secret.database_password_secret.arn
          }
        ]
        environment = [
          {
            name  = "FLEET_MYSQL_USERNAME"
            value = "fleet"
          },
          {
            name  = "FLEET_MYSQL_DATABASE"
            value = "fleet"
          },
          {
            name  = "FLEET_MYSQL_ADDRESS"
            value = "${module.aurora_mysql.rds_cluster_endpoint}:3306"
          },
          {
            name  = "FLEET_REDIS_ADDRESS"
            value = "${aws_elasticache_replication_group.default.primary_endpoint_address}:6379"
          }
        ]
      }
  ])
}
# Scale the Fleet service between 1 and 5 tasks.
resource "aws_appautoscaling_target" "ecs_target" {
  max_capacity       = 5
  min_capacity       = 1
  resource_id        = "service/${aws_ecs_cluster.fleet.name}/${aws_ecs_service.fleet.name}"
  scalable_dimension = "ecs:service:DesiredCount"
  service_namespace  = "ecs"
}

# Target-tracking on average memory utilization (80%).
resource "aws_appautoscaling_policy" "ecs_policy_memory" {
  name               = "fleet-memory-autoscaling"
  policy_type        = "TargetTrackingScaling"
  resource_id        = aws_appautoscaling_target.ecs_target.resource_id
  scalable_dimension = aws_appautoscaling_target.ecs_target.scalable_dimension
  service_namespace  = aws_appautoscaling_target.ecs_target.service_namespace
  target_tracking_scaling_policy_configuration {
    predefined_metric_specification {
      predefined_metric_type = "ECSServiceAverageMemoryUtilization"
    }
    target_value = 80
  }
}

# Target-tracking on average CPU utilization (60%). Whichever policy
# demands the higher capacity wins.
resource "aws_appautoscaling_policy" "ecs_policy_cpu" {
  name               = "fleet-cpu-autoscaling"
  policy_type        = "TargetTrackingScaling"
  resource_id        = aws_appautoscaling_target.ecs_target.resource_id
  scalable_dimension = aws_appautoscaling_target.ecs_target.scalable_dimension
  service_namespace  = aws_appautoscaling_target.ecs_target.service_namespace
  target_tracking_scaling_policy_configuration {
    predefined_metric_specification {
      predefined_metric_type = "ECSServiceAverageCPUUtilization"
    }
    target_value = 60
  }
}

View File

@ -0,0 +1,69 @@
# Archive bucket for osquery logs delivered by Firehose.
resource "aws_s3_bucket" "osquery" {
  # S3 bucket names are globally unique across all AWS accounts; deriving the
  # name from var.prefix (default "fleet" -> "fleet-osquery-logs-archive",
  # unchanged from before) lets other environments reuse this configuration
  # without a hard name collision.
  bucket = "${var.prefix}-osquery-logs-archive"
  acl    = "private"

  # Objects expire after one day — this bucket is a short-lived landing zone,
  # not long-term storage.
  lifecycle_rule {
    enabled = true
    expiration {
      days = 1
    }
  }

  # Encrypt all objects at rest with KMS.
  server_side_encryption_configuration {
    rule {
      apply_server_side_encryption_by_default {
        sse_algorithm = "aws:kms"
      }
    }
  }
}
// allow firehose to write to bucket
// allow firehose to write to bucket
data "aws_iam_policy_document" "osquery_logs_policy_doc" {
  statement {
    effect = "Allow"
    actions = [
      "s3:AbortMultipartUpload",
      "s3:GetBucketLocation",
      "s3:ListBucket",
      "s3:ListBucketMultipartUploads",
      "s3:PutObject"
    ]
    # Both the bucket itself and its objects must be covered.
    resources = [aws_s3_bucket.osquery.arn, "${aws_s3_bucket.osquery.arn}/*"]
  }
}

resource "aws_iam_policy" "firehose" {
  name   = "osquery_logs_firehose_policy"
  policy = data.aws_iam_policy_document.osquery_logs_policy_doc.json
}

# No explicit name, so Terraform generates a unique role name.
resource "aws_iam_role" "firehose" {
  assume_role_policy = data.aws_iam_policy_document.osquery_firehose_assume_role.json
}

resource "aws_iam_role_policy_attachment" "firehose" {
  policy_arn = aws_iam_policy.firehose.arn
  role       = aws_iam_role.firehose.name
}

# Trust policy letting the Firehose service assume the role above.
data "aws_iam_policy_document" "osquery_firehose_assume_role" {
  statement {
    effect  = "Allow"
    actions = ["sts:AssumeRole"]
    principals {
      identifiers = ["firehose.amazonaws.com"]
      type        = "Service"
    }
  }
}

# Delivery stream receiving osquery status/result logs from Fleet (see the
# FLEET_FIREHOSE_* environment variables in ecs.tf); lands them in S3.
resource "aws_kinesis_firehose_delivery_stream" "osquery_logs" {
  name        = "osquery_logs"
  destination = "s3"
  s3_configuration {
    role_arn   = aws_iam_role.firehose.arn
    bucket_arn = aws_s3_bucket.osquery.arn
  }
}

19
tools/terraform/main.tf Normal file
View File

@ -0,0 +1,19 @@
# Deployment region; override via -var/tfvars.
# NOTE(review): redis.tf and vpc.tf hard-code us-east-2 AZs, so changing this
# alone is not sufficient — see those files.
variable "region" {
  default = "us-east-2"
}

provider "aws" {
  region = var.region
}

terraform {
  required_providers {
    aws = {
      # Pinned exactly; resource syntax in this configuration (e.g. inline
      # aws_s3_bucket settings) targets the 3.x provider.
      source  = "hashicorp/aws"
      version = "3.54.0"
    }
  }
}

# Identity of the active credentials (account ID, caller ARN).
data "aws_caller_identity" "current" {}

View File

@ -0,0 +1,7 @@
# Name servers for the delegated subdomains — set these as NS records in the
# parent fleetctl.com / fleetdm.com zones to activate the hosted zones below.
output "nameservers_fleetctl" {
  value = aws_route53_zone.dogfood_fleetctl_com.name_servers
}

output "nameservers_fleetdm" {
  value = aws_route53_zone.dogfood_fleetdm_com.name_servers
}

93
tools/terraform/r53.tf Normal file
View File

@ -0,0 +1,93 @@
# Hosted zones for the two dogfood subdomains (delegation happens in the
# parent zones using the name servers exposed in outputs.tf).
resource "aws_route53_zone" "dogfood_fleetctl_com" {
  name = "dogfood.fleetctl.com"
}

resource "aws_route53_zone" "dogfood_fleetdm_com" {
  name = "dogfood.fleetdm.com"
}

# Alias A records pointing each zone apex at the ALB.
resource "aws_route53_record" "dogfood_fleetctl_com" {
  zone_id = aws_route53_zone.dogfood_fleetctl_com.zone_id
  name    = "dogfood.fleetctl.com"
  type    = "A"
  alias {
    name                   = aws_alb.main.dns_name
    zone_id                = aws_alb.main.zone_id
    evaluate_target_health = false
  }
}

resource "aws_route53_record" "dogfood_fleetdm_com" {
  zone_id = aws_route53_zone.dogfood_fleetdm_com.zone_id
  name    = "dogfood.fleetdm.com"
  type    = "A"
  alias {
    name                   = aws_alb.main.dns_name
    zone_id                = aws_alb.main.zone_id
    evaluate_target_health = false
  }
}
# DNS-validated ACM certificates. create_before_destroy lets a replacement
# cert be issued before the old one is removed from the listener.
resource "aws_acm_certificate" "dogfood_fleetctl_com" {
  domain_name       = "dogfood.fleetctl.com"
  validation_method = "DNS"
  lifecycle {
    create_before_destroy = true
  }
}

resource "aws_acm_certificate" "dogfood_fleetdm_com" {
  domain_name       = "dogfood.fleetdm.com"
  validation_method = "DNS"
  lifecycle {
    create_before_destroy = true
  }
}
# One CNAME validation record per domain_validation_option, keyed by domain
# name (the for_each pattern recommended by the AWS provider docs).
resource "aws_route53_record" "dogfood_fleetctl_com_validation" {
  for_each = {
    for dvo in aws_acm_certificate.dogfood_fleetctl_com.domain_validation_options : dvo.domain_name => {
      name   = dvo.resource_record_name
      record = dvo.resource_record_value
      type   = dvo.resource_record_type
    }
  }
  allow_overwrite = true
  name            = each.value.name
  records         = [each.value.record]
  ttl             = 60
  type            = each.value.type
  zone_id         = aws_route53_zone.dogfood_fleetctl_com.zone_id
}

resource "aws_route53_record" "dogfood_fleetdm_com_validation" {
  for_each = {
    for dvo in aws_acm_certificate.dogfood_fleetdm_com.domain_validation_options : dvo.domain_name => {
      name   = dvo.resource_record_name
      record = dvo.resource_record_value
      type   = dvo.resource_record_type
    }
  }
  allow_overwrite = true
  name            = each.value.name
  records         = [each.value.record]
  ttl             = 60
  type            = each.value.type
  zone_id         = aws_route53_zone.dogfood_fleetdm_com.zone_id
}

# Blocks until ACM confirms validation; the HTTPS listener in ecs.tf
# references these so it is only created once the cert is issued.
resource "aws_acm_certificate_validation" "dogfood_fleetctl_com" {
  certificate_arn         = aws_acm_certificate.dogfood_fleetctl_com.arn
  validation_record_fqdns = [for record in aws_route53_record.dogfood_fleetctl_com_validation : record.fqdn]
}

resource "aws_acm_certificate_validation" "dogfood_fleetdm_com" {
  certificate_arn         = aws_acm_certificate.dogfood_fleetdm_com.arn
  validation_record_fqdns = [for record in aws_route53_record.dogfood_fleetdm_com_validation : record.fqdn]
}

113
tools/terraform/rds.tf Normal file
View File

@ -0,0 +1,113 @@
locals {
  name = "fleetdm"
}

# Random master password for Aurora, stored in Secrets Manager and injected
# into the ECS tasks (never written into the task definition).
# special = false — presumably to avoid characters needing escaping in
# connection strings; TODO confirm.
resource "random_password" "database_password" {
  length  = 16
  special = false
}

resource "aws_secretsmanager_secret" "database_password_secret" {
  name = "/fleet/database/password/master"
}

resource "aws_secretsmanager_secret_version" "database_password_secret_version" {
  secret_id     = aws_secretsmanager_secret.database_password_secret.id
  secret_string = random_password.database_password.result
}
// if you want to use RDS Serverless option prefer the following commented block
//module "aurora_mysql_serverless" {
// source = "terraform-aws-modules/rds-aurora/aws"
// version = "5.2.0"
//
// name = "${local.name}-mysql"
// engine = "aurora-mysql"
// engine_mode = "serverless"
// storage_encrypted = true
// username = "fleet"
// password = random_password.database_password.result
// create_random_password = false
// database_name = "fleet"
// enable_http_endpoint = true
//
// vpc_id = module.vpc.vpc_id
// subnets = module.vpc.database_subnets
// create_security_group = true
// allowed_cidr_blocks = module.vpc.private_subnets_cidr_blocks
//
// replica_scale_enabled = false
// replica_count = 0
//
// monitoring_interval = 60
//
// apply_immediately = true
// skip_final_snapshot = true
//
// db_parameter_group_name = aws_db_parameter_group.example_mysql.id
// db_cluster_parameter_group_name = aws_rds_cluster_parameter_group.example_mysql.id
//
// scaling_configuration = {
// auto_pause = true
// min_capacity = 2
// max_capacity = 16
// seconds_until_auto_pause = 300
// timeout_action = "ForceApplyCapacityChange"
// }
//}
# Aurora MySQL 5.7 cluster: one writer plus 1-3 autoscaled readers. Fleet
# uses the writer endpoint for primary traffic and the reader endpoint for
# read-replica traffic (see FLEET_MYSQL_* in ecs.tf).
module "aurora_mysql" {
  source  = "terraform-aws-modules/rds-aurora/aws"
  version = "5.2.0"

  name                                = "${local.name}-mysql-iam"
  engine                              = "aurora-mysql"
  engine_version                      = "5.7.mysql_aurora.2.10.0"
  instance_type                       = "db.t4g.medium"
  instance_type_replica               = "db.t4g.medium"
  iam_database_authentication_enabled = true
  storage_encrypted                   = true
  username                            = "fleet"
  # Password is generated above and managed via Secrets Manager.
  password               = random_password.database_password.result
  create_random_password = false
  database_name          = "fleet"
  enable_http_endpoint   = false
  #performance_insights_enabled = true

  # Reachable only from the private (ECS) subnets.
  vpc_id                = module.vpc.vpc_id
  subnets               = module.vpc.database_subnets
  create_security_group = true
  allowed_cidr_blocks   = module.vpc.private_subnets_cidr_blocks

  replica_count         = 1
  replica_scale_enabled = true
  replica_scale_min     = 1
  replica_scale_max     = 3

  # Enhanced monitoring at 60s granularity, with a dedicated IAM role.
  monitoring_interval           = 60
  iam_role_name                 = "${local.name}-rds-enhanced-monitoring"
  iam_role_use_name_prefix      = true
  iam_role_description          = "${local.name} RDS enhanced monitoring IAM role"
  iam_role_path                 = "/autoscaling/"
  iam_role_max_session_duration = 7200

  # Dogfood settings — NOTE(review): skip_final_snapshot = true discards the
  # data on destroy; not appropriate for production.
  apply_immediately   = true
  skip_final_snapshot = true

  db_parameter_group_name         = aws_db_parameter_group.example_mysql.id
  db_cluster_parameter_group_name = aws_rds_cluster_parameter_group.example_mysql.id
}
# Default (empty) parameter groups, present so parameters can be tuned later
# without recreating the cluster. NOTE(review): the "example_mysql" resource
# labels are leftovers from the module example; renaming them would force
# replacement of the groups, so they are kept as-is.
resource "aws_db_parameter_group" "example_mysql" {
  name        = "${local.name}-aurora-db-mysql-parameter-group"
  family      = "aurora-mysql5.7"
  description = "${local.name}-aurora-db-mysql-parameter-group"
}

resource "aws_rds_cluster_parameter_group" "example_mysql" {
  name        = "${local.name}-aurora-mysql-cluster-parameter-group"
  family      = "aurora-mysql5.7"
  description = "${local.name}-aurora-mysql-cluster-parameter-group"
}

44
tools/terraform/readme.md Normal file
View File

@ -0,0 +1,44 @@
## Terraform
`terraform init && terraform workspace new dev`
`terraform plan`
`terraform apply`
### Configuration
Typical settings to override in an existing environment:
`module.vpc.vpc_id` -- the VPC ID output from VPC module. If you are introducing fleet to an existing VPC, you could replace all instances with your VPC ID.
In this reference architecture we are placing ECS, RDS MySQL, and Redis (ElastiCache) in separate subnets, each associated with a route table, allowing communication between them.
This is not required, as long as Fleet can resolve the MySQL and Redis hosts, that should be adequate.
#### HTTPS
The ALB is in the public subnet with an ENI to bridge into the private subnet. SSL is terminated at the ALB and `fleet serve` is launched with `FLEET_SERVER_TLS=false` as an
environment variable.
The **ALB HTTPS Listener** uses the ACM certificate that is provisioned and DNS-validated in `r53.tf`. To bring your own certificate instead, set `certificate_arn` on the HTTPS listener (in `ecs.tf`) to your certificate's ARN.
### Migrating the DB
After applying terraform run the following to migrate the database:
```
aws ecs run-task --cluster fleet-backend --task-definition fleet-migrate:<latest_version> --launch-type FARGATE --network-configuration "awsvpcConfiguration={subnets=[<private_subnet_id>],securityGroups=[<desired_security_group>]}"
```
### Connecting a Host
Build orbit:
```
fleetctl package --type=msi --fleet-url=<alb_dns> --enroll-secret=<secret>
```
Run orbit:
```
"C:\Program Files\Orbit\bin\orbit\orbit.exe" --root-dir "C:\Program Files\Orbit\." --log-file "C:\Program Files\Orbit\orbit-log.txt" --fleet-url "http://<alb_dns>" --enroll-secret-path "C:\Program Files\Orbit\secret.txt" --update-url "https://tuf.fleetctl.com" --orbit-channel "stable" --osqueryd-channel "stable"
```

65
tools/terraform/redis.tf Normal file
View File

@ -0,0 +1,65 @@
# Empty string lets AWS pick a maintenance window.
variable "maintenance_window" {
  default = ""
}

variable "engine_version" {
  default = "5.0.6"
}

variable "node_type" {
  default = "cache.t2.micro"
}

# One node per AZ listed on the replication group below.
variable "number_cache_clusters" {
  default = 3
}
# Redis replication group backing Fleet's live-query/session state
# (FLEET_REDIS_ADDRESS in ecs.tf points at the primary endpoint).
resource "aws_elasticache_replication_group" "default" {
  # NOTE(review): AZs are hard-coded and must agree with var.region
  # (main.tf) — parameterize if the region ever changes.
  availability_zones    = ["us-east-2a", "us-east-2b", "us-east-2c"]
  engine                = "redis"
  parameter_group_name  = aws_elasticache_parameter_group.default.name
  subnet_group_name     = module.vpc.elasticache_subnet_group_name
  security_group_ids    = [aws_security_group.redis.id]
  replication_group_id  = "fleetdm-redis"
  number_cache_clusters = var.number_cache_clusters
  node_type             = var.node_type
  engine_version        = var.engine_version
  port                  = "6379"
  maintenance_window    = var.maintenance_window
  # No snapshots; failover and encryption (at rest and in transit) are all
  # disabled — acceptable for a dogfood environment inside private subnets,
  # revisit for production.
  snapshot_retention_limit      = 0
  automatic_failover_enabled    = false
  at_rest_encryption_enabled    = false
  transit_encryption_enabled    = false
  apply_immediately             = true
  replication_group_description = "fleetdm-redis"
}

resource "aws_elasticache_parameter_group" "default" {
  name        = "fleetdm-redis"
  family      = "redis5.0"
  description = "for fleet"
}
resource "aws_security_group" "redis" {
  name   = local.security_group_name
  vpc_id = module.vpc.vpc_id
}

locals {
  security_group_name = "${var.prefix}-elasticache-redis"
}

# Allow Redis traffic (6379) from the private subnets where the Fleet
# tasks run.
resource "aws_security_group_rule" "ingress" {
  type              = "ingress"
  from_port         = "6379"
  to_port           = "6379"
  protocol          = "tcp"
  cidr_blocks       = module.vpc.private_subnets_cidr_blocks
  security_group_id = aws_security_group.redis.id
}

# Allow all outbound traffic from the Redis nodes.
resource "aws_security_group_rule" "egress" {
  type              = "egress"
  from_port         = 0
  to_port           = 0
  protocol          = "-1"
  cidr_blocks       = ["0.0.0.0/0"]
  security_group_id = aws_security_group.redis.id
}

View File

@ -0,0 +1,3 @@
# Name prefix applied to security groups and other shared resources.
variable "prefix" {
  default = "fleet"
}

24
tools/terraform/vpc.tf Normal file
View File

@ -0,0 +1,24 @@
# VPC with four subnet tiers: private (ECS tasks), public (ALB + NAT),
# database (Aurora) and elasticache (Redis), one subnet per AZ.
# NOTE(review): AZs are hard-coded and must agree with var.region (main.tf).
module "vpc" {
  source = "terraform-aws-modules/vpc/aws"

  name = "fleet-vpc"
  cidr = "10.10.0.0/16"

  azs                 = ["us-east-2a", "us-east-2b", "us-east-2c"]
  private_subnets     = ["10.10.1.0/24", "10.10.2.0/24", "10.10.3.0/24"]
  public_subnets      = ["10.10.11.0/24", "10.10.12.0/24", "10.10.13.0/24"]
  database_subnets    = ["10.10.21.0/24", "10.10.22.0/24", "10.10.23.0/24"]
  elasticache_subnets = ["10.10.31.0/24", "10.10.32.0/24", "10.10.33.0/24"]

  create_database_subnet_group          = true
  create_database_subnet_route_table    = true
  create_elasticache_subnet_group       = true
  create_elasticache_subnet_route_table = true

  enable_vpn_gateway = false

  # A single NAT gateway serves all private subnets — cheaper, but a
  # single point of failure for outbound traffic.
  one_nat_gateway_per_az = false
  single_nat_gateway     = true
  enable_nat_gateway     = true
}