mirror of
https://github.com/hashicorp/vault.git
synced 2026-05-05 12:26:34 +02:00
[QT-572][VAULT-17391] enos: use ec2 fleets for consul storage scenarios (#21400)
Begin the process of migrating away from the "strongly encouraged not to use"[0] Ec2 spot fleet API to the more modern `ec2:CreateFleet`. Unfortunately the `instant` type fleet does not guarantee fulfillment with either on-demand or spot types. We'll need to add a feature similar to `wait_for_fulfillment` on the `spot_fleet_request` resource[1] to `ec2_fleet` before we can rely on it. We also update the existing target fleets to support provisioning generic targets. This has allowed us to remove our usage of `terraform-enos-aws-consul` and replace it with a smaller `backend_consul` module in-repo. We also remove `terraform-enos-aws-infra` and replace it with two smaller in-repo modules `ec2_info` and `create_vpc`. This has allowed us to simplify the vpc resources we use for each scenario, which in turn allows us to not rely on flaky resources. As part of this refactor we've also made it possible to provision targets using different distro versions. [0] https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-best-practices.html#which-spot-request-method-to-use [1] https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/spot_fleet_request#wait_for_fulfillment * enos/consul: add `backend_consul` module that accepts target hosts. * enos/target_ec2_spot_fleet: add support for consul networking. * enos/target_ec2_spot_fleet: add support for customizing cluster tag key. * enos/scenarios: create `target_ec2_fleet` which uses a more modern `ec2_fleet` API. * enos/create_vpc: replace `terraform-enos-aws-infra` with smaller and simplified version. Flatten the networking to a single route on the default route table and a single subnet. * enos/ec2_info: add a new module to give us useful ec2 information including AMI IDs for various arch/distro/version combinations. * enos/ci: update service user role to allow for managing ec2 fleets. Signed-off-by: Ryan Cragun <me@ryan.ec>
This commit is contained in:
parent
c7db2d61b0
commit
8d22142a3e
@ -99,6 +99,7 @@ data "aws_iam_policy_document" "enos_scenario" {
|
||||
"ec2:CancelSpotInstanceRequests",
|
||||
"ec2:CreateInternetGateway",
|
||||
"ec2:CreateKeyPair",
|
||||
"ec2:CreateFleet",
|
||||
"ec2:CreateLaunchTemplate",
|
||||
"ec2:CreateLaunchTemplateVersion",
|
||||
"ec2:CreateRoute",
|
||||
@ -109,10 +110,12 @@ data "aws_iam_policy_document" "enos_scenario" {
|
||||
"ec2:CreateTags",
|
||||
"ec2:CreateVolume",
|
||||
"ec2:CreateVPC",
|
||||
"ec2:DeleteFleets",
|
||||
"ec2:DeleteInternetGateway",
|
||||
"ec2:DeleteLaunchTemplate",
|
||||
"ec2:DeleteLaunchTemplateVersions",
|
||||
"ec2:DeleteKeyPair",
|
||||
"ec2:DeleteRoute",
|
||||
"ec2:DeleteRouteTable",
|
||||
"ec2:DeleteSecurityGroup",
|
||||
"ec2:DeleteSpotDatafeedSubscription",
|
||||
@ -122,6 +125,9 @@ data "aws_iam_policy_document" "enos_scenario" {
|
||||
"ec2:DeleteVPC",
|
||||
"ec2:DescribeAccountAttributes",
|
||||
"ec2:DescribeAvailabilityZones",
|
||||
"ec2:DescribeFleets",
|
||||
"ec2:DescribeFleetHistory",
|
||||
"ec2:DescribeFleetInstances",
|
||||
"ec2:DescribeImages",
|
||||
"ec2:DescribeInstanceAttribute",
|
||||
"ec2:DescribeInstanceCreditSpecifications",
|
||||
@ -158,6 +164,7 @@ data "aws_iam_policy_document" "enos_scenario" {
|
||||
"ec2:GetLaunchTemplateData",
|
||||
"ec2:GetSpotPlacementScores",
|
||||
"ec2:ImportKeyPair",
|
||||
"ec2:ModifyFleet",
|
||||
"ec2:ModifyInstanceAttribute",
|
||||
"ec2:ModifyLaunchTemplate",
|
||||
"ec2:ModifySpotFleetRequest",
|
||||
|
||||
@ -5,19 +5,11 @@ module "autopilot_upgrade_storageconfig" {
|
||||
source = "./modules/autopilot_upgrade_storageconfig"
|
||||
}
|
||||
|
||||
module "az_finder" {
|
||||
source = "./modules/az_finder"
|
||||
}
|
||||
|
||||
module "backend_consul" {
|
||||
source = "app.terraform.io/hashicorp-qti/aws-consul/enos"
|
||||
source = "./modules/backend_consul"
|
||||
|
||||
project_name = var.project_name
|
||||
environment = "ci"
|
||||
common_tags = var.tags
|
||||
ssh_aws_keypair = var.aws_ssh_keypair_name
|
||||
consul_license = var.backend_license_path == null ? null : file(abspath(var.backend_license_path))
|
||||
consul_log_level = var.backend_log_level
|
||||
license = var.backend_license_path == null ? null : file(abspath(var.backend_license_path))
|
||||
log_level = var.backend_log_level
|
||||
}
|
||||
|
||||
module "backend_raft" {
|
||||
@ -37,12 +29,14 @@ module "build_artifactory" {
|
||||
}
|
||||
|
||||
module "create_vpc" {
|
||||
source = "app.terraform.io/hashicorp-qti/aws-infra/enos"
|
||||
source = "./modules/create_vpc"
|
||||
|
||||
project_name = var.project_name
|
||||
environment = "ci"
|
||||
common_tags = var.tags
|
||||
ami_architectures = ["amd64", "arm64"]
|
||||
environment = "ci"
|
||||
common_tags = var.tags
|
||||
}
|
||||
|
||||
module "ec2_info" {
|
||||
source = "./modules/ec2_info"
|
||||
}
|
||||
|
||||
module "get_local_metadata" {
|
||||
@ -67,13 +61,16 @@ module "shutdown_multiple_nodes" {
|
||||
source = "./modules/shutdown_multiple_nodes"
|
||||
}
|
||||
|
||||
module "target_ec2_instances" {
|
||||
source = "./modules/target_ec2_instances"
|
||||
module "target_ec2_fleet" {
|
||||
source = "./modules/target_ec2_fleet"
|
||||
|
||||
common_tags = var.tags
|
||||
instance_count = var.vault_instance_count
|
||||
project_name = var.project_name
|
||||
ssh_keypair = var.aws_ssh_keypair_name
|
||||
capacity_type = "on-demand" // or "spot", use on-demand until we can stabilize spot fleets
|
||||
common_tags = var.tags
|
||||
instance_mem_min = 4096
|
||||
instance_cpu_min = 2
|
||||
max_price = "0.1432" // On-demand cost for RHEL/t3.medium on-demand in us-east
|
||||
project_name = var.project_name
|
||||
ssh_keypair = var.aws_ssh_keypair_name
|
||||
}
|
||||
|
||||
module "target_ec2_spot_fleet" {
|
||||
@ -82,10 +79,9 @@ module "target_ec2_spot_fleet" {
|
||||
common_tags = var.tags
|
||||
instance_mem_min = 4096
|
||||
instance_cpu_min = 2
|
||||
max_price = "0.1432" // On-demand cost for RHEL/t3.medium on-demand in us-east
|
||||
project_name = var.project_name
|
||||
// Current on-demand cost of t3.medium in us-east.
|
||||
spot_price_max = "0.0416"
|
||||
ssh_keypair = var.aws_ssh_keypair_name
|
||||
ssh_keypair = var.aws_ssh_keypair_name
|
||||
}
|
||||
|
||||
module "vault_agent" {
|
||||
|
||||
@ -26,28 +26,28 @@ scenario "agent" {
|
||||
"ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"]
|
||||
}
|
||||
bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_bundle_path) : null
|
||||
packages = ["jq"]
|
||||
distro_version = {
|
||||
"rhel" = var.rhel_distro_version
|
||||
"ubuntu" = var.ubuntu_distro_version
|
||||
}
|
||||
enos_provider = {
|
||||
rhel = provider.enos.rhel
|
||||
ubuntu = provider.enos.ubuntu
|
||||
}
|
||||
install_artifactory_artifact = local.bundle_path == null
|
||||
spot_price_max = {
|
||||
max_price = {
|
||||
// These prices are based on on-demand cost for t3.large in us-east
|
||||
"rhel" = "0.1432"
|
||||
"ubuntu" = "0.0832"
|
||||
}
|
||||
packages = ["jq"]
|
||||
tags = merge({
|
||||
"Project Name" : var.project_name
|
||||
"Project" : "Enos",
|
||||
"Environment" : "ci"
|
||||
}, var.tags)
|
||||
vault_instance_types = {
|
||||
amd64 = "t3a.small"
|
||||
arm64 = "t4g.small"
|
||||
}
|
||||
vault_instance_type = coalesce(var.vault_instance_type, local.vault_instance_types[matrix.arch])
|
||||
vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic"))
|
||||
vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic"))
|
||||
vault_tag_key = "Type" // enos_vault_start expects Type as the tag key
|
||||
}
|
||||
|
||||
step "get_local_metadata" {
|
||||
@ -72,29 +72,19 @@ scenario "agent" {
|
||||
artifact_type = matrix.artifact_source == "artifactory" ? var.vault_artifact_type : null
|
||||
distro = matrix.artifact_source == "artifactory" ? matrix.distro : null
|
||||
edition = matrix.artifact_source == "artifactory" ? matrix.edition : null
|
||||
instance_type = matrix.artifact_source == "artifactory" ? local.vault_instance_type : null
|
||||
revision = var.vault_revision
|
||||
}
|
||||
}
|
||||
|
||||
step "find_azs" {
|
||||
module = module.az_finder
|
||||
|
||||
variables {
|
||||
instance_type = [
|
||||
var.backend_instance_type,
|
||||
local.vault_instance_type
|
||||
]
|
||||
}
|
||||
step "ec2_info" {
|
||||
module = module.ec2_info
|
||||
}
|
||||
|
||||
step "create_vpc" {
|
||||
module = module.create_vpc
|
||||
|
||||
variables {
|
||||
ami_architectures = distinct([matrix.arch, "amd64"])
|
||||
availability_zones = step.find_azs.availability_zones
|
||||
common_tags = local.tags
|
||||
common_tags = local.tags
|
||||
}
|
||||
}
|
||||
|
||||
@ -108,7 +98,7 @@ scenario "agent" {
|
||||
}
|
||||
|
||||
step "create_vault_cluster_targets" {
|
||||
module = module.target_ec2_spot_fleet // "target_ec2_instances" can be used for on-demand instances
|
||||
module = module.target_ec2_spot_fleet
|
||||
depends_on = [step.create_vpc]
|
||||
|
||||
providers = {
|
||||
@ -116,11 +106,11 @@ scenario "agent" {
|
||||
}
|
||||
|
||||
variables {
|
||||
ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch]
|
||||
ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][local.distro_version[matrix.distro]]
|
||||
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
|
||||
cluster_tag_key = local.vault_tag_key
|
||||
common_tags = local.tags
|
||||
instance_type = local.vault_instance_type // only used for on-demand instances
|
||||
spot_price_max = local.spot_price_max[matrix.distro]
|
||||
max_price = local.max_price[matrix.distro]
|
||||
vpc_id = step.create_vpc.vpc_id
|
||||
}
|
||||
}
|
||||
|
||||
@ -39,33 +39,32 @@ scenario "autopilot" {
|
||||
"ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"]
|
||||
}
|
||||
bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_bundle_path) : null
|
||||
packages = ["jq"]
|
||||
distro_version = {
|
||||
"rhel" = var.rhel_distro_version
|
||||
"ubuntu" = var.ubuntu_distro_version
|
||||
}
|
||||
enos_provider = {
|
||||
rhel = provider.enos.rhel
|
||||
ubuntu = provider.enos.ubuntu
|
||||
}
|
||||
spot_price_max = {
|
||||
max_price = {
|
||||
// These prices are based on on-demand cost for t3.large in us-east
|
||||
"rhel" = "0.1432"
|
||||
"ubuntu" = "0.0832"
|
||||
}
|
||||
packages = ["jq"]
|
||||
tags = merge({
|
||||
"Project Name" : var.project_name
|
||||
"Project" : "Enos",
|
||||
"Environment" : "ci"
|
||||
}, var.tags)
|
||||
vault_instance_types = {
|
||||
amd64 = "t3a.small"
|
||||
arm64 = "t4g.small"
|
||||
}
|
||||
|
||||
vault_instance_type = coalesce(var.vault_instance_type, local.vault_instance_types[matrix.arch])
|
||||
vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic"))
|
||||
vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic"))
|
||||
vault_install_dir_packages = {
|
||||
rhel = "/bin"
|
||||
ubuntu = "/usr/bin"
|
||||
}
|
||||
vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : local.vault_install_dir_packages[matrix.distro]
|
||||
vault_tag_key = "Type" // enos_vault_start expects Type as the tag key
|
||||
}
|
||||
|
||||
step "build_vault" {
|
||||
@ -85,29 +84,19 @@ scenario "autopilot" {
|
||||
artifact_type = matrix.artifact_type
|
||||
distro = matrix.artifact_source == "artifactory" ? matrix.distro : null
|
||||
edition = matrix.artifact_source == "artifactory" ? matrix.edition : null
|
||||
instance_type = matrix.artifact_source == "artifactory" ? local.vault_instance_type : null
|
||||
revision = var.vault_revision
|
||||
}
|
||||
}
|
||||
|
||||
step "find_azs" {
|
||||
module = module.az_finder
|
||||
|
||||
variables {
|
||||
instance_type = [
|
||||
local.vault_instance_type
|
||||
]
|
||||
}
|
||||
step "ec2_info" {
|
||||
module = module.ec2_info
|
||||
}
|
||||
|
||||
step "create_vpc" {
|
||||
module = module.create_vpc
|
||||
depends_on = [step.find_azs]
|
||||
module = module.create_vpc
|
||||
|
||||
variables {
|
||||
ami_architectures = [matrix.arch]
|
||||
availability_zones = step.find_azs.availability_zones
|
||||
common_tags = local.tags
|
||||
common_tags = local.tags
|
||||
}
|
||||
}
|
||||
|
||||
@ -120,7 +109,7 @@ scenario "autopilot" {
|
||||
}
|
||||
|
||||
step "create_vault_cluster_targets" {
|
||||
module = module.target_ec2_spot_fleet // "target_ec2_instances" can be used for on-demand instances
|
||||
module = module.target_ec2_spot_fleet
|
||||
depends_on = [step.create_vpc]
|
||||
|
||||
providers = {
|
||||
@ -128,11 +117,11 @@ scenario "autopilot" {
|
||||
}
|
||||
|
||||
variables {
|
||||
ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch]
|
||||
ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][local.distro_version[matrix.distro]]
|
||||
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
|
||||
cluster_tag_key = local.vault_tag_key
|
||||
common_tags = local.tags
|
||||
instance_type = local.vault_instance_type // only used for on-demand instances
|
||||
spot_price_max = local.spot_price_max[matrix.distro]
|
||||
max_price = local.max_price[matrix.distro]
|
||||
vpc_id = step.create_vpc.vpc_id
|
||||
}
|
||||
}
|
||||
@ -185,7 +174,6 @@ scenario "autopilot" {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
step "verify_write_test_data" {
|
||||
module = module.vault_verify_write_data
|
||||
depends_on = [
|
||||
@ -215,7 +203,7 @@ scenario "autopilot" {
|
||||
}
|
||||
|
||||
step "create_vault_cluster_upgrade_targets" {
|
||||
module = module.target_ec2_spot_fleet // "target_ec2_instances" can be used for on-demand instances
|
||||
module = module.target_ec2_spot_fleet
|
||||
depends_on = [step.create_vpc]
|
||||
|
||||
providers = {
|
||||
@ -223,12 +211,11 @@ scenario "autopilot" {
|
||||
}
|
||||
|
||||
variables {
|
||||
ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch]
|
||||
ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][local.distro_version[matrix.distro]]
|
||||
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
|
||||
common_tags = local.tags
|
||||
cluster_name = step.create_vault_cluster_targets.cluster_name
|
||||
instance_type = local.vault_instance_type // only used for on-demand instances
|
||||
spot_price_max = local.spot_price_max[matrix.distro]
|
||||
max_price = local.max_price[matrix.distro]
|
||||
vpc_id = step.create_vpc.vpc_id
|
||||
}
|
||||
}
|
||||
@ -247,12 +234,10 @@ scenario "autopilot" {
|
||||
}
|
||||
|
||||
variables {
|
||||
artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null
|
||||
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
|
||||
cluster_name = step.create_vault_cluster_targets.cluster_name
|
||||
config_env_vars = {
|
||||
VAULT_LOG_LEVEL = var.vault_log_level
|
||||
}
|
||||
artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null
|
||||
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
|
||||
cluster_name = step.create_vault_cluster_targets.cluster_name
|
||||
log_level = var.vault_log_level
|
||||
force_unseal = matrix.seal == "shamir"
|
||||
initialize_cluster = false
|
||||
install_dir = local.vault_install_dir
|
||||
|
||||
@ -39,39 +39,40 @@ scenario "replication" {
|
||||
]
|
||||
|
||||
locals {
|
||||
backend_tag_key = "VaultStorage"
|
||||
build_tags = {
|
||||
"ent" = ["ui", "enterprise", "ent"]
|
||||
"ent.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.fips1402"]
|
||||
"ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"]
|
||||
"ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"]
|
||||
}
|
||||
distro_version = {
|
||||
"rhel" = var.rhel_distro_version
|
||||
"ubuntu" = var.ubuntu_distro_version
|
||||
}
|
||||
bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_bundle_path) : null
|
||||
packages = ["jq"]
|
||||
enos_provider = {
|
||||
rhel = provider.enos.rhel
|
||||
ubuntu = provider.enos.ubuntu
|
||||
}
|
||||
spot_price_max = {
|
||||
max_price = {
|
||||
// These prices are based on on-demand cost for t3.large in us-east
|
||||
"rhel" = "0.1432"
|
||||
"ubuntu" = "0.0832"
|
||||
}
|
||||
packages = ["jq"]
|
||||
tags = merge({
|
||||
"Project Name" : var.project_name
|
||||
"Project" : "Enos",
|
||||
"Environment" : "ci"
|
||||
}, var.tags)
|
||||
vault_instance_types = {
|
||||
amd64 = "t3a.small"
|
||||
arm64 = "t4g.small"
|
||||
}
|
||||
vault_instance_type = coalesce(var.vault_instance_type, local.vault_instance_types[matrix.arch])
|
||||
vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic"))
|
||||
vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic"))
|
||||
vault_install_dir_packages = {
|
||||
rhel = "/bin"
|
||||
ubuntu = "/usr/bin"
|
||||
}
|
||||
vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : local.vault_install_dir_packages[matrix.distro]
|
||||
vault_tag_key = "Type" // enos_vault_start expects Type as the tag key
|
||||
}
|
||||
|
||||
step "build_vault" {
|
||||
@ -91,28 +92,19 @@ scenario "replication" {
|
||||
artifact_type = matrix.artifact_type
|
||||
distro = matrix.artifact_source == "artifactory" ? matrix.distro : null
|
||||
edition = matrix.artifact_source == "artifactory" ? matrix.edition : null
|
||||
instance_type = matrix.artifact_source == "artifactory" ? local.vault_instance_type : null
|
||||
revision = var.vault_revision
|
||||
}
|
||||
}
|
||||
|
||||
step "find_azs" {
|
||||
module = module.az_finder
|
||||
variables {
|
||||
instance_type = [
|
||||
local.vault_instance_type
|
||||
]
|
||||
}
|
||||
step "ec2_info" {
|
||||
module = module.ec2_info
|
||||
}
|
||||
|
||||
step "create_vpc" {
|
||||
module = module.create_vpc
|
||||
depends_on = [step.find_azs]
|
||||
module = module.create_vpc
|
||||
|
||||
variables {
|
||||
ami_architectures = [matrix.arch]
|
||||
availability_zones = step.find_azs.availability_zones
|
||||
common_tags = local.tags
|
||||
common_tags = local.tags
|
||||
}
|
||||
}
|
||||
|
||||
@ -124,29 +116,71 @@ scenario "replication" {
|
||||
}
|
||||
}
|
||||
|
||||
step "create_primary_backend_cluster" {
|
||||
module = "backend_${matrix.primary_backend}"
|
||||
depends_on = [step.create_vpc]
|
||||
# Create all of our instances for both primary and secondary clusters
|
||||
step "create_primary_cluster_targets" {
|
||||
module = module.target_ec2_spot_fleet
|
||||
depends_on = [
|
||||
step.create_vpc,
|
||||
]
|
||||
|
||||
providers = {
|
||||
enos = local.enos_provider[matrix.distro]
|
||||
}
|
||||
|
||||
variables {
|
||||
ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][local.distro_version[matrix.distro]]
|
||||
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
|
||||
cluster_tag_key = local.vault_tag_key
|
||||
common_tags = local.tags
|
||||
max_price = local.max_price[matrix.distro]
|
||||
vpc_id = step.create_vpc.vpc_id
|
||||
}
|
||||
}
|
||||
|
||||
step "create_primary_cluster_backend_targets" {
|
||||
module = module.target_ec2_spot_fleet
|
||||
depends_on = [
|
||||
step.create_vpc,
|
||||
]
|
||||
|
||||
providers = {
|
||||
enos = provider.enos.ubuntu
|
||||
}
|
||||
|
||||
variables {
|
||||
ami_id = step.create_vpc.ami_ids["ubuntu"]["amd64"]
|
||||
common_tags = local.tags
|
||||
consul_release = {
|
||||
edition = var.backend_edition
|
||||
version = matrix.consul_version
|
||||
}
|
||||
instance_type = var.backend_instance_type
|
||||
kms_key_arn = step.create_vpc.kms_key_arn
|
||||
vpc_id = step.create_vpc.vpc_id
|
||||
ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"]
|
||||
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
|
||||
cluster_tag_key = local.backend_tag_key
|
||||
common_tags = local.tags
|
||||
max_price = local.max_price["ubuntu"]
|
||||
vpc_id = step.create_vpc.vpc_id
|
||||
}
|
||||
}
|
||||
|
||||
step "create_primary_cluster_targets" {
|
||||
module = module.target_ec2_spot_fleet // "target_ec2_instances" can be used for on-demand instances
|
||||
step "create_primary_cluster_additional_targets" {
|
||||
module = module.target_ec2_spot_fleet
|
||||
depends_on = [
|
||||
step.create_vpc,
|
||||
step.create_primary_cluster_targets,
|
||||
]
|
||||
|
||||
providers = {
|
||||
enos = local.enos_provider[matrix.distro]
|
||||
}
|
||||
|
||||
variables {
|
||||
ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][local.distro_version[matrix.distro]]
|
||||
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
|
||||
cluster_name = step.create_primary_cluster_targets.cluster_name
|
||||
cluster_tag_key = local.vault_tag_key
|
||||
common_tags = local.tags
|
||||
max_price = local.max_price[matrix.distro]
|
||||
vpc_id = step.create_vpc.vpc_id
|
||||
}
|
||||
}
|
||||
|
||||
step "create_secondary_cluster_targets" {
|
||||
module = module.target_ec2_spot_fleet
|
||||
depends_on = [step.create_vpc]
|
||||
|
||||
providers = {
|
||||
@ -154,15 +188,54 @@ scenario "replication" {
|
||||
}
|
||||
|
||||
variables {
|
||||
ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch]
|
||||
ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][local.distro_version[matrix.distro]]
|
||||
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
|
||||
cluster_tag_key = local.vault_tag_key
|
||||
common_tags = local.tags
|
||||
instance_type = local.vault_instance_type // only used for on-demand instances
|
||||
spot_price_max = local.spot_price_max[matrix.distro]
|
||||
max_price = local.max_price[matrix.distro]
|
||||
vpc_id = step.create_vpc.vpc_id
|
||||
}
|
||||
}
|
||||
|
||||
step "create_secondary_cluster_backend_targets" {
|
||||
module = module.target_ec2_spot_fleet
|
||||
depends_on = [step.create_vpc]
|
||||
|
||||
providers = {
|
||||
enos = provider.enos.ubuntu
|
||||
}
|
||||
|
||||
variables {
|
||||
ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"]
|
||||
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
|
||||
cluster_tag_key = local.backend_tag_key
|
||||
common_tags = local.tags
|
||||
max_price = local.max_price["ubuntu"]
|
||||
vpc_id = step.create_vpc.vpc_id
|
||||
}
|
||||
}
|
||||
|
||||
step "create_primary_backend_cluster" {
|
||||
module = "backend_${matrix.primary_backend}"
|
||||
depends_on = [
|
||||
step.create_primary_cluster_backend_targets,
|
||||
]
|
||||
|
||||
providers = {
|
||||
enos = provider.enos.ubuntu
|
||||
}
|
||||
|
||||
variables {
|
||||
cluster_name = step.create_primary_cluster_backend_targets.cluster_name
|
||||
cluster_tag_key = local.backend_tag_key
|
||||
release = {
|
||||
edition = var.backend_edition
|
||||
version = matrix.consul_version
|
||||
}
|
||||
target_hosts = step.create_primary_cluster_backend_targets.hosts
|
||||
}
|
||||
}
|
||||
|
||||
step "create_primary_cluster" {
|
||||
module = module.vault_cluster
|
||||
depends_on = [
|
||||
@ -176,14 +249,16 @@ scenario "replication" {
|
||||
}
|
||||
|
||||
variables {
|
||||
artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null
|
||||
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
|
||||
cluster_name = step.create_primary_cluster_targets.cluster_name
|
||||
consul_cluster_tag = step.create_primary_backend_cluster.consul_cluster_tag
|
||||
artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null
|
||||
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
|
||||
backend_cluster_name = step.create_primary_cluster_backend_targets.cluster_name
|
||||
backend_cluster_tag_key = local.backend_tag_key
|
||||
cluster_name = step.create_primary_cluster_targets.cluster_name
|
||||
consul_release = matrix.primary_backend == "consul" ? {
|
||||
edition = var.backend_edition
|
||||
version = matrix.consul_version
|
||||
} : null
|
||||
enable_file_audit_device = var.vault_enable_file_audit_device
|
||||
install_dir = local.vault_install_dir
|
||||
license = matrix.edition != "oss" ? step.read_license.license : null
|
||||
local_artifact_path = local.bundle_path
|
||||
@ -191,46 +266,27 @@ scenario "replication" {
|
||||
storage_backend = matrix.primary_backend
|
||||
target_hosts = step.create_primary_cluster_targets.hosts
|
||||
unseal_method = matrix.primary_seal
|
||||
enable_file_audit_device = var.vault_enable_file_audit_device
|
||||
}
|
||||
}
|
||||
|
||||
step "create_secondary_backend_cluster" {
|
||||
module = "backend_${matrix.secondary_backend}"
|
||||
depends_on = [step.create_vpc]
|
||||
module = "backend_${matrix.secondary_backend}"
|
||||
depends_on = [
|
||||
step.create_secondary_cluster_backend_targets
|
||||
]
|
||||
|
||||
providers = {
|
||||
enos = provider.enos.ubuntu
|
||||
}
|
||||
|
||||
variables {
|
||||
ami_id = step.create_vpc.ami_ids["ubuntu"]["amd64"]
|
||||
common_tags = local.tags
|
||||
consul_release = {
|
||||
cluster_name = step.create_secondary_cluster_backend_targets.cluster_name
|
||||
cluster_tag_key = local.backend_tag_key
|
||||
release = {
|
||||
edition = var.backend_edition
|
||||
version = matrix.consul_version
|
||||
}
|
||||
instance_type = var.backend_instance_type
|
||||
kms_key_arn = step.create_vpc.kms_key_arn
|
||||
vpc_id = step.create_vpc.vpc_id
|
||||
}
|
||||
}
|
||||
|
||||
step "create_secondary_cluster_targets" {
|
||||
module = module.target_ec2_spot_fleet // "target_ec2_instances" can be used for on-demand instances
|
||||
depends_on = [step.create_vpc]
|
||||
|
||||
providers = {
|
||||
enos = local.enos_provider[matrix.distro]
|
||||
}
|
||||
|
||||
variables {
|
||||
ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch]
|
||||
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
|
||||
common_tags = local.tags
|
||||
instance_type = local.vault_instance_type // only used for on-demand instances
|
||||
spot_price_max = local.spot_price_max[matrix.distro]
|
||||
vpc_id = step.create_vpc.vpc_id
|
||||
target_hosts = step.create_secondary_cluster_backend_targets.hosts
|
||||
}
|
||||
}
|
||||
|
||||
@ -247,14 +303,16 @@ scenario "replication" {
|
||||
}
|
||||
|
||||
variables {
|
||||
artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null
|
||||
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
|
||||
cluster_name = step.create_secondary_cluster_targets.cluster_name
|
||||
consul_cluster_tag = step.create_secondary_backend_cluster.consul_cluster_tag
|
||||
artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null
|
||||
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
|
||||
backend_cluster_name = step.create_secondary_cluster_backend_targets.cluster_name
|
||||
backend_cluster_tag_key = local.backend_tag_key
|
||||
cluster_name = step.create_secondary_cluster_targets.cluster_name
|
||||
consul_release = matrix.secondary_backend == "consul" ? {
|
||||
edition = var.backend_edition
|
||||
version = matrix.consul_version
|
||||
} : null
|
||||
enable_file_audit_device = var.vault_enable_file_audit_device
|
||||
install_dir = local.vault_install_dir
|
||||
license = matrix.edition != "oss" ? step.read_license.license : null
|
||||
local_artifact_path = local.bundle_path
|
||||
@ -262,7 +320,6 @@ scenario "replication" {
|
||||
storage_backend = matrix.secondary_backend
|
||||
target_hosts = step.create_secondary_cluster_targets.hosts
|
||||
unseal_method = matrix.secondary_seal
|
||||
enable_file_audit_device = var.vault_enable_file_audit_device
|
||||
}
|
||||
}
|
||||
|
||||
@ -471,32 +528,14 @@ scenario "replication" {
|
||||
}
|
||||
}
|
||||
|
||||
step "create_more_primary_cluster_targets" {
|
||||
module = module.target_ec2_spot_fleet // "target_ec2_instances" can be used for on-demand instances
|
||||
depends_on = [step.create_vpc]
|
||||
|
||||
providers = {
|
||||
enos = local.enos_provider[matrix.distro]
|
||||
}
|
||||
|
||||
variables {
|
||||
ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch]
|
||||
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
|
||||
common_tags = local.tags
|
||||
instance_type = local.vault_instance_type // only used for on-demand instances
|
||||
spot_price_max = local.spot_price_max[matrix.distro]
|
||||
vpc_id = step.create_vpc.vpc_id
|
||||
}
|
||||
}
|
||||
|
||||
step "add_more_nodes_to_primary_cluster" {
|
||||
step "add_additional_nodes_to_primary_cluster" {
|
||||
module = module.vault_cluster
|
||||
depends_on = [
|
||||
step.create_vpc,
|
||||
step.create_primary_backend_cluster,
|
||||
step.create_primary_cluster,
|
||||
step.verify_replicated_data,
|
||||
step.create_more_primary_cluster_targets
|
||||
step.create_primary_cluster_additional_targets
|
||||
]
|
||||
|
||||
providers = {
|
||||
@ -504,10 +543,11 @@ scenario "replication" {
|
||||
}
|
||||
|
||||
variables {
|
||||
artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null
|
||||
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
|
||||
cluster_name = step.create_primary_cluster_targets.cluster_name
|
||||
consul_cluster_tag = step.create_primary_backend_cluster.consul_cluster_tag
|
||||
artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null
|
||||
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
|
||||
backend_cluster_name = step.create_primary_cluster_backend_targets.cluster_name
|
||||
backend_cluster_tag_key = local.backend_tag_key
|
||||
cluster_name = step.create_primary_cluster_targets.cluster_name
|
||||
consul_release = matrix.primary_backend == "consul" ? {
|
||||
edition = var.backend_edition
|
||||
version = matrix.consul_version
|
||||
@ -522,21 +562,21 @@ scenario "replication" {
|
||||
shamir_unseal_keys = matrix.primary_seal == "shamir" ? step.create_primary_cluster.unseal_keys_hex : null
|
||||
storage_backend = matrix.primary_backend
|
||||
storage_node_prefix = "newprimary_node"
|
||||
target_hosts = step.create_more_primary_cluster_targets.hosts
|
||||
target_hosts = step.create_primary_cluster_additional_targets.hosts
|
||||
unseal_method = matrix.primary_seal
|
||||
}
|
||||
}
|
||||
|
||||
step "verify_more_primary_nodes_unsealed" {
|
||||
step "verify_addtional_primary_nodes_are_unsealed" {
|
||||
module = module.vault_verify_unsealed
|
||||
depends_on = [step.add_more_nodes_to_primary_cluster]
|
||||
depends_on = [step.add_additional_nodes_to_primary_cluster]
|
||||
|
||||
providers = {
|
||||
enos = local.enos_provider[matrix.distro]
|
||||
}
|
||||
|
||||
variables {
|
||||
vault_instances = step.create_more_primary_cluster_targets.hosts
|
||||
vault_instances = step.create_primary_cluster_additional_targets.hosts
|
||||
vault_install_dir = local.vault_install_dir
|
||||
}
|
||||
}
|
||||
@ -545,9 +585,9 @@ scenario "replication" {
|
||||
skip_step = matrix.primary_backend != "raft"
|
||||
module = module.vault_verify_raft_auto_join_voter
|
||||
depends_on = [
|
||||
step.add_more_nodes_to_primary_cluster,
|
||||
step.add_additional_nodes_to_primary_cluster,
|
||||
step.create_primary_cluster,
|
||||
step.verify_more_primary_nodes_unsealed
|
||||
step.verify_addtional_primary_nodes_are_unsealed
|
||||
]
|
||||
|
||||
providers = {
|
||||
@ -555,7 +595,7 @@ scenario "replication" {
|
||||
}
|
||||
|
||||
variables {
|
||||
vault_instances = step.create_more_primary_cluster_targets.hosts
|
||||
vault_instances = step.create_primary_cluster_additional_targets.hosts
|
||||
vault_install_dir = local.vault_install_dir
|
||||
vault_root_token = step.create_primary_cluster.root_token
|
||||
}
|
||||
@ -565,7 +605,7 @@ scenario "replication" {
|
||||
module = module.shutdown_node
|
||||
depends_on = [
|
||||
step.get_primary_cluster_ips,
|
||||
step.verify_more_primary_nodes_unsealed
|
||||
step.verify_addtional_primary_nodes_are_unsealed
|
||||
]
|
||||
|
||||
providers = {
|
||||
@ -596,7 +636,7 @@ scenario "replication" {
|
||||
step "get_updated_primary_cluster_ips" {
|
||||
module = module.vault_get_cluster_ips
|
||||
depends_on = [
|
||||
step.add_more_nodes_to_primary_cluster,
|
||||
step.add_additional_nodes_to_primary_cluster,
|
||||
step.remove_primary_follower_1,
|
||||
step.remove_primary_leader
|
||||
]
|
||||
@ -608,7 +648,7 @@ scenario "replication" {
|
||||
variables {
|
||||
vault_instances = step.create_primary_cluster_targets.hosts
|
||||
vault_install_dir = local.vault_install_dir
|
||||
added_vault_instances = step.create_more_primary_cluster_targets.hosts
|
||||
added_vault_instances = step.create_primary_cluster_additional_targets.hosts
|
||||
vault_root_token = step.create_primary_cluster.root_token
|
||||
node_public_ip = step.get_primary_cluster_ips.follower_public_ip_2
|
||||
}
|
||||
@ -631,6 +671,11 @@ scenario "replication" {
|
||||
}
|
||||
}
|
||||
|
||||
output "audit_device_file_path" {
|
||||
description = "The file path for the file audit device, if enabled"
|
||||
value = step.create_primary_cluster.audit_device_file_path
|
||||
}
|
||||
|
||||
output "primary_cluster_hosts" {
|
||||
description = "The Vault primary cluster target hosts"
|
||||
value = step.create_primary_cluster_targets.hosts
|
||||
@ -638,7 +683,7 @@ scenario "replication" {
|
||||
|
||||
output "primary_cluster_additional_hosts" {
|
||||
description = "The Vault added new node on primary cluster target hosts"
|
||||
value = step.create_more_primary_cluster_targets.hosts
|
||||
value = step.create_primary_cluster_additional_targets.hosts
|
||||
}
|
||||
|
||||
output "primary_cluster_root_token" {
|
||||
@ -725,9 +770,4 @@ scenario "replication" {
|
||||
description = "The Vault updated secondary cluster primaries connection status"
|
||||
value = step.verify_updated_performance_replication.secondary_replication_data_primaries
|
||||
}
|
||||
|
||||
output "vault_audit_device_file_path" {
|
||||
description = "The file path for the file audit device, if enabled"
|
||||
value = step.create_primary_cluster.audit_device_file_path
|
||||
}
|
||||
}
|
||||
|
||||
@ -34,6 +34,7 @@ scenario "smoke" {
|
||||
]
|
||||
|
||||
locals {
|
||||
backend_tag_key = "VaultStorage"
|
||||
build_tags = {
|
||||
"oss" = ["ui"]
|
||||
"ent" = ["ui", "enterprise", "ent"]
|
||||
@ -42,32 +43,32 @@ scenario "smoke" {
|
||||
"ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"]
|
||||
}
|
||||
bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_bundle_path) : null
|
||||
packages = ["jq"]
|
||||
distro_version = {
|
||||
"rhel" = var.rhel_distro_version
|
||||
"ubuntu" = var.ubuntu_distro_version
|
||||
}
|
||||
enos_provider = {
|
||||
rhel = provider.enos.rhel
|
||||
ubuntu = provider.enos.ubuntu
|
||||
}
|
||||
spot_price_max = {
|
||||
max_price = {
|
||||
// These prices are based on on-demand cost for t3.large in us-east
|
||||
"rhel" = "0.1432"
|
||||
"ubuntu" = "0.0832"
|
||||
}
|
||||
packages = ["jq"]
|
||||
tags = merge({
|
||||
"Project Name" : var.project_name
|
||||
"Project" : "Enos",
|
||||
"Environment" : "ci"
|
||||
}, var.tags)
|
||||
vault_instance_types = {
|
||||
amd64 = "t3a.small"
|
||||
arm64 = "t4g.small"
|
||||
}
|
||||
vault_instance_type = coalesce(var.vault_instance_type, local.vault_instance_types[matrix.arch])
|
||||
vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic"))
|
||||
vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic"))
|
||||
vault_install_dir_packages = {
|
||||
rhel = "/bin"
|
||||
ubuntu = "/usr/bin"
|
||||
}
|
||||
vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : local.vault_install_dir_packages[matrix.distro]
|
||||
vault_tag_key = "Type" // enos_vault_start expects Type as the tag key
|
||||
}
|
||||
|
||||
step "get_local_metadata" {
|
||||
@ -92,29 +93,19 @@ scenario "smoke" {
|
||||
artifact_type = matrix.artifact_type
|
||||
distro = matrix.artifact_source == "artifactory" ? matrix.distro : null
|
||||
edition = matrix.artifact_source == "artifactory" ? matrix.edition : null
|
||||
instance_type = matrix.artifact_source == "artifactory" ? local.vault_instance_type : null
|
||||
revision = var.vault_revision
|
||||
}
|
||||
}
|
||||
|
||||
step "find_azs" {
|
||||
module = module.az_finder
|
||||
|
||||
variables {
|
||||
instance_type = [
|
||||
var.backend_instance_type,
|
||||
local.vault_instance_type
|
||||
]
|
||||
}
|
||||
step "ec2_info" {
|
||||
module = module.ec2_info
|
||||
}
|
||||
|
||||
step "create_vpc" {
|
||||
module = module.create_vpc
|
||||
|
||||
variables {
|
||||
ami_architectures = distinct([matrix.arch, "amd64"])
|
||||
availability_zones = step.find_azs.availability_zones
|
||||
common_tags = local.tags
|
||||
common_tags = local.tags
|
||||
}
|
||||
}
|
||||
|
||||
@ -127,29 +118,8 @@ scenario "smoke" {
|
||||
}
|
||||
}
|
||||
|
||||
step "create_backend_cluster" {
|
||||
module = "backend_${matrix.backend}"
|
||||
depends_on = [step.create_vpc]
|
||||
|
||||
providers = {
|
||||
enos = provider.enos.ubuntu
|
||||
}
|
||||
|
||||
variables {
|
||||
ami_id = step.create_vpc.ami_ids["ubuntu"]["amd64"]
|
||||
common_tags = local.tags
|
||||
consul_release = {
|
||||
edition = var.backend_edition
|
||||
version = matrix.consul_version
|
||||
}
|
||||
instance_type = var.backend_instance_type
|
||||
kms_key_arn = step.create_vpc.kms_key_arn
|
||||
vpc_id = step.create_vpc.vpc_id
|
||||
}
|
||||
}
|
||||
|
||||
step "create_vault_cluster_targets" {
|
||||
module = module.target_ec2_spot_fleet // "target_ec2_instances" can be used for on-demand instances
|
||||
module = module.target_ec2_spot_fleet
|
||||
depends_on = [step.create_vpc]
|
||||
|
||||
providers = {
|
||||
@ -157,15 +127,54 @@ scenario "smoke" {
|
||||
}
|
||||
|
||||
variables {
|
||||
ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch]
|
||||
ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][local.distro_version[matrix.distro]]
|
||||
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
|
||||
cluster_tag_key = local.vault_tag_key
|
||||
common_tags = local.tags
|
||||
instance_type = local.vault_instance_type // only used for on-demand instances
|
||||
spot_price_max = local.spot_price_max[matrix.distro]
|
||||
max_price = local.max_price[matrix.distro]
|
||||
vpc_id = step.create_vpc.vpc_id
|
||||
}
|
||||
}
|
||||
|
||||
step "create_vault_cluster_backend_targets" {
|
||||
module = module.target_ec2_spot_fleet
|
||||
depends_on = [step.create_vpc]
|
||||
|
||||
providers = {
|
||||
enos = provider.enos.ubuntu
|
||||
}
|
||||
|
||||
variables {
|
||||
ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"]
|
||||
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
|
||||
cluster_tag_key = local.backend_tag_key
|
||||
common_tags = local.tags
|
||||
max_price = local.max_price["ubuntu"]
|
||||
vpc_id = step.create_vpc.vpc_id
|
||||
}
|
||||
}
|
||||
|
||||
step "create_backend_cluster" {
|
||||
module = "backend_${matrix.backend}"
|
||||
depends_on = [
|
||||
step.create_vault_cluster_backend_targets,
|
||||
]
|
||||
|
||||
providers = {
|
||||
enos = provider.enos.ubuntu
|
||||
}
|
||||
|
||||
variables {
|
||||
cluster_name = step.create_vault_cluster_backend_targets.cluster_name
|
||||
cluster_tag_key = local.backend_tag_key
|
||||
release = {
|
||||
edition = var.backend_edition
|
||||
version = matrix.consul_version
|
||||
}
|
||||
target_hosts = step.create_vault_cluster_backend_targets.hosts
|
||||
}
|
||||
}
|
||||
|
||||
step "create_vault_cluster" {
|
||||
module = module.vault_cluster
|
||||
depends_on = [
|
||||
@ -179,14 +188,16 @@ scenario "smoke" {
|
||||
}
|
||||
|
||||
variables {
|
||||
artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null
|
||||
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
|
||||
cluster_name = step.create_vault_cluster_targets.cluster_name
|
||||
consul_cluster_tag = step.create_backend_cluster.consul_cluster_tag
|
||||
artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null
|
||||
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
|
||||
backend_cluster_name = step.create_vault_cluster_backend_targets.cluster_name
|
||||
backend_cluster_tag_key = local.backend_tag_key
|
||||
cluster_name = step.create_vault_cluster_targets.cluster_name
|
||||
consul_release = matrix.backend == "consul" ? {
|
||||
edition = var.backend_edition
|
||||
version = matrix.consul_version
|
||||
} : null
|
||||
enable_file_audit_device = var.vault_enable_file_audit_device
|
||||
install_dir = local.vault_install_dir
|
||||
license = matrix.edition != "oss" ? step.read_license.license : null
|
||||
local_artifact_path = local.bundle_path
|
||||
@ -194,7 +205,6 @@ scenario "smoke" {
|
||||
storage_backend = matrix.backend
|
||||
target_hosts = step.create_vault_cluster_targets.hosts
|
||||
unseal_method = matrix.seal
|
||||
enable_file_audit_device = var.vault_enable_file_audit_device
|
||||
}
|
||||
}
|
||||
|
||||
@ -328,6 +338,11 @@ scenario "smoke" {
|
||||
}
|
||||
}
|
||||
|
||||
output "audit_device_file_path" {
|
||||
description = "The file path for the file audit device, if enabled"
|
||||
value = step.create_vault_cluster.audit_device_file_path
|
||||
}
|
||||
|
||||
output "awskms_unseal_key_arn" {
|
||||
description = "The Vault cluster KMS key arn"
|
||||
value = step.create_vpc.kms_key_arn
|
||||
@ -382,9 +397,4 @@ scenario "smoke" {
|
||||
description = "The Vault cluster unseal keys hex"
|
||||
value = step.create_vault_cluster.unseal_keys_hex
|
||||
}
|
||||
|
||||
output "vault_audit_device_file_path" {
|
||||
description = "The file path for the file audit device, if enabled"
|
||||
value = step.create_vault_cluster.audit_device_file_path
|
||||
}
|
||||
}
|
||||
|
||||
@ -15,33 +15,35 @@ scenario "ui" {
|
||||
]
|
||||
|
||||
locals {
|
||||
arch = "amd64"
|
||||
distro = "ubuntu"
|
||||
seal = "awskms"
|
||||
artifact_type = "bundle"
|
||||
consul_version = "1.14.2"
|
||||
arch = "amd64"
|
||||
artifact_type = "bundle"
|
||||
backend_tag_key = "VaultStorage"
|
||||
build_tags = {
|
||||
"oss" = ["ui"]
|
||||
"ent" = ["ui", "enterprise", "ent"]
|
||||
}
|
||||
bundle_path = abspath(var.vault_bundle_path)
|
||||
bundle_path = abspath(var.vault_bundle_path)
|
||||
distro = "ubuntu"
|
||||
consul_version = "1.14.2"
|
||||
max_price = {
|
||||
// These prices are based on on-demand cost for t3.large in us-east
|
||||
"rhel" = "0.1432"
|
||||
"ubuntu" = "0.0832"
|
||||
}
|
||||
seal = "awskms"
|
||||
tags = merge({
|
||||
"Project Name" : var.project_name
|
||||
"Project" : "Enos",
|
||||
"Environment" : "ci"
|
||||
}, var.tags)
|
||||
vault_instance_types = {
|
||||
amd64 = "t3a.small"
|
||||
arm64 = "t4g.small"
|
||||
}
|
||||
vault_instance_type = coalesce(var.vault_instance_type, local.vault_instance_types[local.arch])
|
||||
vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic"))
|
||||
vault_install_dir_packages = {
|
||||
rhel = "/bin"
|
||||
ubuntu = "/usr/bin"
|
||||
}
|
||||
vault_install_dir = var.vault_install_dir
|
||||
ui_test_filter = var.ui_test_filter != null && try(trimspace(var.ui_test_filter), "") != "" ? var.ui_test_filter : (matrix.edition == "oss") ? "!enterprise" : null
|
||||
vault_install_dir = var.vault_install_dir
|
||||
vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic"))
|
||||
vault_tag_key = "Type" // enos_vault_start expects Type as the tag key
|
||||
ui_test_filter = var.ui_test_filter != null && try(trimspace(var.ui_test_filter), "") != "" ? var.ui_test_filter : (matrix.edition == "oss") ? "!enterprise" : null
|
||||
}
|
||||
|
||||
step "get_local_metadata" {
|
||||
@ -62,24 +64,15 @@ scenario "ui" {
|
||||
}
|
||||
}
|
||||
|
||||
step "find_azs" {
|
||||
module = module.az_finder
|
||||
|
||||
variables {
|
||||
instance_type = [
|
||||
var.backend_instance_type,
|
||||
local.vault_instance_type
|
||||
]
|
||||
}
|
||||
step "ec2_info" {
|
||||
module = module.ec2_info
|
||||
}
|
||||
|
||||
step "create_vpc" {
|
||||
module = module.create_vpc
|
||||
|
||||
variables {
|
||||
ami_architectures = [local.arch]
|
||||
availability_zones = step.find_azs.availability_zones
|
||||
common_tags = local.tags
|
||||
common_tags = local.tags
|
||||
}
|
||||
}
|
||||
|
||||
@ -92,8 +85,8 @@ scenario "ui" {
|
||||
}
|
||||
}
|
||||
|
||||
step "create_backend_cluster" {
|
||||
module = "backend_${matrix.backend}"
|
||||
step "create_vault_cluster_targets" {
|
||||
module = module.target_ec2_spot_fleet
|
||||
depends_on = [step.create_vpc]
|
||||
|
||||
providers = {
|
||||
@ -101,20 +94,17 @@ scenario "ui" {
|
||||
}
|
||||
|
||||
variables {
|
||||
ami_id = step.create_vpc.ami_ids["ubuntu"]["amd64"]
|
||||
common_tags = local.tags
|
||||
consul_release = {
|
||||
edition = var.backend_edition
|
||||
version = local.consul_version
|
||||
}
|
||||
instance_type = var.backend_instance_type
|
||||
kms_key_arn = step.create_vpc.kms_key_arn
|
||||
vpc_id = step.create_vpc.vpc_id
|
||||
ami_id = step.ec2_info.ami_ids[local.arch][local.distro][var.ubuntu_distro_version]
|
||||
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
|
||||
cluster_tag_key = local.vault_tag_key
|
||||
common_tags = local.tags
|
||||
max_price = local.max_price[local.distro]
|
||||
vpc_id = step.create_vpc.vpc_id
|
||||
}
|
||||
}
|
||||
|
||||
step "create_vault_cluster_targets" {
|
||||
module = module.target_ec2_spot_fleet // "target_ec2_instances" can be used for on-demand instances
|
||||
step "create_vault_cluster_backend_targets" {
|
||||
module = module.target_ec2_spot_fleet
|
||||
depends_on = [step.create_vpc]
|
||||
|
||||
providers = {
|
||||
@ -122,14 +112,36 @@ scenario "ui" {
|
||||
}
|
||||
|
||||
variables {
|
||||
ami_id = step.create_vpc.ami_ids[local.distro][local.arch]
|
||||
ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"]
|
||||
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
|
||||
cluster_tag_key = local.backend_tag_key
|
||||
common_tags = local.tags
|
||||
instance_type = local.vault_instance_type // only used for on-demand instances
|
||||
max_price = local.max_price["ubuntu"]
|
||||
vpc_id = step.create_vpc.vpc_id
|
||||
}
|
||||
}
|
||||
|
||||
step "create_backend_cluster" {
|
||||
module = "backend_${matrix.backend}"
|
||||
depends_on = [
|
||||
step.create_vault_cluster_backend_targets,
|
||||
]
|
||||
|
||||
providers = {
|
||||
enos = provider.enos.ubuntu
|
||||
}
|
||||
|
||||
variables {
|
||||
cluster_name = step.create_vault_cluster_backend_targets.cluster_name
|
||||
cluster_tag_key = local.backend_tag_key
|
||||
release = {
|
||||
edition = var.backend_edition
|
||||
version = local.consul_version
|
||||
}
|
||||
target_hosts = step.create_vault_cluster_backend_targets.hosts
|
||||
}
|
||||
}
|
||||
|
||||
step "create_vault_cluster" {
|
||||
module = module.vault_cluster
|
||||
depends_on = [
|
||||
@ -143,23 +155,21 @@ scenario "ui" {
|
||||
}
|
||||
|
||||
variables {
|
||||
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
|
||||
cluster_name = step.create_vault_cluster_targets.cluster_name
|
||||
config_env_vars = {
|
||||
VAULT_LOG_LEVEL = var.vault_log_level
|
||||
}
|
||||
consul_cluster_tag = step.create_backend_cluster.consul_cluster_tag
|
||||
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
|
||||
backend_cluster_name = step.create_vault_cluster_backend_targets.cluster_name
|
||||
backend_cluster_tag_key = local.backend_tag_key
|
||||
cluster_name = step.create_vault_cluster_targets.cluster_name
|
||||
consul_release = matrix.backend == "consul" ? {
|
||||
edition = var.backend_edition
|
||||
version = local.consul_version
|
||||
} : null
|
||||
enable_file_audit_device = var.vault_enable_file_audit_device
|
||||
install_dir = local.vault_install_dir
|
||||
license = matrix.edition != "oss" ? step.read_license.license : null
|
||||
local_artifact_path = local.bundle_path
|
||||
storage_backend = matrix.backend
|
||||
target_hosts = step.create_vault_cluster_targets.hosts
|
||||
unseal_method = local.seal
|
||||
enable_file_audit_device = var.vault_enable_file_audit_device
|
||||
}
|
||||
}
|
||||
|
||||
@ -175,6 +185,11 @@ scenario "ui" {
|
||||
}
|
||||
}
|
||||
|
||||
output "audit_device_file_path" {
|
||||
description = "The file path for the file audit device, if enabled"
|
||||
value = step.create_vault_cluster.audit_device_file_path
|
||||
}
|
||||
|
||||
output "awskms_unseal_key_arn" {
|
||||
description = "The Vault cluster KMS key arn"
|
||||
value = step.create_vpc.kms_key_arn
|
||||
@ -200,11 +215,6 @@ scenario "ui" {
|
||||
value = step.create_vault_cluster.public_ips
|
||||
}
|
||||
|
||||
output "root_token" {
|
||||
description = "The Vault cluster root token"
|
||||
value = step.create_vault_cluster.root_token
|
||||
}
|
||||
|
||||
output "recovery_key_shares" {
|
||||
description = "The Vault cluster recovery key shares"
|
||||
value = step.create_vault_cluster.recovery_key_shares
|
||||
@ -220,14 +230,9 @@ scenario "ui" {
|
||||
value = step.create_vault_cluster.recovery_keys_hex
|
||||
}
|
||||
|
||||
output "unseal_keys_b64" {
|
||||
description = "The Vault cluster unseal keys"
|
||||
value = step.create_vault_cluster.unseal_keys_b64
|
||||
}
|
||||
|
||||
output "unseal_keys_hex" {
|
||||
description = "The Vault cluster unseal keys hex"
|
||||
value = step.create_vault_cluster.unseal_keys_hex
|
||||
output "root_token" {
|
||||
description = "The Vault cluster root token"
|
||||
value = step.create_vault_cluster.root_token
|
||||
}
|
||||
|
||||
output "ui_test_environment" {
|
||||
@ -245,8 +250,13 @@ scenario "ui" {
|
||||
value = step.test_ui.ui_test_stdout
|
||||
}
|
||||
|
||||
output "vault_audit_device_file_path" {
|
||||
description = "The file path for the file audit device, if enabled"
|
||||
value = step.create_vault_cluster.audit_device_file_path
|
||||
output "unseal_keys_b64" {
|
||||
description = "The Vault cluster unseal keys"
|
||||
value = step.create_vault_cluster.unseal_keys_b64
|
||||
}
|
||||
|
||||
output "unseal_keys_hex" {
|
||||
description = "The Vault cluster unseal keys hex"
|
||||
value = step.create_vault_cluster.unseal_keys_hex
|
||||
}
|
||||
}
|
||||
|
||||
@ -28,6 +28,7 @@ scenario "upgrade" {
|
||||
]
|
||||
|
||||
locals {
|
||||
backend_tag_key = "VaultStorage"
|
||||
build_tags = {
|
||||
"oss" = ["ui"]
|
||||
"ent" = ["ui", "enterprise", "ent"]
|
||||
@ -36,32 +37,32 @@ scenario "upgrade" {
|
||||
"ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"]
|
||||
}
|
||||
bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_bundle_path) : null
|
||||
packages = ["jq"]
|
||||
distro_version = {
|
||||
"rhel" = var.rhel_distro_version
|
||||
"ubuntu" = var.ubuntu_distro_version
|
||||
}
|
||||
enos_provider = {
|
||||
rhel = provider.enos.rhel
|
||||
ubuntu = provider.enos.ubuntu
|
||||
}
|
||||
spot_price_max = {
|
||||
max_price = {
|
||||
// These prices are based on on-demand cost for t3.large in us-east
|
||||
"rhel" = "0.1432"
|
||||
"ubuntu" = "0.0832"
|
||||
}
|
||||
packages = ["jq"]
|
||||
tags = merge({
|
||||
"Project Name" : var.project_name
|
||||
"Project" : "Enos",
|
||||
"Environment" : "ci"
|
||||
}, var.tags)
|
||||
vault_instance_types = {
|
||||
amd64 = "t3a.small"
|
||||
arm64 = "t4g.small"
|
||||
}
|
||||
vault_instance_type = coalesce(var.vault_instance_type, local.vault_instance_types[matrix.arch])
|
||||
vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic"))
|
||||
vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic"))
|
||||
vault_install_dir_packages = {
|
||||
rhel = "/bin"
|
||||
ubuntu = "/usr/bin"
|
||||
}
|
||||
vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : local.vault_install_dir_packages[matrix.distro]
|
||||
vault_tag_key = "Type" // enos_vault_start expects Type as the tag key
|
||||
}
|
||||
|
||||
# This step gets/builds the upgrade artifact that we will upgrade to
|
||||
@ -82,29 +83,19 @@ scenario "upgrade" {
|
||||
artifact_type = matrix.artifact_type
|
||||
distro = matrix.artifact_source == "artifactory" ? matrix.distro : null
|
||||
edition = matrix.artifact_source == "artifactory" ? matrix.edition : null
|
||||
instance_type = matrix.artifact_source == "artifactory" ? local.vault_instance_type : null
|
||||
revision = var.vault_revision
|
||||
}
|
||||
}
|
||||
|
||||
step "find_azs" {
|
||||
module = module.az_finder
|
||||
|
||||
variables {
|
||||
instance_type = [
|
||||
var.backend_instance_type,
|
||||
local.vault_instance_type,
|
||||
]
|
||||
}
|
||||
step "ec2_info" {
|
||||
module = module.ec2_info
|
||||
}
|
||||
|
||||
step "create_vpc" {
|
||||
module = module.create_vpc
|
||||
|
||||
variables {
|
||||
ami_architectures = distinct([matrix.arch, "amd64"])
|
||||
availability_zones = step.find_azs.availability_zones
|
||||
common_tags = local.tags
|
||||
common_tags = local.tags
|
||||
}
|
||||
}
|
||||
|
||||
@ -122,29 +113,8 @@ scenario "upgrade" {
|
||||
module = module.get_local_metadata
|
||||
}
|
||||
|
||||
step "create_backend_cluster" {
|
||||
module = "backend_${matrix.backend}"
|
||||
depends_on = [step.create_vpc]
|
||||
|
||||
providers = {
|
||||
enos = provider.enos.ubuntu
|
||||
}
|
||||
|
||||
variables {
|
||||
ami_id = step.create_vpc.ami_ids["ubuntu"]["amd64"]
|
||||
common_tags = local.tags
|
||||
consul_release = {
|
||||
edition = var.backend_edition
|
||||
version = matrix.consul_version
|
||||
}
|
||||
instance_type = var.backend_instance_type
|
||||
kms_key_arn = step.create_vpc.kms_key_arn
|
||||
vpc_id = step.create_vpc.vpc_id
|
||||
}
|
||||
}
|
||||
|
||||
step "create_vault_cluster_targets" {
|
||||
module = module.target_ec2_spot_fleet // "target_ec2_instances" can be used for on-demand instances
|
||||
module = module.target_ec2_spot_fleet
|
||||
depends_on = [step.create_vpc]
|
||||
|
||||
providers = {
|
||||
@ -152,15 +122,54 @@ scenario "upgrade" {
|
||||
}
|
||||
|
||||
variables {
|
||||
ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch]
|
||||
ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][local.distro_version[matrix.distro]]
|
||||
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
|
||||
cluster_tag_key = local.vault_tag_key
|
||||
common_tags = local.tags
|
||||
instance_type = local.vault_instance_type // only used for on-demand instances
|
||||
spot_price_max = local.spot_price_max[matrix.distro]
|
||||
max_price = local.max_price[matrix.distro]
|
||||
vpc_id = step.create_vpc.vpc_id
|
||||
}
|
||||
}
|
||||
|
||||
step "create_vault_cluster_backend_targets" {
|
||||
module = module.target_ec2_spot_fleet
|
||||
depends_on = [step.create_vpc]
|
||||
|
||||
providers = {
|
||||
enos = provider.enos.ubuntu
|
||||
}
|
||||
|
||||
variables {
|
||||
ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"]
|
||||
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
|
||||
cluster_tag_key = local.backend_tag_key
|
||||
common_tags = local.tags
|
||||
max_price = local.max_price["ubuntu"]
|
||||
vpc_id = step.create_vpc.vpc_id
|
||||
}
|
||||
}
|
||||
|
||||
step "create_backend_cluster" {
|
||||
module = "backend_${matrix.backend}"
|
||||
depends_on = [
|
||||
step.create_vault_cluster_backend_targets,
|
||||
]
|
||||
|
||||
providers = {
|
||||
enos = provider.enos.ubuntu
|
||||
}
|
||||
|
||||
variables {
|
||||
cluster_name = step.create_vault_cluster_backend_targets.cluster_name
|
||||
cluster_tag_key = local.backend_tag_key
|
||||
release = {
|
||||
edition = var.backend_edition
|
||||
version = matrix.consul_version
|
||||
}
|
||||
target_hosts = step.create_vault_cluster_backend_targets.hosts
|
||||
}
|
||||
}
|
||||
|
||||
step "create_vault_cluster" {
|
||||
module = module.vault_cluster
|
||||
depends_on = [
|
||||
@ -174,13 +183,15 @@ scenario "upgrade" {
|
||||
}
|
||||
|
||||
variables {
|
||||
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
|
||||
cluster_name = step.create_vault_cluster_targets.cluster_name
|
||||
consul_cluster_tag = step.create_backend_cluster.consul_cluster_tag
|
||||
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
|
||||
backend_cluster_name = step.create_vault_cluster_backend_targets.cluster_name
|
||||
backend_cluster_tag_key = local.backend_tag_key
|
||||
cluster_name = step.create_vault_cluster_targets.cluster_name
|
||||
consul_release = matrix.backend == "consul" ? {
|
||||
edition = var.backend_edition
|
||||
version = matrix.consul_version
|
||||
} : null
|
||||
enable_file_audit_device = var.vault_enable_file_audit_device
|
||||
install_dir = local.vault_install_dir
|
||||
license = matrix.edition != "oss" ? step.read_license.license : null
|
||||
packages = local.packages
|
||||
@ -188,7 +199,6 @@ scenario "upgrade" {
|
||||
storage_backend = matrix.backend
|
||||
target_hosts = step.create_vault_cluster_targets.hosts
|
||||
unseal_method = matrix.seal
|
||||
enable_file_audit_device = var.vault_enable_file_audit_device
|
||||
}
|
||||
}
|
||||
|
||||
@ -345,6 +355,11 @@ scenario "upgrade" {
|
||||
}
|
||||
}
|
||||
|
||||
output "audit_device_file_path" {
|
||||
description = "The file path for the file audit device, if enabled"
|
||||
value = step.create_vault_cluster.audit_device_file_path
|
||||
}
|
||||
|
||||
output "awskms_unseal_key_arn" {
|
||||
description = "The Vault cluster KMS key arn"
|
||||
value = step.create_vpc.kms_key_arn
|
||||
@ -399,9 +414,4 @@ scenario "upgrade" {
|
||||
description = "The Vault cluster unseal keys hex"
|
||||
value = step.create_vault_cluster.unseal_keys_hex
|
||||
}
|
||||
|
||||
output "vault_audit_device_file_path" {
|
||||
description = "The file path for the file audit device, if enabled"
|
||||
value = step.create_vault_cluster.audit_device_file_path
|
||||
}
|
||||
}
|
||||
|
||||
@ -58,9 +58,9 @@ variable "backend_edition" {
|
||||
}
|
||||
|
||||
variable "backend_instance_type" {
|
||||
description = "The instance type to use for the Vault backend"
|
||||
description = "The instance type to use for the Vault backend. Must be arm64/nitro compatible"
|
||||
type = string
|
||||
default = "t3.small"
|
||||
default = "t4g.small"
|
||||
}
|
||||
|
||||
variable "backend_license_path" {
|
||||
@ -75,12 +75,50 @@ variable "backend_log_level" {
|
||||
default = "trace"
|
||||
}
|
||||
|
||||
variable "operator_instance" {
|
||||
type = string
|
||||
description = "The ip address of the operator (Voter) node"
|
||||
}
|
||||
|
||||
variable "project_name" {
|
||||
description = "The description of the project"
|
||||
type = string
|
||||
default = "vault-enos-integration"
|
||||
}
|
||||
|
||||
variable "remove_vault_instances" {
|
||||
type = map(object({
|
||||
private_ip = string
|
||||
public_ip = string
|
||||
}))
|
||||
description = "The old vault nodes to be removed"
|
||||
}
|
||||
|
||||
|
||||
variable "ui_test_filter" {
|
||||
type = string
|
||||
description = "A test filter to limit the ui tests to execute. Will be appended to the ember test command as '-f=\"<filter>\"'"
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "ui_run_tests" {
|
||||
type = bool
|
||||
description = "Whether to run the UI tests or not. If set to false a cluster will be created but no tests will be run"
|
||||
default = true
|
||||
}
|
||||
|
||||
variable "vault_enable_file_audit_device" {
|
||||
description = "If true the file audit device will be enabled at the path /var/log/vault_audit.log"
|
||||
type = bool
|
||||
default = true
|
||||
}
|
||||
|
||||
variable "rhel_distro_version" {
|
||||
description = "The version of RHEL to use"
|
||||
type = string
|
||||
default = "9.1" // or "8.8"
|
||||
}
|
||||
|
||||
variable "tags" {
|
||||
description = "Tags that will be applied to infrastructure resources that support tagging"
|
||||
type = map(string)
|
||||
@ -99,6 +137,12 @@ variable "tfc_api_token" {
|
||||
sensitive = true
|
||||
}
|
||||
|
||||
variable "ubuntu_distro_version" {
|
||||
description = "The version of ubuntu to use"
|
||||
type = string
|
||||
default = "22.04" // or "20.04", "18.04"
|
||||
}
|
||||
|
||||
variable "vault_artifact_type" {
|
||||
description = "The Vault artifact type package or bundle"
|
||||
default = "bundle"
|
||||
@ -180,35 +224,3 @@ variable "vault_upgrade_initial_release" {
|
||||
version = "1.10.4"
|
||||
}
|
||||
}
|
||||
|
||||
variable "operator_instance" {
|
||||
type = string
|
||||
description = "The ip address of the operator (Voter) node"
|
||||
}
|
||||
|
||||
variable "remove_vault_instances" {
|
||||
type = map(object({
|
||||
private_ip = string
|
||||
public_ip = string
|
||||
}))
|
||||
description = "The old vault nodes to be removed"
|
||||
}
|
||||
|
||||
|
||||
variable "ui_test_filter" {
|
||||
type = string
|
||||
description = "A test filter to limit the ui tests to execute. Will be appended to the ember test command as '-f=\"<filter>\"'"
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "ui_run_tests" {
|
||||
type = bool
|
||||
description = "Whether to run the UI tests or not. If set to false a cluster will be created but no tests will be run"
|
||||
default = true
|
||||
}
|
||||
|
||||
variable "vault_enable_file_audit_device" {
|
||||
description = "If true the file audit device will be enabled at the path /var/log/vault_audit.log"
|
||||
type = bool
|
||||
default = true
|
||||
}
|
||||
|
||||
56
enos/modules/backend_consul/main.tf
Normal file
56
enos/modules/backend_consul/main.tf
Normal file
@ -0,0 +1,56 @@
|
||||
# Copyright (c) HashiCorp, Inc.
|
||||
# SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
terraform {
|
||||
required_version = ">= 1.2.0"
|
||||
|
||||
required_providers {
|
||||
enos = {
|
||||
source = "app.terraform.io/hashicorp-qti/enos"
|
||||
version = ">= 0.4.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
locals {
|
||||
bin_path = "${var.install_dir}/consul"
|
||||
}
|
||||
|
||||
resource "enos_bundle_install" "consul" {
|
||||
for_each = var.target_hosts
|
||||
|
||||
destination = var.install_dir
|
||||
release = merge(var.release, { product = "consul" })
|
||||
|
||||
transport = {
|
||||
ssh = {
|
||||
host = each.value.public_ip
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resource "enos_consul_start" "consul" {
|
||||
for_each = enos_bundle_install.consul
|
||||
|
||||
bin_path = local.bin_path
|
||||
data_dir = var.data_dir
|
||||
config_dir = var.config_dir
|
||||
config = {
|
||||
data_dir = var.data_dir
|
||||
datacenter = "dc1"
|
||||
retry_join = ["provider=aws tag_key=${var.cluster_tag_key} tag_value=${var.cluster_name}"]
|
||||
server = true
|
||||
bootstrap_expect = length(var.target_hosts)
|
||||
log_level = var.log_level
|
||||
log_file = var.log_dir
|
||||
}
|
||||
license = var.license
|
||||
unit_name = "consul"
|
||||
username = "consul"
|
||||
|
||||
transport = {
|
||||
ssh = {
|
||||
host = var.target_hosts[each.key].public_ip
|
||||
}
|
||||
}
|
||||
}
|
||||
18
enos/modules/backend_consul/outputs.tf
Normal file
18
enos/modules/backend_consul/outputs.tf
Normal file
@ -0,0 +1,18 @@
|
||||
# Copyright (c) HashiCorp, Inc.
|
||||
# SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
output "private_ips" {
  description = "Consul cluster target host private_ips"
  value       = values(var.target_hosts)[*].private_ip
}
|
||||
|
||||
output "public_ips" {
  description = "Consul cluster target host public_ips"
  value       = values(var.target_hosts)[*].public_ip
}
|
||||
|
||||
output "target_hosts" {
  description = "The Consul cluster instances that were created"
  value       = var.target_hosts
}
|
||||
76
enos/modules/backend_consul/variables.tf
Normal file
76
enos/modules/backend_consul/variables.tf
Normal file
@ -0,0 +1,76 @@
|
||||
# Copyright (c) HashiCorp, Inc.
|
||||
# SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
# Inputs for the backend_consul module. Cluster identity, install locations,
# release selection, and the target hosts to install consul on.

variable "cluster_name" {
  description = "The name of the Consul cluster"
  type        = string
  default     = null
}

variable "cluster_tag_key" {
  description = "The tag key for searching for Consul nodes"
  type        = string
  default     = null
}

variable "config_dir" {
  description = "The directory where the consul will write config files"
  type        = string
  default     = "/etc/consul.d"
}

variable "data_dir" {
  description = "The directory where the consul will store data"
  type        = string
  default     = "/opt/consul/data"
}

variable "install_dir" {
  description = "The directory where the consul binary will be installed"
  type        = string
  default     = "/opt/consul/bin"
}

variable "license" {
  description = "The consul enterprise license"
  type        = string
  sensitive   = true
  default     = null
}

variable "log_dir" {
  description = "The directory where the consul will write log files"
  type        = string
  default     = "/var/log/consul.d"
}

variable "log_level" {
  description = "The consul service log level"
  type        = string
  default     = "info"

  validation {
    condition     = contains(["trace", "debug", "info", "warn", "error"], var.log_level)
    error_message = "The log_level must be one of 'trace', 'debug', 'info', 'warn', or 'error'."
  }
}

variable "release" {
  description = "Consul release version and edition to install from releases.hashicorp.com"
  type = object({
    version = string
    edition = string
  })
  default = {
    version = "1.15.3"
    edition = "oss"
  }
}

variable "target_hosts" {
  description = "The target machines host addresses to use for the consul cluster"
  type = map(object({
    private_ip = string
    public_ip  = string
  }))
}
|
||||
@ -1,49 +1,66 @@
|
||||
# Copyright (c) HashiCorp, Inc.
|
||||
# SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
// Shim module to handle the fact that Vault doesn't actually need a backend module
|
||||
// Shim module to handle the fact that Vault doesn't actually need a backend module when we use raft.
|
||||
terraform {
|
||||
required_version = ">= 1.2.0"
|
||||
|
||||
required_providers {
|
||||
aws = {
|
||||
source = "hashicorp/aws"
|
||||
}
|
||||
enos = {
|
||||
source = "app.terraform.io/hashicorp-qti/enos"
|
||||
source = "app.terraform.io/hashicorp-qti/enos"
|
||||
version = ">= 0.4.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
variable "ami_id" {
|
||||
default = null
|
||||
}
|
||||
variable "common_tags" {
|
||||
default = null
|
||||
}
|
||||
variable "consul_license" {
|
||||
default = null
|
||||
}
|
||||
variable "consul_release" {
|
||||
default = null
|
||||
}
|
||||
variable "environment" {
|
||||
default = null
|
||||
}
|
||||
variable "instance_type" {
|
||||
default = null
|
||||
}
|
||||
variable "kms_key_arn" {
|
||||
default = null
|
||||
}
|
||||
variable "project_name" {
|
||||
default = null
|
||||
}
|
||||
variable "ssh_aws_keypair" {
|
||||
default = null
|
||||
}
|
||||
variable "vpc_id" {
|
||||
variable "cluster_name" {
|
||||
default = null
|
||||
}
|
||||
|
||||
output "consul_cluster_tag" {
|
||||
value = null
|
||||
variable "cluster_tag_key" {
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "config_dir" {
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "data_dir" {
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "install_dir" {
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "license" {
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "log_dir" {
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "log_level" {
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "release" {
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "target_hosts" {
|
||||
default = null
|
||||
}
|
||||
|
||||
output "private_ips" {
|
||||
value = [for host in var.target_hosts : host.private_ip]
|
||||
}
|
||||
|
||||
output "public_ips" {
|
||||
value = [for host in var.target_hosts : host.public_ip]
|
||||
}
|
||||
|
||||
output "target_hosts" {
|
||||
value = var.target_hosts
|
||||
}
|
||||
|
||||
@ -41,9 +41,6 @@ variable "distro" {
|
||||
variable "edition" {
|
||||
default = null
|
||||
}
|
||||
variable "instance_type" {
|
||||
default = null
|
||||
}
|
||||
variable "revision" {
|
||||
default = null
|
||||
}
|
||||
|
||||
@ -50,9 +50,6 @@ variable "distro" {
|
||||
variable "edition" {
|
||||
default = null
|
||||
}
|
||||
variable "instance_type" {
|
||||
default = null
|
||||
}
|
||||
variable "revision" {
|
||||
default = null
|
||||
}
|
||||
|
||||
91
enos/modules/create_vpc/main.tf
Normal file
91
enos/modules/create_vpc/main.tf
Normal file
@ -0,0 +1,91 @@
|
||||
data "aws_region" "current" {}

# Random suffix so the KMS alias stays unique across concurrent CI runs.
resource "random_string" "cluster_id" {
  length  = 8
  lower   = true
  upper   = false
  numeric = false
  special = false
}

resource "aws_kms_key" "key" {
  count                   = var.create_kms_key ? 1 : 0
  description             = "vault-ci-kms-key"
  deletion_window_in_days = 7 // 7 is the shortest allowed window
}

resource "aws_kms_alias" "alias" {
  count         = var.create_kms_key ? 1 : 0
  name          = "alias/enos_key-${random_string.cluster_id.result}"
  target_key_id = aws_kms_key.key[0].key_id
}
|
||||
|
||||
resource "aws_vpc" "vpc" {
  cidr_block           = var.cidr
  enable_dns_hostnames = true
  enable_dns_support   = true

  tags = merge(
    var.common_tags,
    {
      "Name" = var.name
    },
  )
}

# A single public subnet that spans the entire VPC CIDR. The flattened
# topology keeps these short-lived CI VPCs simple.
resource "aws_subnet" "subnet" {
  vpc_id                  = aws_vpc.vpc.id
  cidr_block              = var.cidr
  map_public_ip_on_launch = true

  tags = merge(
    var.common_tags,
    {
      "Name" = "${var.name}-subnet"
    },
  )
}

resource "aws_internet_gateway" "igw" {
  vpc_id = aws_vpc.vpc.id

  tags = merge(
    var.common_tags,
    {
      "Name" = "${var.name}-igw"
    },
  )
}

# Route all egress traffic out the internet gateway via the default route
# table rather than creating a dedicated route table.
resource "aws_route" "igw" {
  route_table_id         = aws_vpc.vpc.default_route_table_id
  destination_cidr_block = "0.0.0.0/0"
  gateway_id             = aws_internet_gateway.igw.id
}
|
||||
|
||||
resource "aws_security_group" "default" {
  vpc_id = aws_vpc.vpc.id

  ingress {
    description = "allow_ingress_from_all"
    # Previously this rule used from_port = 0, to_port = 0 with protocol
    # "tcp", which AWS interprets as TCP port 0 only and so did not match
    # the stated "allow ingress from all" intent. Open the full TCP range.
    from_port   = 0
    to_port     = 65535
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  egress {
    description = "allow_egress_from_all"
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = merge(
    var.common_tags,
    {
      "Name" = "${var.name}-default"
    },
  )
}
|
||||
24
enos/modules/create_vpc/outputs.tf
Normal file
24
enos/modules/create_vpc/outputs.tf
Normal file
@ -0,0 +1,24 @@
|
||||
output "aws_region" {
  description = "AWS Region for resources"
  value       = data.aws_region.current.name
}

output "vpc_id" {
  description = "Created VPC ID"
  value       = aws_vpc.vpc.id
}

output "vpc_cidr" {
  description = "CIDR for whole VPC"
  value       = var.cidr
}

# The KMS resources are conditional on var.create_kms_key, so fall back to
# null when they were not created.
output "kms_key_arn" {
  description = "ARN of the generated KMS key"
  value       = try(aws_kms_key.key[0].arn, null)
}

output "kms_key_alias" {
  description = "Alias of the generated KMS key"
  value       = try(aws_kms_alias.alias[0].name, null)
}
|
||||
29
enos/modules/create_vpc/variables.tf
Normal file
29
enos/modules/create_vpc/variables.tf
Normal file
@ -0,0 +1,29 @@
|
||||
# Inputs for the create_vpc module.

variable "name" {
  type        = string
  default     = "vault-ci"
  description = "The name of the VPC"
}

variable "cidr" {
  type        = string
  default     = "10.13.0.0/16"
  description = "CIDR block for the VPC"
}

variable "environment" {
  description = "Name of the environment."
  type        = string
  default     = "vault-ci"
}

variable "common_tags" {
  description = "Tags to set for all resources"
  type        = map(string)
  default     = { "Project" : "vault-ci" }
}

variable "create_kms_key" {
  # Description previously read "an key management service key".
  description = "Whether or not to create a key management service key"
  type        = bool
  default     = true
}
|
||||
187
enos/modules/ec2_info/main.tf
Normal file
187
enos/modules/ec2_info/main.tf
Normal file
@ -0,0 +1,187 @@
|
||||
locals {
  # AWS reports instance architectures as "arm64" and "x86_64".
  architectures      = toset(["arm64", "x86_64"])
  canonical_owner_id = "099720109477" # Canonical (Ubuntu AMIs)
  rhel_owner_id      = "309956199498" # Red Hat (RHEL AMIs)
  # AMI ids keyed by arch/distro/version. The outer keys use the
  # "arm64"/"amd64" naming, while the AWS data sources are keyed by the
  # AWS-style "arm64"/"x86_64".
  ids = {
    "arm64" = {
      "rhel" = {
        "8.8" = data.aws_ami.rhel_88["arm64"].id
        "9.1" = data.aws_ami.rhel_91["arm64"].id
      }
      "ubuntu" = {
        "18.04" = data.aws_ami.ubuntu_1804["arm64"].id
        "20.04" = data.aws_ami.ubuntu_2004["arm64"].id
        "22.04" = data.aws_ami.ubuntu_2204["arm64"].id
      }
    }
    "amd64" = {
      "rhel" = {
        # The rhel_79 data source only looks up x86_64, so 7.9 is
        # x86_64-only here.
        "7.9" = data.aws_ami.rhel_79.id
        "8.8" = data.aws_ami.rhel_88["x86_64"].id
        "9.1" = data.aws_ami.rhel_91["x86_64"].id
      }
      "ubuntu" = {
        "18.04" = data.aws_ami.ubuntu_1804["x86_64"].id
        "20.04" = data.aws_ami.ubuntu_2004["x86_64"].id
        "22.04" = data.aws_ami.ubuntu_2204["x86_64"].id
      }
    }
  }
}
|
||||
|
||||
# Latest Ubuntu 18.04 AMI per architecture.
data "aws_ami" "ubuntu_1804" {
  for_each    = local.architectures
  most_recent = true

  filter {
    name   = "name"
    values = ["ubuntu/images/hvm-ssd/ubuntu-*-18.04-*-server-*"]
  }

  filter {
    name   = "virtualization-type"
    values = ["hvm"]
  }

  filter {
    name   = "architecture"
    values = [each.value]
  }

  owners = [local.canonical_owner_id]
}

# Latest Ubuntu 20.04 AMI per architecture.
data "aws_ami" "ubuntu_2004" {
  for_each    = local.architectures
  most_recent = true

  filter {
    name   = "name"
    values = ["ubuntu/images/hvm-ssd/ubuntu-*-20.04-*-server-*"]
  }

  filter {
    name   = "virtualization-type"
    values = ["hvm"]
  }

  filter {
    name   = "architecture"
    values = [each.value]
  }

  owners = [local.canonical_owner_id]
}

# Latest Ubuntu 22.04 AMI per architecture.
data "aws_ami" "ubuntu_2204" {
  for_each    = local.architectures
  most_recent = true

  filter {
    name   = "name"
    values = ["ubuntu/images/hvm-ssd/ubuntu-*-22.04-*-server-*"]
  }

  filter {
    name   = "virtualization-type"
    values = ["hvm"]
  }

  filter {
    name   = "architecture"
    values = [each.value]
  }

  owners = [local.canonical_owner_id]
}
|
||||
|
||||
# Latest RHEL 7.9 AMI. Only published for x86_64, hence no for_each.
data "aws_ami" "rhel_79" {
  most_recent = true

  # Currently the latest point release-1
  filter {
    name   = "name"
    values = ["RHEL-7.9*HVM-20*"]
  }

  filter {
    name   = "virtualization-type"
    values = ["hvm"]
  }

  filter {
    name   = "architecture"
    values = ["x86_64"]
  }

  owners = [local.rhel_owner_id]
}

# Latest RHEL 8.8 AMI per architecture.
data "aws_ami" "rhel_88" {
  for_each    = local.architectures
  most_recent = true

  # Currently the latest point release-1
  filter {
    name   = "name"
    values = ["RHEL-8.8*HVM-20*"]
  }

  filter {
    name   = "virtualization-type"
    values = ["hvm"]
  }

  filter {
    name   = "architecture"
    values = [each.value]
  }

  owners = [local.rhel_owner_id]
}

# Latest RHEL 9.1 AMI per architecture.
data "aws_ami" "rhel_91" {
  for_each    = local.architectures
  most_recent = true

  # Currently the latest point release-1
  filter {
    name   = "name"
    values = ["RHEL-9.1*HVM-20*"]
  }

  filter {
    name   = "virtualization-type"
    values = ["hvm"]
  }

  filter {
    name   = "architecture"
    values = [each.value]
  }

  owners = [local.rhel_owner_id]
}
|
||||
|
||||
data "aws_region" "current" {}

data "aws_availability_zones" "available" {
  state = "available"

  filter {
    name   = "zone-name"
    values = ["*"]
  }
}

output "ami_ids" {
  value = local.ids
}

output "current_region" {
  value = data.aws_region.current
}

output "availability_zones" {
  value = data.aws_availability_zones.available
}
|
||||
322
enos/modules/target_ec2_fleet/main.tf
Normal file
322
enos/modules/target_ec2_fleet/main.tf
Normal file
@ -0,0 +1,322 @@
|
||||
terraform {
  required_providers {
    # We need to specify the provider source in each module until we publish
    # it to the public registry.
    enos = {
      source  = "app.terraform.io/hashicorp-qti/enos"
      version = ">= 0.3.24"
    }
  }
}
|
||||
|
||||
data "aws_vpc" "vpc" {
  id = var.vpc_id
}

data "aws_subnets" "vpc" {
  filter {
    name   = "vpc-id"
    values = [var.vpc_id]
  }
}

# NOTE(review): nothing in this file references this data source, and the
# lookup will fail when var.awskms_unseal_key_arn is null (its default) —
# confirm it is still required.
data "aws_kms_key" "kms_key" {
  key_id = var.awskms_unseal_key_arn
}
|
||||
|
||||
# Instance permissions: ec2 discovery (used for retry_join), secretsmanager,
# and scoped KMS access to the unseal key.
data "aws_iam_policy_document" "target" {
  statement {
    resources = ["*"]

    actions = [
      "ec2:DescribeInstances",
      "secretsmanager:*"
    ]
  }

  statement {
    resources = [var.awskms_unseal_key_arn]

    actions = [
      "kms:DescribeKey",
      "kms:ListKeys",
      "kms:Encrypt",
      "kms:Decrypt",
      "kms:GenerateDataKey"
    ]
  }
}

# Trust policy allowing EC2 instances to assume the target role.
data "aws_iam_policy_document" "target_role" {
  statement {
    actions = ["sts:AssumeRole"]

    principals {
      type        = "Service"
      identifiers = ["ec2.amazonaws.com"]
    }
  }
}
|
||||
|
||||
data "enos_environment" "localhost" {}

# Fallback cluster name when var.cluster_name is not provided.
resource "random_string" "random_cluster_name" {
  length  = 8
  lower   = true
  upper   = false
  numeric = false
  special = false
}

# Short suffix to keep resource name prefixes unique between runs.
resource "random_string" "unique_id" {
  length  = 4
  lower   = true
  upper   = false
  numeric = false
  special = false
}
|
||||
|
||||
locals {
  # Allocation strategies for the two capacity types the fleet can use.
  spot_allocation_strategy      = "price-capacity-optimized"
  on_demand_allocation_strategy = "lowestPrice"
  # Stable string keys ("0", "1", ...) for per-instance lookups.
  instances    = toset([for idx in range(var.instance_count) : tostring(idx)])
  cluster_name = coalesce(var.cluster_name, random_string.random_cluster_name.result)
  name_prefix  = "${var.project_name}-${local.cluster_name}-${random_string.unique_id.result}"
  # NOTE(review): tag value still says "spot-fleet-target" even though this
  # module uses ec2_fleet — confirm nothing matches on the old name before
  # renaming it.
  fleet_tag = "${local.name_prefix}-spot-fleet-target"
  fleet_tags = {
    Name                     = "${local.name_prefix}-target"
    "${var.cluster_tag_key}" = local.cluster_name
    Fleet                    = local.fleet_tag
  }
}
|
||||
|
||||
resource "aws_iam_role" "target" {
  name               = "${local.name_prefix}-target-role"
  assume_role_policy = data.aws_iam_policy_document.target_role.json
}

resource "aws_iam_instance_profile" "target" {
  name = "${local.name_prefix}-target-profile"
  role = aws_iam_role.target.name
}

resource "aws_iam_role_policy" "target" {
  name   = "${local.name_prefix}-target-policy"
  role   = aws_iam_role.target.id
  policy = data.aws_iam_policy_document.target.json
}
|
||||
|
||||
resource "aws_security_group" "target" {
  name        = "${local.name_prefix}-target"
  description = "Target instance security group"
  vpc_id      = var.vpc_id

  # SSH traffic from the Enos host and from inside the VPC.
  ingress {
    from_port = 22
    to_port   = 22
    protocol  = "tcp"
    cidr_blocks = flatten([
      formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses),
      join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
    ])
  }

  # Vault API (8200) and cluster (8201) traffic, plus any extra allowlisted
  # addresses.
  ingress {
    from_port = 8200
    to_port   = 8201
    protocol  = "tcp"
    cidr_blocks = flatten([
      formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses),
      join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
      formatlist("%s/32", var.ssh_allow_ips)
    ])
  }

  # Consul server RPC and LAN/WAN serf traffic (8300-8302 tcp).
  ingress {
    from_port = 8300
    to_port   = 8302
    protocol  = "tcp"
    cidr_blocks = flatten([
      formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses),
      join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
    ])
  }

  # Consul serf gossip over UDP (8301-8302).
  ingress {
    from_port = 8301
    to_port   = 8302
    protocol  = "udp"
    cidr_blocks = flatten([
      formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses),
      join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
    ])
  }

  # Consul HTTP(S)/gRPC APIs (8500-8503).
  ingress {
    from_port = 8500
    to_port   = 8503
    protocol  = "tcp"
    cidr_blocks = flatten([
      formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses),
      join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
    ])
  }

  # Consul DNS over TCP.
  ingress {
    from_port = 8600
    to_port   = 8600
    protocol  = "tcp"
    cidr_blocks = flatten([
      formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses),
      join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
    ])
  }

  # Consul DNS over UDP.
  ingress {
    from_port = 8600
    to_port   = 8600
    protocol  = "udp"
    cidr_blocks = flatten([
      formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses),
      join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
    ])
  }

  # Unrestricted traffic between members of this security group.
  ingress {
    from_port = 0
    to_port   = 0
    protocol  = "-1"
    self      = true
  }

  # Unrestricted egress.
  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = merge(
    var.common_tags,
    {
      Name = "${local.name_prefix}-sg"
    },
  )
}
|
||||
|
||||
# Shared launch template for every instance the fleet provisions.
resource "aws_launch_template" "target" {
  name     = "${local.name_prefix}-target"
  image_id = var.ami_id
  key_name = var.ssh_keypair

  iam_instance_profile {
    name = aws_iam_instance_profile.target.name
  }

  network_interfaces {
    associate_public_ip_address = true
    delete_on_termination       = true
    security_groups             = [aws_security_group.target.id]
  }

  tag_specifications {
    resource_type = "instance"

    tags = merge(
      var.common_tags,
      local.fleet_tags,
    )
  }
}
|
||||
|
||||
# There are three primary knobs we can turn to try and optimize our costs
# when using spot capacity: our min and max instance requirements, our max
# bid price, and the allocation strategy used when fulfilling the request.
# We've currently configured our instance requirements to allow for anywhere
# from 2-4 vCPUs and 4-16GB of RAM. We intentionally have a wide range to
# allow for a large instance size pool to be considered. Our next knob is
# our max bid price. As we're using spot capacity to save on instance cost,
# we never want to pay more for an instance than we would on-demand. We've
# set the max price to equal what we pay for t3.medium instances on-demand,
# which are the smallest reliable size for Vault scenarios. The final knob
# is the allocation strategy AWS uses when looking for instances that meet
# our resource and cost requirements: spot capacity uses
# local.spot_allocation_strategy ("price-capacity-optimized") and on-demand
# capacity uses local.on_demand_allocation_strategy ("lowestPrice"). Unless
# we see capacity issues or instances being shut down we ought to stick
# with those strategies.
resource "aws_ec2_fleet" "targets" {
  terminate_instances = true // terminate instances when we "delete" the fleet
  tags = merge(
    var.common_tags,
    local.fleet_tags,
  )
  type = "instant" // make a synchronous request for the entire fleet

  launch_template_config {
    launch_template_specification {
      launch_template_id = aws_launch_template.target.id
      version            = aws_launch_template.target.latest_version
    }

    override {
      max_price = var.max_price
      subnet_id = data.aws_subnets.vpc.ids[0]

      instance_requirements {
        burstable_performance = "included"

        memory_mib {
          min = var.instance_mem_min
          max = var.instance_mem_max
        }

        vcpu_count {
          min = var.instance_cpu_min
          max = var.instance_cpu_max
        }
      }
    }
  }

  on_demand_options {
    allocation_strategy = local.on_demand_allocation_strategy
    max_total_price     = (var.max_price * var.instance_count)
    min_target_capacity = var.capacity_type == "on-demand" ? var.instance_count : null
    // One of these has to be set to enforce our on-demand target capacity minimum
    single_availability_zone = false
    single_instance_type     = true
  }

  spot_options {
    allocation_strategy = local.spot_allocation_strategy
    // The instance_pools_to_use_count is only valid for the allocation_strategy
    // lowestPrice. When we are using that strategy we'll want to always set it
    // to non-zero to avoid rebuilding the fleet on a re-run. For any other
    // strategy leave it unset to avoid rebuilding the fleet on a re-run.
    instance_pools_to_use_count = local.spot_allocation_strategy == "lowestPrice" ? 1 : null
  }

  // Try and provision only spot instances and fall back to on-demand.
  target_capacity_specification {
    default_target_capacity_type = var.capacity_type
    spot_target_capacity         = var.capacity_type == "spot" ? var.instance_count : 0
    on_demand_target_capacity    = var.capacity_type == "on-demand" ? var.instance_count : 0
    target_capacity_unit_type    = "units" // units == instance count
    total_target_capacity        = var.instance_count
  }
}
|
||||
|
||||
# Look up the launched instances so we can expose their IP addresses.
# NOTE(review): "instant" fleets do not guarantee full fulfillment; this
# lookup assumes all var.instance_count instances actually launched.
data "aws_instance" "targets" {
  depends_on = [
    aws_ec2_fleet.targets,
  ]
  for_each = local.instances

  instance_id = aws_ec2_fleet.targets.fleet_instance_set[0].instance_ids[each.key]
}
|
||||
11
enos/modules/target_ec2_fleet/outputs.tf
Normal file
11
enos/modules/target_ec2_fleet/outputs.tf
Normal file
@ -0,0 +1,11 @@
|
||||
output "cluster_name" {
  value = local.cluster_name
}

output "hosts" {
  description = "The ec2 fleet target hosts"
  value = { for idx in range(var.instance_count) : idx => {
    public_ip  = data.aws_instance.targets[idx].public_ip
    private_ip = data.aws_instance.targets[idx].private_ip
  } }
}
|
||||
98
enos/modules/target_ec2_fleet/variables.tf
Normal file
98
enos/modules/target_ec2_fleet/variables.tf
Normal file
@ -0,0 +1,98 @@
|
||||
variable "ami_id" {
  description = "The machine image identifier"
  type        = string
}

variable "awskms_unseal_key_arn" {
  type        = string
  description = "The AWSKMS key ARN if using the awskms unseal method. If specified the instances will be granted kms permissions to the key"
  default     = null
}

variable "cluster_name" {
  type        = string
  description = "A unique cluster identifier"
  default     = null
}

variable "cluster_tag_key" {
  type        = string
  description = "The key name for the cluster tag"
  default     = "TargetCluster"
}

variable "common_tags" {
  description = "Common tags for cloud resources"
  type        = map(string)
  default = {
    Project = "Vault"
  }
}

variable "instance_mem_min" {
  # Description previously claimed "1 MiB = 1024 bytes"; a mebibyte is
  # 1024 KiB.
  description = "The minimum amount of memory in mebibytes for each instance in the fleet. (1 MiB = 1024 KiB)"
  type        = number
  default     = 4096 // ~4 GB
}
|
||||
|
||||
variable "instance_mem_max" {
  # Description previously claimed "1 MiB = 1024 bytes"; a mebibyte is
  # 1024 KiB.
  description = "The maximum amount of memory in mebibytes for each instance in the fleet. (1 MiB = 1024 KiB)"
  type        = number
  # 16 GiB is 16384 MiB; the previous default of 16385 was off by one.
  default = 16384 // ~16 GB
}
|
||||
|
||||
variable "instance_cpu_min" {
  description = "The minimum number of vCPU's for each instance in the fleet"
  type        = number
  default     = 2
}

variable "instance_cpu_max" {
  description = "The maximum number of vCPU's for each instance in the fleet"
  type        = number
  default     = 8 // Unlikely we'll ever get that high due to spot price bid protection
}

variable "instance_count" {
  description = "The number of target instances to create"
  type        = number
  default     = 3
}

variable "max_price" {
  description = "The maximum hourly price to pay for each target instance"
  type        = string
  default     = "0.0416"
}

variable "project_name" {
  description = "A unique project name"
  type        = string
}

variable "ssh_allow_ips" {
  # Description previously read "will automatically allowlisted".
  description = "Allowlisted IP addresses for SSH access to target nodes. The IP address of the machine running Enos will automatically be allowlisted"
  type        = list(string)
  default     = []
}

variable "ssh_keypair" {
  description = "SSH keypair used to connect to EC2 instances"
  type        = string
}

variable "capacity_type" {
  description = "What capacity type to use for EC2 instances"
  type        = string
  default     = "on-demand"

  validation {
    condition     = contains(["on-demand", "spot"], var.capacity_type)
    error_message = "The capacity_type must be either 'on-demand' or 'spot'."
  }
}

variable "vpc_id" {
  description = "The identifier of the VPC where the target instances will be created"
  type        = string
}
|
||||
@ -1,181 +0,0 @@
|
||||
terraform {
|
||||
required_providers {
|
||||
# We need to specify the provider source in each module until we publish it
|
||||
# to the public registry
|
||||
enos = {
|
||||
source = "app.terraform.io/hashicorp-qti/enos"
|
||||
version = ">= 0.3.24"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
data "aws_vpc" "vpc" {
|
||||
id = var.vpc_id
|
||||
}
|
||||
|
||||
data "aws_subnets" "vpc" {
|
||||
filter {
|
||||
name = "vpc-id"
|
||||
values = [var.vpc_id]
|
||||
}
|
||||
}
|
||||
|
||||
data "aws_kms_key" "kms_key" {
|
||||
key_id = var.awskms_unseal_key_arn
|
||||
}
|
||||
|
||||
data "aws_iam_policy_document" "target" {
|
||||
statement {
|
||||
resources = ["*"]
|
||||
|
||||
actions = [
|
||||
"ec2:DescribeInstances",
|
||||
"secretsmanager:*"
|
||||
]
|
||||
}
|
||||
|
||||
statement {
|
||||
resources = [var.awskms_unseal_key_arn]
|
||||
|
||||
actions = [
|
||||
"kms:DescribeKey",
|
||||
"kms:ListKeys",
|
||||
"kms:Encrypt",
|
||||
"kms:Decrypt",
|
||||
"kms:GenerateDataKey"
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
data "aws_iam_policy_document" "target_instance_role" {
|
||||
statement {
|
||||
actions = ["sts:AssumeRole"]
|
||||
|
||||
principals {
|
||||
type = "Service"
|
||||
identifiers = ["ec2.amazonaws.com"]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
data "enos_environment" "localhost" {}
|
||||
|
||||
resource "random_string" "cluster_name" {
|
||||
length = 8
|
||||
lower = true
|
||||
upper = false
|
||||
numeric = false
|
||||
special = false
|
||||
}
|
||||
|
||||
locals {
|
||||
instances = toset([for idx in range(var.instance_count) : tostring(idx)])
|
||||
cluster_name = coalesce(var.cluster_name, random_string.cluster_name.result)
|
||||
name_prefix = "${var.project_name}-${local.cluster_name}"
|
||||
}
|
||||
|
||||
resource "aws_iam_role" "target_instance_role" {
|
||||
name = "target_instance_role-${random_string.cluster_name.result}"
|
||||
assume_role_policy = data.aws_iam_policy_document.target_instance_role.json
|
||||
}
|
||||
|
||||
resource "aws_iam_instance_profile" "target" {
|
||||
name = "${local.name_prefix}-target"
|
||||
role = aws_iam_role.target_instance_role.name
|
||||
}
|
||||
|
||||
resource "aws_iam_role_policy" "target" {
|
||||
name = "${local.name_prefix}-target"
|
||||
role = aws_iam_role.target_instance_role.id
|
||||
policy = data.aws_iam_policy_document.target.json
|
||||
}
|
||||
|
||||
resource "aws_security_group" "target" {
|
||||
name = "${local.name_prefix}-target"
|
||||
description = "Target instance security group"
|
||||
vpc_id = var.vpc_id
|
||||
|
||||
# SSH traffic
|
||||
ingress {
|
||||
from_port = 22
|
||||
to_port = 22
|
||||
protocol = "tcp"
|
||||
cidr_blocks = flatten([
|
||||
formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses),
|
||||
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
||||
])
|
||||
}
|
||||
|
||||
# Vault traffic
|
||||
ingress {
|
||||
from_port = 8200
|
||||
to_port = 8201
|
||||
protocol = "tcp"
|
||||
cidr_blocks = flatten([
|
||||
formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses),
|
||||
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
||||
formatlist("%s/32", var.ssh_allow_ips)
|
||||
])
|
||||
}
|
||||
|
||||
# Consul traffic
|
||||
ingress {
|
||||
from_port = 8301
|
||||
to_port = 8301
|
||||
protocol = "tcp"
|
||||
cidr_blocks = flatten([
|
||||
formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses),
|
||||
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
||||
])
|
||||
}
|
||||
|
||||
ingress {
|
||||
from_port = 8301
|
||||
to_port = 8301
|
||||
protocol = "udp"
|
||||
cidr_blocks = flatten([
|
||||
formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses),
|
||||
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
||||
])
|
||||
}
|
||||
|
||||
# Internal traffic
|
||||
ingress {
|
||||
from_port = 0
|
||||
to_port = 0
|
||||
protocol = "-1"
|
||||
self = true
|
||||
}
|
||||
|
||||
# External traffic
|
||||
egress {
|
||||
from_port = 0
|
||||
to_port = 0
|
||||
protocol = "-1"
|
||||
cidr_blocks = ["0.0.0.0/0"]
|
||||
}
|
||||
|
||||
tags = merge(
|
||||
var.common_tags,
|
||||
{
|
||||
Name = "${local.name_prefix}-sg"
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
resource "aws_instance" "targets" {
|
||||
for_each = local.instances
|
||||
ami = var.ami_id
|
||||
instance_type = var.instance_type
|
||||
vpc_security_group_ids = [aws_security_group.target.id]
|
||||
subnet_id = tolist(data.aws_subnets.vpc.ids)[each.key % length(data.aws_subnets.vpc.ids)]
|
||||
key_name = var.ssh_keypair
|
||||
iam_instance_profile = aws_iam_instance_profile.target.name
|
||||
tags = merge(
|
||||
var.common_tags,
|
||||
{
|
||||
Name = "${local.name_prefix}-target-instance"
|
||||
Type = local.cluster_name
|
||||
},
|
||||
)
|
||||
}
|
||||
@ -1,11 +0,0 @@
|
||||
output "cluster_name" {
|
||||
value = local.cluster_name
|
||||
}
|
||||
|
||||
output "hosts" {
|
||||
description = "The ec2 instance target hosts"
|
||||
value = { for idx in range(var.instance_count) : idx => {
|
||||
public_ip = aws_instance.targets[idx].public_ip
|
||||
private_ip = aws_instance.targets[idx].private_ip
|
||||
} }
|
||||
}
|
||||
@ -1,61 +0,0 @@
|
||||
variable "ami_id" {
|
||||
description = "The machine image identifier"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "awskms_unseal_key_arn" {
|
||||
type = string
|
||||
description = "The AWSKMS key ARN if using the awskms unseal method. If specified the instances will be granted kms permissions to the key"
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "cluster_name" {
|
||||
type = string
|
||||
description = "A unique cluster identifier"
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "common_tags" {
|
||||
description = "Common tags for cloud resources"
|
||||
type = map(string)
|
||||
default = { "Project" : "Enos" }
|
||||
}
|
||||
|
||||
variable "instance_count" {
|
||||
description = "The number of target instances to create"
|
||||
type = number
|
||||
default = 3
|
||||
}
|
||||
|
||||
variable "instance_type" {
|
||||
description = "The instance machine type"
|
||||
type = string
|
||||
default = "t3.small"
|
||||
}
|
||||
|
||||
variable "project_name" {
|
||||
description = "A unique project name"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "spot_price_max" {
|
||||
description = "Unused shim variable to match target_ec2_spot_fleet"
|
||||
type = string
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "ssh_allow_ips" {
|
||||
description = "Allowlisted IP addresses for SSH access to target nodes. The IP address of the machine running Enos will automatically allowlisted"
|
||||
type = list(string)
|
||||
default = []
|
||||
}
|
||||
|
||||
variable "ssh_keypair" {
|
||||
description = "SSH keypair used to connect to EC2 instances"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "vpc_id" {
|
||||
description = "The identifier of the VPC where the target instances will be created"
|
||||
type = string
|
||||
}
|
||||
@ -144,7 +144,7 @@ data "aws_iam_policy_document" "fleet_role" {
|
||||
|
||||
data "enos_environment" "localhost" {}
|
||||
|
||||
resource "random_string" "cluster_name" {
|
||||
resource "random_string" "random_cluster_name" {
|
||||
length = 8
|
||||
lower = true
|
||||
upper = false
|
||||
@ -163,13 +163,13 @@ resource "random_string" "unique_id" {
|
||||
locals {
|
||||
allocation_strategy = "lowestPrice"
|
||||
instances = toset([for idx in range(var.instance_count) : tostring(idx)])
|
||||
cluster_name = coalesce(var.cluster_name, random_string.cluster_name.result)
|
||||
cluster_name = coalesce(var.cluster_name, random_string.random_cluster_name.result)
|
||||
name_prefix = "${var.project_name}-${local.cluster_name}-${random_string.unique_id.result}"
|
||||
fleet_tag = "${local.name_prefix}-spot-fleet-target"
|
||||
fleet_tags = {
|
||||
Name = "${local.name_prefix}-target"
|
||||
Type = local.cluster_name
|
||||
SpotFleet = local.fleet_tag
|
||||
Name = "${local.name_prefix}-target"
|
||||
"${var.cluster_tag_key}" = local.cluster_name
|
||||
Fleet = local.fleet_tag
|
||||
}
|
||||
}
|
||||
|
||||
@ -230,8 +230,8 @@ resource "aws_security_group" "target" {
|
||||
|
||||
# Consul traffic
|
||||
ingress {
|
||||
from_port = 8301
|
||||
to_port = 8301
|
||||
from_port = 8300
|
||||
to_port = 8302
|
||||
protocol = "tcp"
|
||||
cidr_blocks = flatten([
|
||||
formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses),
|
||||
@ -241,7 +241,37 @@ resource "aws_security_group" "target" {
|
||||
|
||||
ingress {
|
||||
from_port = 8301
|
||||
to_port = 8301
|
||||
to_port = 8302
|
||||
protocol = "udp"
|
||||
cidr_blocks = flatten([
|
||||
formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses),
|
||||
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
||||
])
|
||||
}
|
||||
|
||||
ingress {
|
||||
from_port = 8500
|
||||
to_port = 8503
|
||||
protocol = "tcp"
|
||||
cidr_blocks = flatten([
|
||||
formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses),
|
||||
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
||||
])
|
||||
}
|
||||
|
||||
ingress {
|
||||
from_port = 8600
|
||||
to_port = 8600
|
||||
protocol = "tcp"
|
||||
cidr_blocks = flatten([
|
||||
formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses),
|
||||
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
|
||||
])
|
||||
}
|
||||
|
||||
ingress {
|
||||
from_port = 8600
|
||||
to_port = 8600
|
||||
protocol = "udp"
|
||||
cidr_blocks = flatten([
|
||||
formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses),
|
||||
@ -334,7 +364,7 @@ resource "aws_spot_fleet_request" "targets" {
|
||||
}
|
||||
|
||||
overrides {
|
||||
spot_price = var.spot_price_max
|
||||
spot_price = var.max_price
|
||||
subnet_id = data.aws_subnets.vpc.ids[0]
|
||||
|
||||
instance_requirements {
|
||||
@ -359,8 +389,14 @@ resource "aws_spot_fleet_request" "targets" {
|
||||
)
|
||||
}
|
||||
|
||||
resource "time_sleep" "wait_for_fulfillment" {
|
||||
depends_on = [aws_spot_fleet_request.targets]
|
||||
create_duration = "2s"
|
||||
}
|
||||
|
||||
data "aws_instances" "targets" {
|
||||
depends_on = [
|
||||
time_sleep.wait_for_fulfillment,
|
||||
aws_spot_fleet_request.targets,
|
||||
]
|
||||
|
||||
|
||||
@ -3,7 +3,7 @@ output "cluster_name" {
|
||||
}
|
||||
|
||||
output "hosts" {
|
||||
description = "The spot fleet target hosts"
|
||||
description = "The ec2 fleet target hosts"
|
||||
value = { for idx in range(var.instance_count) : idx => {
|
||||
public_ip = data.aws_instance.targets[idx].public_ip
|
||||
private_ip = data.aws_instance.targets[idx].private_ip
|
||||
|
||||
@ -15,6 +15,12 @@ variable "cluster_name" {
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "cluster_tag_key" {
|
||||
type = string
|
||||
description = "The key name for the cluster tag"
|
||||
default = "TargetCluster"
|
||||
}
|
||||
|
||||
variable "common_tags" {
|
||||
description = "Common tags for cloud resources"
|
||||
type = map(string)
|
||||
@ -53,22 +59,15 @@ variable "instance_count" {
|
||||
default = 3
|
||||
}
|
||||
|
||||
variable "instance_type" {
|
||||
description = "Shim variable for target module variable compatibility that is not used. The spot fleet determines instance sizes"
|
||||
type = string
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "project_name" {
|
||||
description = "A unique project name"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "spot_price_max" {
|
||||
variable "max_price" {
|
||||
description = "The maximum hourly price to pay for each target instance"
|
||||
type = string
|
||||
// Current on-demand cost of linux t3.medium in us-east.
|
||||
default = "0.0416"
|
||||
default = "0.0416"
|
||||
}
|
||||
|
||||
variable "ssh_allow_ips" {
|
||||
|
||||
@ -30,7 +30,6 @@ variable "arch" {}
|
||||
variable "artifact_type" {}
|
||||
variable "distro" {}
|
||||
variable "edition" {}
|
||||
variable "instance_type" {}
|
||||
variable "revision" {}
|
||||
variable "product_version" {}
|
||||
variable "build_tags" { default = null }
|
||||
|
||||
@ -12,8 +12,16 @@ terraform {
|
||||
data "enos_environment" "localhost" {}
|
||||
|
||||
locals {
|
||||
bin_path = "${var.install_dir}/vault"
|
||||
consul_bin_path = "${var.consul_install_dir}/consul"
|
||||
audit_device_file_path = "/var/log/vault/vault_audit.log"
|
||||
bin_path = "${var.install_dir}/vault"
|
||||
consul_bin_path = "${var.consul_install_dir}/consul"
|
||||
enable_audit_device = var.enable_file_audit_device && var.initialize_cluster
|
||||
// In order to get Terraform to plan we have to use collections with keys
|
||||
// that are known at plan time. In order for our module to work our var.target_hosts
|
||||
// must be a map with known keys at plan time. Here we're creating locals
|
||||
// that keep track of index values that point to our target hosts.
|
||||
followers = toset(slice(local.instances, 1, length(local.instances)))
|
||||
instances = [for idx in range(length(var.target_hosts)) : tostring(idx)]
|
||||
key_shares = {
|
||||
"awskms" = null
|
||||
"shamir" = 5
|
||||
@ -22,13 +30,7 @@ locals {
|
||||
"awskms" = null
|
||||
"shamir" = 3
|
||||
}
|
||||
// In order to get Terraform to plan we have to use collections with keys
|
||||
// that are known at plan time. In order for our module to work our var.target_hosts
|
||||
// must be a map with known keys at plan time. Here we're creating locals
|
||||
// that keep track of index values that point to our target hosts.
|
||||
followers = toset(slice(local.instances, 1, length(local.instances)))
|
||||
instances = [for idx in range(length(var.target_hosts)) : tostring(idx)]
|
||||
leader = toset(slice(local.instances, 0, 1))
|
||||
leader = toset(slice(local.instances, 0, 1))
|
||||
recovery_shares = {
|
||||
"awskms" = 5
|
||||
"shamir" = null
|
||||
@ -61,9 +63,7 @@ locals {
|
||||
path = "vault"
|
||||
})
|
||||
]
|
||||
audit_device_file_path = "/var/log/vault/vault_audit.log"
|
||||
vault_service_user = "vault"
|
||||
enable_audit_device = var.enable_file_audit_device && var.initialize_cluster
|
||||
vault_service_user = "vault"
|
||||
}
|
||||
|
||||
resource "enos_remote_exec" "install_packages" {
|
||||
@ -122,7 +122,7 @@ resource "enos_consul_start" "consul" {
|
||||
config = {
|
||||
data_dir = var.consul_data_dir
|
||||
datacenter = "dc1"
|
||||
retry_join = ["provider=aws tag_key=Type tag_value=${var.consul_cluster_tag}"]
|
||||
retry_join = ["provider=aws tag_key=${var.backend_cluster_tag_key} tag_value=${var.backend_cluster_name}"]
|
||||
server = false
|
||||
bootstrap_expect = 0
|
||||
license = var.consul_license
|
||||
|
||||
@ -1,6 +1,11 @@
|
||||
output "public_ips" {
|
||||
description = "Vault cluster target host public_ips"
|
||||
value = [for host in var.target_hosts : host.public_ip]
|
||||
output "audit_device_file_path" {
|
||||
description = "The file path for the audit device, if enabled"
|
||||
value = var.enable_file_audit_device ? local.audit_device_file_path : "file audit device not enabled"
|
||||
}
|
||||
|
||||
output "cluster_name" {
|
||||
description = "The Vault cluster name"
|
||||
value = var.cluster_name
|
||||
}
|
||||
|
||||
output "private_ips" {
|
||||
@ -8,29 +13,9 @@ output "private_ips" {
|
||||
value = [for host in var.target_hosts : host.private_ip]
|
||||
}
|
||||
|
||||
output "target_hosts" {
|
||||
description = "The vault cluster instances that were created"
|
||||
|
||||
value = var.target_hosts
|
||||
}
|
||||
output "root_token" {
|
||||
value = coalesce(var.root_token, try(enos_vault_init.leader[0].root_token, null), "none")
|
||||
}
|
||||
|
||||
output "unseal_keys_b64" {
|
||||
value = try(enos_vault_init.leader[0].unseal_keys_b64, [])
|
||||
}
|
||||
|
||||
output "unseal_keys_hex" {
|
||||
value = try(enos_vault_init.leader[0].unseal_keys_hex, null)
|
||||
}
|
||||
|
||||
output "unseal_shares" {
|
||||
value = try(enos_vault_init.leader[0].unseal_keys_shares, -1)
|
||||
}
|
||||
|
||||
output "unseal_threshold" {
|
||||
value = try(enos_vault_init.leader[0].unseal_keys_threshold, -1)
|
||||
output "public_ips" {
|
||||
description = "Vault cluster target host public_ips"
|
||||
value = [for host in var.target_hosts : host.public_ip]
|
||||
}
|
||||
|
||||
output "recovery_keys_b64" {
|
||||
@ -49,12 +34,28 @@ output "recovery_threshold" {
|
||||
value = try(enos_vault_init.leader[0].recovery_keys_threshold, -1)
|
||||
}
|
||||
|
||||
output "cluster_name" {
|
||||
description = "The Vault cluster name"
|
||||
value = var.cluster_name
|
||||
output "root_token" {
|
||||
value = coalesce(var.root_token, try(enos_vault_init.leader[0].root_token, null), "none")
|
||||
}
|
||||
|
||||
output "audit_device_file_path" {
|
||||
description = "The file path for the audit device, if enabled"
|
||||
value = var.enable_file_audit_device ? local.audit_device_file_path : "file audit device not enabled"
|
||||
output "target_hosts" {
|
||||
description = "The vault cluster instances that were created"
|
||||
|
||||
value = var.target_hosts
|
||||
}
|
||||
|
||||
output "unseal_keys_b64" {
|
||||
value = try(enos_vault_init.leader[0].unseal_keys_b64, [])
|
||||
}
|
||||
|
||||
output "unseal_keys_hex" {
|
||||
value = try(enos_vault_init.leader[0].unseal_keys_hex, null)
|
||||
}
|
||||
|
||||
output "unseal_shares" {
|
||||
value = try(enos_vault_init.leader[0].unseal_keys_shares, -1)
|
||||
}
|
||||
|
||||
output "unseal_threshold" {
|
||||
value = try(enos_vault_init.leader[0].unseal_keys_threshold, -1)
|
||||
}
|
||||
|
||||
@ -15,6 +15,18 @@ variable "awskms_unseal_key_arn" {
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "backend_cluster_name" {
|
||||
type = string
|
||||
description = "The name of the backend cluster"
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "backend_cluster_tag_key" {
|
||||
type = string
|
||||
description = "The tag key for searching for backend nodes"
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "cluster_name" {
|
||||
type = string
|
||||
description = "The Vault cluster name"
|
||||
@ -33,12 +45,6 @@ variable "config_env_vars" {
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "consul_cluster_tag" {
|
||||
type = string
|
||||
description = "The retry_join tag to use for Consul"
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "consul_data_dir" {
|
||||
type = string
|
||||
description = "The directory where the consul will store data"
|
||||
@ -87,6 +93,12 @@ variable "consul_release" {
|
||||
}
|
||||
}
|
||||
|
||||
variable "enable_file_audit_device" {
|
||||
description = "If true the file audit device will be enabled at the path /var/log/vault_audit.log"
|
||||
type = bool
|
||||
default = true
|
||||
}
|
||||
|
||||
variable "force_unseal" {
|
||||
type = bool
|
||||
description = "Always unseal the Vault cluster even if we're not initializing it"
|
||||
@ -203,9 +215,3 @@ variable "unseal_method" {
|
||||
error_message = "The unseal_method must be either awskms or shamir. No other unseal methods are supported."
|
||||
}
|
||||
}
|
||||
|
||||
variable "enable_file_audit_device" {
|
||||
description = "If true the file audit device will be enabled at the path /var/log/vault_audit.log"
|
||||
type = bool
|
||||
default = true
|
||||
}
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user