From 4103c6d960a4de55108a5ed457a8528ab7d434ae Mon Sep 17 00:00:00 2001 From: Hamza Shili Date: Wed, 25 Jun 2025 15:28:45 -0700 Subject: [PATCH 01/26] copy enos dir from vault repo --- enos/Makefile | 52 ++ enos/README.md | 259 +++++++ enos/ci/aws-nuke.yml | 409 +++++++++++ enos/ci/bootstrap/main.tf | 69 ++ enos/ci/bootstrap/outputs.tf | 23 + enos/ci/bootstrap/variables.tf | 16 + enos/ci/service-user-iam/main.tf | 242 +++++++ enos/ci/service-user-iam/outputs.tf | 16 + enos/ci/service-user-iam/providers.tf | 22 + enos/ci/service-user-iam/service-quotas.tf | 65 ++ enos/ci/service-user-iam/variables.tf | 11 + enos/enos-dev-variables.hcl | 21 + enos/enos-dynamic-config.hcl | 20 + enos/enos-globals.hcl | 162 +++++ enos/enos-providers.hcl | 26 + enos/enos-qualities.hcl | 666 ++++++++++++++++++ enos/enos-terraform.hcl | 32 + enos/enos-variables.hcl | 219 ++++++ enos/modules/artifact/metadata/main.tf | 229 ++++++ .../autopilot_upgrade_storageconfig/main.tf | 10 + enos/modules/backend_consul/main.tf | 56 ++ enos/modules/backend_consul/outputs.tf | 18 + enos/modules/backend_consul/variables.tf | 77 ++ enos/modules/backend_raft/main.tf | 70 ++ .../build_artifactory_artifact/main.tf | 101 +++ .../modules/build_artifactory_package/main.tf | 115 +++ enos/modules/build_crt/main.tf | 37 + enos/modules/build_local/main.tf | 69 ++ enos/modules/build_local/scripts/build.sh | 24 + enos/modules/choose_follower_host/main.tf | 17 + enos/modules/create_vpc/main.tf | 114 +++ enos/modules/create_vpc/outputs.tf | 22 + enos/modules/create_vpc/variables.tf | 37 + enos/modules/disable_selinux/main.tf | 31 + .../scripts/make-selinux-permissive.sh | 18 + enos/modules/ec2_info/main.tf | 264 +++++++ .../generate_dr_operation_token/main.tf | 82 +++ .../scripts/configure-vault-dr-primary.sh | 50 ++ .../generate_failover_secondary_token/main.tf | 98 +++ .../generate-failover-secondary-token.sh | 33 + .../generate_secondary_public_key/main.tf | 77 ++ enos/modules/generate_secondary_token/main.tf | 86 
+++ enos/modules/get_local_metadata/main.tf | 58 ++ .../get_local_metadata/scripts/build_date.sh | 9 + .../get_local_metadata/scripts/version.sh | 97 +++ enos/modules/install_packages/main.tf | 136 ++++ .../install_packages/scripts/add-repos.sh | 84 +++ .../scripts/install-packages.sh | 105 +++ .../scripts/synchronize-repos.sh | 151 ++++ enos/modules/k8s_deploy_vault/main.tf | 165 +++++ enos/modules/k8s_deploy_vault/variables.tf | 42 ++ .../k8s_vault_verify_replication/main.tf | 42 ++ .../scripts/smoke-verify-replication.sh | 27 + .../k8s_vault_verify_replication/variables.tf | 30 + enos/modules/k8s_vault_verify_ui/main.tf | 45 ++ .../scripts/smoke-verify-ui.sh | 17 + enos/modules/k8s_vault_verify_ui/variables.tf | 25 + enos/modules/k8s_vault_verify_version/main.tf | 51 ++ .../scripts/get-status.sh | 10 + .../scripts/smoke-verify-version.sh | 45 ++ .../k8s_vault_verify_version/variables.tf | 62 ++ .../k8s_vault_verify_write_data/main.tf | 53 ++ .../k8s_vault_verify_write_data/variables.tf | 36 + enos/modules/load_docker_image/main.tf | 53 ++ enos/modules/local_kind_cluster/main.tf | 53 ++ enos/modules/read_license/main.tf | 8 + enos/modules/replication_data/main.tf | 51 ++ enos/modules/restart_vault/main.tf | 51 ++ .../restart_vault/scripts/restart-vault.sh | 48 ++ enos/modules/seal_awskms/main.tf | 68 ++ enos/modules/seal_pkcs11/main.tf | 133 ++++ enos/modules/seal_shamir/main.tf | 27 + enos/modules/shutdown_multiple_nodes/main.tf | 29 + enos/modules/shutdown_node/main.tf | 29 + .../modules/softhsm_create_vault_keys/main.tf | 129 ++++ .../scripts/create-keys.sh | 82 +++ .../scripts/get-keys.sh | 20 + .../softhsm_distribute_vault_keys/main.tf | 110 +++ .../scripts/distribute-token.sh | 31 + enos/modules/softhsm_init/main.tf | 83 +++ .../softhsm_init/scripts/init-softhsm.sh | 30 + enos/modules/softhsm_install/main.tf | 116 +++ .../scripts/find-shared-object.sh | 26 + enos/modules/start_vault/main.tf | 276 ++++++++ enos/modules/start_vault/outputs.tf | 63 ++ 
enos/modules/start_vault/variables.tf | 193 +++++ enos/modules/stop_vault/main.tf | 39 + enos/modules/target_ec2_fleet/main.tf | 339 +++++++++ enos/modules/target_ec2_fleet/outputs.tf | 15 + enos/modules/target_ec2_fleet/variables.tf | 107 +++ enos/modules/target_ec2_shim/main.tf | 52 ++ enos/modules/target_ec2_spot_fleet/main.tf | 466 ++++++++++++ enos/modules/target_ec2_spot_fleet/outputs.tf | 15 + .../target_ec2_spot_fleet/variables.tf | 96 +++ enos/modules/vault_agent/main.tf | 91 +++ .../scripts/set-up-approle-and-agent.sh | 99 +++ enos/modules/vault_cluster/main.tf | 414 +++++++++++ enos/modules/vault_cluster/outputs.tf | 102 +++ .../scripts/create-audit-log-dir.sh | 40 ++ .../scripts/enable-audit-devices.sh | 53 ++ .../scripts/set-up-login-shell-profile.sh | 57 ++ .../scripts/start-audit-socket-listener.sh | 92 +++ enos/modules/vault_cluster/variables.tf | 291 ++++++++ .../vault_failover_demote_dr_primary/main.tf | 63 ++ .../main.tf | 69 ++ .../vault_failover_update_dr_primary/main.tf | 76 ++ enos/modules/vault_get_cluster_ips/main.tf | 185 +++++ .../scripts/get-follower-ipv4s.sh | 85 +++ .../scripts/get-follower-ipv6s.sh | 87 +++ .../scripts/get-leader-ipv4.sh | 66 ++ .../scripts/get-leader-ipv6.sh | 66 ++ enos/modules/vault_proxy/main.tf | 100 +++ .../scripts/set-up-approle-and-proxy.sh | 86 +++ enos/modules/vault_proxy/scripts/use-proxy.sh | 36 + .../vault_raft_remove_node_and_verify/main.tf | 125 ++++ enos/modules/vault_raft_remove_peer/main.tf | 80 +++ .../scripts/raft-remove-peer.sh | 48 ++ enos/modules/vault_setup_dr_primary/main.tf | 61 ++ .../vault_setup_dr_primary/scripts/enable.sh | 17 + enos/modules/vault_setup_perf_primary/main.tf | 60 ++ .../scripts/configure-vault-pr-primary.sh | 17 + .../vault_setup_replication_secondary/main.tf | 114 +++ .../scripts/wait-for-leader-ready.sh | 65 ++ enos/modules/vault_step_down/main.tf | 50 ++ .../scripts/operator-step-down.sh | 19 + enos/modules/vault_test_ui/main.tf | 34 + 
enos/modules/vault_test_ui/outputs.tf | 15 + enos/modules/vault_test_ui/scripts/test_ui.sh | 12 + enos/modules/vault_test_ui/variables.tf | 34 + .../main.tf | 129 ++++ .../scripts/unseal-node.sh | 37 + .../scripts/wait-until-sealed.sh | 29 + enos/modules/vault_upgrade/main.tf | 195 +++++ .../scripts/maybe-remove-old-unit-file.sh | 39 + .../modules/vault_verify_agent_output/main.tf | 44 ++ .../scripts/verify-vault-agent-output.sh | 15 + enos/modules/vault_verify_autopilot/main.tf | 64 ++ .../scripts/smoke-verify-autopilot.sh | 42 ++ .../vault_verify_billing_start_date/main.tf | 64 ++ .../scripts/verify-billing-start.sh | 98 +++ enos/modules/vault_verify_default_lcq/main.tf | 66 ++ .../scripts/smoke-verify-default-lcq.sh | 49 ++ .../vault_verify_dr_replication/main.tf | 117 +++ .../scripts/verify-replication-status.sh | 89 +++ .../main.tf | 117 +++ .../scripts/verify-replication-status.sh | 97 +++ .../vault_verify_raft_auto_join_voter/main.tf | 76 ++ .../scripts/verify-raft-auto-join-voter.sh | 49 ++ .../modules/vault_verify_removed_node/main.tf | 246 +++++++ .../scripts/verify_manual_rejoin_fails.sh | 21 + .../scripts/verify_raft_remove_peer.sh | 60 ++ .../scripts/verify_unseal_fails.sh | 22 + .../vault_verify_removed_node_shim/main.tf | 89 +++ enos/modules/vault_verify_replication/main.tf | 48 ++ .../scripts/smoke-verify-replication.sh | 28 + enos/modules/vault_verify_ui/main.tf | 41 ++ .../scripts/smoke-verify-ui.sh | 20 + enos/modules/vault_verify_undo_logs/main.tf | 77 ++ .../scripts/smoke-verify-undo-logs.sh | 35 + enos/modules/vault_verify_version/main.tf | 100 +++ .../scripts/verify-cli-version.sh | 55 ++ .../scripts/verify-cluster-version.sh | 37 + .../vault_wait_for_cluster_unsealed/main.tf | 62 ++ .../scripts/verify-vault-node-unsealed.sh | 58 ++ enos/modules/vault_wait_for_leader/main.tf | 82 +++ .../scripts/wait-for-leader.sh | 96 +++ .../vault_wait_for_seal_rewrap/main.tf | 78 ++ .../scripts/wait-for-seal-rewrap.sh | 72 ++ 
enos/modules/verify_log_secrets/main.tf | 96 +++ .../scripts/scan_logs_for_secrets.sh | 72 ++ enos/modules/verify_seal_type/main.tf | 54 ++ .../scripts/verify-seal-type.sh | 37 + .../modules/create/auth.tf | 233 ++++++ .../modules/create/aws.tf | 21 + .../modules/create/aws/aws.tf | 158 +++++ .../modules/create/identity.tf | 380 ++++++++++ .../modules/create/kv.tf | 131 ++++ .../modules/create/main.tf | 68 ++ .../modules/create/pki.tf | 69 ++ .../modules/read/auth.tf | 24 + .../modules/read/aws.tf | 15 + .../modules/read/aws/aws.tf | 69 ++ .../modules/read/identity.tf | 56 ++ .../verify_secrets_engines/modules/read/kv.tf | 25 + .../modules/read/main.tf | 67 ++ .../modules/read/pki.tf | 31 + .../scripts/auth-enable.sh | 22 + .../scripts/auth-ldap-write.sh | 36 + .../scripts/auth-userpass-login.sh | 22 + .../scripts/auth-userpass-write.sh | 24 + .../scripts/aws-generate-roles.sh | 63 ++ .../scripts/aws-verify-new-creds.sh | 52 ++ .../scripts/identity-oidc-introspect-token.sh | 33 + .../scripts/identity-verify-entity.sh | 43 ++ .../scripts/identity-verify-oidc.sh | 63 ++ .../verify_secrets_engines/scripts/kv-put.sh | 25 + .../scripts/kv-verify-value.sh | 33 + .../scripts/pki-issue-certificates.sh | 61 ++ .../scripts/pki-verify-certificates.sh | 109 +++ .../scripts/policy-write.sh | 22 + .../verify_secrets_engines/scripts/read.sh | 21 + .../scripts/secrets-enable.sh | 22 + .../scripts/write-payload.sh | 26 + 203 files changed, 16128 insertions(+) create mode 100644 enos/Makefile create mode 100644 enos/README.md create mode 100644 enos/ci/aws-nuke.yml create mode 100644 enos/ci/bootstrap/main.tf create mode 100644 enos/ci/bootstrap/outputs.tf create mode 100644 enos/ci/bootstrap/variables.tf create mode 100644 enos/ci/service-user-iam/main.tf create mode 100644 enos/ci/service-user-iam/outputs.tf create mode 100644 enos/ci/service-user-iam/providers.tf create mode 100644 enos/ci/service-user-iam/service-quotas.tf create mode 100644 enos/ci/service-user-iam/variables.tf 
create mode 100644 enos/enos-dev-variables.hcl create mode 100644 enos/enos-dynamic-config.hcl create mode 100644 enos/enos-globals.hcl create mode 100644 enos/enos-providers.hcl create mode 100644 enos/enos-qualities.hcl create mode 100644 enos/enos-terraform.hcl create mode 100644 enos/enos-variables.hcl create mode 100644 enos/modules/artifact/metadata/main.tf create mode 100644 enos/modules/autopilot_upgrade_storageconfig/main.tf create mode 100644 enos/modules/backend_consul/main.tf create mode 100644 enos/modules/backend_consul/outputs.tf create mode 100644 enos/modules/backend_consul/variables.tf create mode 100644 enos/modules/backend_raft/main.tf create mode 100644 enos/modules/build_artifactory_artifact/main.tf create mode 100644 enos/modules/build_artifactory_package/main.tf create mode 100644 enos/modules/build_crt/main.tf create mode 100644 enos/modules/build_local/main.tf create mode 100755 enos/modules/build_local/scripts/build.sh create mode 100644 enos/modules/choose_follower_host/main.tf create mode 100644 enos/modules/create_vpc/main.tf create mode 100644 enos/modules/create_vpc/outputs.tf create mode 100644 enos/modules/create_vpc/variables.tf create mode 100644 enos/modules/disable_selinux/main.tf create mode 100644 enos/modules/disable_selinux/scripts/make-selinux-permissive.sh create mode 100644 enos/modules/ec2_info/main.tf create mode 100644 enos/modules/generate_dr_operation_token/main.tf create mode 100755 enos/modules/generate_dr_operation_token/scripts/configure-vault-dr-primary.sh create mode 100644 enos/modules/generate_failover_secondary_token/main.tf create mode 100644 enos/modules/generate_failover_secondary_token/scripts/generate-failover-secondary-token.sh create mode 100644 enos/modules/generate_secondary_public_key/main.tf create mode 100644 enos/modules/generate_secondary_token/main.tf create mode 100644 enos/modules/get_local_metadata/main.tf create mode 100755 enos/modules/get_local_metadata/scripts/build_date.sh create mode 
100755 enos/modules/get_local_metadata/scripts/version.sh create mode 100644 enos/modules/install_packages/main.tf create mode 100644 enos/modules/install_packages/scripts/add-repos.sh create mode 100644 enos/modules/install_packages/scripts/install-packages.sh create mode 100644 enos/modules/install_packages/scripts/synchronize-repos.sh create mode 100644 enos/modules/k8s_deploy_vault/main.tf create mode 100644 enos/modules/k8s_deploy_vault/variables.tf create mode 100644 enos/modules/k8s_vault_verify_replication/main.tf create mode 100755 enos/modules/k8s_vault_verify_replication/scripts/smoke-verify-replication.sh create mode 100644 enos/modules/k8s_vault_verify_replication/variables.tf create mode 100644 enos/modules/k8s_vault_verify_ui/main.tf create mode 100755 enos/modules/k8s_vault_verify_ui/scripts/smoke-verify-ui.sh create mode 100644 enos/modules/k8s_vault_verify_ui/variables.tf create mode 100644 enos/modules/k8s_vault_verify_version/main.tf create mode 100755 enos/modules/k8s_vault_verify_version/scripts/get-status.sh create mode 100755 enos/modules/k8s_vault_verify_version/scripts/smoke-verify-version.sh create mode 100644 enos/modules/k8s_vault_verify_version/variables.tf create mode 100644 enos/modules/k8s_vault_verify_write_data/main.tf create mode 100644 enos/modules/k8s_vault_verify_write_data/variables.tf create mode 100644 enos/modules/load_docker_image/main.tf create mode 100644 enos/modules/local_kind_cluster/main.tf create mode 100644 enos/modules/read_license/main.tf create mode 100644 enos/modules/replication_data/main.tf create mode 100644 enos/modules/restart_vault/main.tf create mode 100644 enos/modules/restart_vault/scripts/restart-vault.sh create mode 100644 enos/modules/seal_awskms/main.tf create mode 100644 enos/modules/seal_pkcs11/main.tf create mode 100644 enos/modules/seal_shamir/main.tf create mode 100644 enos/modules/shutdown_multiple_nodes/main.tf create mode 100644 enos/modules/shutdown_node/main.tf create mode 100644 
enos/modules/softhsm_create_vault_keys/main.tf create mode 100644 enos/modules/softhsm_create_vault_keys/scripts/create-keys.sh create mode 100644 enos/modules/softhsm_create_vault_keys/scripts/get-keys.sh create mode 100644 enos/modules/softhsm_distribute_vault_keys/main.tf create mode 100644 enos/modules/softhsm_distribute_vault_keys/scripts/distribute-token.sh create mode 100644 enos/modules/softhsm_init/main.tf create mode 100644 enos/modules/softhsm_init/scripts/init-softhsm.sh create mode 100644 enos/modules/softhsm_install/main.tf create mode 100644 enos/modules/softhsm_install/scripts/find-shared-object.sh create mode 100644 enos/modules/start_vault/main.tf create mode 100644 enos/modules/start_vault/outputs.tf create mode 100644 enos/modules/start_vault/variables.tf create mode 100644 enos/modules/stop_vault/main.tf create mode 100644 enos/modules/target_ec2_fleet/main.tf create mode 100644 enos/modules/target_ec2_fleet/outputs.tf create mode 100644 enos/modules/target_ec2_fleet/variables.tf create mode 100644 enos/modules/target_ec2_shim/main.tf create mode 100644 enos/modules/target_ec2_spot_fleet/main.tf create mode 100644 enos/modules/target_ec2_spot_fleet/outputs.tf create mode 100644 enos/modules/target_ec2_spot_fleet/variables.tf create mode 100644 enos/modules/vault_agent/main.tf create mode 100644 enos/modules/vault_agent/scripts/set-up-approle-and-agent.sh create mode 100644 enos/modules/vault_cluster/main.tf create mode 100644 enos/modules/vault_cluster/outputs.tf create mode 100755 enos/modules/vault_cluster/scripts/create-audit-log-dir.sh create mode 100644 enos/modules/vault_cluster/scripts/enable-audit-devices.sh create mode 100644 enos/modules/vault_cluster/scripts/set-up-login-shell-profile.sh create mode 100644 enos/modules/vault_cluster/scripts/start-audit-socket-listener.sh create mode 100644 enos/modules/vault_cluster/variables.tf create mode 100644 enos/modules/vault_failover_demote_dr_primary/main.tf create mode 100644 
enos/modules/vault_failover_promote_dr_secondary/main.tf create mode 100644 enos/modules/vault_failover_update_dr_primary/main.tf create mode 100644 enos/modules/vault_get_cluster_ips/main.tf create mode 100644 enos/modules/vault_get_cluster_ips/scripts/get-follower-ipv4s.sh create mode 100644 enos/modules/vault_get_cluster_ips/scripts/get-follower-ipv6s.sh create mode 100644 enos/modules/vault_get_cluster_ips/scripts/get-leader-ipv4.sh create mode 100644 enos/modules/vault_get_cluster_ips/scripts/get-leader-ipv6.sh create mode 100644 enos/modules/vault_proxy/main.tf create mode 100644 enos/modules/vault_proxy/scripts/set-up-approle-and-proxy.sh create mode 100644 enos/modules/vault_proxy/scripts/use-proxy.sh create mode 100644 enos/modules/vault_raft_remove_node_and_verify/main.tf create mode 100644 enos/modules/vault_raft_remove_peer/main.tf create mode 100644 enos/modules/vault_raft_remove_peer/scripts/raft-remove-peer.sh create mode 100644 enos/modules/vault_setup_dr_primary/main.tf create mode 100644 enos/modules/vault_setup_dr_primary/scripts/enable.sh create mode 100644 enos/modules/vault_setup_perf_primary/main.tf create mode 100644 enos/modules/vault_setup_perf_primary/scripts/configure-vault-pr-primary.sh create mode 100644 enos/modules/vault_setup_replication_secondary/main.tf create mode 100644 enos/modules/vault_setup_replication_secondary/scripts/wait-for-leader-ready.sh create mode 100644 enos/modules/vault_step_down/main.tf create mode 100644 enos/modules/vault_step_down/scripts/operator-step-down.sh create mode 100644 enos/modules/vault_test_ui/main.tf create mode 100644 enos/modules/vault_test_ui/outputs.tf create mode 100755 enos/modules/vault_test_ui/scripts/test_ui.sh create mode 100644 enos/modules/vault_test_ui/variables.tf create mode 100644 enos/modules/vault_unseal_replication_followers/main.tf create mode 100755 enos/modules/vault_unseal_replication_followers/scripts/unseal-node.sh create mode 100644 
enos/modules/vault_unseal_replication_followers/scripts/wait-until-sealed.sh create mode 100644 enos/modules/vault_upgrade/main.tf create mode 100644 enos/modules/vault_upgrade/scripts/maybe-remove-old-unit-file.sh create mode 100644 enos/modules/vault_verify_agent_output/main.tf create mode 100644 enos/modules/vault_verify_agent_output/scripts/verify-vault-agent-output.sh create mode 100644 enos/modules/vault_verify_autopilot/main.tf create mode 100755 enos/modules/vault_verify_autopilot/scripts/smoke-verify-autopilot.sh create mode 100644 enos/modules/vault_verify_billing_start_date/main.tf create mode 100644 enos/modules/vault_verify_billing_start_date/scripts/verify-billing-start.sh create mode 100644 enos/modules/vault_verify_default_lcq/main.tf create mode 100755 enos/modules/vault_verify_default_lcq/scripts/smoke-verify-default-lcq.sh create mode 100644 enos/modules/vault_verify_dr_replication/main.tf create mode 100644 enos/modules/vault_verify_dr_replication/scripts/verify-replication-status.sh create mode 100644 enos/modules/vault_verify_performance_replication/main.tf create mode 100644 enos/modules/vault_verify_performance_replication/scripts/verify-replication-status.sh create mode 100644 enos/modules/vault_verify_raft_auto_join_voter/main.tf create mode 100644 enos/modules/vault_verify_raft_auto_join_voter/scripts/verify-raft-auto-join-voter.sh create mode 100644 enos/modules/vault_verify_removed_node/main.tf create mode 100644 enos/modules/vault_verify_removed_node/scripts/verify_manual_rejoin_fails.sh create mode 100755 enos/modules/vault_verify_removed_node/scripts/verify_raft_remove_peer.sh create mode 100644 enos/modules/vault_verify_removed_node/scripts/verify_unseal_fails.sh create mode 100644 enos/modules/vault_verify_removed_node_shim/main.tf create mode 100644 enos/modules/vault_verify_replication/main.tf create mode 100644 enos/modules/vault_verify_replication/scripts/smoke-verify-replication.sh create mode 100644 
enos/modules/vault_verify_ui/main.tf create mode 100644 enos/modules/vault_verify_ui/scripts/smoke-verify-ui.sh create mode 100644 enos/modules/vault_verify_undo_logs/main.tf create mode 100644 enos/modules/vault_verify_undo_logs/scripts/smoke-verify-undo-logs.sh create mode 100644 enos/modules/vault_verify_version/main.tf create mode 100644 enos/modules/vault_verify_version/scripts/verify-cli-version.sh create mode 100644 enos/modules/vault_verify_version/scripts/verify-cluster-version.sh create mode 100644 enos/modules/vault_wait_for_cluster_unsealed/main.tf create mode 100644 enos/modules/vault_wait_for_cluster_unsealed/scripts/verify-vault-node-unsealed.sh create mode 100644 enos/modules/vault_wait_for_leader/main.tf create mode 100644 enos/modules/vault_wait_for_leader/scripts/wait-for-leader.sh create mode 100644 enos/modules/vault_wait_for_seal_rewrap/main.tf create mode 100644 enos/modules/vault_wait_for_seal_rewrap/scripts/wait-for-seal-rewrap.sh create mode 100644 enos/modules/verify_log_secrets/main.tf create mode 100644 enos/modules/verify_log_secrets/scripts/scan_logs_for_secrets.sh create mode 100644 enos/modules/verify_seal_type/main.tf create mode 100644 enos/modules/verify_seal_type/scripts/verify-seal-type.sh create mode 100644 enos/modules/verify_secrets_engines/modules/create/auth.tf create mode 100644 enos/modules/verify_secrets_engines/modules/create/aws.tf create mode 100644 enos/modules/verify_secrets_engines/modules/create/aws/aws.tf create mode 100644 enos/modules/verify_secrets_engines/modules/create/identity.tf create mode 100644 enos/modules/verify_secrets_engines/modules/create/kv.tf create mode 100644 enos/modules/verify_secrets_engines/modules/create/main.tf create mode 100644 enos/modules/verify_secrets_engines/modules/create/pki.tf create mode 100644 enos/modules/verify_secrets_engines/modules/read/auth.tf create mode 100644 enos/modules/verify_secrets_engines/modules/read/aws.tf create mode 100644 
enos/modules/verify_secrets_engines/modules/read/aws/aws.tf create mode 100644 enos/modules/verify_secrets_engines/modules/read/identity.tf create mode 100644 enos/modules/verify_secrets_engines/modules/read/kv.tf create mode 100644 enos/modules/verify_secrets_engines/modules/read/main.tf create mode 100644 enos/modules/verify_secrets_engines/modules/read/pki.tf create mode 100644 enos/modules/verify_secrets_engines/scripts/auth-enable.sh create mode 100644 enos/modules/verify_secrets_engines/scripts/auth-ldap-write.sh create mode 100644 enos/modules/verify_secrets_engines/scripts/auth-userpass-login.sh create mode 100644 enos/modules/verify_secrets_engines/scripts/auth-userpass-write.sh create mode 100755 enos/modules/verify_secrets_engines/scripts/aws-generate-roles.sh create mode 100755 enos/modules/verify_secrets_engines/scripts/aws-verify-new-creds.sh create mode 100644 enos/modules/verify_secrets_engines/scripts/identity-oidc-introspect-token.sh create mode 100644 enos/modules/verify_secrets_engines/scripts/identity-verify-entity.sh create mode 100644 enos/modules/verify_secrets_engines/scripts/identity-verify-oidc.sh create mode 100644 enos/modules/verify_secrets_engines/scripts/kv-put.sh create mode 100644 enos/modules/verify_secrets_engines/scripts/kv-verify-value.sh create mode 100755 enos/modules/verify_secrets_engines/scripts/pki-issue-certificates.sh create mode 100755 enos/modules/verify_secrets_engines/scripts/pki-verify-certificates.sh create mode 100644 enos/modules/verify_secrets_engines/scripts/policy-write.sh create mode 100644 enos/modules/verify_secrets_engines/scripts/read.sh create mode 100644 enos/modules/verify_secrets_engines/scripts/secrets-enable.sh create mode 100644 enos/modules/verify_secrets_engines/scripts/write-payload.sh diff --git a/enos/Makefile b/enos/Makefile new file mode 100644 index 0000000..24c66eb --- /dev/null +++ b/enos/Makefile @@ -0,0 +1,52 @@ +VAULT_VERSION=$$(cat $(CURDIR)/../version/VERSION) + +.PHONY: default 
+default: check-fmt shellcheck + +.PHONY: check-fmt +check-fmt: check-fmt-enos check-fmt-modules check-shfmt + +.PHONY: fmt +fmt: fmt-enos fmt-modules shfmt + +.PHONY: check-fmt-enos +check-fmt-enos: + enos fmt --check --diff . + enos fmt --check --diff ./k8s + +.PHONY: fmt-enos +fmt-enos: + enos fmt . + enos fmt ./k8s + +.PHONY: gen-enos +gen-enos: + pushd ../tools/pipeline &> /dev/null && go run ./... generate enos-dynamic-config -d ../../enos -f enos-dynamic-config.hcl -e ce -v $(VAULT_VERSION) -n 3 --log info && popd &> /dev/null + +.PHONY: check-fmt-modules +check-fmt-modules: + terraform fmt -check -diff -recursive ./modules + +.PHONY: fmt-modules +fmt-modules: + terraform fmt -diff -recursive ./modules + +.PHONY: validate-enos +validate-enos: + enos scenario validate --timeout 30m0s --chdir ./k8s + enos scenario validate --timeout 30m0s + +.PHONY: lint +lint: check-fmt check-fmt-modules check-shfmt shellcheck validate-enos + +.PHONY: shellcheck +shellcheck: + find ./modules/ -type f -name '*.sh' | xargs shellcheck + +.PHONY: shfmt +shfmt: + find ./modules/ -type f -name '*.sh' | xargs shfmt -l -w -i 2 -bn -ci -kp -sr + +.PHONY: check-shfmt +check-shfmt: + find ./modules/ -type f -name '*.sh' | xargs shfmt -l -d -i 2 -bn -ci -kp -sr diff --git a/enos/README.md b/enos/README.md new file mode 100644 index 0000000..06d14c8 --- /dev/null +++ b/enos/README.md @@ -0,0 +1,259 @@ +# Enos + +Enos is an quality testing framework that allows composing and executing quality +requirement scenarios as code. For Vault, it is currently used to perform +infrastructure integration testing using the artifacts that are created as part +of the `build` workflow. While intended to be executed via Github Actions using +the results of the `build` workflow, scenarios are also executable from a developer +machine that has the requisite dependencies and configuration. 
+ +Refer to the [Enos documentation](https://github.com/hashicorp/Enos-Docs) +for further information regarding installation, execution or composing Enos scenarios. + +## When to use Enos +Determining whether to use `vault.NewTestCluster()` or Enos for testing a feature +or scenario is ultimately up to the author. Sometimes one, the other, or both +might be appropriate depending on the requirements. Generally, `vault.NewTestCluster()` +is going to give you faster feedback and execution time, whereas Enos is going +to give you a real-world execution and validation of the requirement. Consider +the following cases as examples of when one might opt for an Enos scenario: + +- The feature requires third-party integrations. Whether that be networked + dependencies like a real Consul backend, a real KMS key to test awskms + auto-unseal, auto-join discovery using AWS tags, or Cloud hardware KMS's. +- The feature might behave differently under multiple configuration variants + and therefore should be tested with both combinations, e.g. auto-unseal and + manual shamir unseal or replication in HA mode with integrated storage or + Consul storage. +- The scenario requires coordination between multiple targets. For example, + consider the complex lifecycle event of migrating the seal type or storage, + or manually triggering a raft disaster scenario by partitioning the network + between the leader and follower nodes. Or perhaps an auto-pilot upgrade between + a stable version of Vault and our candidate version. +- The scenario has specific deployment strategy requirements. For example, + if we want to add a regression test for an issue that only arises when the + software is deployed in a certain manner. +- The scenario needs to use actual build artifacts that will be promoted + through the pipeline. + +## Requirements +- AWS access. HashiCorp Vault developers should use Doormat. +- Terraform >= 1.7 +- Enos >= v0.0.28. 
You can [download a release](https://github.com/hashicorp/enos/releases/) or + install it with Homebrew: + ```shell + brew tap hashicorp/tap && brew update && brew install hashicorp/tap/enos + ``` +- An SSH keypair in the AWS region you wish to run the scenario. You can use + Doormat to log in to the AWS console to create or upload an existing keypair. +- A Vault artifact is downloaded from the GHA artifacts when using the `artifact_source:crt` variants, from Artifactory when using `artifact_source:artifactory`, and is built locally from the current branch when using `artifact_source:local` variant. + +## Scenario Variables +In CI, each scenario is executed via Github Actions and has been configured using +environment variable inputs that follow the `ENOS_VAR_varname` pattern. + +For local execution you can specify all the required variables using environment +variables, or you can update `enos.vars.hcl` with values and uncomment the lines. + +Variables that are required: +* `aws_ssh_keypair_name` +* `aws_ssh_private_key_path` +* `vault_bundle_path` +* `vault_license_path` (only required for non-OSS editions) + +See [enos.vars.hcl](./enos.vars.hcl) or [enos-variables.hcl](./enos-variables.hcl) +for further descriptions of the variables. + +Additional variable information can also be found in the [Scenario Outlines](#scenario_outlines) + +## Scenario Outlines +Enos is capable of producing an outline of each scenario that is defined in a given directory. These +scenarios often include a description of what behavior the scenario performs, which variants are +available, and which variables are required. They also provide a step by step breakdown including +which quality requirements are verified by a given step. + +You can generate outlines of all scenarios or specify one via its name. 
+ +From the `enos` directory: +```bash +enos scenario outline smoke +``` + +There are also HTML versions available for an improved reading experience: +```bash +enos scenario outline --format html > index.html +open index.html +``` + +## Executing Scenarios +From the `enos` directory: + +```bash +# List all available scenarios +enos scenario list +# Run the smoke or upgrade scenario with an artifact that is built locally. Make sure +# the local machine has been configured as detailed in the requirements +# section. This will execute the scenario and clean up any resources if successful. +enos scenario run smoke artifact_source:local +enos scenario run upgrade artifact_source:local +# To run the same scenario variants that are run in CI, refer to the scenarios listed +# in json files under .github/enos-run-matrices directory, +# adding `artifact_source:local` to run locally. +enos scenario run smoke backend:consul consul_version:1.12.3 distro:ubuntu seal:awskms artifact_source:local arch:amd64 edition:oss +# Launch an individual scenario but leave infrastructure up after execution +enos scenario launch smoke artifact_source:local +# Check an individual scenario for validity. This is useful during scenario +# authoring and debugging. +enos scenario validate smoke artifact_source:local +# If you've run the tests and desire to see the outputs, such as the URL or +# credentials, you can run the output command to see them. Please note that +# after "run" or destroy there will be no "outputs" as the infrastructure +# will have been destroyed and state cleared. +enos scenario output smoke artifact_source:local +# Explicitly destroy all existing infrastructure +enos scenario destroy smoke artifact_source:local +``` + +Refer to the [Enos documentation](https://github.com/hashicorp/Enos-Docs) +for further information regarding installation, execution or composing scenarios. 
+ +## UI Tests +The [`ui` scenario](./enos-scenario-ui.hcl) creates a Vault cluster (deployed to AWS) using a version +built from the current checkout of the project. Once the cluster is available the UI acceptance tests +are run in a headless browser. +### Variables +In addition to the required variables that must be set, as described in the [Scenario Variables](#Scenario Variables), +the `ui` scenario has two optional variables: + +**ui_test_filter** - An optional test filter to limit the tests that are run, i.e. `'!enterprise'`. +To set a filter export the variable as follows: +```shell +> export ENOS_VAR_ui_test_filter="some filter" +``` +**ui_run_tests** - An optional boolean variable to run or not run the tests. The default value is true. +Setting this value to false is useful in the case where you want to create a cluster, but run the tests +manually. The section [Running the Tests](#Running the Tests) describes the different ways to run the +'UI' acceptance tests. + +### Running the Tests +The UI tests can be run fully automated or manually. +#### Fully Automated +The following will deploy the cluster, run the tests, and subsequently tear down the cluster: +```shell +> export ENOS_VAR_ui_test_filter="some filter" # <-- optional +> cd enos +> enos scenario ui run edition:oss +``` +#### Manually +The UI tests can be run manually as follows: +```shell +> export ENOS_VAR_ui_test_filter="some filter" # <-- optional +> export ENOS_VAR_ui_run_tests=false +> cd enos +> enos scenario ui launch edition:oss +# once complete the scenario will output a set of environment variables that must be exported. 
The +# output will look as follows: +export TEST_FILTER='some filter' \ +export VAULT_ADDR='http://:8200' \ +export VAULT_TOKEN='' \ +export VAULT_UNSEAL_KEYS='["","",""]' +# copy and paste the above into the terminal to export the values +> cd ../ui +> yarn test:enos # run headless +# or +> yarn test:enos -s # run manually in a web browser +# once testing is complete +> cd ../enos +> enos scenario ui destroy edition:oss +``` + +# Variants +Both scenarios support a matrix of variants. In order to achieve broad coverage while +keeping test run time reasonable, the variants executed by the `enos-run` Github +Actions are tailored to maximize variant distribution per scenario. + +## `artifact_source:crt` +This variant is designed for use in Github Actions. The `enos-run.yml` workflow +downloads the artifact built by the `build.yml` workflow, unzips it, and sets the +`vault_bundle_path` to the zip file and the `vault_local_binary_path` to the binary. + +## `artifact_source:local` +This variant is for running the Enos scenario locally. It builds the Vault bundle +from the current branch, placing the bundle at the `vault_bundle_path` and the +unzipped Vault binary at the `vault_local_binary_path`. + +## `artifact_source:artifactory` +This variant is for running the Enos scenario to test an artifact from Artifactory. It requires the following Enos variables to be set: +* `artifactory_username` +* `artifactory_token` +* `aws_ssh_keypair_name` +* `aws_ssh_private_key_path` +* `vault_product_version` +* `vault_revision` + +# CI Bootstrap +In order to execute any of the scenarios in this repository, it is first necessary to bootstrap the +CI AWS account with the required permissions, service quotas and supporting AWS resources. There are +two Terraform modules which are used for this purpose, [service-user-iam](./ci/service-user-iam) for +the account permissions and service quotas, and [bootstrap](./ci/bootstrap) for the supporting resources. 
+ +**Supported Regions** - enos scenarios are supported in the following regions: +`"us-east-1", "us-east-2", "us-west-1", "us-west-2"` + +## Bootstrap Process +These steps should be followed to bootstrap this repo for enos scenario execution: + +### Set up CI service user IAM role and Service Quotas +The service user that is used when executing enos scenarios from any GitHub Action workflow must have +a properly configured IAM role granting the access required to create resources in AWS. Additionally, +service quotas need to be adjusted to ensure that normal use of the ci account does not cause any +service quotas to be exceeded. The [service-user-iam](./ci/service-user-iam) module contains the IAM +Policy and Role that grants this access as well as the service quota increase requests to adjust +the service quotas. This module should be updated whenever a new AWS resource type is required for a +scenario or a service quota limit needs to be increased. Since this is persistent and cannot be created +and destroyed each time a scenario is run, the Terraform state will be managed by Terraform Cloud. +Here are the steps to configure the GitHub Actions service user: + +#### Pre-requisites +- Full access to the CI AWS account is required. + +**Notes:** +- For help with access to Terraform Cloud and the CI Account, contact the QT team on Slack (#team-quality) + for an invite. After receiving an invite to Terraform Cloud, a personal access token can be created + by clicking `User Settings` --> `Tokens` --> `Create an API token`. +- Access to the AWS account can be done via Doormat, at: https://doormat.hashicorp.services/. + - For the vault repo the account is: `vault_ci` and for the vault-enterprise repo, the account is: + `vault-enterprise_ci`. + - Access can be requested by clicking: `Cloud Access` --> `AWS` --> `Request Account Access`. + +1. 
**Create the Terraform Cloud Workspace** - The name of the workspace to be created depends on the + repository for which it is being created, but the pattern is: `<repository>-ci-enos-service-user-iam`, + e.g. `vault-ci-enos-service-user-iam`. It is important that the execution mode for the workspace be set + to `local`. For help on setting up the workspace, contact the QT team on Slack (#team-quality) + + +2. **Execute the Terraform module** +```shell +> cd ./enos/ci/service-user-iam +> export TF_WORKSPACE=<repository>-ci-enos-service-user-iam +> export TF_TOKEN_app_terraform_io=<your Terraform Cloud API token> +> export TF_VAR_repository=<vault|vault-enterprise> +> terraform init +> terraform plan +> terraform apply -auto-approve +``` + +### Bootstrap the CI resources +Bootstrapping of the resources in the CI account is accomplished via the GitHub Actions workflow: +[enos-bootstrap-ci](../.github/workflows/enos-bootstrap-ci.yml). Before this workflow can be run a +workspace must be created as follows: + +1. **Create the Terraform Cloud Workspace** - The name of the workspace to be created depends on the repository + for which it is being created, but the pattern is: `<repository>-ci-bootstrap`, e.g. + `vault-ci-bootstrap`. It is important that the execution mode for the workspace be set to + `local`. For help on setting up the workspace, contact the QT team on Slack (#team-quality). + +Once the workspace has been created, changes to the bootstrap module will automatically be applied via +the GitHub PR workflow. Each time a PR is created for changes to files within that module the module +will be planned via the workflow described above. If the plan is ok and the PR is merged, the module +will automatically be applied via the same workflow. diff --git a/enos/ci/aws-nuke.yml b/enos/ci/aws-nuke.yml new file mode 100644 index 0000000..fd7dd54 --- /dev/null +++ b/enos/ci/aws-nuke.yml @@ -0,0 +1,409 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +regions: +- eu-north-1 +- ap-south-1 +- eu-west-3 +- eu-west-2 +- eu-west-1 +- ap-northeast-3 +- ap-northeast-2 +- ap-northeast-1 +- sa-east-1 +- ca-central-1 +- ap-southeast-1 +- ap-southeast-2 +- eu-central-1 +- us-east-1 +- us-east-2 +- us-west-1 +- us-west-2 +- global + +blocklist: + - 1234567890 + +accounts: + # replaced in CI + ACCOUNT_NUM: + presets: + - default + - olderthan + - honeybee + - enos + +presets: + default: + # Ignores default VPC resources + filters: + EC2VPC: + - property: IsDefault + value: "true" + EC2RouteTable: + - property: DefaultVPC + value: "true" + EC2DHCPOption: + - property: DefaultVPC + value: "true" + EC2InternetGateway: + - property: DefaultVPC + value: "true" + EC2Subnet: + - property: DefaultVPC + value: "true" + EC2InternetGatewayAttachment: + - property: DefaultVPC + value: "true" + + olderthan: + # Filters resources by age (when available) + # TIME_LIMIT replaced in CI + filters: + EC2Instance: + - property: LaunchTime + type: dateOlderThan + value: "TIME_LIMIT" + EC2NetworkACL: + EC2RouteTable: + EC2SecurityGroup: + EC2Subnet: + EC2Volume: + EC2VPC: + - property: tag:cloud-nuke-first-seen + type: dateOlderThan + value: "TIME_LIMIT" + ELBv2: + - property: tag:cloud-nuke-first-seen + type: dateOlderThan + value: "TIME_LIMIT" + ELBv2TargetGroup: + EC2NetworkInterface: + EC2InternetGateway: + EC2InternetGatewayAttachment: + RDSInstance: + - property: InstanceCreateTime + type: dateOlderThan + value: "TIME_LIMIT" + + honeybee: + # Cloudsec + filters: + IAMRole: + - property: tag:hc-config-as-code + value: "honeybee" + - property: Name + type: glob + value: "vault-assumed-role-credentials-demo" + IAMRolePolicy: + - property: tag:role:hc-config-as-code + value: "honeybee" + - property: role:RoleName + type: glob + value: "vault-assumed-role-credentials-demo" + IAMRolePolicyAttachment: + - property: tag:role:hc-config-as-code + value: "honeybee" + - property: Name + type: glob + value: 
"vault-assumed-role-credentials-demo" + + enos: + # Existing CI to be cleaned up later + filters: + LambdaFunction: + - property: Name + value: "enos_cleanup" + IAMRole: + - property: Name + type: glob + value: "github_actions-*" + - property: Name + value: "rds-monitoring-role" + IAMRolePolicy: + - property: role:RoleName + type: glob + value: "github_actions*" + - property: role:RoleName + type: glob + value: "rds-*" + IAMRolePolicyAttachment: + - "rds-monitoring-role -> AmazonRDSEnhancedMonitoringRole" + IAMUserPolicy: + - "github_actions-vault_ci -> AssumeServiceUserRole" + - "github_actions-vault_enterprise_ci -> AssumeServiceUserRole" + +resource-types: + # Run against everything, excluding these: + excludes: + # Avoid cloudsec things + - IAMUser + - IAMPolicy + - IAMUserAccessKey + - S3Object + - S3Bucket + - EC2KeyPair + - CloudWatchEventsTarget + - CloudWatchEventsRule + - CloudWatchLogsLogGroup + - ConfigServiceConfigurationRecorder + - ConfigServiceConfigRule + - ConfigServiceDeliveryChannel + - CloudTrailTrail + - RDSSnapshot + - RDSClusterSnapshot + - WAFWebACL + - WAFv2WebACL + - WAFRegionalWebACL + - GuardDutyDetector + + # Unused services, filtering these speeds up runs and + # removes errors about things we don't have enabled + - ACMCertificate + - ACMPCACertificateAuthority + - ACMPCACertificateAuthorityState + - AMGWorkspace + - AMPWorkspace + - APIGatewayAPIKey + - APIGatewayClientCertificate + - APIGatewayDomainName + - APIGatewayRestAPI + - APIGatewayUsagePlan + - APIGatewayV2API + - APIGatewayV2VpcLink + - APIGatewayVpcLink + - AWS::AppFlow::ConnectorProfile + - AWS::AppFlow::Flow + - AWS::AppRunner::Service + - AWS::ApplicationInsights::Application + - AWS::Backup::Framework + - AWS::MWAA::Environment + - AWS::NetworkFirewall::Firewall + - AWS::NetworkFirewall::FirewallPolicy + - AWS::NetworkFirewall::RuleGroup + - AWS::Synthetics::Canary + - AWS::Timestream::Database + - AWS::Timestream::ScheduledQuery + - AWS::Timestream::Table + - 
AWS::Transfer::Workflow + - AWSBackupPlan + - AWSBackupRecoveryPoint + - AWSBackupSelection + - AWSBackupVault + - AWSBackupVaultAccessPolicy + - AccessAnalyzer + - AppMeshMesh + - AppMeshRoute + - AppMeshVirtualGateway + - AppMeshVirtualNode + - AppMeshVirtualRouter + - AppMeshVirtualService + - AppStreamDirectoryConfig + - AppStreamFleet + - AppStreamFleetState + - AppStreamImage + - AppStreamImageBuilder + - AppStreamImageBuilderWaiter + - AppStreamStack + - AppStreamStackFleetAttachment + - AppSyncGraphqlAPI + - ApplicationAutoScalingScalableTarget + - ArchiveRule + - AthenaNamedQuery + - AthenaWorkGroup + - BatchComputeEnvironment + - BatchComputeEnvironmentState + - BatchJobQueue + - BatchJobQueueState + - BillingCostandUsageReport + - Budget + - Cloud9Environment + - CloudDirectoryDirectory + - CloudDirectorySchema + - CodeArtifactDomain + - CodeArtifactRepository + - CodeBuildProject + - CodeCommitRepository + - CodeDeployApplication + - CodePipelinePipeline + - CodeStarConnection + - CodeStarNotificationRule + - CodeStarProject + - CognitoIdentityPool + - CognitoIdentityProvider + - CognitoUserPool + - CognitoUserPoolClient + - CognitoUserPoolDomain + - ComprehendDocumentClassifier + - ComprehendDominantLanguageDetectionJob + - ComprehendEndpoint + - ComprehendEntitiesDetectionJob + - ComprehendEntityRecognizer + - ComprehendKeyPhrasesDetectionJob + - ComprehendSentimentDetectionJob + - ConfigServiceConfigRule + - ConfigServiceConfigurationRecorder + - ConfigServiceDeliveryChannel + - DAXCluster + - DAXParameterGroup + - DAXSubnetGroup + - DataPipelinePipeline + - DatabaseMigrationServiceCertificate + - DatabaseMigrationServiceEndpoint + - DatabaseMigrationServiceEventSubscription + - DatabaseMigrationServiceReplicationInstance + - DatabaseMigrationServiceReplicationTask + - DatabaseMigrationServiceSubnetGroup + - DeviceFarmProject + - DirectoryServiceDirectory + - EC2ClientVpnEndpointAttachment + - EC2ClientVpnEndpoint + - EC2DefaultSecurityGroupRule + - 
FMSNotificationChannel + - FMSPolicy + - FSxBackup + - FSxFileSystem + - FirehoseDeliveryStream + - GlobalAccelerator + - GlobalAcceleratorEndpointGroup + - GlobalAcceleratorListener + - GlueClassifier + - GlueConnection + - GlueCrawler + - GlueDatabase + - GlueDevEndpoint + - GlueJob + - GlueTrigger + - Inspector2 + - InspectorAssessmentRun + - InspectorAssessmentTarget + - InspectorAssessmentTemplate + - IoTAuthorizer + - IoTCACertificate + - IoTCertificate + - IoTJob + - IoTOTAUpdate + - IoTPolicy + - IoTRoleAlias + - IoTStream + - IoTThing + - IoTThingGroup + - IoTThingType + - IoTThingTypeState + - IoTTopicRule + - KendraIndex + - KinesisAnalyticsApplication + - KinesisStream + - KinesisVideoProject + - LexBot + - LexIntent + - LexModelBuildingServiceBotAlias + - LexSlotType + - LifecycleHook + - LightsailDisk + - LightsailDomain + - LightsailInstance + - LightsailKeyPair + - LightsailLoadBalancer + - LightsailStaticIP + - MQBroker + - MSKCluster + - MSKConfiguration + - MachineLearningBranchPrediction + - MachineLearningDataSource + - MachineLearningEvaluation + - MachineLearningMLModel + - Macie + - MediaConvertJobTemplate + - MediaConvertPreset + - MediaConvertQueue + - MediaLiveChannel + - MediaLiveInput + - MediaLiveInputSecurityGroup + - MediaPackageChannel + - MediaPackageOriginEndpoint + - MediaStoreContainer + - MediaStoreDataItems + - MediaTailorConfiguration + - MobileProject + - NeptuneCluster + - NeptuneInstance + - NetpuneSnapshot + - OpsWorksApp + - OpsWorksCMBackup + - OpsWorksCMServer + - OpsWorksCMServerState + - OpsWorksInstance + - OpsWorksLayer + - OpsWorksUserProfile + - QLDBLedger + - RoboMakerRobotApplication + - RoboMakerSimulationApplication + - RoboMakerSimulationJob + - SESConfigurationSet + - SESIdentity + - SESReceiptFilter + - SESReceiptRuleSet + - SESTemplate + - SSMActivation + - SSMAssociation + - SSMDocument + - SSMMaintenanceWindow + - SSMParameter + - SSMPatchBaseline + - SSMResourceDataSync + - SageMakerApp + - 
SageMakerDomain + - SageMakerEndpoint + - SageMakerEndpointConfig + - SageMakerModel + - SageMakerNotebookInstance + - SageMakerNotebookInstanceLifecycleConfig + - SageMakerNotebookInstanceState + - SageMakerUserProfiles + - ServiceCatalogConstraintPortfolioAttachment + - ServiceCatalogPortfolio + - ServiceCatalogPortfolioProductAttachment + - ServiceCatalogPortfolioShareAttachment + - ServiceCatalogPrincipalPortfolioAttachment + - ServiceCatalogProduct + - ServiceCatalogProvisionedProduct + - ServiceCatalogTagOption + - ServiceCatalogTagOptionPortfolioAttachment + - ServiceDiscoveryInstance + - ServiceDiscoveryNamespace + - ServiceDiscoveryService + - SimpleDBDomain + - StorageGatewayFileShare + - StorageGatewayGateway + - StorageGatewayTape + - StorageGatewayVolume + - TransferServer + - TransferServerUser + - WAFRegionalByteMatchSet + - WAFRegionalByteMatchSetIP + - WAFRegionalIPSet + - WAFRegionalIPSetIP + - WAFRegionalRateBasedRule + - WAFRegionalRateBasedRulePredicate + - WAFRegionalRegexMatchSet + - WAFRegionalRegexMatchTuple + - WAFRegionalRegexPatternSet + - WAFRegionalRegexPatternString + - WAFRegionalRule + - WAFRegionalRuleGroup + - WAFRegionalRulePredicate + - WAFRegionalWebACL + - WAFRegionalWebACLRuleAttachment + - WAFRule + - WAFWebACL + - WAFWebACLRuleAttachment + - WAFv2IPSet + - WAFv2RegexPatternSet + - WAFv2RuleGroup + - WAFv2WebACL + - WorkLinkFleet + - WorkSpacesWorkspace + - XRayGroup + - XRaySamplingRule + diff --git a/enos/ci/bootstrap/main.tf b/enos/ci/bootstrap/main.tf new file mode 100644 index 0000000..db89663 --- /dev/null +++ b/enos/ci/bootstrap/main.tf @@ -0,0 +1,69 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + } + } + + cloud { + hostname = "app.terraform.io" + organization = "hashicorp-qti" + // workspace must be exported in the environment as: TF_WORKSPACE=<repository>-ci-enos-bootstrap + } +} + +provider "aws" { + region = "us-east-1" + alias = "us_east_1" +} + +provider "aws" { + region = "us-east-2" + alias = "us_east_2" +} + +provider "aws" { + region = "us-west-1" + alias = "us_west_1" +} + +provider "aws" { + region = "us-west-2" + alias = "us_west_2" +} + + +locals { + key_name = "${var.repository}-ci-ssh-key" +} + +resource "aws_key_pair" "enos_ci_key_us_east_1" { + key_name = local.key_name + public_key = var.aws_ssh_public_key + + provider = aws.us_east_1 +} + +resource "aws_key_pair" "enos_ci_key_us_east_2" { + key_name = local.key_name + public_key = var.aws_ssh_public_key + + provider = aws.us_east_2 +} + +resource "aws_key_pair" "enos_ci_key_us_west_1" { + key_name = local.key_name + public_key = var.aws_ssh_public_key + + provider = aws.us_west_1 +} + +resource "aws_key_pair" "enos_ci_key_us_west_2" { + key_name = local.key_name + public_key = var.aws_ssh_public_key + + provider = aws.us_west_2 +} diff --git a/enos/ci/bootstrap/outputs.tf b/enos/ci/bootstrap/outputs.tf new file mode 100644 index 0000000..a83ef9e --- /dev/null +++ b/enos/ci/bootstrap/outputs.tf @@ -0,0 +1,23 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +output "keys" { + value = { + "us-east-1" = { + name = aws_key_pair.enos_ci_key_us_east_1.key_name + arn = aws_key_pair.enos_ci_key_us_east_1.arn + } + "us-east-2" = { + name = aws_key_pair.enos_ci_key_us_east_2.key_name + arn = aws_key_pair.enos_ci_key_us_east_2.arn + } + "us-west-1" = { + name = aws_key_pair.enos_ci_key_us_west_1.key_name + arn = aws_key_pair.enos_ci_key_us_west_1.arn + } + "us-west-2" = { + name = aws_key_pair.enos_ci_key_us_west_2.key_name + arn = aws_key_pair.enos_ci_key_us_west_2.arn + } + } +} diff --git a/enos/ci/bootstrap/variables.tf b/enos/ci/bootstrap/variables.tf new file mode 100644 index 0000000..7e80d5c --- /dev/null +++ b/enos/ci/bootstrap/variables.tf @@ -0,0 +1,16 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +variable "aws_ssh_public_key" { + description = "The public key to use for the ssh key" + type = string +} + +variable "repository" { + description = "The repository to bootstrap the ci for, either 'vault' or 'vault-enterprise'" + type = string + validation { + condition = contains(["vault", "vault-enterprise"], var.repository) + error_message = "Repository must be one of either 'vault' or 'vault-enterprise'" + } +} diff --git a/enos/ci/service-user-iam/main.tf b/enos/ci/service-user-iam/main.tf new file mode 100644 index 0000000..da5f20b --- /dev/null +++ b/enos/ci/service-user-iam/main.tf @@ -0,0 +1,242 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + } + } + + cloud { + hostname = "app.terraform.io" + organization = "hashicorp-qti" + // workspace must be exported in the environment as: TF_WORKSPACE=-ci-enos-service-user-iam + } +} + +locals { + enterprise_repositories = ["vault-enterprise"] + is_ent = contains(local.enterprise_repositories, var.repository) + ci_account_prefix = local.is_ent ? 
"vault_enterprise" : "vault" + service_user = "github_actions-${local.ci_account_prefix}_ci" + aws_account_id = local.is_ent ? "505811019928" : "040730498200" +} + +resource "aws_iam_role" "role" { + provider = aws.us_east_1 + name = local.service_user + assume_role_policy = data.aws_iam_policy_document.assume_role_policy_document.json +} + +data "aws_iam_policy_document" "assume_role_policy_document" { + provider = aws.us_east_1 + + statement { + effect = "Allow" + actions = ["sts:AssumeRole"] + + principals { + type = "AWS" + identifiers = ["arn:aws:iam::${local.aws_account_id}:user/${local.service_user}"] + } + } +} + +resource "aws_iam_role_policy" "role_policy" { + provider = aws.us_east_1 + role = aws_iam_role.role.name + name = "${local.service_user}_policy" + policy = data.aws_iam_policy_document.role_policy.json +} + +data "aws_iam_policy_document" "role_policy" { + source_policy_documents = [ + data.aws_iam_policy_document.enos_scenario.json, + data.aws_iam_policy_document.aws_nuke.json, + ] +} + +data "aws_iam_policy_document" "aws_nuke" { + provider = aws.us_east_1 + + statement { + effect = "Allow" + actions = [ + "ec2:DescribeInternetGateways", + "ec2:DescribeNatGateways", + "ec2:DescribeRegions", + "ec2:DescribeVpnGateways", + "iam:DeleteAccessKey", + "iam:DeleteUser", + "iam:DeleteUserPolicy", + "iam:DetachUserPolicy", + "iam:GetUser", + "iam:ListAccessKeys", + "iam:ListAccountAliases", + "iam:ListGroupsForUser", + "iam:ListSSHPublicKeys", + "iam:ListUserPolicies", + "iam:ListUserTags", + "iam:ListUsers", + "iam:ListVirtualMFADevices", + "iam:UntagUser", + "servicequotas:ListServiceQuotas" + ] + + resources = ["*"] + } +} + +data "aws_iam_policy_document" "enos_scenario" { + provider = aws.us_east_1 + + statement { + effect = "Allow" + actions = [ + "ec2:AssociateRouteTable", + "ec2:AttachInternetGateway", + "ec2:AuthorizeSecurityGroupEgress", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CancelSpotFleetRequests", + "ec2:CancelSpotInstanceRequests", 
+ "iam:CreateAccessKey", + "ec2:CreateEgressOnlyInternetGateway", + "ec2:CreateInternetGateway", + "ec2:CreateKeyPair", + "ec2:CreateFleet", + "ec2:CreateLaunchTemplate", + "ec2:CreateLaunchTemplateVersion", + "ec2:CreateRoute", + "ec2:CreateRouteTable", + "ec2:CreateSecurityGroup", + "ec2:CreateSpotDatafeedSubscription", + "ec2:CreateSubnet", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:CreateVPC", + "ec2:DeleteEgressOnlyInternetGateway", + "ec2:DeleteFleets", + "ec2:DeleteInternetGateway", + "ec2:DeleteLaunchTemplate", + "ec2:DeleteLaunchTemplateVersions", + "ec2:DeleteKeyPair", + "ec2:DeleteRoute", + "ec2:DeleteRouteTable", + "ec2:DeleteSecurityGroup", + "ec2:DeleteSpotDatafeedSubscription", + "ec2:DeleteSubnet", + "ec2:DeleteTags", + "ec2:DeleteVolume", + "ec2:DeleteVPC", + "ec2:DescribeAccountAttributes", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeEgressOnlyInternetGateways", + "ec2:DescribeFleets", + "ec2:DescribeFleetHistory", + "ec2:DescribeFleetInstances", + "ec2:DescribeImages", + "ec2:DescribeInstanceAttribute", + "ec2:DescribeInstanceCreditSpecifications", + "ec2:DescribeInstances", + "ec2:DescribeInstanceTypeOfferings", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInternetGateways", + "ec2:DescribeKeyPairs", + "ec2:DescribeLaunchTemplates", + "ec2:DescribeLaunchTemplateVersions", + "ec2:DescribeNatGateways", + "ec2:DescribeNetworkAcls", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeRegions", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSpotDatafeedSubscription", + "ec2:DescribeSpotFleetInstances", + "ec2:DescribeSpotFleetInstanceRequests", + "ec2:DescribeSpotFleetRequests", + "ec2:DescribeSpotFleetRequestHistory", + "ec2:DescribeSpotInstanceRequests", + "ec2:DescribeSpotPriceHistory", + "ec2:DescribeSubnets", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DescribeVpcAttribute", + "ec2:DescribeVpcClassicLink", + "ec2:DescribeVpcClassicLinkDnsSupport", + "ec2:DescribeVpcs", + 
"ec2:DescribeVpnGateways", + "ec2:DetachInternetGateway", + "ec2:DisassociateRouteTable", + "ec2:GetLaunchTemplateData", + "ec2:GetSpotPlacementScores", + "ec2:ImportKeyPair", + "ec2:ModifyFleet", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyLaunchTemplate", + "ec2:ModifySpotFleetRequest", + "ec2:ModifySubnetAttribute", + "ec2:ModifyVPCAttribute", + "ec2:RequestSpotInstances", + "ec2:RequestSpotFleet", + "ec2:ResetInstanceAttribute", + "ec2:RevokeSecurityGroupEgress", + "ec2:RevokeSecurityGroupIngress", + "ec2:RunInstances", + "ec2:SendSpotInstanceInterruptions", + "ec2:TerminateInstances", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeTargetGroups", + "iam:AddRoleToInstanceProfile", + "iam:AttachRolePolicy", + "iam:AttachUserPolicy", + "iam:CreateInstanceProfile", + "iam:CreatePolicy", + "iam:CreateRole", + "iam:CreateServiceLinkedRole", + "iam:CreateUser", + "iam:DeleteInstanceProfile", + "iam:DeleteLoginProfile", + "iam:DeletePolicy", + "iam:DeleteRole", + "iam:DeleteRolePolicy", + "iam:DetachRolePolicy", + "iam:GetInstanceProfile", + "iam:GetPolicy", + "iam:GetPolicyVersion", + "iam:GetRole", + "iam:GetRolePolicy", + "iam:ListAccountAliases", + "iam:ListAttachedRolePolicies", + "iam:ListAttachedUserPolicies", + "iam:ListInstanceProfiles", + "iam:ListInstanceProfilesForRole", + "iam:ListMFADevices", + "iam:ListPolicies", + "iam:ListRolePolicies", + "iam:ListRoles", + "iam:ListServiceSpecificCredentials", + "iam:ListSigningCertificates", + "iam:PassRole", + "iam:PutRolePolicy", + "iam:RemoveRoleFromInstanceProfile", + "iam:UpdateUser", + "kms:CreateAlias", + "kms:CreateKey", + "kms:Decrypt", + "kms:DeleteAlias", + "kms:DescribeKey", + "kms:Encrypt", + "kms:GetKeyPolicy", + "kms:GetKeyRotationStatus", + "kms:ListAliases", + "kms:ListKeys", + "kms:ListResourceTags", + "kms:ScheduleKeyDeletion", + "kms:TagResource", + "servicequotas:ListServiceQuotas" + ] + + resources = ["*"] + } +} diff --git a/enos/ci/service-user-iam/outputs.tf 
b/enos/ci/service-user-iam/outputs.tf new file mode 100644 index 0000000..348696b --- /dev/null +++ b/enos/ci/service-user-iam/outputs.tf @@ -0,0 +1,16 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +output "ci_role" { + value = { + name = aws_iam_role.role.name + arn = aws_iam_role.role.arn + } +} + +output "ci_role_policy" { + value = { + name = aws_iam_role_policy.role_policy.name + policy = aws_iam_role_policy.role_policy.policy + } +} diff --git a/enos/ci/service-user-iam/providers.tf b/enos/ci/service-user-iam/providers.tf new file mode 100644 index 0000000..cf2d21e --- /dev/null +++ b/enos/ci/service-user-iam/providers.tf @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +provider "aws" { + region = "us-east-1" + alias = "us_east_1" +} + +provider "aws" { + region = "us-east-2" + alias = "us_east_2" +} + +provider "aws" { + region = "us-west-1" + alias = "us_west_1" +} + +provider "aws" { + region = "us-west-2" + alias = "us_west_2" +} diff --git a/enos/ci/service-user-iam/service-quotas.tf b/enos/ci/service-user-iam/service-quotas.tf new file mode 100644 index 0000000..676bbb0 --- /dev/null +++ b/enos/ci/service-user-iam/service-quotas.tf @@ -0,0 +1,65 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +locals { + // This is the code of the service quota to request a change for. Each adjustable limit has a + // unique code. 
See, https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/servicequotas_service_quota#quota_code + subnets_per_vpcs_quota = "L-F678F1CE" + standard_spot_instance_requests_quota = "L-34B43A08" +} + +resource "aws_servicequotas_service_quota" "vpcs_per_region_us_east_1" { + provider = aws.us_east_1 + quota_code = local.subnets_per_vpcs_quota + service_code = "vpc" + value = 100 +} + +resource "aws_servicequotas_service_quota" "vpcs_per_region_us_east_2" { + provider = aws.us_east_2 + quota_code = local.subnets_per_vpcs_quota + service_code = "vpc" + value = 100 +} + +resource "aws_servicequotas_service_quota" "vpcs_per_region_us_west_1" { + provider = aws.us_west_1 + quota_code = local.subnets_per_vpcs_quota + service_code = "vpc" + value = 100 +} + +resource "aws_servicequotas_service_quota" "vpcs_per_region_us_west_2" { + provider = aws.us_west_2 + quota_code = local.subnets_per_vpcs_quota + service_code = "vpc" + value = 100 +} + +resource "aws_servicequotas_service_quota" "spot_requests_per_region_us_east_1" { + provider = aws.us_east_1 + quota_code = local.standard_spot_instance_requests_quota + service_code = "ec2" + value = 640 +} + +resource "aws_servicequotas_service_quota" "spot_requests_per_region_us_east_2" { + provider = aws.us_east_2 + quota_code = local.standard_spot_instance_requests_quota + service_code = "ec2" + value = 640 +} + +resource "aws_servicequotas_service_quota" "spot_requests_per_region_us_west_1" { + provider = aws.us_west_1 + quota_code = local.standard_spot_instance_requests_quota + service_code = "ec2" + value = 640 +} + +resource "aws_servicequotas_service_quota" "spot_requests_per_region_us_west_2" { + provider = aws.us_west_2 + quota_code = local.standard_spot_instance_requests_quota + service_code = "ec2" + value = 640 +} diff --git a/enos/ci/service-user-iam/variables.tf b/enos/ci/service-user-iam/variables.tf new file mode 100644 index 0000000..b69c07b --- /dev/null +++ 
b/enos/ci/service-user-iam/variables.tf @@ -0,0 +1,11 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +variable "repository" { + description = "The GitHub repository, either vault or vault-enterprise" + type = string + validation { + condition = contains(["vault", "vault-enterprise"], var.repository) + error_message = "Invalid repository, only vault or vault-enterprise are supported" + } +} diff --git a/enos/enos-dev-variables.hcl b/enos/enos-dev-variables.hcl new file mode 100644 index 0000000..ed7ab24 --- /dev/null +++ b/enos/enos-dev-variables.hcl @@ -0,0 +1,21 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +variable "dev_build_local_ui" { + type = bool + description = "Whether or not to build the web UI when using the local builder var. If the assets have already been built we'll still include them" + default = false +} + +variable "dev_config_mode" { + type = string + description = "The method to use when configuring Vault. When set to 'env' we will configure Vault using VAULT_ style environment variables if possible. When 'file' we'll use the HCL configuration file for all configuration options." + default = "file" // or "env" +} + +variable "dev_consul_version" { + type = string + description = "The version of Consul to use when using Consul for storage!" + default = "1.18.1" + // NOTE: You can also set the "backend_edition" if you want to use Consul Enterprise +} diff --git a/enos/enos-dynamic-config.hcl b/enos/enos-dynamic-config.hcl new file mode 100644 index 0000000..15f7de6 --- /dev/null +++ b/enos/enos-dynamic-config.hcl @@ -0,0 +1,20 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +# Code generated by pipeline generate enos-dynamic-config DO NOT EDIT. + +# This file is overwritten in CI as it contains branch specific and sometimes ever-changing values. 
+# It's checked in here so that enos samples and scenarios can be performed, just be aware that this +# might change out from under you. + +globals { + sample_attributes = { + aws_region = ["us-east-1", "us-west-2"] + distro_version_amzn = ["2023"] + distro_version_leap = ["15.6"] + distro_version_rhel = ["8.10", "9.5"] + distro_version_sles = ["15.6"] + distro_version_ubuntu = ["20.04", "24.04"] + upgrade_initial_version = ["1.17.0", "1.17.1", "1.17.2", "1.17.3", "1.17.4", "1.17.5", "1.17.6", "1.18.0-rc1", "1.18.0", "1.18.1", "1.18.2", "1.18.3", "1.18.4", "1.18.5", "1.19.0-rc1", "1.19.0", "1.19.1", "1.19.2"] + } +} diff --git a/enos/enos-globals.hcl b/enos/enos-globals.hcl new file mode 100644 index 0000000..59ec11a --- /dev/null +++ b/enos/enos-globals.hcl @@ -0,0 +1,162 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +globals { + archs = ["amd64", "arm64"] + artifact_sources = ["local", "crt", "artifactory"] + artifact_types = ["bundle", "package"] + backends = ["consul", "raft"] + backend_license_path = abspath(var.backend_license_path != null ? 
var.backend_license_path : joinpath(path.root, "./support/consul.hclic")) + backend_tag_key = "VaultStorage" + build_tags = { + "ce" = ["ui"] + "ent" = ["ui", "enterprise", "ent"] + "ent.fips1403" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_3", "ent.fips1403"] + "ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"] + "ent.hsm.fips1403" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_3", "ent.hsm.fips1403"] + } + config_modes = ["env", "file"] + consul_editions = ["ce", "ent"] + consul_versions = ["1.14.11", "1.15.7", "1.16.3", "1.17.0"] + distros = ["amzn", "leap", "rhel", "sles", "ubuntu"] + // Different distros may require different packages, or use different aliases for the same package + distro_packages = { + amzn = { + "2" = ["nc"] + "2023" = ["nc"] + } + leap = { + "15.6" = ["netcat", "openssl"] + } + rhel = { + "8.10" = ["nc"] + "9.5" = ["nc"] + } + sles = { + // When installing Vault RPM packages on a SLES AMI, the openssl package provided + // isn't named "openssl, which rpm doesn't know how to handle. Therefore we add the + // "correctly" named one in our package installation before installing Vault. + "15.6" = ["netcat-openbsd", "openssl"] + } + ubuntu = { + "20.04" = ["netcat"] + "22.04" = ["netcat"] + "24.04" = ["netcat-openbsd"] + } + } + distro_version = { + amzn = var.distro_version_amzn + leap = var.distro_version_leap + rhel = var.distro_version_rhel + sles = var.distro_version_sles + ubuntu = var.distro_version_ubuntu + } + editions = ["ce", "ent", "ent.fips1403", "ent.hsm", "ent.hsm.fips1403"] + enterprise_editions = [for e in global.editions : e if e != "ce"] + ip_versions = ["4", "6"] + package_manager = { + "amzn" = "yum" + "leap" = "zypper" + "rhel" = "yum" + "sles" = "zypper" + "ubuntu" = "apt" + } + packages = ["jq"] + // Ports that we'll open up for ingress in the security group for all target machines. 
+ // Port protocol maps to the IpProtocol schema: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_IpPermission.html + ports = { + ssh : { + description = "SSH" + port = 22 + protocol = "tcp" + }, + vault_agent : { + description = "Vault Agent" + port = 8100 + protocol = "tcp" + }, + vault_proxy : { + description = "Vault Proxy" + port = 8101 + protocol = "tcp" + }, + vault_listener : { + description = "Vault Addr listener" + port = 8200 + protocol = "tcp" + }, + vault_cluster : { + description = "Vault Cluster listener" + port = 8201 + protocol = "tcp" + }, + consul_rpc : { + description = "Consul internal communication" + port = 8300 + protocol = "tcp" + }, + consul_serf_lan_tcp : { + description = "Consul Serf LAN TCP" + port = 8301 + protocol = "tcp" + }, + consul_serf_lan_udp : { + description = "Consul Serf LAN UDP" + port = 8301 + protocol = "udp" + }, + consul_serf_wan_tcp : { + description = "Consul Serf WAN TCP" + port = 8302 + protocol = "tcp" + }, + consul_serf_wan_udp : { + description = "Consul Serf WAN UDP" + port = 8302 + protocol = "udp" + }, + consul_http : { + description = "Consul HTTP API" + port = 8500 + protocol = "tcp" + }, + consul_https : { + description = "Consul HTTPS API" + port = 8501 + protocol = "tcp" + }, + consul_grpc : { + description = "Consul gRPC API" + port = 8502 + protocol = "tcp" + }, + consul_grpc_tls : { + description = "Consul gRPC TLS API" + port = 8503 + protocol = "tcp" + }, + consul_dns_tcp : { + description = "Consul TCP DNS Server" + port = 8600 + protocol = "tcp" + }, + consul_dns_udp : { + description = "Consul UDP DNS Server" + port = 8600 + protocol = "udp" + }, + } + seals = ["awskms", "pkcs11", "shamir"] + tags = merge({ + "Project Name" : var.project_name + "Project" : "Enos", + "Environment" : "ci" + }, var.tags) + vault_install_dir = { + bundle = "/opt/vault/bin" + package = "/usr/bin" + } + vault_license_path = abspath(var.vault_license_path != null ? 
var.vault_license_path : joinpath(path.root, "./support/vault.hclic")) + vault_tag_key = "vault-cluster" + vault_disable_mlock = false +} diff --git a/enos/enos-providers.hcl b/enos/enos-providers.hcl new file mode 100644 index 0000000..89c79bd --- /dev/null +++ b/enos/enos-providers.hcl @@ -0,0 +1,26 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +provider "aws" "default" { + region = var.aws_region +} + +// This default SSH user is used in RHEL, Amazon Linux, SUSE, and Leap distros +provider "enos" "ec2_user" { + transport = { + ssh = { + user = "ec2-user" + private_key_path = abspath(var.aws_ssh_private_key_path) + } + } +} + +// This default SSH user is used in the Ubuntu distro +provider "enos" "ubuntu" { + transport = { + ssh = { + user = "ubuntu" + private_key_path = abspath(var.aws_ssh_private_key_path) + } + } +} diff --git a/enos/enos-qualities.hcl b/enos/enos-qualities.hcl new file mode 100644 index 0000000..59fcdd9 --- /dev/null +++ b/enos/enos-qualities.hcl @@ -0,0 +1,666 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +quality "consul_api_agent_host_read" { + description = "The /v1/agent/host Consul API returns host info for each node in the cluster" +} + +quality "consul_api_health_node_read" { + description = <<-EOF + The /v1/health/node/ Consul API returns health info for each node in the cluster + EOF +} + +quality "consul_api_operator_raft_config_read" { + description = "The /v1/operator/raft/configuration Consul API returns raft info for the cluster" +} + +quality "consul_autojoin_aws" { + description = "The Consul cluster auto-joins with AWS tag discovery" +} + +quality "consul_cli_validate" { + description = "The 'consul validate' command validates the Consul configuration" +} + +quality "consul_config_file" { + description = "Consul starts when configured with a configuration file" +} + +quality "consul_ha_leader_election" { + description = "The Consul cluster elects a leader node on start up" +} + +quality "consul_health_state_passing_read_nodes_minimum" { + description = <<-EOF + The Consul cluster meets the minimum number of healthy nodes according to the + /v1/health/state/passing Consul API + EOF +} + +quality "consul_operator_raft_configuration_read_voters_minimum" { + description = <<-EOF + The Consul cluster meets the minimum number of raft voters according to the + /v1/operator/raft/configuration Consul API + EOF +} + +quality "consul_service_start_client" { + description = "The Consul service starts in client mode" +} + +quality "consul_service_start_server" { + description = "The Consul service starts in server mode" +} + +quality "consul_service_systemd_notified" { + description = "The Consul binary notifies systemd when the service is active" +} + +quality "consul_service_systemd_unit" { + description = "The 'consul.service' systemd unit starts the service" +} + +quality "vault_agent_auto_auth_approle" { + description = <<-EOF + Vault running in Agent mode utilizes the approle auth method to do auto-auth via a role 
and + read secrets from a file source + EOF +} + +quality "vault_agent_log_template" { + description = global.description.verify_agent_output +} + +quality "vault_api_auth_userpass_login_write" { + description = "The v1/auth/userpass/login/ Vault API creates a token for a user" +} + +quality "vault_api_auth_userpass_user_write" { + description = "The v1/auth/userpass/users/ Vault API associates a policy with a user" +} + +quality "vault_api_identity_entity_read" { + description = <<-EOF + The v1/identity/entity Vault API returns an identity entity, has the correct metadata, and is + associated with the expected entity-alias, groups, and policies + EOF +} + +quality "vault_api_identity_entity_write" { + description = "The v1/identity/entity Vault API creates an identity entity" +} + +quality "vault_api_identity_entity_alias_write" { + description = "The v1/identity/entity-alias Vault API creates an identity entity alias" +} + +quality "vault_api_identity_group_write" { + description = "The v1/identity/group/ Vault API creates an identity group" +} + +quality "vault_api_identity_oidc_config_read" { + description = <<-EOF + The v1/identity/oidc/config Vault API returns the built-in identity secrets engine configuration + EOF +} + +quality "vault_api_identity_oidc_config_write" { + description = "The v1/identity/oidc/config Vault API configures the built-in identity secrets engine" +} + +quality "vault_api_identity_oidc_introspect_write" { + description = "The v1/identity/oidc/introspect Vault API verifies the active state of a signed OIDC token" +} + +quality "vault_api_identity_oidc_key_read" { + description = <<-EOF + The v1/identity/oidc/key Vault API returns the OIDC signing key and verifies the key's algorithm, + rotation_period, and verification_ttl are correct + EOF +} + +quality "vault_api_identity_oidc_key_write" { + description = "The v1/identity/oidc/key Vault API creates an OIDC signing key" +} + +quality 
"vault_api_identity_oidc_key_rotate_write" { + description = "The v1/identity/oidc/key//rotate Vault API rotates an OIDC signing key and applies a new verification TTL" +} + +quality "vault_api_identity_oidc_role_read" { + description = <<-EOF + The v1/identity/oidc/role Vault API returns the OIDC role and verifies that the roles key and + ttl are correct. + EOF +} + +quality "vault_api_identity_oidc_role_write" { + description = "The v1/identity/oidc/role Vault API creates an OIDC role associated with a key and clients" +} + +quality "vault_api_identity_oidc_token_read" { + description = "The v1/identity/oidc/token Vault API creates an OIDC token associated with a role" +} + +quality "vault_api_sys_auth_userpass_user_write" { + description = "The v1/sys/auth/userpass/users/ Vault API associates a superuser policy with a user" +} + +quality "vault_api_sys_config_read" { + description = <<-EOF + The v1/sys/config/sanitized Vault API returns sanitized configuration which matches our given + configuration + EOF +} + +quality "vault_api_sys_ha_status_read" { + description = "The v1/sys/ha-status Vault API returns the HA status of the cluster" +} + +quality "vault_api_sys_health_read" { + description = <<-EOF + The v1/sys/health Vault API returns the correct codes depending on the replication and + 'seal-status' of the cluster + EOF +} + +quality "vault_api_sys_host_info_read" { + description = "The v1/sys/host-info Vault API returns the host info for each node in the cluster" +} + +quality "vault_api_sys_leader_read" { + description = "The v1/sys/leader Vault API returns the cluster leader info" +} + +quality "vault_api_sys_metrics_vault_core_replication_write_undo_logs_enabled" { + description = <<-EOF + The v1/sys/metrics Vault API returns metrics and verifies that + 'Gauges[vault.core.replication.write_undo_logs]' is enabled + EOF +} + +quality "vault_api_sys_policy_write" { + description = "The v1/sys/policy Vault API writes a policy" +} + +quality 
"vault_api_sys_quotas_lease_count_read_max_leases_default" { + description = <<-EOF + The v1/sys/quotas/lease-count/default Vault API returns the lease 'count' and 'max_leases' is + set to 300,000 + EOF +} + +quality "vault_api_sys_replication_dr_primary_enable_write" { + description = <<-EOF + The v1/sys/replication/dr/primary/enable Vault API enables DR replication + EOF +} + +quality "vault_api_sys_replication_dr_primary_secondary_token_write" { + description = <<-EOF + The v1/sys/replication/dr/primary/secondary-token Vault API configures the DR replication + secondary token + EOF +} + +quality "vault_api_sys_replication_dr_secondary_enable_write" { + description = <<-EOF + The v1/sys/replication/dr/secondary/enable Vault API enables DR replication + EOF +} + +quality "vault_api_sys_replication_dr_read_connection_status_connected" { + description = <<-EOF + The v1/sys/replication/dr/status Vault API returns status info and the + 'connection_status' is correct for the given node + EOF +} + +quality "vault_api_sys_replication_dr_status_known_primary_cluster_addrs" { + description = <<-EOF + The v1/sys/replication/dr/status Vault API returns the DR replication status and + 'known_primary_cluster_address' is the expected primary cluster leader + EOF +} + +quality "vault_api_sys_replication_dr_status_read" { + description = <<-EOF + The v1/sys/replication/dr/status Vault API returns the DR replication status + EOF +} + +quality "vault_api_sys_replication_dr_status_read_cluster_address" { + description = <<-EOF + The v1/sys/replication/dr/status Vault API returns the DR replication status + and the '{primaries,secondaries}[*].cluster_address' is correct for the given node + EOF +} + +quality "vault_api_sys_replication_dr_status_read_state_not_idle" { + description = <<-EOF + The v1/sys/replication/dr/status Vault API returns the DR replication status + and the state is not idle + EOF +} + +quality "vault_api_sys_replication_performance_primary_enable_write" { + 
description = <<-EOF + The v1/sys/replication/performance/primary/enable Vault API enables performance replication + EOF +} + +quality "vault_api_sys_replication_performance_primary_secondary_token_write" { + description = <<-EOF + The v1/sys/replication/performance/primary/secondary-token Vault API configures the replication + token + EOF +} + +quality "vault_api_sys_replication_performance_secondary_enable_write" { + description = <<-EOF + The v1/sys/replication/performance/secondary/enable Vault API enables performance replication + EOF +} + +quality "vault_api_sys_replication_performance_read_connection_status_connected" { + description = <<-EOF + The v1/sys/replication/performance/status Vault API returns status info and the + 'connection_status' is correct for the given node + EOF +} + +quality "vault_api_sys_replication_performance_status_known_primary_cluster_addrs" { + description = <<-EOF + The v1/sys/replication/performance/status Vault API returns the replication status and + 'known_primary_cluster_address' is the expected primary cluster leader + EOF +} + +quality "vault_api_sys_replication_performance_status_read" { + description = <<-EOF + The v1/sys/replication/performance/status Vault API returns the performance replication status + EOF +} + +quality "vault_api_sys_replication_performance_status_read_cluster_address" { + description = <<-EOF + The v1/sys/replication/performance/status Vault API returns the performance replication status + and the '{primaries,secondaries}[*].cluster_address' is correct for the given node + EOF +} + +quality "vault_api_sys_replication_performance_status_read_state_not_idle" { + description = <<-EOF + The v1/sys/replication/performance/status Vault API returns the performance replication status + and the state is not idle + EOF +} + +quality "vault_api_sys_replication_status_read" { + description = <<-EOF + The v1/sys/replication/status Vault API returns the performance replication status of the + cluster + EOF +} + 
+quality "vault_api_sys_seal_status_api_read_matches_sys_health" { + description = <<-EOF + The v1/sys/seal-status Vault API and v1/sys/health Vault API agree on the health of each node + and the cluster + EOF +} + +quality "vault_api_sys_sealwrap_rewrap_read_entries_processed_eq_entries_succeeded_post_rewrap" { + description = global.description.verify_seal_rewrap_entries_processed_eq_entries_succeeded_post_rewrap +} + +quality "vault_api_sys_sealwrap_rewrap_read_entries_processed_gt_zero_post_rewrap" { + description = global.description.verify_seal_rewrap_entries_processed_is_gt_zero_post_rewrap +} + +quality "vault_api_sys_sealwrap_rewrap_read_is_running_false_post_rewrap" { + description = global.description.verify_seal_rewrap_is_running_false_post_rewrap +} + +quality "vault_api_sys_sealwrap_rewrap_read_no_entries_fail_during_rewrap" { + description = global.description.verify_seal_rewrap_no_entries_fail_during_rewrap +} + +quality "vault_api_sys_step_down_steps_down" { + description = <<-EOF + The v1/sys/step-down Vault API forces the cluster leader to step down and initiates a new leader + election + EOF +} + +quality "vault_api_sys_storage_raft_autopilot_configuration_read" { + description = <<-EOF + The /sys/storage/raft/autopilot/configuration Vault API returns the autopilot configuration of + the cluster + EOF +} + +quality "vault_api_sys_storage_raft_autopilot_state_read" { + description = <<-EOF + The v1/sys/storage/raft/autopilot/state Vault API returns the raft autopilot state of the + cluster + EOF +} + +quality "vault_api_sys_storage_raft_autopilot_upgrade_info_read_status_matches" { + description = <<-EOF + The v1/sys/storage/raft/autopilot/state Vault API returns the raft autopilot state and the + 'upgrade_info.status' matches our expected state + EOF +} + +quality "vault_api_sys_storage_raft_autopilot_upgrade_info_target_version_read_matches_candidate" { + description = <<-EOF + The v1/sys/storage/raft/autopilot/state Vault API returns the raft 
autopilot state and the + 'upgrade_info.target_version' matches the candidate version + EOF +} + +quality "vault_api_sys_storage_raft_configuration_read" { + description = <<-EOF + The v1/sys/storage/raft/configuration Vault API returns the raft configuration of the cluster + EOF +} + +quality "vault_api_sys_storage_raft_remove_peer_write_removes_peer" { + description = <<-EOF + The v1/sys/storage/raft/remove-peer Vault API removes the desired node from the raft sub-system + EOF +} + +quality "vault_api_sys_version_history_keys" { + description = <<-EOF + The v1/sys/version-history Vault API returns the cluster version history and the 'keys' data + includes our target version + EOF +} + +quality "vault_api_sys_version_history_key_info" { + description = <<-EOF + The v1/sys/version-history Vault API returns the cluster version history and the + 'key_info["$expected_version"]' data is present for the expected version and the 'build_date' + matches the expected build_date. + EOF +} + +quality "vault_artifact_bundle" { + description = "The candidate binary packaged as a zip bundle is used for testing" +} + +quality "vault_artifact_deb" { + description = "The candidate binary packaged as a deb package is used for testing" +} + +quality "vault_artifact_rpm" { + description = "The candidate binary packaged as an rpm package is used for testing" +} + +quality "vault_audit_log" { + description = "The Vault audit sub-system is enabled with the log and writes to a log" +} + +quality "vault_audit_log_secrets" { + description = "The Vault audit sub-system does not output secret values" +} + +quality "vault_audit_socket" { + description = "The Vault audit sub-system is enabled with the socket and writes to a socket" +} + +quality "vault_audit_syslog" { + description = "The Vault audit sub-system is enabled with the syslog and writes to syslog" +} + +quality "vault_auto_unseals_after_autopilot_upgrade" { + description = "Vault auto-unseals after upgrading the cluster with 
autopilot" +} + +quality "vault_autojoins_new_nodes_into_initialized_cluster" { + description = "Vault successfully auto-joins new nodes into an existing cluster" +} + +quality "vault_autojoin_aws" { + description = "Vault auto-joins nodes using AWS tag discovery" +} + +quality "vault_autopilot_upgrade_leader_election" { + description = <<-EOF + Vault elects a new leader after upgrading the cluster with autopilot + EOF +} + +quality "vault_cli_audit_enable" { + description = "The 'vault audit enable' command enables audit devices" +} + +quality "vault_cli_auth_enable_approle" { + description = "The 'vault auth enable approle' command enables the approle auth method" +} + +quality "vault_cli_operator_members" { + description = "The 'vault operator members' command returns the expected list of members" +} + +quality "vault_cli_operator_raft_remove_peer" { + description = "The 'vault operator remove-peer' command removes the desired node" +} + +quality "vault_cli_operator_step_down" { + description = "The 'vault operator step-down' command forces the cluster leader to step down" +} + +quality "vault_cli_policy_write" { + description = "The 'vault policy write' command writes a policy" +} + +quality "vault_cli_status_exit_code" { + description = <<-EOF + The 'vault status' command exits with the correct code depending on expected seal status + EOF +} + +quality "vault_cluster_upgrade_in_place" { + description = <<-EOF + Vault starts with existing data and configuration in-place migrates the data + EOF +} + +quality "vault_config_env_variables" { + description = "Vault starts when configured primarily with environment variables" +} + +quality "vault_config_file" { + description = "Vault starts when configured primarily with a configuration file" +} + +quality "vault_config_log_level" { + description = "The 'log_level' config stanza modifies its log level" +} + +quality "vault_config_multiseal_is_toggleable" { + description = <<-EOF + The Vault Cluster can be configured 
with a single unseal method regardless of the + 'enable_multiseal' config value + EOF +} + +quality "vault_init" { + description = "Vault initializes the cluster with the given seal parameters" +} + +quality "vault_journal_secrets" { + description = "The Vault systemd journal does not output secret values" +} + +quality "vault_license_required_ent" { + description = "Vault Enterprise requires a license in order to start" +} + +quality "vault_listener_ipv4" { + description = "Vault operates on ipv4 TCP listeners" +} + +quality "vault_listener_ipv6" { + description = "Vault operates on ipv6 TCP listeners" +} + +quality "vault_mount_auth" { + description = "Vault mounts the auth engine" +} + +quality "vault_mount_identity" { + description = "Vault mounts the identity engine" +} + +quality "vault_mount_kv" { + description = "Vault mounts the kv engine" +} + +quality "vault_multiseal_enable" { + description = <<-EOF + The Vault Cluster starts with 'enable_multiseal' and multiple auto-unseal methods. + EOF +} + +quality "vault_proxy_auto_auth_approle" { + description = <<-EOF + Vault Proxy utilizes the approle auth method to auto-auth via a role and secrets from a file. 
+ EOF +} + +quality "vault_proxy_cli_access" { + description = <<-EOF + The Vault CLI accesses tokens through the Vault proxy without a VAULT_TOKEN available + EOF +} + +quality "vault_radar_index_create" { + description = "Vault radar is able to create an index from KVv2 mounts" +} + +quality "vault_radar_scan_file" { + description = "Vault radar is able to scan a file for secrets" +} + +quality "vault_raft_voters" { + description = global.description.verify_raft_cluster_all_nodes_are_voters +} + +quality "vault_raft_removed_after_restart" { + description = "A removed raft node will continue reporting as removed after the process is restarted" +} + +quality "vault_raft_removed_statuses" { + description = "A removed raft node reports itself as removed in the status endpoints" +} + +quality "vault_raft_removed_cant_rejoin" { + description = "A removed raft node cannot rejoin a cluster while it still has old vault/raft data" +} + +quality "vault_raft_removed_rejoin_after_deletion" { + description = "A removed raft node can rejoin a cluster if it has deleted its old vault/raft data" +} + +quality "vault_replication_ce_disabled" { + description = "Replication is not enabled for CE editions" +} + +quality "vault_replication_ent_dr_available" { + description = "DR replication is available on Enterprise" +} + +quality "vault_replication_ent_pr_available" { + description = "PR replication is available on Enterprise" +} + +quality "vault_seal_awskms" { + description = "Vault auto-unseals with the awskms seal" +} + +quality "vault_seal_shamir" { + description = <<-EOF + Vault manually unseals with the shamir seal when given the expected number of 'key_shares' + EOF +} + +quality "vault_seal_pkcs11" { + description = "Vault auto-unseals with the pkcs11 seal" +} + +quality "vault_secrets_kv_read" { + description = "Vault kv secrets engine data is readable" +} + +quality "vault_secrets_kv_write" { + description = "Vault kv secrets engine data is writable" +} + + +quality 
"vault_secrets_ldap_write_config" { + description = "The Vault LDAP secrets engine is configured with the correct settings" +} + +quality "vault_service_restart" { + description = "Vault restarts with existing configuration" +} + +quality "vault_service_start" { + description = "Vault starts with the configuration" +} + +quality "vault_service_systemd_notified" { + description = "The Vault binary notifies systemd when the service is active" +} + +quality "vault_service_systemd_unit" { + description = "The 'vault.service' systemd unit starts the service" +} + +quality "vault_status_seal_type" { + description = global.description.verify_seal_type +} + +quality "vault_storage_backend_consul" { + description = "Vault operates using Consul for storage" +} + +quality "vault_storage_backend_raft" { + description = "Vault operates using integrated Raft storage" +} + +quality "vault_ui_assets" { + description = global.description.verify_ui +} + +quality "vault_ui_test" { + description = <<-EOF + The Vault Web UI test suite runs against a live Vault server with the embedded static assets + EOF +} + +quality "vault_unseal_ha_leader_election" { + description = "Vault performs a leader election after it is unsealed" +} + +quality "vault_version_build_date" { + description = "Vault's reported build date matches our expectations" +} + +quality "vault_version_edition" { + description = "Vault's reported edition matches our expectations" +} + +quality "vault_version_release" { + description = "Vault's reported release version matches our expectations" +} + +quality "vault_billing_start_date" { + description = "Vault's billing start date has adjusted to the latest billing year" +} diff --git a/enos/enos-terraform.hcl b/enos/enos-terraform.hcl new file mode 100644 index 0000000..a8f82f9 --- /dev/null +++ b/enos/enos-terraform.hcl @@ -0,0 +1,32 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +terraform_cli "default" { + plugin_cache_dir = var.terraform_plugin_cache_dir != null ? abspath(var.terraform_plugin_cache_dir) : null +} + +terraform_cli "dev" { + plugin_cache_dir = var.terraform_plugin_cache_dir != null ? abspath(var.terraform_plugin_cache_dir) : null + + provider_installation { + dev_overrides = { + "registry.terraform.io/hashicorp-forge/enos" = try(abspath("../../terraform-provider-enos/dist"), null) + } + direct {} + } +} + +terraform "default" { + required_version = ">= 1.2.0" + + required_providers { + aws = { + source = "hashicorp/aws" + } + + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.4.0" + } + } +} diff --git a/enos/enos-variables.hcl b/enos/enos-variables.hcl new file mode 100644 index 0000000..169e9f1 --- /dev/null +++ b/enos/enos-variables.hcl @@ -0,0 +1,219 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +variable "artifactory_username" { + type = string + description = "The username to use when testing an artifact from artifactory" + default = null + sensitive = true +} + +variable "artifactory_token" { + type = string + description = "The token to use when authenticating to artifactory" + default = null + sensitive = true +} + +variable "artifactory_host" { + type = string + description = "The artifactory host to search for vault artifacts" + default = "https://artifactory.hashicorp.engineering/artifactory" +} + +variable "artifactory_repo" { + type = string + description = "The artifactory repo to search for vault artifacts" + default = "hashicorp-crt-stable-local*" +} + +variable "aws_region" { + description = "The AWS region where we'll create infrastructure" + type = string + default = "us-east-1" +} + +variable "aws_ssh_keypair_name" { + description = "The AWS keypair to use for SSH" + type = string + default = "enos-ci-ssh-key" +} + +variable "aws_ssh_private_key_path" { + description = "The path to the AWS keypair 
private key" + type = string + default = "./support/private_key.pem" +} + +variable "backend_edition" { + description = "The backend release edition if applicable" + type = string + default = "ce" // or "ent" +} + +variable "backend_instance_type" { + description = "The instance type to use for the Vault backend. Must be arm64/nitro compatible" + type = string + default = "t4g.small" +} + +variable "backend_license_path" { + description = "The license for the backend if applicable (Consul Enterprise)" + type = string + default = null +} + +variable "backend_log_level" { + description = "The server log level for the backend. Supported values include 'trace', 'debug', 'info', 'warn', 'error'" + type = string + default = "trace" +} + +variable "project_name" { + description = "The description of the project" + type = string + default = "vault-enos-integration" +} + +variable "distro_version_amzn" { + description = "The version of Amazon Linux 2 to use" + type = string + default = "2023" // or "2", though pkcs11 has not been tested with 2 +} + +variable "distro_version_leap" { + description = "The version of openSUSE leap to use" + type = string + default = "15.6" +} + +variable "distro_version_rhel" { + description = "The version of RHEL to use" + type = string + default = "9.5" // or "8.10" +} + +variable "distro_version_sles" { + description = "The version of SUSE SLES to use" + type = string + default = "15.6" +} + +variable "distro_version_ubuntu" { + description = "The version of ubuntu to use" + type = string + default = "24.04" // or "20.04", "22.04" +} + +variable "tags" { + description = "Tags that will be applied to infrastructure resources that support tagging" + type = map(string) + default = null +} + +variable "terraform_plugin_cache_dir" { + description = "The directory to cache Terraform modules and providers" + type = string + default = null +} + +variable "ui_test_filter" { + type = string + description = "A test filter to limit the ui tests to 
execute. Will be appended to the ember test command as '-f=\"\"'" + default = null +} + +variable "ui_run_tests" { + type = bool + description = "Whether to run the UI tests or not. If set to false a cluster will be created but no tests will be run" + default = true +} + +variable "vault_artifact_type" { + description = "The type of Vault artifact to use when installing Vault from artifactory. It should be 'package' for .deb or .rpm package and 'bundle' for .zip bundles" + default = "bundle" +} + +variable "vault_artifact_path" { + description = "Path to CRT generated or local vault.zip bundle" + type = string + default = "/tmp/vault.zip" +} + +variable "vault_build_date" { + description = "The build date for Vault artifact" + type = string + default = "" +} + +variable "vault_enable_audit_devices" { + description = "If true every audit device will be enabled" + type = bool + default = true +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" + default = "/opt/vault/bin" +} + +variable "vault_instance_count" { + description = "How many instances to create for the Vault cluster" + type = number + default = 3 +} + +variable "vault_license_path" { + description = "The path to a valid Vault enterprise edition license. This is only required for non-ce editions" + type = string + default = null +} + +variable "vault_local_build_tags" { + description = "The build tags to pass to the Go compiler for builder:local variants" + type = list(string) + default = null +} + +variable "vault_log_level" { + description = "The server log level for Vault logs. Supported values (in order of detail) are trace, debug, info, warn, and err." 
+ type = string + default = "trace" +} + +variable "vault_product_version" { + description = "The version of Vault we are testing" + type = string + default = null +} + +variable "vault_radar_license_path" { + description = "The license for vault-radar which is used to verify the audit log" + type = string + default = null +} + +variable "vault_revision" { + description = "The git sha of Vault artifact we are testing" + type = string + default = null +} + +variable "vault_upgrade_initial_version" { + description = "The Vault release to deploy before upgrading" + type = string + default = "1.13.13" +} + +variable "verify_aws_secrets_engine" { + description = "If true we'll verify AWS secrets engines behavior. Because of user creation restrictions in Doormat AWS accounts, only turn this on for CI, as it depends on resources that exist only in those accounts" + type = bool + default = false +} + +variable "verify_log_secrets" { + description = "If true and var.vault_enable_audit_devices is true we'll verify that the audit log does not contain unencrypted secrets. Requires var.vault_radar_license_path to be set to a valid license file." + type = bool + default = false +} diff --git a/enos/modules/artifact/metadata/main.tf b/enos/modules/artifact/metadata/main.tf new file mode 100644 index 0000000..111ed3a --- /dev/null +++ b/enos/modules/artifact/metadata/main.tf @@ -0,0 +1,229 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +// Given the architecture, distro, version, edition, and desired package type, +// return the metadata for an artifact. 
+ +variable "arch" { + description = "The artifact platform architecture" + type = string + + validation { + condition = contains(["amd64", "arm64", "s390x"], var.arch) + error_message = <<-EOF + arch must be one of "amd64", "arm64", "s390x" + EOF + } +} + +variable "distro" { + description = "The target operating system distro" + type = string + + validation { + condition = contains(["amzn", "leap", "rhel", "sles", "ubuntu"], var.distro) + error_message = <<-EOF + distro must be one of "amzn", "leap", "rhel", "sles", "ubuntu" + EOF + } +} + +variable "edition" { + description = "The Vault edition. E.g. ent or ent.hsm.fips1403" + type = string + + validation { + condition = contains(["oss", "ce", "ent", "ent.fips1402", "ent.fips1403", "ent.hsm", "ent.hsm.fips1402", "ent.hsm.fips1403"], var.edition) + error_message = <<-EOF + edition must be one of "oss", "ce", "ent", "ent.fips1402", "ent.fips1403", "ent.hsm", "ent.hsm.fips1402", "ent.hsm.fips1403" + EOF + } +} + +variable "package_type" { + description = "The artifact packaging type" + type = string + + validation { + condition = contains(["package", "rpm", "deb", "zip", "bundle"], var.package_type) + error_message = <<-EOF + package_type must be one of "package", "rpm", "deb", "zip", "bundle" + EOF + } +} + +variable "vault_version" { + description = "The version of Vault or Vault Enterprise. 
E.g 1.18.2, 1.19.0-rc1, 1.18.5+ent.hsm" + type = string +} + +locals { + package_extension_amd64_deb = "-1_amd64.deb" + package_extension_amd64_rpm = "-1.x86_64.rpm" + package_extension_arm64_deb = "-1_arm64.deb" + package_extension_arm64_rpm = "-1.aarch64.rpm" + package_extension_s390x_deb = "-1_s390x.deb" + package_extension_s390x_rpm = "-1.s390x.rpm" + + // file name extensions for the install packages of vault for the various architectures, distributions and editions + package_extensions = { + amd64 = { + amzn = local.package_extension_amd64_rpm + leap = local.package_extension_amd64_rpm + rhel = local.package_extension_amd64_rpm + sles = local.package_extension_amd64_rpm + ubuntu = local.package_extension_amd64_deb + } + arm64 = { + amzn = local.package_extension_arm64_rpm + leap = local.package_extension_arm64_rpm + rhel = local.package_extension_arm64_rpm + sles = local.package_extension_arm64_rpm + ubuntu = local.package_extension_arm64_deb + } + s390x = { + amzn = null + leap = local.package_extension_s390x_rpm + rhel = local.package_extension_s390x_rpm + sles = local.package_extension_s390x_rpm + ubuntu = local.package_extension_s390x_deb + } + } + + package_prefixes_rpm = { + "ce" = "vault-" + "ent" = "vault-enterprise-", + "ent.fips1402" = "vault-enterprise-fips1402-", + "ent.fips1403" = "vault-enterprise-fips1403-", + "ent.hsm" = "vault-enterprise-hsm-", + "ent.hsm.fips1402" = "vault-enterprise-hsm-fips1402-", + "ent.hsm.fips1403" = "vault-enterprise-hsm-fips1403-", + "oss" = "vault-" + } + + package_prefixes_deb = { + "ce" = "vault_" + "ent" = "vault-enterprise_", + "ent.fips1402" = "vault-enterprise-fips1402_", + "ent.fips1403" = "vault-enterprise-fips1403_", + "ent.hsm" = "vault-enterprise-hsm_", + "ent.hsm.fips1402" = "vault-enterprise-hsm-fips1402_", + "ent.hsm.fips1403" = "vault-enterprise-hsm-fips1403_", + "oss" = "vault_" + } + + // file name prefixes for the install packages of vault for the various distributions and artifact types (package or 
bundle) + package_prefixes = { + amzn = local.package_prefixes_rpm, + leap = local.package_prefixes_rpm, + rhel = local.package_prefixes_rpm, + sles = local.package_prefixes_rpm, + ubuntu = local.package_prefixes_deb, + } + + // Stable release Artifactory repos for packages + release_repo_rpm = "hashicorp-rpm-release-local*" + release_repo_apt = "hashicorp-apt-release-local*" + release_repos = { + amzn = local.release_repo_rpm + leap = local.release_repo_rpm + rhel = local.release_repo_rpm + sles = local.release_repo_rpm + ubuntu = local.release_repo_apt + } + release_repo = local.release_repos[var.distro] + + // Stable release Artifactory paths for packages + release_package_rpm_arch = { + "amd64" = "x86_64", + "arm64" = "aarch64", + "s390x" = "s390x", + } + release_path_deb = "pool/${var.arch}/main" + release_sub_path_rpm = "${local.release_package_rpm_arch[var.arch]}/stable" + release_path_distro = { + amzn = { + "2" = "AmazonLinux/2/${local.release_sub_path_rpm}" + "2023" = "AmazonLinux/latest/${local.release_sub_path_rpm}" + "latest" = "AmazonLinux/latest/${local.release_sub_path_rpm}" + } + leap = { + "15.6" = "RHEL/9/${local.release_sub_path_rpm}" + } + rhel = { + "8.10" = "RHEL/8/${local.release_sub_path_rpm}" + "9.5" = "RHEL/9/${local.release_sub_path_rpm}" + } + sles = { + "15.6" = "RHEL/9/${local.release_sub_path_rpm}" + } + ubuntu = { + "20.04" = local.release_path_deb, + "22.04" = local.release_path_deb, + "24.04" = local.release_path_deb, + } + } + release_paths = local.release_path_distro[var.distro] + + // Reduce our supported inputs into two classes: system packages or a binary bundled into a zip archive. + package_type = contains(["package", "deb", "rpm"], var.package_type) ? "package" : "bundle" + + // Get the base version. This might still include pre-release metadata + // E.g. 
1.18.2 => 1.18.2, 1.18.0-rc1 => 1.18.0-rc1, 1.18.0+ent.hsm => 1.18.0
+  semverish_version = try(split("+", var.vault_version)[0], var.vault_version)
+
+  // Determine the "product name". This corresponds to properties on the artifactory artifact.
+  product_name = strcontains(var.edition, "ent") ? "vault-enterprise" : "vault"
+
+  // Create the "product version", which corresponds to properties on the artifactory artifact.
+  // It's the version along with edition metadata. We normalize all enterprise editions to +ent.
+  // E.g. 1.16.0-beta1+ent.hsm.fips1403 -> 1.16.0-beta1+ent
+  product_version = strcontains(var.edition, "ent") ? "${local.semverish_version}+ent" : local.semverish_version
+
+  // Convert product version strings to a syntax that matches deb and rpm packaging.
+  // E.g. 1.16.0-beta+ent -> 1.16.0~beta+ent
+  package_version = replace(local.product_version, "-", "~")
+
+  // Get the bundle version. If the vault_version includes metadata, use it. Otherwise add the edition to it.
+  bundle_version = strcontains(var.vault_version, "+") ? var.vault_version : strcontains(var.edition, "ent") ? "${var.vault_version}+${var.edition}" : var.vault_version
+
+  // Prefix for the artifact name. E.g.: vault_, vault-, vault-enterprise_, vault-enterprise-hsm-fips1402-, etc
+  artifact_name_prefix = local.package_type == "package" ? local.package_prefixes[var.distro][var.edition] : "vault_"
+
+  // The version for the artifact name.
+  artifact_version = local.package_type == "package" ? local.package_version : local.bundle_version
+
+  // Suffix and extension for the artifact name. E.g.: _linux_<arch>.zip
+  artifact_name_extension = local.package_type == "package" ?
local.package_extensions[var.arch][var.distro] : "_linux_${var.arch}.zip" + + // Combine prefix/suffix/extension together to form the artifact name + artifact_name = "${local.artifact_name_prefix}${local.artifact_version}${local.artifact_name_extension}" + +} +output "artifact_name" { + value = local.artifact_name +} + +output "package_type" { + value = local.package_type +} + +output "package_version" { + value = local.package_version +} + +output "product_name" { + value = local.product_name +} + +output "product_version" { + value = local.product_version +} + +output "release_repo" { + value = local.release_repo +} + +output "release_paths" { + value = local.release_paths +} diff --git a/enos/modules/autopilot_upgrade_storageconfig/main.tf b/enos/modules/autopilot_upgrade_storageconfig/main.tf new file mode 100644 index 0000000..3fcb77a --- /dev/null +++ b/enos/modules/autopilot_upgrade_storageconfig/main.tf @@ -0,0 +1,10 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +variable "vault_product_version" {} + +output "storage_addl_config" { + value = { + autopilot_upgrade_version = var.vault_product_version + } +} diff --git a/enos/modules/backend_consul/main.tf b/enos/modules/backend_consul/main.tf new file mode 100644 index 0000000..1d0a514 --- /dev/null +++ b/enos/modules/backend_consul/main.tf @@ -0,0 +1,56 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_version = ">= 1.2.0" + + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.4.4" + } + } +} + +locals { + bin_path = "${var.install_dir}/consul" +} + +resource "enos_bundle_install" "consul" { + for_each = var.hosts + + destination = var.install_dir + release = merge(var.release, { product = "consul" }) + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +resource "enos_consul_start" "consul" { + for_each = enos_bundle_install.consul + + bin_path = local.bin_path + data_dir = var.data_dir + config_dir = var.config_dir + config = { + data_dir = var.data_dir + datacenter = "dc1" + retry_join = ["provider=aws tag_key=${var.cluster_tag_key} tag_value=${var.cluster_name}"] + server = true + bootstrap_expect = length(var.hosts) + log_level = var.log_level + log_file = var.log_dir + } + license = var.license + unit_name = "consul" + username = "consul" + + transport = { + ssh = { + host = var.hosts[each.key].public_ip + } + } +} diff --git a/enos/modules/backend_consul/outputs.tf b/enos/modules/backend_consul/outputs.tf new file mode 100644 index 0000000..5f78e3f --- /dev/null +++ b/enos/modules/backend_consul/outputs.tf @@ -0,0 +1,18 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +output "private_ips" { + description = "Consul cluster target host private_ips" + value = [for host in var.hosts : host.private_ip] +} + +output "public_ips" { + description = "Consul cluster target host public_ips" + value = [for host in var.hosts : host.public_ip] +} + +output "hosts" { + description = "The Consul cluster instances that were created" + + value = var.hosts +} diff --git a/enos/modules/backend_consul/variables.tf b/enos/modules/backend_consul/variables.tf new file mode 100644 index 0000000..c404c0f --- /dev/null +++ b/enos/modules/backend_consul/variables.tf @@ -0,0 +1,77 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +variable "cluster_name" { + type = string + description = "The name of the Consul cluster" + default = null +} + +variable "cluster_tag_key" { + type = string + description = "The tag key for searching for Consul nodes" + default = null +} + +variable "config_dir" { + type = string + description = "The directory where the consul will write config files" + default = "/etc/consul.d" +} + +variable "data_dir" { + type = string + description = "The directory where the consul will store data" + default = "/opt/consul/data" +} + +variable "hosts" { + description = "The target machines host addresses to use for the consul cluster" + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) +} + +variable "install_dir" { + type = string + description = "The directory where the consul binary will be installed" + default = "/opt/consul/bin" +} + +variable "license" { + type = string + sensitive = true + description = "The consul enterprise license" + default = null +} + +variable "log_dir" { + type = string + description = "The directory where the consul will write log files" + default = "/var/log/consul.d" +} + +variable "log_level" { + type = string + description = "The consul service log level" + default = "info" + + validation { + condition = contains(["trace", "debug", "info", "warn", "error"], var.log_level) + error_message = "The log_level must be one of 'trace', 'debug', 'info', 'warn', or 'error'." + } +} + +variable "release" { + type = object({ + version = string + edition = string + }) + description = "Consul release version and edition to install from releases.hashicorp.com" + default = { + version = "1.15.3" + edition = "ce" + } +} diff --git a/enos/modules/backend_raft/main.tf b/enos/modules/backend_raft/main.tf new file mode 100644 index 0000000..415b058 --- /dev/null +++ b/enos/modules/backend_raft/main.tf @@ -0,0 +1,70 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +// Shim module to handle the fact that Vault doesn't actually need a backend module when we use raft. +terraform { + required_version = ">= 1.2.0" + + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.4.0" + } + } +} + +variable "cluster_name" { + default = null +} + +variable "cluster_tag_key" { + default = null +} + +variable "config_dir" { + default = null +} + +variable "consul_log_level" { + default = null +} + +variable "data_dir" { + default = null +} + +variable "install_dir" { + default = null +} + +variable "license" { + default = null +} + +variable "log_dir" { + default = null +} + +variable "log_level" { + default = null +} + +variable "release" { + default = null +} + +variable "hosts" { + default = null +} + +output "private_ips" { + value = [for host in var.hosts : host.private_ip] +} + +output "public_ips" { + value = [for host in var.hosts : host.public_ip] +} + +output "hosts" { + value = var.hosts +} diff --git a/enos/modules/build_artifactory_artifact/main.tf b/enos/modules/build_artifactory_artifact/main.tf new file mode 100644 index 0000000..97d4e04 --- /dev/null +++ b/enos/modules/build_artifactory_artifact/main.tf @@ -0,0 +1,101 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.2.3" + } + } +} + +variable "artifactory_username" { + type = string + description = "The username to use when connecting to artifactory" + default = null +} + +variable "artifactory_token" { + type = string + description = "The token to use when connecting to artifactory" + default = null + sensitive = true +} + +variable "artifactory_host" { + type = string + description = "The artifactory host to search for vault artifacts" + default = "https://artifactory.hashicorp.engineering/artifactory" +} + +variable "artifactory_repo" { + type = string + description = "The artifactory repo to search for vault artifacts" + default = "hashicorp-crt-stable-local*" +} + +variable "arch" {} +variable "artifact_type" {} +variable "artifact_path" {} +variable "distro" {} +variable "edition" {} +variable "revision" {} +variable "product_version" {} +variable "build_tags" { default = null } +variable "bundle_path" { default = null } +variable "goarch" { default = null } +variable "goos" { default = null } + +module "artifact_metadata" { + source = "../artifact/metadata" + + arch = var.arch + distro = var.distro + edition = var.edition + package_type = var.artifact_type + vault_version = var.product_version +} + +data "enos_artifactory_item" "vault" { + username = var.artifactory_username + token = var.artifactory_token + name = module.artifact_metadata.artifact_name + host = var.artifactory_host + repo = var.artifactory_repo + path = "${module.artifact_metadata.product_name}/*" + properties = tomap({ + "commit" = var.revision, + "product-name" = module.artifact_metadata.product_name, + "product-version" = module.artifact_metadata.product_version, + }) +} + +output "url" { + value = data.enos_artifactory_item.vault.results[0].url + description = "The artifactory download url for the artifact" +} + +output "sha256" { + value = 
data.enos_artifactory_item.vault.results[0].sha256 + description = "The sha256 checksum for the artifact" +} + +output "size" { + value = data.enos_artifactory_item.vault.results[0].size + description = "The size in bytes of the artifact" +} + +output "name" { + value = data.enos_artifactory_item.vault.results[0].name + description = "The name of the artifact" +} + +output "vault_artifactory_release" { + value = { + url = data.enos_artifactory_item.vault.results[0].url + sha256 = data.enos_artifactory_item.vault.results[0].sha256 + username = var.artifactory_username + token = var.artifactory_token + } +} diff --git a/enos/modules/build_artifactory_package/main.tf b/enos/modules/build_artifactory_package/main.tf new file mode 100644 index 0000000..2444b1e --- /dev/null +++ b/enos/modules/build_artifactory_package/main.tf @@ -0,0 +1,115 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "arch" { + type = string + description = "The architecture for the desired artifact" +} + +variable "artifactory_username" { + type = string + description = "The username to use when connecting to Artifactory" +} + +variable "artifactory_token" { + type = string + description = "The token to use when connecting to Artifactory" + sensitive = true +} + +variable "artifactory_host" { + type = string + description = "The Artifactory host to search for Vault artifacts" + default = "https://artifactory.hashicorp.engineering/artifactory" +} + +variable "distro" { + type = string + description = "The distro for the desired artifact (ubuntu or rhel)" +} + +variable "distro_version" { + type = string + description = "The RHEL version for .rpm packages" +} + +variable "edition" { + type = string + description = "The edition of Vault to use" +} + +variable "product_version" { + type = string + description = "The version of Vault to use" +} + +// 
Shim variables that we don't use but include to satisfy the build module "interface" +variable "artifact_path" { default = null } +variable "artifact_type" { default = null } +variable "artifactory_repo" { default = null } +variable "build_tags" { default = null } +variable "build_ui" { default = null } +variable "bundle_path" { default = null } +variable "goarch" { default = null } +variable "goos" { default = null } +variable "revision" { default = null } + +module "artifact_metadata" { + source = "../artifact/metadata" + + arch = var.arch + distro = var.distro + edition = var.edition + package_type = var.artifact_type != null ? var.artifact_type : "package" + vault_version = var.product_version +} + +data "enos_artifactory_item" "vault" { + username = var.artifactory_username + token = var.artifactory_token + name = module.artifact_metadata.artifact_name + host = var.artifactory_host + repo = module.artifact_metadata.release_repo + path = module.artifact_metadata.release_paths[var.distro_version] +} + +output "results" { + value = data.enos_artifactory_item.vault.results +} + +output "url" { + value = data.enos_artifactory_item.vault.results[0].url + description = "The artifactory download url for the artifact" +} + +output "sha256" { + value = data.enos_artifactory_item.vault.results[0].sha256 + description = "The sha256 checksum for the artifact" +} + +output "size" { + value = data.enos_artifactory_item.vault.results[0].size + description = "The size in bytes of the artifact" +} + +output "name" { + value = data.enos_artifactory_item.vault.results[0].name + description = "The name of the artifact" +} + +output "release" { + value = { + url = data.enos_artifactory_item.vault.results[0].url + sha256 = data.enos_artifactory_item.vault.results[0].sha256 + username = var.artifactory_username + token = var.artifactory_token + } +} diff --git a/enos/modules/build_crt/main.tf b/enos/modules/build_crt/main.tf new file mode 100644 index 0000000..d113c9c --- /dev/null 
+++ b/enos/modules/build_crt/main.tf @@ -0,0 +1,37 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +# Shim module since CRT provided things will use the crt_bundle_path variable +variable "bundle_path" { + default = "/tmp/vault.zip" +} + +variable "build_tags" { + default = ["ui"] +} + +variable "goarch" { + type = string + description = "The Go architecture target" + default = "amd64" +} + +variable "goos" { + type = string + description = "The Go OS target" + default = "linux" +} + +variable "artifactory_host" { default = null } +variable "artifactory_repo" { default = null } +variable "artifactory_username" { default = null } +variable "artifactory_token" { default = null } +variable "arch" { default = null } +variable "artifact_path" { default = null } +variable "artifact_type" { default = null } +variable "build_ui" { default = null } +variable "distro" { default = null } +variable "distro_version" { default = null } +variable "edition" { default = null } +variable "revision" { default = null } +variable "product_version" { default = null } diff --git a/enos/modules/build_local/main.tf b/enos/modules/build_local/main.tf new file mode 100644 index 0000000..1ad1338 --- /dev/null +++ b/enos/modules/build_local/main.tf @@ -0,0 +1,69 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "artifact_path" { + description = "Where to create the zip bundle of the Vault build" +} + +variable "build_tags" { + type = list(string) + description = "The build tags to pass to the Go compiler" +} + +variable "build_ui" { + type = bool + description = "Whether or not we should build the UI when creating the local build" + default = true +} + +variable "goarch" { + type = string + description = "The Go architecture target" + default = "amd64" +} + +variable "goos" { + type = string + description = "The Go OS target" + default = "linux" +} + +variable "artifactory_host" { default = null } +variable "artifactory_repo" { default = null } +variable "artifactory_username" { default = null } +variable "artifactory_token" { default = null } +variable "arch" { default = null } +variable "artifact_type" { default = null } +variable "distro" { default = null } +variable "distro_version" { default = null } +variable "edition" { default = null } +variable "revision" { default = null } +variable "product_version" { default = null } + +module "local_metadata" { + source = "../get_local_metadata" +} + +resource "enos_local_exec" "build" { + scripts = [abspath("${path.module}/scripts/build.sh")] + + environment = { + BASE_VERSION = module.local_metadata.version_base + BIN_PATH = abspath("${path.module}/../../../dist") + BUILD_UI = tostring(var.build_ui) + BUNDLE_PATH = abspath(var.artifact_path) + GO_TAGS = join(" ", var.build_tags) + GOARCH = var.goarch + GOOS = var.goos + PRERELEASE_VERSION = module.local_metadata.version_pre + VERSION_METADATA = module.local_metadata.version_meta + } +} diff --git a/enos/modules/build_local/scripts/build.sh b/enos/modules/build_local/scripts/build.sh new file mode 100755 index 0000000..b7b0950 --- /dev/null +++ b/enos/modules/build_local/scripts/build.sh @@ -0,0 +1,24 @@ 
+#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -eux -o pipefail + +# Install yarn so we can build the UI +npm install --global yarn || true + +export CGO_ENABLED=0 + +root_dir="$(git rev-parse --show-toplevel)" +pushd "$root_dir" > /dev/null + +if [ -n "$BUILD_UI" ] && [ "$BUILD_UI" = "true" ]; then + make ci-build-ui +fi + +make ci-build + +popd > /dev/null + +echo "--> Bundling $BIN_PATH/* to $BUNDLE_PATH" +zip -r -j "$BUNDLE_PATH" "$BIN_PATH/" diff --git a/enos/modules/choose_follower_host/main.tf b/enos/modules/choose_follower_host/main.tf new file mode 100644 index 0000000..881d5ca --- /dev/null +++ b/enos/modules/choose_follower_host/main.tf @@ -0,0 +1,17 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +variable "followers" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The vault follower hosts" +} + +output "chosen_follower" { + value = { + 0 : try(var.followers[0], null) + } +} diff --git a/enos/modules/create_vpc/main.tf b/enos/modules/create_vpc/main.tf new file mode 100644 index 0000000..55cbf01 --- /dev/null +++ b/enos/modules/create_vpc/main.tf @@ -0,0 +1,114 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +data "aws_availability_zones" "available" { + state = "available" + + filter { + name = "zone-name" + values = ["*"] + } +} + +resource "random_string" "cluster_id" { + length = 8 + lower = true + upper = false + numeric = false + special = false +} + +resource "aws_vpc" "vpc" { + // Always set the ipv4 cidr block as it's required in "dual-stack" VPCs which we create. 
+ cidr_block = var.ipv4_cidr + enable_dns_hostnames = true + enable_dns_support = true + assign_generated_ipv6_cidr_block = var.ip_version == 6 + + tags = merge( + var.common_tags, + { + "Name" = var.name + }, + ) +} + +resource "aws_subnet" "subnet" { + count = length(data.aws_availability_zones.available.names) + vpc_id = aws_vpc.vpc.id + availability_zone = data.aws_availability_zones.available.names[count.index] + + // IPV4, but since we need to support ipv4 connections from the machine running enos, we're + // always going to need ipv4 available. + map_public_ip_on_launch = true + cidr_block = cidrsubnet(var.ipv4_cidr, 8, count.index) + + // IPV6, only set these when we want to run in ipv6 mode. + assign_ipv6_address_on_creation = var.ip_version == 6 + ipv6_cidr_block = var.ip_version == 6 ? cidrsubnet(aws_vpc.vpc.ipv6_cidr_block, 4, count.index) : null + + tags = merge( + var.common_tags, + { + "Name" = "${var.name}-subnet-${data.aws_availability_zones.available.names[count.index]}" + }, + ) +} + +resource "aws_internet_gateway" "ipv4" { + vpc_id = aws_vpc.vpc.id + + tags = merge( + var.common_tags, + { + "Name" = "${var.name}-igw" + }, + ) +} + +resource "aws_egress_only_internet_gateway" "ipv6" { + count = var.ip_version == 6 ? 1 : 0 + vpc_id = aws_vpc.vpc.id +} + +resource "aws_route" "igw_ipv4" { + route_table_id = aws_vpc.vpc.default_route_table_id + destination_cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.ipv4.id +} + +resource "aws_route" "igw_ipv6" { + count = var.ip_version == 6 ? 1 : 0 + route_table_id = aws_vpc.vpc.default_route_table_id + destination_ipv6_cidr_block = "::/0" + egress_only_gateway_id = aws_egress_only_internet_gateway.ipv6[0].id +} + +resource "aws_security_group" "default" { + vpc_id = aws_vpc.vpc.id + + ingress { + description = "allow_ingress_from_all" + from_port = 0 + to_port = 0 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + ipv6_cidr_blocks = var.ip_version == 6 ? 
["::/0"] : null + } + + egress { + description = "allow_egress_from_all" + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + ipv6_cidr_blocks = var.ip_version == 6 ? ["::/0"] : null + } + + tags = merge( + var.common_tags, + { + "Name" = "${var.name}-default" + }, + ) +} diff --git a/enos/modules/create_vpc/outputs.tf b/enos/modules/create_vpc/outputs.tf new file mode 100644 index 0000000..d54fbd8 --- /dev/null +++ b/enos/modules/create_vpc/outputs.tf @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +output "id" { + description = "Created VPC ID" + value = aws_vpc.vpc.id +} + +output "ipv4_cidr" { + description = "The VPC subnet CIDR for ipv4 mode" + value = var.ipv4_cidr +} + +output "ipv6_cidr" { + description = "The VPC subnet CIDR for ipv6 mode" + value = aws_vpc.vpc.ipv6_cidr_block +} + +output "cluster_id" { + description = "A unique string associated with the VPC" + value = random_string.cluster_id.result +} diff --git a/enos/modules/create_vpc/variables.tf b/enos/modules/create_vpc/variables.tf new file mode 100644 index 0000000..80c64ea --- /dev/null +++ b/enos/modules/create_vpc/variables.tf @@ -0,0 +1,37 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +variable "name" { + type = string + default = "vault-ci" + description = "The name of the VPC" +} + +variable "ip_version" { + type = number + default = 4 + description = "The IP version to use for the default subnet" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } +} + +variable "ipv4_cidr" { + type = string + default = "10.13.0.0/16" + description = "The CIDR block for the VPC when using IPV4 mode" +} + +variable "environment" { + description = "Name of the environment." 
+ type = string + default = "vault-ci" +} + +variable "common_tags" { + description = "Tags to set for all resources" + type = map(string) + default = { "Project" : "vault-ci" } +} diff --git a/enos/modules/disable_selinux/main.tf b/enos/modules/disable_selinux/main.tf new file mode 100644 index 0000000..7ed2f52 --- /dev/null +++ b/enos/modules/disable_selinux/main.tf @@ -0,0 +1,31 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The hosts to install packages on" +} + +resource "enos_remote_exec" "make_selinux_permissive" { + for_each = var.hosts + + scripts = [abspath("${path.module}/scripts/make-selinux-permissive.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/disable_selinux/scripts/make-selinux-permissive.sh b/enos/modules/disable_selinux/scripts/make-selinux-permissive.sh new file mode 100644 index 0000000..cedc23d --- /dev/null +++ b/enos/modules/disable_selinux/scripts/make-selinux-permissive.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +if ! type getenforce &> /dev/null; then + exit 0 +fi + +if sudo getenforce | grep Enforcing; then + sudo setenforce 0 +fi diff --git a/enos/modules/ec2_info/main.tf b/enos/modules/ec2_info/main.tf new file mode 100644 index 0000000..12fecf0 --- /dev/null +++ b/enos/modules/ec2_info/main.tf @@ -0,0 +1,264 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +# Note: in order to use the openSUSE Leap AMIs, the AWS account in use must "subscribe" +# and accept SUSE's terms of use. You can do this at the links below. 
If the AWS account +# you are using is already subscribed, this confirmation will be displayed on each page. +# openSUSE Leap arm64 subscription: https://aws.amazon.com/marketplace/server/procurement?productId=a516e959-df54-4035-bb1a-63599b7a6df9 +# openSUSE Leap amd64 subscription: https://aws.amazon.com/marketplace/server/procurement?productId=5535c495-72d4-4355-b169-54ffa874f849 + +locals { + architectures = toset(["arm64", "x86_64"]) + amazon_owner_id = "591542846629" + canonical_owner_id = "099720109477" + suse_owner_id = "013907871322" + opensuse_owner_id = "679593333241" + redhat_owner_id = "309956199498" + ids = { + // NOTE: If you modify these versions you'll probably also need to update the `softhsm_install` + // module to match. + "arm64" = { + "amzn" = { + "2" = data.aws_ami.amzn_2["arm64"].id + "2023" = data.aws_ami.amzn_2023["arm64"].id + } + "leap" = { + "15.6" = data.aws_ami.leap_15["arm64"].id + } + "rhel" = { + "8.10" = data.aws_ami.rhel_8["arm64"].id + "9.5" = data.aws_ami.rhel_9["arm64"].id + } + "sles" = { + "15.6" = data.aws_ami.sles_15["arm64"].id + } + "ubuntu" = { + "20.04" = data.aws_ami.ubuntu_2004["arm64"].id + "22.04" = data.aws_ami.ubuntu_2204["arm64"].id + "24.04" = data.aws_ami.ubuntu_2404["arm64"].id + } + } + "amd64" = { + "amzn" = { + "2" = data.aws_ami.amzn_2["x86_64"].id + "2023" = data.aws_ami.amzn_2023["x86_64"].id + } + "leap" = { + "15.6" = data.aws_ami.leap_15["x86_64"].id + } + "rhel" = { + "8.10" = data.aws_ami.rhel_8["x86_64"].id + "9.5" = data.aws_ami.rhel_9["x86_64"].id + } + "sles" = { + "15.6" = data.aws_ami.sles_15["x86_64"].id + } + "ubuntu" = { + "20.04" = data.aws_ami.ubuntu_2004["x86_64"].id + "22.04" = data.aws_ami.ubuntu_2204["x86_64"].id + "24.04" = data.aws_ami.ubuntu_2404["x86_64"].id + } + } + } +} + +data "aws_ami" "amzn_2" { + most_recent = true + for_each = local.architectures + + filter { + name = "name" + values = ["amzn2-ami-ecs-hvm-2.0*"] + } + + filter { + name = "architecture" + values = 
[each.value] + } + + owners = [local.amazon_owner_id] +} + +data "aws_ami" "amzn_2023" { + most_recent = true + for_each = local.architectures + + filter { + name = "name" + values = ["al2023-ami-ecs-hvm*"] + } + + filter { + name = "architecture" + values = [each.value] + } + + owners = [local.amazon_owner_id] +} + +data "aws_ami" "leap_15" { + most_recent = true + for_each = local.architectures + + filter { + name = "name" + values = ["openSUSE-Leap-15-6*"] + } + + filter { + name = "architecture" + values = [each.value] + } + + owners = [local.opensuse_owner_id] +} + +data "aws_ami" "rhel_8" { + most_recent = true + for_each = local.architectures + + filter { + name = "name" + values = ["RHEL-8.10*HVM-20*"] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } + + filter { + name = "architecture" + values = [each.value] + } + + owners = [local.redhat_owner_id] +} + +data "aws_ami" "rhel_9" { + most_recent = true + for_each = local.architectures + + filter { + name = "name" + values = ["RHEL-9.5*HVM-20*"] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } + + filter { + name = "architecture" + values = [each.value] + } + + owners = [local.redhat_owner_id] +} + +data "aws_ami" "sles_15" { + most_recent = true + for_each = local.architectures + + filter { + name = "name" + values = ["suse-sles-15-sp6-v*-hvm-*"] + } + + filter { + name = "architecture" + values = [each.value] + } + + owners = [local.suse_owner_id] +} + +data "aws_ami" "ubuntu_2004" { + most_recent = true + for_each = local.architectures + + filter { + name = "name" + values = ["ubuntu/images/hvm-ssd/ubuntu-*-20.04-*-server-*"] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } + + filter { + name = "architecture" + values = [each.value] + } + + owners = [local.canonical_owner_id] +} + +data "aws_ami" "ubuntu_2204" { + most_recent = true + for_each = local.architectures + + filter { + name = "name" + values = 
["ubuntu/images/hvm-ssd/ubuntu-*-22.04-*-server-*"] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } + + filter { + name = "architecture" + values = [each.value] + } + + owners = [local.canonical_owner_id] +} + +data "aws_ami" "ubuntu_2404" { + most_recent = true + for_each = local.architectures + + filter { + name = "name" + values = ["ubuntu/images/hvm-ssd-gp3/ubuntu-noble-24.04-*-server-*"] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } + + filter { + name = "architecture" + values = [each.value] + } + + owners = [local.canonical_owner_id] +} + +data "aws_region" "current" {} + +data "aws_availability_zones" "available" { + state = "available" + + filter { + name = "zone-name" + values = ["*"] + } +} + +output "ami_ids" { + value = local.ids +} + +output "current_region" { + value = data.aws_region.current +} + +output "availability_zones" { + value = data.aws_availability_zones.available +} diff --git a/enos/modules/generate_dr_operation_token/main.tf b/enos/modules/generate_dr_operation_token/main.tf new file mode 100644 index 0000000..c582c0c --- /dev/null +++ b/enos/modules/generate_dr_operation_token/main.tf @@ -0,0 +1,82 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + random = { + source = "hashicorp/random" + version = ">= 3.4.3" + } + } +} + +variable "ip_version" { + type = number + description = "The IP version used for the Vault TCP listener" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } +} + +variable "primary_leader_host" { + type = object({ + ipv6 = string + private_ip = string + public_ip = string + }) + description = "The primary cluster leader host" +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +variable "storage_backend" { + type = string + description = "The storage backend to use for the Vault cluster" +} + +locals { + token_id = random_uuid.token_id.id + dr_operation_token = enos_remote_exec.fetch_dr_operation_token.stdout +} + +resource "random_uuid" "token_id" {} + +resource "enos_remote_exec" "fetch_dr_operation_token" { + depends_on = [random_uuid.token_id] + environment = { + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_TOKEN = var.vault_root_token + STORAGE_BACKEND = var.storage_backend + } + + scripts = [abspath("${path.module}/scripts/configure-vault-dr-primary.sh")] + + transport = { + ssh = { + host = var.primary_leader_host.public_ip + } + } +} + +output "dr_operation_token" { + value = local.dr_operation_token +} diff --git a/enos/modules/generate_dr_operation_token/scripts/configure-vault-dr-primary.sh b/enos/modules/generate_dr_operation_token/scripts/configure-vault-dr-primary.sh new file mode 100755 index 0000000..eae9b10 --- /dev/null +++ 
b/enos/modules/generate_dr_operation_token/scripts/configure-vault-dr-primary.sh @@ -0,0 +1,51 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +binpath="${VAULT_INSTALL_DIR}/vault" + +fail() { + echo "$1" >&2 + exit 1 +} + +# Check required environment variables +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" +[[ -z "$STORAGE_BACKEND" ]] && fail "STORAGE_BACKEND env variable has not been set" + +# Define the policy content +policy_content() { + cat << EOF +path "sys/replication/dr/secondary/promote" { + capabilities = [ "update" ] +} + +path "sys/replication/dr/secondary/update-primary" { + capabilities = [ "update" ] +} +EOF + if [ "$STORAGE_BACKEND" = "raft" ]; then + cat << EOF +path "sys/storage/raft/autopilot/state" { + capabilities = [ "update", "read" ] +} +EOF + fi +} + +# Write the policy +$binpath policy write dr-secondary-promotion - <<< "$(policy_content)" &> /dev/null + +# Configure the failover handler token role +$binpath write auth/token/roles/failover-handler \ + allowed_policies=dr-secondary-promotion \ + orphan=true \ + renewable=false \ + token_type=batch &> /dev/null + +# Create a token for the failover handler role and output the token only +$binpath token create -field=token -role=failover-handler -ttl=8h diff --git a/enos/modules/generate_failover_secondary_token/main.tf b/enos/modules/generate_failover_secondary_token/main.tf new file mode 100644 index 0000000..537b0af --- /dev/null +++ b/enos/modules/generate_failover_secondary_token/main.tf @@ -0,0 +1,98 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + random = { + source = "hashicorp/random" + version = ">= 3.4.3" + } + } +} + +variable "ip_version" { + type = number + description = "The IP version used for the Vault TCP listener" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } +} + +variable "primary_leader_host" { + type = object({ + ipv6 = string + private_ip = string + public_ip = string + }) + description = "The primary cluster leader host" +} + +variable "retry_interval" { + type = string + default = "2" + description = "How long to wait between retries" +} + +variable "secondary_public_key" { + type = string + description = "The secondary public key" +} + +variable "timeout" { + type = string + default = "15" + description = "How many seconds to wait before timing out" +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +locals { + primary_leader_addr = var.ip_version == 6 ? 
var.primary_leader_host.ipv6 : var.primary_leader_host.private_ip + token_id = random_uuid.token_id.id + secondary_token = enos_remote_exec.fetch_secondary_token.stdout +} + +resource "random_uuid" "token_id" {} + +resource "enos_remote_exec" "fetch_secondary_token" { + depends_on = [random_uuid.token_id] + environment = { + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + RETRY_INTERVAL = var.retry_interval + TIMEOUT_SECONDS = var.timeout + SECONDARY_PUBLIC_KEY = var.secondary_public_key + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/scripts/generate-failover-secondary-token.sh")] + + + transport = { + ssh = { + host = var.primary_leader_host.public_ip + } + } +} + +output "secondary_token" { + value = local.secondary_token +} diff --git a/enos/modules/generate_failover_secondary_token/scripts/generate-failover-secondary-token.sh b/enos/modules/generate_failover_secondary_token/scripts/generate-failover-secondary-token.sh new file mode 100644 index 0000000..05da4a4 --- /dev/null +++ b/enos/modules/generate_failover_secondary_token/scripts/generate-failover-secondary-token.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash +## Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "${VAULT_INSTALL_DIR}" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "${RETRY_INTERVAL}" ]] && fail "RETRY_INTERVAL env variable has not been set" +[[ -z "${TIMEOUT_SECONDS}" ]] && fail "TIMEOUT_SECONDS env variable has not been set" +[[ -z "${VAULT_ADDR}" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "${VAULT_TOKEN}" ]] && fail "VAULT_TOKEN env variable has not been set" +[[ -z "${SECONDARY_PUBLIC_KEY}" ]] && fail "SECONDARY_PUBLIC_KEY env variable has not been set" + +binpath="${VAULT_INSTALL_DIR}"/vault +test -x "${binpath}" || fail "unable to locate vault binary at ${binpath}" + +begin_time=$(date +%s) +end_time=$((begin_time + TIMEOUT_SECONDS)) +while [ "$(date +%s)" -lt "${end_time}" ]; do + if secondary_token=$(${binpath} write -field token sys/replication/dr/primary/secondary-token id="${VAULT_TOKEN}" secondary_public_key="${SECONDARY_PUBLIC_KEY}"); then + echo "${secondary_token}" + exit 0 + fi + + sleep "${RETRY_INTERVAL}" +done + +fail "Timed out trying to generate secondary token" diff --git a/enos/modules/generate_secondary_public_key/main.tf b/enos/modules/generate_secondary_public_key/main.tf new file mode 100644 index 0000000..761972d --- /dev/null +++ b/enos/modules/generate_secondary_public_key/main.tf @@ -0,0 +1,77 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + random = { + source = "hashicorp/random" + version = ">= 3.4.3" + } + } +} + +variable "ip_version" { + type = number + description = "The IP version used for the Vault TCP listener" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } +} + +variable "primary_leader_host" { + type = object({ + ipv6 = string + private_ip = string + public_ip = string + }) + description = "The primary cluster leader host" +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +locals { + primary_leader_addr = var.ip_version == 6 ? var.primary_leader_host.ipv6 : var.primary_leader_host.private_ip + token_id = random_uuid.token_id.id + secondary_public_key = enos_remote_exec.fetch_secondary_public_key.stdout +} + +resource "random_uuid" "token_id" {} + +resource "enos_remote_exec" "fetch_secondary_public_key" { + depends_on = [random_uuid.token_id] + environment = { + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + } + + inline = ["${var.vault_install_dir}/vault write -field secondary_public_key -f sys/replication/dr/secondary/generate-public-key"] + + transport = { + ssh = { + host = var.primary_leader_host.public_ip + } + } +} + +output "secondary_public_key" { + value = local.secondary_public_key +} diff --git a/enos/modules/generate_secondary_token/main.tf b/enos/modules/generate_secondary_token/main.tf new file mode 100644 index 0000000..41b2774 --- /dev/null +++ b/enos/modules/generate_secondary_token/main.tf @@ -0,0 +1,86 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + random = { + source = "hashicorp/random" + version = ">= 3.4.3" + } + } +} + +variable "ip_version" { + type = number + description = "The IP version used for the Vault TCP listener" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } +} + +variable "primary_leader_host" { + type = object({ + ipv6 = string + private_ip = string + public_ip = string + }) + description = "The primary cluster leader host" +} + +variable "replication_type" { + type = string + description = "The type of replication to perform" + + validation { + condition = contains(["dr", "performance"], var.replication_type) + error_message = "The replication_type must be either dr or performance" + } +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +locals { + primary_leader_addr = var.ip_version == 6 ? 
var.primary_leader_host.ipv6 : var.primary_leader_host.private_ip + token_id = random_uuid.token_id.id + secondary_token = enos_remote_exec.fetch_secondary_token.stdout +} + +resource "random_uuid" "token_id" {} + +resource "enos_remote_exec" "fetch_secondary_token" { + depends_on = [random_uuid.token_id] + environment = { + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + } + + inline = ["${var.vault_install_dir}/vault write sys/replication/${var.replication_type}/primary/secondary-token id=${local.token_id} |sed -n '/^wrapping_token:/p' |awk '{print $2}'"] + + transport = { + ssh = { + host = var.primary_leader_host.public_ip + } + } +} + +output "secondary_token" { + value = local.secondary_token +} diff --git a/enos/modules/get_local_metadata/main.tf b/enos/modules/get_local_metadata/main.tf new file mode 100644 index 0000000..2b1ee6d --- /dev/null +++ b/enos/modules/get_local_metadata/main.tf @@ -0,0 +1,58 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +resource "enos_local_exec" "get_build_date" { + scripts = [abspath("${path.module}/scripts/build_date.sh")] +} + +resource "enos_local_exec" "get_revision" { + inline = ["git rev-parse HEAD"] +} + +resource "enos_local_exec" "get_version" { + inline = ["${abspath("${path.module}/scripts/version.sh")} version"] +} + +resource "enos_local_exec" "get_version_base" { + inline = ["${abspath("${path.module}/scripts/version.sh")} version-base"] +} + +resource "enos_local_exec" "get_version_pre" { + inline = ["${abspath("${path.module}/scripts/version.sh")} version-pre"] +} + +resource "enos_local_exec" "get_version_meta" { + inline = ["${abspath("${path.module}/scripts/version.sh")} version-meta"] +} + +output "build_date" { + value = trimspace(enos_local_exec.get_build_date.stdout) +} + +output "revision" { + value = 
trimspace(enos_local_exec.get_revision.stdout) +} + +output "version" { + value = trimspace(enos_local_exec.get_version.stdout) +} + +output "version_base" { + value = trimspace(enos_local_exec.get_version_base.stdout) +} + +output "version_pre" { + value = trimspace(enos_local_exec.get_version_pre.stdout) +} + +output "version_meta" { + value = trimspace(enos_local_exec.get_version_meta.stdout) +} diff --git a/enos/modules/get_local_metadata/scripts/build_date.sh b/enos/modules/get_local_metadata/scripts/build_date.sh new file mode 100755 index 0000000..ea63c74 --- /dev/null +++ b/enos/modules/get_local_metadata/scripts/build_date.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -eu -o pipefail + +pushd "$(git rev-parse --show-toplevel)" > /dev/null +make ci-get-date +popd > /dev/null diff --git a/enos/modules/get_local_metadata/scripts/version.sh b/enos/modules/get_local_metadata/scripts/version.sh new file mode 100755 index 0000000..ed1238b --- /dev/null +++ b/enos/modules/get_local_metadata/scripts/version.sh @@ -0,0 +1,97 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +set -euo pipefail + +# Get the full version information +# this is only needed for local enos builds in order to get the default version from version_base.go +# this should match the default version that the binary has been built with +# CRT release builds use the new static version from ./release/VERSION +function version() { + local version + local prerelease + local metadata + + version=$(version_base) + prerelease=$(version_pre) + metadata=$(version_metadata) + + if [ -n "$metadata" ] && [ -n "$prerelease" ]; then + echo "$version-$prerelease+$metadata" + elif [ -n "$metadata" ]; then + echo "$version+$metadata" + elif [ -n "$prerelease" ]; then + echo "$version-$prerelease" + else + echo "$version" + fi +} + +# Get the base version +function version_base() { + : "${VAULT_VERSION:=""}" + + if [ -n "$VAULT_VERSION" ]; then + echo "$VAULT_VERSION" + return + fi + + : "${VERSION_FILE:=$(repo_root)/version/VERSION}" + awk -F- '{ print $1 }' < "$VERSION_FILE" +} + +# Get the version pre-release +function version_pre() { + : "${VAULT_PRERELEASE:=""}" + + if [ -n "$VAULT_PRERELEASE" ]; then + echo "$VAULT_PRERELEASE" + return + fi + + : "${VERSION_FILE:=$(repo_root)/version/VERSION}" + awk -F- '{ print $2 }' < "$VERSION_FILE" +} + +# Get the version metadata, which is commonly the edition +function version_metadata() { + : "${VAULT_METADATA:=""}" + + if [ -n "$VAULT_METADATA" ]; then + echo "$VAULT_METADATA" + return + fi + + : "${VERSION_FILE:=$(repo_root)/version/version_base.go}" + awk '$1 == "VersionMetadata" && $2 == "=" { gsub(/"/, "", $3); print $3 }' < "$VERSION_FILE" +} + +# Determine the root directory of the repository +function repo_root() { + git rev-parse --show-toplevel +} + +# Run Enos local +function main() { + case $1 in + version) + version + ;; + version-base) + version_base + ;; + version-pre) + version_pre + ;; + version-meta) + version_metadata + ;; + *) + echo "unknown sub-command" >&2 + exit 1 + ;; + 
esac +} + +main "$@" diff --git a/enos/modules/install_packages/main.tf b/enos/modules/install_packages/main.tf new file mode 100644 index 0000000..007012b --- /dev/null +++ b/enos/modules/install_packages/main.tf @@ -0,0 +1,136 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +locals { + arch = { + "amd64" = "x86_64" + "arm64" = "aarch64" + } + package_manager = { + "amzn" = "yum" + "opensuse-leap" = "zypper" + "rhel" = "dnf" + "sles" = "zypper" + "ubuntu" = "apt" + } + distro_repos = { + "sles" = { + "15.6" = "https://download.opensuse.org/repositories/network:utilities/SLE_15_SP6/network:utilities.repo" + } + "rhel" = { + "8.10" = "https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm" + "9.5" = "https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm" + } + } +} + +variable "packages" { + type = list(string) + default = [] +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The hosts to install packages on" +} + +variable "timeout" { + type = number + description = "The max number of seconds to wait before timing out. This is applied to each step so total timeout will be longer." + default = 120 +} + +variable "retry_interval" { + type = number + description = "How many seconds to wait between each retry" + default = 2 +} + +resource "enos_host_info" "hosts" { + for_each = var.hosts + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +# Synchronize repositories on remote machines. This does not update packages but only ensures that +# the remote hosts are configured with default upstream repositories that have been refreshed to +# the latest metedata. 
+resource "enos_remote_exec" "synchronize_repos" { + for_each = var.hosts + + environment = { + DISTRO = enos_host_info.hosts[each.key].distro + PACKAGE_MANAGER = local.package_manager[enos_host_info.hosts[each.key].distro] + RETRY_INTERVAL = var.retry_interval + TIMEOUT_SECONDS = var.timeout + } + + scripts = [abspath("${path.module}/scripts/synchronize-repos.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +# Add any additional repositories. +resource "enos_remote_exec" "add_repos" { + for_each = var.hosts + depends_on = [enos_remote_exec.synchronize_repos] + + environment = { + DISTRO_REPOS = try(local.distro_repos[enos_host_info.hosts[each.key].distro][enos_host_info.hosts[each.key].distro_version], "__none") + PACKAGE_MANAGER = local.package_manager[enos_host_info.hosts[each.key].distro] + RETRY_INTERVAL = var.retry_interval + TIMEOUT_SECONDS = var.timeout + } + + scripts = [abspath("${path.module}/scripts/add-repos.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +# Install any required packages. +resource "enos_remote_exec" "install_packages" { + for_each = var.hosts + depends_on = [ + enos_remote_exec.synchronize_repos, + enos_remote_exec.add_repos, + ] + + environment = { + PACKAGE_MANAGER = local.package_manager[enos_host_info.hosts[each.key].distro] + PACKAGES = length(var.packages) >= 1 ? join(" ", var.packages) : "__skip" + RETRY_INTERVAL = var.retry_interval + TIMEOUT_SECONDS = var.timeout + } + + scripts = [abspath("${path.module}/scripts/install-packages.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/install_packages/scripts/add-repos.sh b/enos/modules/install_packages/scripts/add-repos.sh new file mode 100644 index 0000000..47f3279 --- /dev/null +++ b/enos/modules/install_packages/scripts/add-repos.sh @@ -0,0 +1,84 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "${PACKAGE_MANAGER}" ]] && fail "PACKAGE_MANAGER env variable has not been set" +[[ -z "${RETRY_INTERVAL}" ]] && fail "RETRY_INTERVAL env variable has not been set" +[[ -z "${TIMEOUT_SECONDS}" ]] && fail "TIMEOUT_SECONDS env variable has not been set" + +# Add any repositories that have have been passed in +add_repos() { + # If we don't have any repos on the list for this distro, no action needed. + if [ ${#DISTRO_REPOS[@]} -lt 1 ]; then + echo "DISTRO_REPOS is empty; No repos required for the packages for this Linux distro." + return 0 + fi + + case $PACKAGE_MANAGER in + apt) + # NOTE: We do not currently add any apt repositories in our scenarios. I suspect if that time + # comes we'll need to add support for apt-key here. + for repo in ${DISTRO_REPOS}; do + if [ "$repo" == "__none" ]; then + continue + fi + sudo add-apt-repository "${repo}" + done + ;; + dnf) + for repo in ${DISTRO_REPOS}; do + if [ "$repo" == "__none" ]; then + continue + fi + sudo dnf install -y "${repo}" + sudo dnf makecache -y + done + ;; + yum) + for repo in ${DISTRO_REPOS}; do + if [ "$repo" == "__none" ]; then + continue + fi + sudo yum install -y "${repo}" + sudo yum makecache -y + done + ;; + zypper) + # Add each repo + for repo in ${DISTRO_REPOS}; do + if [ "$repo" == "__none" ]; then + continue + fi + if sudo zypper lr "${repo}"; then + echo "A repo named ${repo} already exists, skipping..." 
+ continue + fi + sudo zypper --gpg-auto-import-keys --non-interactive addrepo "${repo}" + done + sudo zypper --gpg-auto-import-keys ref + sudo zypper --gpg-auto-import-keys refs + ;; + *) + fail "Unsupported package manager: ${PACKAGE_MANAGER}" + ;; + esac +} + +begin_time=$(date +%s) +end_time=$((begin_time + TIMEOUT_SECONDS)) +while [ "$(date +%s)" -lt "$end_time" ]; do + if add_repos; then + exit 0 + fi + + sleep "$RETRY_INTERVAL" +done + +fail "Timed out waiting for distro repos to be set up" diff --git a/enos/modules/install_packages/scripts/install-packages.sh b/enos/modules/install_packages/scripts/install-packages.sh new file mode 100644 index 0000000..0b9bfde --- /dev/null +++ b/enos/modules/install_packages/scripts/install-packages.sh @@ -0,0 +1,105 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "${RETRY_INTERVAL}" ]] && fail "RETRY_INTERVAL env variable has not been set" +[[ -z "${TIMEOUT_SECONDS}" ]] && fail "TIMEOUT_SECONDS env variable has not been set" +[[ -z "${PACKAGES}" ]] && fail "PACKAGES env variable has not been set" +[[ -z "${PACKAGE_MANAGER}" ]] && fail "PACKAGE_MANAGER env variable has not been set" + +# Install packages based on the provided packages and package manager. We assume that the repositories +# have already been synchronized by the repo setup that is a prerequisite for this script. +install_packages() { + if [[ "${PACKAGES}" = "__skip" ]]; then + return 0 + fi + + set -x + echo "Installing Dependencies: ${PACKAGES}" + + # Use the default package manager of the current Linux distro to install packages + case $PACKAGE_MANAGER in + apt) + for package in ${PACKAGES}; do + if dpkg -s "${package}"; then + echo "Skipping installation of ${package} because it is already installed" + continue + else + echo "Installing ${package}" + local output + if ! 
output=$(sudo apt install -y "${package}" 2>&1); then + echo "Failed to install ${package}: ${output}" 1>&2 + return 1 + fi + fi + done + ;; + dnf) + for package in ${PACKAGES}; do + if rpm -q "${package}"; then + echo "Skipping installation of ${package} because it is already installed" + continue + else + echo "Installing ${package}" + local output + if ! output=$(sudo dnf -y install "${package}" 2>&1); then + echo "Failed to install ${package}: ${output}" 1>&2 + return 1 + fi + fi + done + ;; + yum) + for package in ${PACKAGES}; do + if rpm -q "${package}"; then + echo "Skipping installation of ${package} because it is already installed" + continue + else + echo "Installing ${package}" + local output + if ! output=$(sudo yum -y install "${package}" 2>&1); then + echo "Failed to install ${package}: ${output}" 1>&2 + return 1 + fi + fi + done + ;; + zypper) + for package in ${PACKAGES}; do + if rpm -q "${package}"; then + echo "Skipping installation of ${package} because it is already installed" + continue + else + echo "Installing ${package}" + local output + if ! output=$(sudo zypper --non-interactive install -y -l --force-resolution "${package}" 2>&1); then + echo "Failed to install ${package}: ${output}" 1>&2 + return 1 + fi + fi + done + ;; + *) + fail "No matching package manager provided." + ;; + esac +} + +begin_time=$(date +%s) +end_time=$((begin_time + TIMEOUT_SECONDS)) +while [[ "$(date +%s)" -lt "${end_time}" ]]; do + if install_packages; then + exit 0 + fi + + sleep "${RETRY_INTERVAL}" +done + +fail "Timed out waiting for packages to install" diff --git a/enos/modules/install_packages/scripts/synchronize-repos.sh b/enos/modules/install_packages/scripts/synchronize-repos.sh new file mode 100644 index 0000000..d5890fe --- /dev/null +++ b/enos/modules/install_packages/scripts/synchronize-repos.sh @@ -0,0 +1,151 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "${PACKAGE_MANAGER}" ]] && fail "PACKAGE_MANAGER env variable has not been set" +[[ -z "${RETRY_INTERVAL}" ]] && fail "RETRY_INTERVAL env variable has not been set" +[[ -z "${TIMEOUT_SECONDS}" ]] && fail "TIMEOUT_SECONDS env variable has not been set" + +# The SLES AMI's do not come configured with Zypper repositories by default. To get them you +# have to run SUSEConnect to register the instance with SUSE. On the AMI this is handled +# automatically by a oneshot systemd unit called guestregister.service. This oneshot service needs +# to complete before any other repo or package steps are completed. At the time of writing it's very +# unreliable so we have to ensure that it has correctly executed ourselves or restart it. We do this +# by checking if the guestregister.service has reached the correct "inactive" state that we need. +# If it hasn't reached that state it's usually in some sort of active state, i.e. running, or it has +# failed. If it's in one of the active states we need to let it continue and check the status when +# it completes. If it has completed but is failed we'll restart the service to re-run the script that +# executes SUSEConnect. +sles_check_guestregister_service_and_restart_if_failed() { + local active_state + local failed_state + + # systemctl returns non-zero exit codes. We rely on output here because all states don't have + # their own exit code. 
+ set +e + active_state=$(sudo systemctl is-active guestregister.service) + failed_state=$(sudo systemctl is-failed guestregister.service) + set -e + + case "$active_state" in + active | activating | deactivating) + # It's running so we'll return 1 and get retried by the caller + echo "the guestregister.service is still in the ${active_state} state" 1>&2 + return 1 + ;; + *) + if [ "$active_state" == "inactive" ] && [ "$failed_state" == "inactive" ]; then + # The oneshot has completed and hasn't "failed" + echo "the guestregister.service is 'inactive' for both active and failed states" + return 0 + fi + + # Our service is stopped and failed, restart it and hope it works the next time + sudo systemctl restart --wait guestregister.service + ;; + esac +} + +# Check or restart the guestregister service if it has failed. If it passes do another check to make +# sure that the zypper repositories list isn't empty. +sles_ensure_suseconnect() { + local health_output + if ! health_output=$(sles_check_guestregister_service_and_restart_if_failed); then + echo "the guestregister.service failed to reach a healthy state: ${health_output}" 1>&2 + return 1 + fi + + # Make sure Zypper has repositories. + if ! lr_output=$(zypper lr); then + echo "The guestregister.service failed. Unable to SUSEConnect and thus have no Zypper repositories: ${lr_output}: ${health_output}." 1>&2 + return 1 + fi + + return 0 +} + +# Synchronize our repositories so that futher installation steps are working with updated cache +# and repo metadata. +synchronize_repos() { + case $PACKAGE_MANAGER in + apt) + sudo apt update + ;; + dnf) + sudo dnf makecache + ;; + yum) + sudo yum makecache + ;; + zypper) + if [ "$DISTRO" == "sles" ]; then + if ! 
sles_ensure_suseconnect; then + return 1 + fi + fi + sudo zypper --gpg-auto-import-keys --non-interactive ref + sudo zypper --gpg-auto-import-keys --non-interactive refs + ;; + *) + return 0 + ;; + esac +} + +# Function to check cloud-init status and retry on failure +# Before we start to modify repositories and install packages we'll wait for cloud-init to finish +# so it doesn't race with any of our package installations. +# We run as sudo because Amazon Linux 2 throws Python 2.7 errors when running `cloud-init status` as +# non-root user (known bug). +wait_for_cloud_init() { + if output=$(sudo cloud-init status --wait); then + return 0 + else + res=$? + case $res in + 2) + { + echo "WARNING: cloud-init did not complete successfully but recovered." + echo "Exit code: $res" + echo "Output: $output" + echo "Here are the logs for the failure:" + cat /var/log/cloud-init-* + } 1>&2 + return 0 + ;; + *) + { + echo "cloud-init did not complete successfully." + echo "Exit code: $res" + echo "Output: $output" + echo "Here are the logs for the failure:" + cat /var/log/cloud-init-* + } 1>&2 + return 1 + ;; + esac + fi +} + +# Wait for cloud-init if it exists +type cloud-init && wait_for_cloud_init + +# Synchronizing repos +begin_time=$(date +%s) +end_time=$((begin_time + TIMEOUT_SECONDS)) +while [ "$(date +%s)" -lt "$end_time" ]; do + if synchronize_repos; then + exit 0 + fi + + sleep "$RETRY_INTERVAL" +done + +fail "Timed out waiting for distro repos to be set up" diff --git a/enos/modules/k8s_deploy_vault/main.tf b/enos/modules/k8s_deploy_vault/main.tf new file mode 100644 index 0000000..a422be4 --- /dev/null +++ b/enos/modules/k8s_deploy_vault/main.tf @@ -0,0 +1,165 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_version = ">= 1.0" + + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + + helm = { + source = "hashicorp/helm" + version = "2.6.0" + } + } +} + +locals { + helm_chart_settings = { + "server.ha.enabled" = "true" + "server.ha.replicas" = var.vault_instance_count + "server.ha.raft.enabled" = "true" + "server.affinity" = "" + "server.image.repository" = var.image_repository + "server.image.tag" = var.image_tag + "server.image.pullPolicy" = "Never" # Forces local image use + "server.resources.requests.cpu" = "50m" + "server.resources.limits.memory" = "200Mi" # chart nests limits under server.resources; "200m" memory would mean 0.2 bytes + "server.resources.limits.cpu" = "200m" + "server.ha.raft.config" = file("${abspath(path.module)}/raft-config.hcl") + "server.dataStorage.size" = "100Mi" # was "100m", a sub-byte PVC request + "server.logLevel" = var.vault_log_level + } + all_helm_chart_settings = var.ent_license == null ? local.helm_chart_settings : merge(local.helm_chart_settings, { + "server.extraEnvironmentVars.VAULT_LICENSE" = var.ent_license + }) + + vault_address = "http://127.0.0.1:8200" + + instance_indexes = [for idx in range(var.vault_instance_count) : tostring(idx)] + + leader_idx = local.instance_indexes[0] + followers_idx = toset(slice(local.instance_indexes, 1, var.vault_instance_count)) +} + +resource "helm_release" "vault" { + name = "vault" + + repository = "https://helm.releases.hashicorp.com" + chart = "vault" + + dynamic "set" { + for_each = local.all_helm_chart_settings + + content { + name = set.key + value = set.value + } + } +} + +data "enos_kubernetes_pods" "vault_pods" { + kubeconfig_base64 = var.kubeconfig_base64 + context_name = var.context_name + namespace = helm_release.vault.namespace + label_selectors = [ + "app.kubernetes.io/name=vault", + "component=server" + ] + + depends_on = [helm_release.vault] +} + +resource "enos_vault_init" "leader" { + bin_path = "/bin/vault" + vault_addr = local.vault_address + + key_shares = 5 + key_threshold = 3 + + transport = { 
kubernetes = { + kubeconfig_base64 = var.kubeconfig_base64 + context_name = var.context_name + pod = data.enos_kubernetes_pods.vault_pods.pods[local.leader_idx].name + namespace = data.enos_kubernetes_pods.vault_pods.pods[local.leader_idx].namespace + } + } +} + +resource "enos_vault_unseal" "leader" { + bin_path = "/bin/vault" + vault_addr = local.vault_address + seal_type = "shamir" + unseal_keys = enos_vault_init.leader.unseal_keys_b64 + + transport = { + kubernetes = { + kubeconfig_base64 = var.kubeconfig_base64 + context_name = var.context_name + pod = data.enos_kubernetes_pods.vault_pods.pods[local.leader_idx].name + namespace = data.enos_kubernetes_pods.vault_pods.pods[local.leader_idx].namespace + } + } + + depends_on = [enos_vault_init.leader] +} + +// We need to manually join the followers since the join request must only happen after the leader +// has been initialized. We could use retry join, but in that case we'd need to restart the follower +// pods once the leader is setup. The default helm deployment configuration for an HA cluster as +// documented here: https://learn.hashicorp.com/tutorials/vault/kubernetes-raft-deployment-guide#configure-vault-helm-chart +// uses a liveness probe that automatically restarts nodes that are not healthy. This works well for +// clusters that are configured with auto-unseal as eventually the nodes would join and unseal. 
+resource "enos_remote_exec" "raft_join" { + for_each = local.followers_idx + + inline = [ + // asserts that vault is ready + "for i in 1 2 3 4 5; do vault status > /dev/null 2>&1 && break || sleep 5; done", + // joins the follower to the leader + "vault operator raft join http://vault-0.vault-internal:8200" + ] + + transport = { + kubernetes = { + kubeconfig_base64 = var.kubeconfig_base64 + context_name = var.context_name + pod = data.enos_kubernetes_pods.vault_pods.pods[each.key].name + namespace = data.enos_kubernetes_pods.vault_pods.pods[each.key].namespace + } + } + + depends_on = [enos_vault_unseal.leader] +} + + +resource "enos_vault_unseal" "followers" { + for_each = local.followers_idx + + bin_path = "/bin/vault" + vault_addr = local.vault_address + seal_type = "shamir" + unseal_keys = enos_vault_init.leader.unseal_keys_b64 + + transport = { + kubernetes = { + kubeconfig_base64 = var.kubeconfig_base64 + context_name = var.context_name + pod = data.enos_kubernetes_pods.vault_pods.pods[each.key].name + namespace = data.enos_kubernetes_pods.vault_pods.pods[each.key].namespace + } + } + + depends_on = [enos_remote_exec.raft_join] +} + +output "vault_root_token" { + value = enos_vault_init.leader.root_token +} + +output "vault_pods" { + value = data.enos_kubernetes_pods.vault_pods.pods +} diff --git a/enos/modules/k8s_deploy_vault/variables.tf b/enos/modules/k8s_deploy_vault/variables.tf new file mode 100644 index 0000000..9730f87 --- /dev/null +++ b/enos/modules/k8s_deploy_vault/variables.tf @@ -0,0 +1,42 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +variable "context_name" { + type = string + description = "The name of the k8s context for Vault" +} + +variable "ent_license" { + type = string + description = "The value of a valid Vault Enterprise license" +} + +variable "image_repository" { + type = string + description = "The name of the Vault repository, ie hashicorp/vault or hashicorp/vault-enterprise for the image to deploy" +} + +variable "image_tag" { + type = string + description = "The tag of the vault image to deploy" +} + +variable "kubeconfig_base64" { + type = string + description = "The base64 encoded version of the Kubernetes configuration file" +} + +variable "vault_edition" { + type = string + description = "The Vault product edition" +} + +variable "vault_instance_count" { + type = number + description = "How many vault instances are in the cluster" +} + +variable "vault_log_level" { + description = "The server log level for Vault logs. Supported values (in order of detail) are trace, debug, info, warn, and err." + type = string +} diff --git a/enos/modules/k8s_vault_verify_replication/main.tf b/enos/modules/k8s_vault_verify_replication/main.tf new file mode 100644 index 0000000..6660673 --- /dev/null +++ b/enos/modules/k8s_vault_verify_replication/main.tf @@ -0,0 +1,42 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +locals { + instances = toset([for idx in range(var.vault_instance_count) : tostring(idx)]) +} + +resource "enos_remote_exec" "replication_status" { + for_each = local.instances + + inline = ["vault read -format=json sys/replication/status"] + + transport = { + kubernetes = { + kubeconfig_base64 = var.kubeconfig_base64 + context_name = var.context_name + pod = var.vault_pods[each.key].name + namespace = var.vault_pods[each.key].namespace + } + } +} + +resource "enos_local_exec" "verify_replication_status" { + + for_each = enos_remote_exec.replication_status + + environment = { + STATUS = each.value.stdout + VAULT_EDITION = var.vault_edition + } + + content = abspath("${path.module}/scripts/smoke-verify-replication.sh") +} diff --git a/enos/modules/k8s_vault_verify_replication/scripts/smoke-verify-replication.sh b/enos/modules/k8s_vault_verify_replication/scripts/smoke-verify-replication.sh new file mode 100755 index 0000000..6987f7c --- /dev/null +++ b/enos/modules/k8s_vault_verify_replication/scripts/smoke-verify-replication.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +# The Vault replication smoke test, documented in +# https://docs.google.com/document/d/16sjIk3hzFDPyY5A9ncxTZV_9gnpYSF1_Vx6UA1iiwgI/edit#heading=h.kgrxf0f1et25 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +# Replication STATUS endpoint should have data.mode disabled for CE release +if [ "$VAULT_EDITION" == "ce" ]; then + if [ "$(echo "${STATUS}" | jq -r '.data.mode')" != "disabled" ]; then + fail "replication data mode is not disabled for CE release!" + fi +else + if [ "$(echo "${STATUS}" | jq -r '.data.dr')" == "" ]; then + fail "DR replication should be available for an ENT release!" 
+ fi + if [ "$(echo "${STATUS}" | jq -r '.data.performance')" == "" ]; then + fail "Performance replication should be available for an ENT release!" + fi +fi diff --git a/enos/modules/k8s_vault_verify_replication/variables.tf b/enos/modules/k8s_vault_verify_replication/variables.tf new file mode 100644 index 0000000..011ae9c --- /dev/null +++ b/enos/modules/k8s_vault_verify_replication/variables.tf @@ -0,0 +1,30 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +variable "vault_instance_count" { + type = number + description = "How many vault instances are in the cluster" +} + +variable "vault_edition" { + type = string + description = "The vault product edition" +} + +variable "vault_pods" { + type = list(object({ + name = string + namespace = string + })) + description = "The vault instances for the cluster to verify" +} + +variable "kubeconfig_base64" { + type = string + description = "The base64 encoded version of the Kubernetes configuration file" +} + +variable "context_name" { + type = string + description = "The name of the k8s context for Vault" +} diff --git a/enos/modules/k8s_vault_verify_ui/main.tf b/enos/modules/k8s_vault_verify_ui/main.tf new file mode 100644 index 0000000..4013254 --- /dev/null +++ b/enos/modules/k8s_vault_verify_ui/main.tf @@ -0,0 +1,45 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + + +terraform { + required_providers { + enos = { + version = "> 0.4.0" + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +locals { + instances = toset([for idx in range(var.vault_instance_count) : tostring(idx)]) +} + +resource "enos_remote_exec" "curl_ui" { + for_each = local.instances + + inline = [ + "curl -s -o /dev/null -w '%%{redirect_url}' http://localhost:8200/", + "curl -s -o /dev/null -Iw '%%{http_code}\n' http://localhost:8200/ui/" + ] + + transport = { + kubernetes = { + kubeconfig_base64 = var.kubeconfig_base64 + context_name = var.context_name + pod = var.vault_pods[each.key].name + namespace = var.vault_pods[each.key].namespace + } + } +} + +resource "enos_local_exec" "verify_ui" { + for_each = enos_remote_exec.curl_ui + + environment = { + REDIRECT_URL = split("\n", each.value.stdout)[0] + UI_URL_RESULT = split("\n", each.value.stdout)[1] + } + + scripts = [abspath("${path.module}/scripts/smoke-verify-ui.sh")] +} diff --git a/enos/modules/k8s_vault_verify_ui/scripts/smoke-verify-ui.sh b/enos/modules/k8s_vault_verify_ui/scripts/smoke-verify-ui.sh new file mode 100755 index 0000000..9964df2 --- /dev/null +++ b/enos/modules/k8s_vault_verify_ui/scripts/smoke-verify-ui.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +if [ "${REDIRECT_URL}" != "http://localhost:8200/ui/" ]; then + fail "Port 8200 not redirecting to UI" +fi +if [ "${UI_URL_RESULT}" != "200" ]; then + fail "Vault UI is not available" +fi diff --git a/enos/modules/k8s_vault_verify_ui/variables.tf b/enos/modules/k8s_vault_verify_ui/variables.tf new file mode 100644 index 0000000..3f000c5 --- /dev/null +++ b/enos/modules/k8s_vault_verify_ui/variables.tf @@ -0,0 +1,25 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +variable "vault_instance_count" { + type = number + description = "How many vault instances are in the cluster" +} + +variable "vault_pods" { + type = list(object({ + name = string + namespace = string + })) + description = "The vault instances for the cluster to verify" +} + +variable "kubeconfig_base64" { + type = string + description = "The base64 encoded version of the Kubernetes configuration file" +} + +variable "context_name" { + type = string + description = "The name of the k8s context for Vault" +} diff --git a/enos/modules/k8s_vault_verify_version/main.tf b/enos/modules/k8s_vault_verify_version/main.tf new file mode 100644 index 0000000..3574635 --- /dev/null +++ b/enos/modules/k8s_vault_verify_version/main.tf @@ -0,0 +1,51 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +locals { + instances = toset([for idx in range(var.vault_instance_count) : tostring(idx)]) + expected_version = var.vault_edition == "ce" ? 
var.vault_product_version : "${var.vault_product_version}-ent" +} + +resource "enos_remote_exec" "release_info" { + for_each = local.instances + + environment = { + VAULT_BIN_PATH = var.vault_bin_path + } + + scripts = [abspath("${path.module}/scripts/get-status.sh")] + + transport = { + kubernetes = { + kubeconfig_base64 = var.kubeconfig_base64 + context_name = var.context_name + pod = var.vault_pods[each.key].name + namespace = var.vault_pods[each.key].namespace + } + } +} + +resource "enos_local_exec" "smoke-verify-version" { + for_each = enos_remote_exec.release_info + + environment = { + ACTUAL_VERSION = jsondecode(each.value.stdout).version + BUILD_DATE = var.vault_build_date + CHECK_BUILD_DATE = var.check_build_date + EXPECTED_VERSION = var.vault_product_version, + VAULT_EDITION = var.vault_edition, + VAULT_REVISION = var.vault_product_revision, + VAULT_STATUS = jsonencode(jsondecode(each.value.stdout).status) + } + + scripts = [abspath("${path.module}/scripts/smoke-verify-version.sh")] +} diff --git a/enos/modules/k8s_vault_verify_version/scripts/get-status.sh b/enos/modules/k8s_vault_verify_version/scripts/get-status.sh new file mode 100755 index 0000000..b68e0f6 --- /dev/null +++ b/enos/modules/k8s_vault_verify_version/scripts/get-status.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env sh +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +status=$(${VAULT_BIN_PATH} status -format=json) +version=$(${VAULT_BIN_PATH} version) + +echo "{\"status\": ${status}, \"version\": \"${version}\"}" diff --git a/enos/modules/k8s_vault_verify_version/scripts/smoke-verify-version.sh b/enos/modules/k8s_vault_verify_version/scripts/smoke-verify-version.sh new file mode 100755 index 0000000..fc0de96 --- /dev/null +++ b/enos/modules/k8s_vault_verify_version/scripts/smoke-verify-version.sh @@ -0,0 +1,45 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +# The Vault smoke test to verify the Vault version installed + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +if [[ "${CHECK_BUILD_DATE}" == "false" ]]; then + expected_build_date="" +else + cfg_build_date="${BUILD_DATE}" + if [[ "${cfg_build_date}" == "" ]]; then + cfg_build_date=$(echo "${VAULT_STATUS}" | jq -Mr .build_date) + fi + expected_build_date=", built $cfg_build_date" +fi + +vault_expected_version="Vault v${EXPECTED_VERSION} (${VAULT_REVISION})" + +case "${VAULT_EDITION}" in + ce) version_expected="${vault_expected_version}${expected_build_date}" ;; + ent) version_expected="${vault_expected_version}${expected_build_date}" ;; + ent.hsm) version_expected="${vault_expected_version}${expected_build_date} (cgo)" ;; + ent.fips1403) version_expected="${vault_expected_version}${expected_build_date} (cgo)" ;; + ent.hsm.fips1403) version_expected="${vault_expected_version}${expected_build_date} (cgo)" ;; + *) fail "(${VAULT_EDITION}) does not match any known Vault editions" ;; +esac + +version_expected_nosha=$(echo "$version_expected" | awk '!($3="")' | sed 's/ / /' | sed -e 's/[[:space:]]*$//') + +if [[ "${ACTUAL_VERSION}" == "$version_expected_nosha" ]] || [[ "${ACTUAL_VERSION}" == "$version_expected" ]]; then + echo "Version verification succeeded!" +else + echo "Version checking enabled: ${CHECK_BUILD_DATE}" 1>&2 + echo "Given build date: ${BUILD_DATE}" 1>&2 + echo "Interpreted build date: ${cfg_build_date}" 1>&2 + + fail "expected Version=$version_expected or $version_expected_nosha, got: ${ACTUAL_VERSION}" +fi diff --git a/enos/modules/k8s_vault_verify_version/variables.tf b/enos/modules/k8s_vault_verify_version/variables.tf new file mode 100644 index 0000000..05ca660 --- /dev/null +++ b/enos/modules/k8s_vault_verify_version/variables.tf @@ -0,0 +1,62 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +variable "vault_instance_count" { + type = number + description = "How many vault instances are in the cluster" +} + +variable "vault_pods" { + type = list(object({ + name = string + namespace = string + })) + description = "The vault instances for the cluster to verify" +} + +variable "vault_bin_path" { + type = string + description = "The path to the vault binary" + default = "/bin/vault" +} + +variable "vault_product_version" { + type = string + description = "The vault product version" +} + +variable "vault_product_revision" { + type = string + description = "The vault product revision" +} + +variable "vault_edition" { + type = string + description = "The vault product edition" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +variable "kubeconfig_base64" { + type = string + description = "The base64 encoded version of the Kubernetes configuration file" +} + +variable "context_name" { + type = string + description = "The name of the k8s context for Vault" +} + +variable "check_build_date" { + type = bool + description = "Whether or not to verify that the version includes the build date" +} + +variable "vault_build_date" { + type = string + description = "The build date of the vault docker image to check" + default = "" +} diff --git a/enos/modules/k8s_vault_verify_write_data/main.tf b/enos/modules/k8s_vault_verify_write_data/main.tf new file mode 100644 index 0000000..5227971 --- /dev/null +++ b/enos/modules/k8s_vault_verify_write_data/main.tf @@ -0,0 +1,53 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +locals { + instances = toset([for idx in range(var.vault_instance_count) : tostring(idx)]) +} + +resource "enos_remote_exec" "smoke-enable-secrets-kv" { + environment = { + VAULT_TOKEN = var.vault_root_token + } + + inline = ["${var.vault_bin_path} secrets enable -path=\"secret\" kv"] + + transport = { + kubernetes = { + kubeconfig_base64 = var.kubeconfig_base64 + context_name = var.context_name + pod = var.vault_pods[0].name + namespace = var.vault_pods[0].namespace + } + } +} + +# Verify that we can enable the k/v secrets engine and write data to it. +resource "enos_remote_exec" "smoke-write-test-data" { + depends_on = [enos_remote_exec.smoke-enable-secrets-kv] + for_each = local.instances + + environment = { + VAULT_TOKEN = var.vault_root_token + } + + inline = ["${var.vault_bin_path} kv put secret/test smoke${each.key}=fire"] + + transport = { + kubernetes = { + kubeconfig_base64 = var.kubeconfig_base64 + context_name = var.context_name + pod = var.vault_pods[each.key].name + namespace = var.vault_pods[each.key].namespace + } + } +} diff --git a/enos/modules/k8s_vault_verify_write_data/variables.tf b/enos/modules/k8s_vault_verify_write_data/variables.tf new file mode 100644 index 0000000..4e1754e --- /dev/null +++ b/enos/modules/k8s_vault_verify_write_data/variables.tf @@ -0,0 +1,36 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +variable "vault_instance_count" { + type = number + description = "How many vault instances are in the cluster" +} + +variable "vault_pods" { + type = list(object({ + name = string + namespace = string + })) + description = "The vault instances for the cluster to verify" +} + +variable "vault_bin_path" { + type = string + description = "The path to the vault binary" + default = "/bin/vault" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +variable "kubeconfig_base64" { + type = string + description = "The base64 encoded version of the Kubernetes configuration file" +} + +variable "context_name" { + type = string + description = "The name of the k8s context for Vault" +} diff --git a/enos/modules/load_docker_image/main.tf b/enos/modules/load_docker_image/main.tf new file mode 100644 index 0000000..9f5e15c --- /dev/null +++ b/enos/modules/load_docker_image/main.tf @@ -0,0 +1,53 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "cluster_name" { + type = string + description = "The name of the cluster to load the image into" +} + +variable "image" { + type = string + description = "The image name for the image to load, i.e. hashicorp/vault" +} + +variable "tag" { + type = string + description = "The tag for the image to load, i.e. 1.12.0-dev" +} + +variable "archive" { + type = string + description = "The path to the image archive to load" + default = null +} + +resource "enos_local_kind_load_image" "vault" { + cluster_name = var.cluster_name + image = var.image + tag = var.tag + archive = var.archive +} + +output "tag" { + value = var.tag + description = "The tag of the docker image to load without the tag, i.e. 
1.10.0" +} + +output "image" { + value = var.image + description = "The tag of the docker image to load without the tag, i.e. vault" +} + +output "repository" { + value = enos_local_kind_load_image.vault.loaded_images.repository + description = "The name of the image's repository, i.e. hashicorp/vault" +} diff --git a/enos/modules/local_kind_cluster/main.tf b/enos/modules/local_kind_cluster/main.tf new file mode 100644 index 0000000..b21bfe6 --- /dev/null +++ b/enos/modules/local_kind_cluster/main.tf @@ -0,0 +1,53 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + random = { + source = "hashicorp/random" + version = ">= 3.4.3" + } + } +} + +resource "random_pet" "cluster_name" {} + +resource "enos_local_kind_cluster" "this" { + name = random_pet.cluster_name.id + kubeconfig_path = var.kubeconfig_path +} + +variable "kubeconfig_path" { + type = string +} + +output "cluster_name" { + value = random_pet.cluster_name.id +} + +output "kubeconfig_base64" { + value = enos_local_kind_cluster.this.kubeconfig_base64 +} + +output "context_name" { + value = enos_local_kind_cluster.this.context_name +} + +output "host" { + value = enos_local_kind_cluster.this.endpoint +} + +output "client_certificate" { + value = enos_local_kind_cluster.this.client_certificate +} + +output "client_key" { + value = enos_local_kind_cluster.this.client_key +} + +output "cluster_ca_certificate" { + value = enos_local_kind_cluster.this.cluster_ca_certificate +} diff --git a/enos/modules/read_license/main.tf b/enos/modules/read_license/main.tf new file mode 100644 index 0000000..823714f --- /dev/null +++ b/enos/modules/read_license/main.tf @@ -0,0 +1,8 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +variable "file_name" {} + +output "license" { + value = file(var.file_name) +} diff --git a/enos/modules/replication_data/main.tf b/enos/modules/replication_data/main.tf new file mode 100644 index 0000000..91c89a4 --- /dev/null +++ b/enos/modules/replication_data/main.tf @@ -0,0 +1,51 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +// An arithmetic module for calculating inputs and outputs for various replication steps. + +variable "added_hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + default = {} +} + +variable "initial_hosts" { + description = "The initial set of Vault cluster hosts before removing and adding hosts" + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + default = {} +} + +variable "removed_primary_host" { + type = object({ + ipv6 = string + private_ip = string + public_ip = string + }) + default = null +} + +variable "removed_follower_host" { + type = object({ + ipv6 = string + private_ip = string + public_ip = string + }) + default = null +} + +locals { + remaining_initial = setsubtract(values(var.initial_hosts), [var.removed_primary_host, var.removed_follower_host]) + remaining_hosts_list = tolist(setunion(values(var.added_hosts), local.remaining_initial)) + remaining_hosts = { for idx in range(length(local.remaining_hosts_list)) : idx => local.remaining_hosts_list[idx] } +} + +output "remaining_hosts" { + value = local.remaining_hosts +} diff --git a/enos/modules/restart_vault/main.tf b/enos/modules/restart_vault/main.tf new file mode 100644 index 0000000..2486671 --- /dev/null +++ b/enos/modules/restart_vault/main.tf @@ -0,0 +1,51 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + } + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The vault hosts" +} + +variable "vault_addr" { + type = string + description = "The local vault api address" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the vault binary is installed" +} + + +resource "enos_remote_exec" "restart" { + for_each = var.hosts + + environment = { + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/scripts/restart-vault.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + diff --git a/enos/modules/restart_vault/scripts/restart-vault.sh b/enos/modules/restart_vault/scripts/restart-vault.sh new file mode 100644 index 0000000..3521994 --- /dev/null +++ b/enos/modules/restart_vault/scripts/restart-vault.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +if ! out=$(sudo systemctl stop vault 2>&1); then + fail "failed to stop vault: $out: $(sudo systemctl status vault)" +fi + +if ! out=$(sudo systemctl daemon-reload 2>&1); then + fail "failed to daemon-reload systemd: $out" 1>&2 +fi + +if ! out=$(sudo systemctl start vault 2>&1); then + fail "failed to start vault: $out: $(sudo systemctl status vault)" +fi + +count=0 +retries=5 +while :; do + # Check the Vault seal status + status=$($binpath status) + code=$? 
+ + if [ $code == 0 ] || [ $code == 2 ]; then + # 0 is unsealed and 2 is running but sealed + echo "$status" + exit 0 + fi + + printf "Waiting for Vault cluster to be ready: status code: %s, status:\n%s\n" "$code" "$status" 2>&1 + + wait=$((3 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + fail "Timed out waiting for Vault node to be ready after restart" + fi +done diff --git a/enos/modules/seal_awskms/main.tf b/enos/modules/seal_awskms/main.tf new file mode 100644 index 0000000..e8a1ad3 --- /dev/null +++ b/enos/modules/seal_awskms/main.tf @@ -0,0 +1,68 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "cluster_id" { + type = string +} + +variable "cluster_meta" { + type = string + default = null +} + +variable "cluster_ssh_keypair" { + type = string + default = null +} + +variable "common_tags" { + type = map(string) + default = null +} + +variable "other_resources" { + type = list(string) + default = [] +} + +locals { + cluster_name = var.cluster_meta == null ? var.cluster_id : "${var.cluster_id}-${var.cluster_meta}" +} + +resource "aws_kms_key" "key" { + description = "auto-unseal-key-${local.cluster_name}" + deletion_window_in_days = 7 // 7 is the shortest allowed window + tags = var.common_tags +} + +resource "aws_kms_alias" "alias" { + name = "alias/auto-unseal-key-${local.cluster_name}" + target_key_id = aws_kms_key.key.key_id +} + +output "attributes" { + description = "Seal device specific attributes" + value = { + kms_key_id = aws_kms_key.key.arn + } +} + +// We output our resource name and a collection of those passed in to create a full list of key +// resources that might be required for instance roles that are associated with some unseal types. 
+output "resource_name" { + description = "The awskms key name" + value = aws_kms_key.key.arn +} + +output "resource_names" { + description = "The list of awskms key names to associate with a role" + value = compact(concat([aws_kms_key.key.arn], var.other_resources)) +} diff --git a/enos/modules/seal_pkcs11/main.tf b/enos/modules/seal_pkcs11/main.tf new file mode 100644 index 0000000..084d364 --- /dev/null +++ b/enos/modules/seal_pkcs11/main.tf @@ -0,0 +1,133 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +/* + +A seal module that emulates using a real PKCS#11 HSM. For this we'll use softhsm2. You'll +need softhsm2 and opensc installed to get access to the userspace tools and dynamic library that +Vault Enterprise will use. Here we'll take in the vault hosts and use the one of the nodes +to generate the hsm slot and the tokens, and then we'll copy the softhsm tokens to the other nodes. + +Using softhsm2 and opensc is a bit complicated but here's a cheat sheet for getting started. + +$ brew install softhsm opensc +or +$ sudo apt install softhsm2 opensc + +Create a softhsm slot. You can use anything you want for the pin and the supervisor pin. This will +output the slot identifier, which you'll use as the `slot` parameter in the seal config. +$ softhsm2-util --init-token --free --so-pin=1234 --pin=1234 --label="seal" | grep -oE '[0-9]+$' + +You can see the slots: +$ softhsm2-util --show-slots +Or use opensc's pkcs11-tool. Make sure to use your pin for the -p flag. The module that we refer +to is the location of the shared library that we need to provide to Vault Enterprise. Depending on +your platform or installation method this could be different. +$ pkcs11-tool --module /usr/local/Cellar/softhsm/2.6.1/lib/softhsm/libsofthsm2.so -a seal -p 1234 -IL + +Find yours +$ find /usr/local -type f -name libsofthsm2.so -print -quit + +Your tokens will be installed in the default directories.tokendir. See man softhsm2.conf(5) for +more details. 
On macOS from brew this is /usr/local/var/lib/softhsm/tokens/ + +Vault Enterprise supports creating the HSM keys, but for softhsm2 that would require us to +initialize with one node before copying the contents. So instead we'll create an HSM key and HMAC +key that we'll copy everywhere. + +$ pkcs11-tool --module /usr/local/Cellar/softhsm/2.6.1/lib/softhsm/libsofthsm2.so -a seal -p 1234 --token-label seal --keygen --usage-sign --label hsm_hmac --id 1 --key-type GENERIC:32 --private --sensitive +$ pkcs11-tool --module /usr/local/Cellar/softhsm/2.6.1/lib/softhsm/libsofthsm2.so -a seal -p 1234 --token-label seal --keygen --usage-sign --label hsm_aes --id 2 --key-type AES:32 --private --sensitive --usage-wrap + +Now you should be able to configure Vault Enterprise seal stanza. +*/ + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "cluster_id" { + type = string + description = "The VPC ID of the cluster" +} + +variable "cluster_meta" { + type = string + default = null + description = "Any metadata that needs to be passed in. If we're creating multiple softhsm tokens this value could be a prior KEYS_BASE64" +} + +variable "cluster_ssh_keypair" { + type = string + description = "The ssh keypair of the vault cluster. 
We need this to used the inherited provider for our target" +} + +variable "common_tags" { + type = map(string) + default = null +} + +variable "other_resources" { + type = list(string) + default = [] +} + +resource "random_string" "id" { + length = 8 + numeric = false + special = false + upper = false +} + +module "ec2_info" { + source = "../ec2_info" +} + +locals { + id = "${var.cluster_id}-${random_string.id.result}" +} + +module "target" { + source = "../target_ec2_instances" + ami_id = module.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"] + cluster_tag_key = local.id + common_tags = var.common_tags + instance_count = 1 + instance_types = { + amd64 = "t3a.small" + arm64 = "t4g.small" + } + ports_ingress = [ + { + description = "SSH" + port = 22 + protocol = "tcp" + }, + ] + // Make sure it's not too long as we use this for aws resources that size maximums that are easy + // to hit. + project_name = substr("vault-ci-softhsm-${local.id}", 0, 32) + ssh_keypair = var.cluster_ssh_keypair + vpc_id = var.cluster_id +} + +module "create_vault_keys" { + source = "../softhsm_create_vault_keys" + + cluster_id = var.cluster_id + hosts = module.target.hosts +} + +// Our attributes contain all required keys for the seal stanza and our base64 encoded softhsm +// token and keys. +output "attributes" { + description = "Seal device specific attributes" + value = module.create_vault_keys.all_attributes +} + +// Shim for chaining seals that require IAM roles +output "resource_name" { value = null } +output "resource_names" { value = var.other_resources } diff --git a/enos/modules/seal_shamir/main.tf b/enos/modules/seal_shamir/main.tf new file mode 100644 index 0000000..55e26d1 --- /dev/null +++ b/enos/modules/seal_shamir/main.tf @@ -0,0 +1,27 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +# A shim seal module for shamir seals. For Shamir seals the enos_vault_init resource will take care +# of creating our seal. 
+ +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "cluster_id" { default = null } +variable "cluster_meta" { default = null } +variable "cluster_ssh_keypair" { default = null } +variable "common_tags" { default = null } +variable "image_id" { default = null } +variable "other_resources" { + type = list(string) + default = [] +} + +output "resource_name" { value = null } +output "resource_names" { value = var.other_resources } +output "attributes" { value = null } diff --git a/enos/modules/shutdown_multiple_nodes/main.tf b/enos/modules/shutdown_multiple_nodes/main.tf new file mode 100644 index 0000000..c2781cd --- /dev/null +++ b/enos/modules/shutdown_multiple_nodes/main.tf @@ -0,0 +1,29 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "old_hosts" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The vault cluster instances to be shutdown" +} + +resource "enos_remote_exec" "shutdown_multiple_nodes" { + for_each = var.old_hosts + inline = ["sudo shutdown -P --no-wall; exit 0"] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/shutdown_node/main.tf b/enos/modules/shutdown_node/main.tf new file mode 100644 index 0000000..0458570 --- /dev/null +++ b/enos/modules/shutdown_node/main.tf @@ -0,0 +1,29 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "host" { + type = object({ + ipv6 = string + private_ip = string + public_ip = string + }) + description = "The node to shut down" +} + +resource "enos_remote_exec" "shutdown_node" { + inline = ["sudo shutdown -P --no-wall; exit 0"] + + transport = { + ssh = { + host = var.host.public_ip + } + } +} diff --git a/enos/modules/softhsm_create_vault_keys/main.tf b/enos/modules/softhsm_create_vault_keys/main.tf new file mode 100644 index 0000000..4132de8 --- /dev/null +++ b/enos/modules/softhsm_create_vault_keys/main.tf @@ -0,0 +1,129 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "cluster_id" { + type = string +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The hosts that will have access to the softhsm" +} + +locals { + pin = resource.random_string.pin.result + aes_label = "vault_hsm_aes_${local.pin}" + hmac_label = "vault_hsm_hmac_${local.pin}" + seal_attributes = jsondecode(resource.enos_remote_exec.create_keys.stdout) + target = tomap({ "0" = var.hosts[0] }) + token = "${var.cluster_id}_${local.pin}" +} + +resource "random_string" "pin" { + length = 5 + lower = true + upper = false + numeric = true + special = false +} + +module "install" { + source = "../softhsm_install" + + hosts = local.target + include_tools = true # make sure opensc is also installed as we need it to create keys +} + +module "initialize" { + source = "../softhsm_init" + depends_on = [module.install] + + hosts = local.target +} + +// Create our keys. Our stdout contains the requried the values for the pksc11 seal stanza +// as JSON. 
https://developer.hashicorp.com/vault/docs/configuration/seal/pkcs11#pkcs11-parameters +resource "enos_remote_exec" "create_keys" { + depends_on = [ + module.install, + module.initialize, + ] + + environment = { + AES_LABEL = local.aes_label + HMAC_LABEL = local.hmac_label + PIN = resource.random_string.pin.result + TOKEN_DIR = module.initialize.token_dir + TOKEN_LABEL = local.token + SO_PIN = resource.random_string.pin.result + } + + scripts = [abspath("${path.module}/scripts/create-keys.sh")] + + transport = { + ssh = { + host = var.hosts[0].public_ip + } + } +} + +// Get our softhsm token. Stdout is a base64 encoded gzipped tarball of the softhsm token dir. This +// allows us to pass around binary data inside of Terraform's type system. +resource "enos_remote_exec" "get_keys" { + depends_on = [enos_remote_exec.create_keys] + + environment = { + TOKEN_DIR = module.initialize.token_dir + } + + scripts = [abspath("${path.module}/scripts/get-keys.sh")] + + transport = { + ssh = { + host = var.hosts[0].public_ip + } + } +} + +output "seal_attributes" { + description = "Seal device specific attributes. 
Contains all required keys for the seal stanza" + value = local.seal_attributes +} + +output "token_base64" { + description = "The softhsm token and keys gzipped tarball in base64" + value = enos_remote_exec.get_keys.stdout +} + +output "token_dir" { + description = "The softhsm directory where tokens and keys are stored" + value = module.initialize.token_dir +} + +output "token_label" { + description = "The HSM slot token label" + value = local.token +} + +output "all_attributes" { + description = "Seal device specific attributes" + value = merge( + local.seal_attributes, + { + token_base64 = enos_remote_exec.get_keys.stdout, + token_dir = module.initialize.token_dir + }, + ) +} diff --git a/enos/modules/softhsm_create_vault_keys/scripts/create-keys.sh b/enos/modules/softhsm_create_vault_keys/scripts/create-keys.sh new file mode 100644 index 0000000..6518779 --- /dev/null +++ b/enos/modules/softhsm_create_vault_keys/scripts/create-keys.sh @@ -0,0 +1,82 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$AES_LABEL" ]] && fail "AES_LABEL env variable has not been set" +[[ -z "$HMAC_LABEL" ]] && fail "HMAC_LABEL env variable has not been set" +[[ -z "$PIN" ]] && fail "PIN env variable has not been set" +[[ -z "$SO_PIN" ]] && fail "SO_PIN env variable has not been set" +[[ -z "$TOKEN_LABEL" ]] && fail "TOKEN_LABEL env variable has not been set" +[[ -z "$TOKEN_DIR" ]] && fail "TOKEN_DIR env variable has not been set" + +if ! type softhsm2-util &> /dev/null; then + fail "unable to locate softhsm2-util in PATH. Have you installed softhsm?" +fi + +if ! type pkcs11-tool &> /dev/null; then + fail "unable to locate pkcs11-tool in PATH. Have you installed opensc?" +fi + +# Create an HSM slot and return the slot number in decimal value. 
+create_slot() { + sudo softhsm2-util --init-token --free --so-pin="$SO_PIN" --pin="$PIN" --label="$TOKEN_LABEL" | grep -oE '[0-9]+$' +} + +# Find the location of our softhsm shared object. +find_softhsm_so() { + sudo find /usr -type f -name libsofthsm2.so -print -quit +} + +# Create key a key in the slot. Args: module, key label, id number, key type +keygen() { + sudo pkcs11-tool --keygen --usage-sign --private --sensitive --usage-wrap \ + --module "$1" \ + -p "$PIN" \ + --token-label "$TOKEN_LABEL" \ + --label "$2" \ + --id "$3" \ + --key-type "$4" +} + +# Create our softhsm slot and keys +main() { + local slot + if ! slot=$(create_slot); then + fail "failed to create softhsm token slot" + fi + + local so + if ! so=$(find_softhsm_so); then + fail "unable to locate libsofthsm2.so shared object" + fi + + if ! keygen "$so" "$AES_LABEL" 1 'AES:32' 1>&2; then + fail "failed to create AES key" + fi + + if ! keygen "$so" "$HMAC_LABEL" 2 'GENERIC:32' 1>&2; then + fail "failed to create HMAC key" + fi + + # Return our seal configuration attributes as JSON + cat << EOF +{ + "lib": "${so}", + "slot": "${slot}", + "pin": "${PIN}", + "key_label": "${AES_LABEL}", + "hmac_key_label": "${HMAC_LABEL}", + "generate_key": "false" +} +EOF + exit 0 +} + +main diff --git a/enos/modules/softhsm_create_vault_keys/scripts/get-keys.sh b/enos/modules/softhsm_create_vault_keys/scripts/get-keys.sh new file mode 100644 index 0000000..953880f --- /dev/null +++ b/enos/modules/softhsm_create_vault_keys/scripts/get-keys.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$TOKEN_DIR" ]] && fail "TOKEN_DIR env variable has not been set" + +# Tar up our token. We have to do this as a superuser because softhsm is owned by root. +sudo tar -czf token.tgz -C "$TOKEN_DIR" . 
+me="$(whoami)" +sudo chown "$me:$me" token.tgz + +# Write the value STDOUT as base64 so we can handle binary data as a string +base64 -i token.tgz diff --git a/enos/modules/softhsm_distribute_vault_keys/main.tf b/enos/modules/softhsm_distribute_vault_keys/main.tf new file mode 100644 index 0000000..0ccebe1 --- /dev/null +++ b/enos/modules/softhsm_distribute_vault_keys/main.tf @@ -0,0 +1,110 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.4.9" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The hosts for whom we'll distribute the softhsm tokens and keys" +} + +variable "token_base64" { + type = string + description = "The base64 encoded gzipped tarball of the softhsm token" +} + +locals { + // The user/group name for softhsm + softhsm_groups = { + "amzn" = "ods" + "rhel" = "ods" + "ubuntu" = "softhsm" + } + + // Determine if we should skip distribution. If we haven't been passed in a base64 token tarball + // we should short circuit the rest of the module. + skip = var.token_base64 == null || var.token_base64 == "" ? true : false +} + +module "install" { + // TODO: Should packages take a string instead of array so we can plan with unknown values that could change? + source = "../softhsm_install" + + hosts = var.hosts + include_tools = false # we don't need opensc on machines that did not create the HSM. +} + +module "initialize" { + source = "../softhsm_init" + depends_on = [module.install] + + hosts = var.hosts + skip = local.skip +} + +# In order for the vault service to access our keys we need to deal with ownership of files. Make +# sure we have a vault user on the machine if it doesn't already exist. Our distribution script +# below will handle adding vault to the "softhsm" group and setting ownership of the tokens. 
+resource "enos_user" "vault" { + for_each = var.hosts + + name = "vault" + home_dir = "/etc/vault.d" + shell = "/bin/false" + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +// Get the host information so we can ensure that the correct user/group is used for softhsm. +resource "enos_host_info" "hosts" { + for_each = var.hosts + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +// Distribute our softhsm token and keys to the given hosts. +resource "enos_remote_exec" "distribute_token" { + for_each = var.hosts + depends_on = [ + module.initialize, + enos_user.vault, + enos_host_info.hosts, + ] + + environment = { + TOKEN_BASE64 = var.token_base64 + TOKEN_DIR = module.initialize.token_dir + SOFTHSM_GROUP = local.softhsm_groups[enos_host_info.hosts[each.key].distro] + } + + scripts = [abspath("${path.module}/scripts/distribute-token.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +output "lib" { + value = module.install.lib +} diff --git a/enos/modules/softhsm_distribute_vault_keys/scripts/distribute-token.sh b/enos/modules/softhsm_distribute_vault_keys/scripts/distribute-token.sh new file mode 100644 index 0000000..3427991 --- /dev/null +++ b/enos/modules/softhsm_distribute_vault_keys/scripts/distribute-token.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -ex + +fail() { + echo "$1" 1>&2 + exit 1 +} + +# If we're not given keys we'll short circuit. This should only happen if we're skipping distribution +# because we haven't created a token or keys. +if [ -z "$TOKEN_BASE64" ]; then + echo "TOKEN_BASE64 environment variable was unset. 
Assuming we don't need to distribute our token" 1>&2 + exit 0 +fi + +[[ -z "$SOFTHSM_GROUP" ]] && fail "SOFTHSM_GROUP env variable has not been set" +[[ -z "$TOKEN_DIR" ]] && fail "TOKEN_DIR env variable has not been set" + +# Convert our base64 encoded gzipped tarball of the softhsm token back into a tarball. +base64 --decode - > token.tgz <<< "$TOKEN_BASE64" + +# Expand it. We assume it was written with the correct directory metadata. Do this as a superuser +# because the token directory should be owned by root. +sudo tar -xvf token.tgz -C "$TOKEN_DIR" + +# Make sure the vault user is in the softhsm group to get access to the tokens. +sudo usermod -aG "$SOFTHSM_GROUP" vault +sudo chown -R "vault:$SOFTHSM_GROUP" "$TOKEN_DIR" diff --git a/enos/modules/softhsm_init/main.tf b/enos/modules/softhsm_init/main.tf new file mode 100644 index 0000000..edadca8 --- /dev/null +++ b/enos/modules/softhsm_init/main.tf @@ -0,0 +1,83 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.4.9" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The hosts for whom default softhsm configuration will be applied" +} + +variable "skip" { + type = bool + default = false + description = "Whether or not to skip initializing softhsm" +} + +locals { + // The location on disk to write the softhsm tokens to + token_dir = "/var/lib/softhsm/tokens" + + // Where the default configuration is + config_paths = { + "amzn" = "/etc/softhsm2.conf" + "rhel" = "/etc/softhsm2.conf" + "ubuntu" = "/etc/softhsm/softhsm2.conf" + } + + host_key = element(keys(enos_host_info.hosts), 0) + config_path = local.config_paths[enos_host_info.hosts[local.host_key].distro] +} + +resource "enos_host_info" "hosts" { + for_each = var.hosts + + transport = { + ssh = { + host = each.value.public_ip + } + 
} +} + +resource "enos_remote_exec" "init_softhsm" { + for_each = var.hosts + depends_on = [enos_host_info.hosts] + + environment = { + CONFIG_PATH = local.config_paths[enos_host_info.hosts[each.key].distro] + TOKEN_DIR = local.token_dir + SKIP = var.skip ? "true" : "false" + } + + scripts = [abspath("${path.module}/scripts/init-softhsm.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +output "config_path" { + // Technically this is actually just the first config path of our hosts. + value = local.config_path +} + +output "token_dir" { + value = local.token_dir +} + +output "skipped" { + value = var.skip +} diff --git a/enos/modules/softhsm_init/scripts/init-softhsm.sh b/enos/modules/softhsm_init/scripts/init-softhsm.sh new file mode 100644 index 0000000..3181d9e --- /dev/null +++ b/enos/modules/softhsm_init/scripts/init-softhsm.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$CONFIG_PATH" ]] && fail "CONFIG_PATH env variable has not been set" +[[ -z "$TOKEN_DIR" ]] && fail "TOKEN_DIR env variable has not been set" +[[ -z "$SKIP" ]] && fail "SKIP env variable has not been set" + +if [ "$SKIP" == "true" ]; then + exit 0 +fi + +cat << EOF | sudo tee "$CONFIG_PATH" +directories.tokendir = $TOKEN_DIR +objectstore.backend = file +log.level = DEBUG +slots.removable = false +slots.mechanisms = ALL +library.reset_on_fork = false +EOF + +sudo mkdir -p "$TOKEN_DIR" +sudo chmod 0770 "$TOKEN_DIR" diff --git a/enos/modules/softhsm_install/main.tf b/enos/modules/softhsm_install/main.tf new file mode 100644 index 0000000..ff0f497 --- /dev/null +++ b/enos/modules/softhsm_install/main.tf @@ -0,0 +1,116 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The hosts that will have access to the softhsm. We assume they're all the same platform and architecture" +} + +variable "include_tools" { + type = bool + default = false + description = "Install opensc pkcs11-tools along with softhsm" +} + +variable "retry_interval" { + type = string + default = "2" + description = "How long to wait between retries" +} + +variable "timeout" { + type = string + default = "15" + description = "How many seconds to wait before timing out" +} + +locals { + packages = var.include_tools ? { + // These packages match the distros that are currently defined in the `ec2_info` module. + amzn = { + "2023" = ["softhsm", "opensc"] + } + rhel = { + "8.10" = ["softhsm", "opensc"] + "9.5" = ["softhsm", "opensc"] + } + ubuntu = { + "20.04" = ["softhsm", "opensc"] + "22.04" = ["softhsm", "opensc"] + "24.04" = ["softhsm2", "opensc"] + } + } : { + amzn = { + "2023" = ["softhsm"] + } + rhel = { + "8.10" = ["softhsm"] + "9.5" = ["softhsm"] + } + ubuntu = { + "20.04" = ["softhsm"] + "22.04" = ["softhsm"] + "24.04" = ["softhsm2"] + } + } +} + +// Get the host information so we can ensure that we install the correct packages depending on the +// distro and distro version +resource "enos_host_info" "target" { + transport = { + ssh = { + host = var.hosts["0"].public_ip + } + } +} + +module "install_softhsm" { + source = "../install_packages" + + hosts = var.hosts + packages = local.packages[enos_host_info.target.distro][enos_host_info.target.distro_version] +} + +resource "enos_remote_exec" "find_shared_object" { + for_each = var.hosts + depends_on = [module.install_softhsm] + + environment = { + RETRY_INTERVAL = var.retry_interval + TIMEOUT_SECONDS = var.timeout + } + + scripts = 
[abspath("${path.module}/scripts/find-shared-object.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +locals { + object_paths = compact(distinct(values(enos_remote_exec.find_shared_object)[*].stdout)) +} + +output "lib" { + value = local.object_paths[0] + + precondition { + condition = length(local.object_paths) == 1 + error_message = "SoftHSM targets cannot have different libsofthsm2.so shared object paths. Are they all the same Linux distro?" + } +} diff --git a/enos/modules/softhsm_install/scripts/find-shared-object.sh b/enos/modules/softhsm_install/scripts/find-shared-object.sh new file mode 100644 index 0000000..52b720d --- /dev/null +++ b/enos/modules/softhsm_install/scripts/find-shared-object.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash +## Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set" +[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set" + +begin_time=$(date +%s) +end_time=$((begin_time + TIMEOUT_SECONDS)) +while [ "$(date +%s)" -lt "$end_time" ]; do + if so=$(sudo find /usr -type f -name libsofthsm2.so -print -quit); then + echo "$so" + exit 0 + fi + + sleep "$RETRY_INTERVAL" +done + +fail "Timed out trying to locate libsofthsm2.so shared object" diff --git a/enos/modules/start_vault/main.tf b/enos/modules/start_vault/main.tf new file mode 100644 index 0000000..e2eec5f --- /dev/null +++ b/enos/modules/start_vault/main.tf @@ -0,0 +1,276 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + # We need to specify the provider source in each module until we publish it + # to the public registry + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.5.3" + } + } +} + +locals { + api_addr_localhost = var.ip_version == 4 ? 
"http://127.0.0.1:${var.listener_port}" : "http://[::1]:${var.listener_port}" + api_addrs = tolist([for h in var.hosts : { + 4 : "http://${h.public_ip}:${var.listener_port}", + 6 : "http://[${h.ipv6}]:${var.listener_port}", + }]) + api_addrs_internal = tolist([for h in var.hosts : { + 4 : "http://${h.private_ip}:${var.listener_port}", + 6 : "http://[${h.ipv6}]:${var.listener_port}", + }]) + bin_path = "${var.install_dir}/vault" + cluster_addrs = tolist([for h in var.hosts : { + 4 : "http://${h.public_ip}:${var.cluster_port}", + 6 : "http://[${h.ipv6}]:${var.cluster_port}", + }]) + cluster_addrs_internal = tolist([for h in var.hosts : { + 4 : "http://${h.private_ip}:${var.cluster_port}", + 6 : "http://[${h.ipv6}]:${var.cluster_port}", + }]) + // In order to get Terraform to plan we have to use collections with keys that are known at plan + // time. Here we're creating locals that keep track of index values that point to our target hosts. + followers = toset(slice(local.instances, 1, length(local.instances))) + instances = [for idx in range(length(var.hosts)) : tostring(idx)] + leader = toset(slice(local.instances, 0, 1)) + listener_address = var.ip_version == 4 ? "0.0.0.0:${var.listener_port}" : "[::]:${var.listener_port}" + // Handle cases where we might have to distribute HSM tokens for the pkcs11 seal before starting + // vault. + token_base64 = try(lookup(var.seal_attributes, "token_base64", ""), "") + token_base64_secondary = try(lookup(var.seal_attributes_secondary, "token_base64", ""), "") + // This module currently supports up to two defined seals. Most of our locals logic here is for + // creating the correct seal configuration. 
+ seals = { + primary = local.seal_primary + secondary = local.seal_secondary + } + seals_primary = { + awskms = { + type = "awskms" + attributes = merge( + { + name = var.seal_alias + priority = var.seal_priority + }, var.seal_attributes + ) + } + pkcs11 = { + type = "pkcs11" + attributes = merge( + { + name = var.seal_alias + priority = var.seal_priority + }, + // Strip out attributes that aren't supposed to be in seal stanza like our base64 encoded + // softhsm blob and the token directory. We'll also inject the shared object library + // location that we detect on the target machines. This allows use to create the token and + // keys on a machines that have different shared object locations. + merge( + try({ for key, val in var.seal_attributes : key => val if key != "token_base64" && key != "token_dir" }, {}), + # Note: the below reference has to point to a specific instance of the maybe_configure_hsm + # module (in this case [0]) due to the maybe_configure_hsm module call using `count` to control whether it runs or not. + try({ lib = module.maybe_configure_hsm[0].lib }, {}) + ), + ) + } + shamir = { + type = "shamir" + attributes = null + } + } + seal_primary = local.seals_primary[var.seal_type] + seals_secondary = { + awskms = { + type = "awskms" + attributes = merge( + { + name = var.seal_alias_secondary + priority = var.seal_priority_secondary + }, var.seal_attributes_secondary + ) + } + pkcs11 = { + type = "pkcs11" + attributes = merge( + { + name = var.seal_alias_secondary + priority = var.seal_priority_secondary + }, + merge( + try({ for key, val in var.seal_attributes_secondary : key => val if key != "token_base64" && key != "token_dir" }, {}), + # Note: the below reference has to point to a specific instance of the maybe_configure_hsm_secondary + # module (in this case [0]) due to the maybe_configure_hsm_secondary module call using `count` to control whether it runs or not. 
+ try({ lib = module.maybe_configure_hsm_secondary[0].lib }, {}) + ), + ) + } + none = { + type = "none" + attributes = null + } + } + seal_secondary = local.seals_secondary[var.seal_type_secondary] + storage_address = var.ip_version == 4 ? "0.0.0.0:${var.external_storage_port}" : "[::]:${var.external_storage_port}" + storage_attributes = [for idx, host in var.hosts : (var.storage_backend == "raft" ? + merge( + { + node_id = "${var.storage_node_prefix}_${idx}" + }, + var.storage_backend_attrs + ) : + { + address = local.storage_address + path = "vault" + }) + ] + storage_retry_join = { + "raft" : { + auto_join : "provider=aws addr_type=${var.ip_version == 4 ? "private_v4" : "public_v6"} tag_key=${var.cluster_tag_key} tag_value=${var.cluster_name}", + auto_join_scheme : "http", + }, + } +} + +# You might be wondering why our start_vault module, which supports shamir, awskms, and pkcs11 seal +# types, contains sub-modules that are only used for HSM. Well, each of those seal devices has +# different requirements and as such we have some seal specific requirements before starting Vault. +# +# A Shamir seal key cannot exist until Vault has already started, so this modules responsibility for +# shamir seals is ensuring that the seal type is passed to the enos_vault_start resource. That's it. +# +# Auto-unseal with a KMS requires that we configure the enos_vault_start resource with the correct +# seal type and the attributes necessary to know which KMS key to use. Vault should automatically +# unseal if we've given it the correct configuration. As long as Vault is able to access the key +# in the KMS it should be able to start. That's normally done via roles associated to the target +# machines, which is outside the scope of this module. +# +# Auto-unseal with an HSM and PKCS#11 is more complicated because a shared object library, which is +# how we interface with the HSM, must be present on each node in order to start Vault. 
In the real +# world this means an actual HSM in the same rack or data center as every node in the Vault cluster, +# but in our case we're creating ephemeral infrastructure for these test scenarios and don't have a +# real HSM available. We could use CloudHSM or the like, but at the time of writing CloudHSM +# provisioning takes anywhere from 30 to 60 minutes and costs upwards of $2 dollars an hour. That's +# far too long and expensive for scenarios we'll run fairly frequently. Instead, we test using a +# software HSM. Using a software HSM solves the cost and speed problems but creates new set of +# problems. We need to ensure every node in the cluster has access to the same "HSM" and with +# softhsm that means the same software, configuration, tokens and keys. Our `seal_pkcs11` module +# takes care of creating the token and keys, but that's the end of the road for that module. It's +# our job to ensure that when we're starting Vault with a software HSM that we'll ensure the correct +# software, configuration and data are available on the nodes. That's where the following two +# modules come in. They handle installing the required software, configuring it, and distributing +# the key data that was passed in via seal attributes. +module "maybe_configure_hsm" { + source = "../softhsm_distribute_vault_keys" + count = (var.seal_type == "pkcs11" || var.seal_type_secondary == "pkcs11") ? 1 : 0 + + hosts = var.hosts + token_base64 = local.token_base64 +} + +module "maybe_configure_hsm_secondary" { + source = "../softhsm_distribute_vault_keys" + depends_on = [module.maybe_configure_hsm] + count = (var.seal_type == "pkcs11" || var.seal_type_secondary == "pkcs11") ? 
1 : 0 + + hosts = var.hosts + token_base64 = local.token_base64_secondary +} + +resource "enos_vault_start" "leader" { + for_each = local.leader + depends_on = [ + module.maybe_configure_hsm_secondary, + ] + + bin_path = local.bin_path + config_dir = var.config_dir + config_mode = var.config_mode + environment = merge(var.environment, { + VAULT_DISABLE_MLOCK = var.disable_mlock + }) + config = { + api_addr = local.api_addrs_internal[tonumber(each.value)][var.ip_version] + cluster_addr = local.cluster_addrs_internal[tonumber(each.value)][var.ip_version] + cluster_name = var.cluster_name + listener = { + type = "tcp" + attributes = { + address = local.listener_address + tls_disable = "true" + } + } + log_level = var.log_level + storage = { + type = var.storage_backend + attributes = local.storage_attributes[each.key] + retry_join = try(local.storage_retry_join[var.storage_backend], null) + } + seals = local.seals + ui = true + } + license = var.license + manage_service = var.manage_service + username = var.service_username + unit_name = "vault" + + transport = { + ssh = { + host = var.hosts[each.value].public_ip + } + } +} + +resource "enos_vault_start" "followers" { + depends_on = [ + enos_vault_start.leader, + ] + for_each = local.followers + + bin_path = local.bin_path + config_dir = var.config_dir + config_mode = var.config_mode + environment = merge(var.environment, { + VAULT_DISABLE_MLOCK = var.disable_mlock + }) + config = { + api_addr = local.api_addrs_internal[tonumber(each.value)][var.ip_version] + cluster_addr = local.cluster_addrs_internal[tonumber(each.value)][var.ip_version] + cluster_name = var.cluster_name + listener = { + type = "tcp" + attributes = { + address = local.listener_address + tls_disable = "true" + } + } + log_level = var.log_level + storage = { + type = var.storage_backend + attributes = { for key, value in local.storage_attributes[each.key] : key => value } + retry_join = try(local.storage_retry_join[var.storage_backend], null) + } + 
seals = local.seals + ui = true + } + license = var.license + manage_service = var.manage_service + username = var.service_username + unit_name = "vault" + + transport = { + ssh = { + host = var.hosts[each.value].public_ip + } + } +} + +output "token_base64" { + value = local.token_base64 +} + +output "token_base64_secondary" { + value = local.token_base64_secondary +} diff --git a/enos/modules/start_vault/outputs.tf b/enos/modules/start_vault/outputs.tf new file mode 100644 index 0000000..c20e7b8 --- /dev/null +++ b/enos/modules/start_vault/outputs.tf @@ -0,0 +1,63 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +output "api_addr_localhost" { + description = "The localhost API address" + value = local.api_addr_localhost +} + +output "api_addrs" { + description = "The external API addresses of all nodes the cluster" + value = local.api_addrs +} + +output "cluster_name" { + description = "The Vault cluster name" + value = var.cluster_name +} + +output "cluster_port" { + description = "The Vault cluster request forwarding listener port" + value = var.cluster_port +} + +output "external_storage_port" { + description = "The Vault cluster non-raft external storage port" + value = var.external_storage_port +} + +output "followers" { + description = "The follower enos_vault_start resources" + value = enos_vault_start.followers +} + +output "leader" { + description = "The leader enos_vault_start resource" + value = enos_vault_start.leader +} + +output "ipv6s" { + description = "Vault cluster target host ipv6s" + value = [for host in var.hosts : host.ipv6] +} + +output "listener_port" { + description = "The Vault cluster TCP listener port" + value = var.listener_port +} + +output "private_ips" { + description = "Vault cluster target host private_ips" + value = [for host in var.hosts : host.private_ip] +} + +output "public_ips" { + description = "Vault cluster target host public_ips" + value = [for host in var.hosts : host.public_ip] +} + +output 
"hosts" { + description = "The vault cluster instances that were created" + + value = var.hosts +} diff --git a/enos/modules/start_vault/variables.tf b/enos/modules/start_vault/variables.tf new file mode 100644 index 0000000..21d4a4e --- /dev/null +++ b/enos/modules/start_vault/variables.tf @@ -0,0 +1,193 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +variable "cluster_name" { + type = string + description = "The Vault cluster name" +} + +variable "cluster_port" { + type = number + description = "The cluster port for Vault to listen on" + default = 8201 +} + +variable "cluster_tag_key" { + type = string + description = "The Vault cluster tag key" + default = "retry_join" +} + +variable "config_dir" { + type = string + description = "The directory to use for Vault configuration" + default = "/etc/vault.d" +} + +variable "config_mode" { + description = "The method to use when configuring Vault. When set to 'env' we will configure Vault using VAULT_ style environment variables if possible. When 'file' we'll use the HCL configuration file for all configuration options." + default = "file" + + validation { + condition = contains(["env", "file"], var.config_mode) + error_message = "The config_mode must be either 'env' or 'file'. No other configuration modes are supported." + } +} + +variable "disable_mlock" { + type = bool + description = "Disable mlock for Vault process." 
+ default = false +} + +variable "environment" { + description = "Optional Vault configuration environment variables to set starting Vault" + type = map(string) + default = null +} + +variable "external_storage_port" { + type = number + description = "The port to connect to when using external storage" + default = 8500 +} + +variable "hosts" { + description = "The target machines host addresses to use for the Vault cluster" + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) +} + +variable "install_dir" { + type = string + description = "The directory where the vault binary will be installed" + default = "/opt/vault/bin" +} + +variable "ip_version" { + type = number + description = "The IP version to use for the Vault TCP listeners" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } +} + +variable "license" { + type = string + sensitive = true + description = "The value of the Vault license" + default = null +} + +variable "log_level" { + type = string + description = "The vault service log level" + default = "info" + + validation { + condition = contains(["trace", "debug", "info", "warn", "error"], var.log_level) + error_message = "The log_level must be one of 'trace', 'debug', 'info', 'warn', or 'error'." + } +} + +variable "manage_service" { + type = bool + description = "Manage the Vault service users and systemd unit. 
Disable this to use configuration in RPM and Debian packages" + default = true +} + +variable "listener_port" { + type = number + description = "The port for Vault to listen on" + default = 8200 +} + +variable "seal_alias" { + type = string + description = "The primary seal alias name" + default = "primary" +} + +variable "seal_alias_secondary" { + type = string + description = "The secondary seal alias name" + default = "secondary" +} + +variable "seal_attributes" { + description = "The primary auto-unseal attributes" + default = null +} + +variable "seal_attributes_secondary" { + description = "The secondary auto-unseal attributes" + default = null +} + +variable "seal_priority" { + type = string + description = "The primary seal priority" + default = "1" +} + +variable "seal_priority_secondary" { + type = string + description = "The secondary seal priority" + default = "2" +} + +variable "seal_type" { + type = string + description = "The method by which to unseal the Vault cluster" + default = "awskms" + + validation { + condition = contains(["awskms", "pkcs11", "shamir"], var.seal_type) + error_message = "The seal_type must be either 'awskms', 'pkcs11', or 'shamir'. No other seal types are supported." + } +} + +variable "seal_type_secondary" { + type = string + description = "A secondary HA seal method. Only supported in Vault Enterprise >= 1.15" + default = "none" + + validation { + condition = contains(["awskms", "pkcs11", "none"], var.seal_type_secondary) + error_message = "The secondary_seal_type must be 'awskms', 'pkcs11' or 'none'. No other secondary seal types are supported." 
+ } +} + +variable "service_username" { + type = string + description = "The host username to own the vault service" + default = "vault" +} + +variable "storage_backend" { + type = string + description = "The storage backend to use" + default = "raft" + + validation { + condition = contains(["raft", "consul"], var.storage_backend) + error_message = "The storage_backend must be either raft or consul. No other storage backends are supported." + } +} + +variable "storage_backend_attrs" { + type = map(any) + description = "An optional set of key value pairs to inject into the storage block" + default = {} +} + +variable "storage_node_prefix" { + type = string + description = "A prefix to use for each node in the Vault storage configuration" + default = "node" +} diff --git a/enos/modules/stop_vault/main.tf b/enos/modules/stop_vault/main.tf new file mode 100644 index 0000000..6dd477d --- /dev/null +++ b/enos/modules/stop_vault/main.tf @@ -0,0 +1,39 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + # We need to specify the provider source in each module until we publish it + # to the public registry + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.4.0" + } + } +} + +variable "service_name" { + type = string + description = "The Vault systemd service name" + default = "vault" +} + +variable "hosts" { + description = "The target machines host addresses to use for the Vault cluster" + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) +} + +resource "enos_remote_exec" "shutdown_multiple_nodes" { + for_each = var.hosts + inline = ["sudo systemctl stop ${var.service_name}.service; sleep 5"] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/target_ec2_fleet/main.tf b/enos/modules/target_ec2_fleet/main.tf new file mode 100644 index 0000000..411d174 --- /dev/null +++ 
b/enos/modules/target_ec2_fleet/main.tf @@ -0,0 +1,339 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + # We need to specify the provider source in each module until we publish it + # to the public registry + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.3.24" + } + } +} + +data "aws_vpc" "vpc" { + id = var.vpc_id +} + +data "aws_subnets" "vpc" { + filter { + name = "vpc-id" + values = [var.vpc_id] + } +} + +data "aws_iam_policy_document" "target" { + statement { + resources = ["*"] + + actions = [ + "ec2:DescribeInstances", + "secretsmanager:*" + ] + } + + dynamic "statement" { + for_each = var.seal_key_names + + content { + resources = [statement.value] + + actions = [ + "kms:DescribeKey", + "kms:ListKeys", + "kms:Encrypt", + "kms:Decrypt", + "kms:GenerateDataKey" + ] + } + } +} + +data "aws_iam_policy_document" "target_role" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["ec2.amazonaws.com"] + } + } +} + +data "enos_environment" "localhost" {} + +resource "random_string" "random_cluster_name" { + length = 8 + lower = true + upper = false + numeric = false + special = false +} + +resource "random_string" "unique_id" { + length = 4 + lower = true + upper = false + numeric = false + special = false +} + +// ec2:CreateFleet only allows up to 4 InstanceRequirements overrides so we can only ever request +// a fleet across 4 or fewer subnets if we want to bid with InstanceRequirements instead of +// weighted instance types. 
+resource "random_shuffle" "subnets" { + input = data.aws_subnets.vpc.ids + result_count = 4 +} + +locals { + spot_allocation_strategy = "lowestPrice" + on_demand_allocation_strategy = "lowestPrice" + instances = toset([for idx in range(var.instance_count) : tostring(idx)]) + cluster_name = coalesce(var.cluster_name, random_string.random_cluster_name.result) + name_prefix = "${var.project_name}-${local.cluster_name}-${random_string.unique_id.result}" + fleet_tag = "${local.name_prefix}-spot-fleet-target" + fleet_tags = { + Name = "${local.name_prefix}-${var.cluster_tag_key}-target" + "${var.cluster_tag_key}" = local.cluster_name + Fleet = local.fleet_tag + } +} + +resource "aws_iam_role" "target" { + name = "${local.name_prefix}-target-role" + assume_role_policy = data.aws_iam_policy_document.target_role.json +} + +resource "aws_iam_instance_profile" "target" { + name = "${local.name_prefix}-target-profile" + role = aws_iam_role.target.name +} + +resource "aws_iam_role_policy" "target" { + name = "${local.name_prefix}-target-policy" + role = aws_iam_role.target.id + policy = data.aws_iam_policy_document.target.json +} + +resource "aws_security_group" "target" { + name = "${local.name_prefix}-target" + description = "Target instance security group" + vpc_id = var.vpc_id + + # SSH traffic + ingress { + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + # Vault traffic + ingress { + from_port = 8200 + to_port = 8201 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + formatlist("%s/32", var.ssh_allow_ips) + ]) + } + + # Consul traffic + ingress { + from_port = 8300 + to_port = 8302 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", 
data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8301 + to_port = 8302 + protocol = "udp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8500 + to_port = 8503 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8600 + to_port = 8600 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8600 + to_port = 8600 + protocol = "udp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + # Internal traffic + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + self = true + } + + # External traffic + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = merge( + var.common_tags, + { + Name = "${local.name_prefix}-sg" + }, + ) +} + +resource "aws_launch_template" "target" { + name = "${local.name_prefix}-target" + image_id = var.ami_id + key_name = var.ssh_keypair + + iam_instance_profile { + name = aws_iam_instance_profile.target.name + } + + instance_requirements { + burstable_performance = "included" + + memory_mib { + min = var.instance_mem_min + max = var.instance_mem_max + } + + vcpu_count { + min = var.instance_cpu_min + max = var.instance_cpu_max + } + } + + network_interfaces { + associate_public_ip_address = true + delete_on_termination = true + security_groups = 
[aws_security_group.target.id] + } + + tag_specifications { + resource_type = "instance" + + tags = merge( + var.common_tags, + local.fleet_tags, + ) + } +} + +# There are three primary knobs we can turn to try and optimize our costs by +# using a spot fleet: our min and max instance requirements, our max bid +# price, and the allocation strategy to use when fulfilling the spot request. +# We've currently configured our instance requirements to allow for anywhere +# from 2-4 vCPUs and 4-16GB of RAM. We intentionally have a wide range +# to allow for a large instance size pool to be considered. Our next knob is our +# max bid price. As we're using spot fleets to save on instance cost, we never +# want to pay more for an instance than we were on-demand. We've set the max price +# to equal what we pay for t3.medium instances on-demand, which are the smallest +# reliable size for Vault scenarios. The final knob is the allocation strategy +# that AWS will use when looking for instances that meet our resource and cost +# requirements. We're using the "lowestPrice" strategy to get the absolute +# cheapest machines that will fit the requirements, but it comes with a slightly +# higher capacity risk than say, "capacityOptimized" or "priceCapacityOptimized". +# Unless we see capacity issues or instances being shut down then we ought to +# stick with that strategy. 
+resource "aws_ec2_fleet" "targets" { + replace_unhealthy_instances = false + terminate_instances = true // terminate instances when we "delete" the fleet + terminate_instances_with_expiration = false + tags = merge( + var.common_tags, + local.fleet_tags, + ) + type = "instant" // make a synchronous request for the entire fleet + + launch_template_config { + launch_template_specification { + launch_template_id = aws_launch_template.target.id + version = aws_launch_template.target.latest_version + } + + dynamic "override" { + for_each = random_shuffle.subnets.result + + content { + subnet_id = override.value + } + } + } + + on_demand_options { + allocation_strategy = local.on_demand_allocation_strategy + max_total_price = (var.max_price * var.instance_count) + min_target_capacity = var.capacity_type == "on-demand" ? var.instance_count : null + // One of these has to be set to enforce our on-demand target capacity minimum + single_availability_zone = false + single_instance_type = true + } + + spot_options { + allocation_strategy = local.spot_allocation_strategy + // The instance_pools_to_use_count is only valid for the allocation_strategy + // lowestPrice. When we are using that strategy we'll want to always set it + // to non-zero to avoid rebuilding the fleet on a re-run. For any other strategy + // set it to zero to avoid rebuilding the fleet on a re-run. + instance_pools_to_use_count = local.spot_allocation_strategy == "lowestPrice" ? 1 : null + } + + // Try and provision only spot instances and fall back to on-demand. + target_capacity_specification { + default_target_capacity_type = var.capacity_type + spot_target_capacity = var.capacity_type == "spot" ? var.instance_count : 0 + on_demand_target_capacity = var.capacity_type == "on-demand" ? 
var.instance_count : 0 + target_capacity_unit_type = "units" // units == instance count + total_target_capacity = var.instance_count + } +} + +data "aws_instance" "targets" { + depends_on = [ + aws_ec2_fleet.targets, + ] + for_each = local.instances + + instance_id = aws_ec2_fleet.targets.fleet_instance_set[0].instance_ids[each.key] + +} diff --git a/enos/modules/target_ec2_fleet/outputs.tf b/enos/modules/target_ec2_fleet/outputs.tf new file mode 100644 index 0000000..505db0e --- /dev/null +++ b/enos/modules/target_ec2_fleet/outputs.tf @@ -0,0 +1,15 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +output "cluster_name" { + value = local.cluster_name +} + +output "hosts" { + description = "The ec2 fleet target hosts" + value = { for idx in range(var.instance_count) : idx => { + public_ip = data.aws_instance.targets[idx].public_ip + private_ip = data.aws_instance.targets[idx].private_ip + ipv6 = try(data.aws_instance.targets[idx].ipv6_addresses[0], null) + } } +} diff --git a/enos/modules/target_ec2_fleet/variables.tf b/enos/modules/target_ec2_fleet/variables.tf new file mode 100644 index 0000000..f0eb87b --- /dev/null +++ b/enos/modules/target_ec2_fleet/variables.tf @@ -0,0 +1,107 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +variable "ami_id" { + description = "The machine image identifier" + type = string +} + +variable "cluster_name" { + type = string + description = "A unique cluster identifier" + default = null +} + +variable "cluster_tag_key" { + type = string + description = "The key name for the cluster tag" + default = "TargetCluster" +} + +variable "common_tags" { + description = "Common tags for cloud resources" + type = map(string) + default = { + Project = "vault-ci" + } +} + +variable "disable_selinux" { + description = "Optionally disable SELinux for certain distros/versions" + type = bool + default = true +} + +variable "instance_mem_min" { + description = "The minimum amount of memory in mebibytes for each instance in the fleet. (1 MiB = 1024 bytes)" + type = number + default = 4096 // ~4 GB +} + +variable "instance_mem_max" { + description = "The maximum amount of memory in mebibytes for each instance in the fleet. (1 MiB = 1024 bytes)" + type = number + default = 16385 // ~16 GB +} + +variable "instance_cpu_min" { + description = "The minimum number of vCPU's for each instance in the fleet" + type = number + default = 2 +} + +variable "instance_cpu_max" { + description = "The maximum number of vCPU's for each instance in the fleet" + type = number + default = 8 // Unlikely we'll ever get that high due to spot price bid protection +} + +variable "instance_count" { + description = "The number of target instances to create" + type = number + default = 3 +} + +variable "max_price" { + description = "The maximum hourly price to pay for each target instance" + type = string + default = "0.0416" +} + +variable "project_name" { + description = "A unique project name" + type = string +} + +variable "seal_key_names" { + type = list(string) + description = "The key management seal key names" + default = null +} + +variable "ssh_allow_ips" { + description = "Allowlisted IP addresses for SSH access to target nodes. 
The IP address of the machine running Enos will automatically be allowlisted" + type = list(string) + default = [] +} + +variable "ssh_keypair" { + description = "SSH keypair used to connect to EC2 instances" + type = string +} + +variable "capacity_type" { + description = "What capacity type to use for EC2 instances" + type = string + default = "on-demand" + + validation { + condition = contains(["on-demand", "spot"], var.capacity_type) + error_message = "The capacity_type must be either 'on-demand' or 'spot'." + } +} + +variable "vpc_id" { + description = "The identifier of the VPC where the target instances will be created" + type = string +} diff --git a/enos/modules/target_ec2_shim/main.tf b/enos/modules/target_ec2_shim/main.tf new file mode 100644 index 0000000..c755668 --- /dev/null +++ b/enos/modules/target_ec2_shim/main.tf @@ -0,0 +1,52 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + # We need to specify the provider source in each module until we publish it + # to the public registry + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.3.24" + } + } +} + +variable "ami_id" { default = null } +variable "cluster_name" { default = null } +variable "cluster_tag_key" { default = null } +variable "common_tags" { default = null } +variable "disable_selinux" { default = true } +variable "instance_count" { default = 3 } +variable "instance_cpu_max" { default = null } +variable "instance_cpu_min" { default = null } +variable "instance_mem_max" { default = null } +variable "instance_mem_min" { default = null } +variable "instance_types" { default = null } +variable "max_price" { default = null } +variable "ports_ingress" { default = null } +variable "project_name" { default = null } +variable "seal_key_names" { default = null } +variable "ssh_allow_ips" { default = null } +variable "ssh_keypair" { default = null } +variable "vpc_id" { default = null } + +resource
"random_string" "cluster_name" { + length = 8 + lower = true + upper = false + numeric = false + special = false +} + +output "cluster_name" { + value = coalesce(var.cluster_name, random_string.cluster_name.result) +} + +output "hosts" { + value = { for idx in range(var.instance_count) : idx => { + public_ip = "null-public-${idx}" + private_ip = "null-private-${idx}" + ipv6 = "null-ipv6-${idx}" + } } +} diff --git a/enos/modules/target_ec2_spot_fleet/main.tf b/enos/modules/target_ec2_spot_fleet/main.tf new file mode 100644 index 0000000..4a76274 --- /dev/null +++ b/enos/modules/target_ec2_spot_fleet/main.tf @@ -0,0 +1,466 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + # We need to specify the provider source in each module until we publish it + # to the public registry + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.3.24" + } + } +} + +data "aws_vpc" "vpc" { + id = var.vpc_id +} + +data "aws_subnets" "vpc" { + filter { + name = "vpc-id" + values = [var.vpc_id] + } +} + +data "aws_iam_policy_document" "target" { + statement { + resources = ["*"] + + actions = [ + "ec2:DescribeInstances", + "secretsmanager:*" + ] + } + + dynamic "statement" { + for_each = var.seal_key_names + + content { + resources = [statement.value] + + actions = [ + "kms:DescribeKey", + "kms:ListKeys", + "kms:Encrypt", + "kms:Decrypt", + "kms:GenerateDataKey" + ] + } + } +} + +data "aws_iam_policy_document" "target_role" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["ec2.amazonaws.com"] + } + } +} + +data "aws_iam_policy_document" "fleet" { + statement { + resources = ["*"] + + actions = [ + "ec2:DescribeImages", + "ec2:DescribeSubnets", + "ec2:RequestSpotInstances", + "ec2:TerminateInstances", + "ec2:DescribeInstanceStatus", + "ec2:CancelSpotFleetRequests", + "ec2:CreateTags", + "ec2:RunInstances", + "ec2:StartInstances", + 
"ec2:StopInstances", + ] + } + + statement { + effect = "Deny" + + resources = [ + "arn:aws:ec2:*:*:instance/*", + ] + + actions = [ + "ec2:RunInstances", + ] + + condition { + test = "StringNotEquals" + variable = "ec2:InstanceMarketType" + values = ["spot"] + } + } + + statement { + resources = ["*"] + + actions = [ + "iam:PassRole", + ] + + condition { + test = "StringEquals" + variable = "iam:PassedToService" + values = [ + "ec2.amazonaws.com", + ] + } + } + + statement { + resources = [ + "arn:aws:elasticloadbalancing:*:*:loadbalancer/*", + ] + + actions = [ + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + ] + } + + statement { + resources = [ + "arn:aws:elasticloadbalancing:*:*:*/*" + ] + + actions = [ + "elasticloadbalancing:RegisterTargets" + ] + } +} + +data "aws_iam_policy_document" "fleet_role" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["spotfleet.amazonaws.com"] + } + } +} + +data "enos_environment" "localhost" {} + +resource "random_string" "random_cluster_name" { + length = 8 + lower = true + upper = false + numeric = false + special = false +} + +resource "random_string" "unique_id" { + length = 4 + lower = true + upper = false + numeric = false + special = false +} + +// ec2:RequestSpotFleet only allows up to 4 InstanceRequirements overrides so we can only ever +// request a fleet across 4 or fewer subnets if we want to bid with InstanceRequirements instead of +// weighted instance types. 
+resource "random_shuffle" "subnets" { + input = data.aws_subnets.vpc.ids + result_count = 4 +} + +locals { + allocation_strategy = "lowestPrice" + instances = toset([for idx in range(var.instance_count) : tostring(idx)]) + cluster_name = coalesce(var.cluster_name, random_string.random_cluster_name.result) + name_prefix = "${var.project_name}-${local.cluster_name}-${random_string.unique_id.result}" + fleet_tag = "${local.name_prefix}-spot-fleet-target" + fleet_tags = { + Name = "${local.name_prefix}-${var.cluster_tag_key}-target" + "${var.cluster_tag_key}" = local.cluster_name + Fleet = local.fleet_tag + } +} + +resource "aws_iam_role" "target" { + name = "${local.name_prefix}-target-role" + assume_role_policy = data.aws_iam_policy_document.target_role.json +} + +resource "aws_iam_instance_profile" "target" { + name = "${local.name_prefix}-target-profile" + role = aws_iam_role.target.name +} + +resource "aws_iam_role_policy" "target" { + name = "${local.name_prefix}-target-policy" + role = aws_iam_role.target.id + policy = data.aws_iam_policy_document.target.json +} + +resource "aws_iam_role" "fleet" { + name = "${local.name_prefix}-fleet-role" + assume_role_policy = data.aws_iam_policy_document.fleet_role.json +} + +resource "aws_iam_role_policy" "fleet" { + name = "${local.name_prefix}-fleet-policy" + role = aws_iam_role.fleet.id + policy = data.aws_iam_policy_document.fleet.json +} + +resource "aws_security_group" "target" { + name = "${local.name_prefix}-target" + description = "Target instance security group" + vpc_id = var.vpc_id + + # SSH traffic + ingress { + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + # Vault traffic + ingress { + from_port = 8200 + to_port = 8201 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", 
data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + formatlist("%s/32", var.ssh_allow_ips) + ]) + } + + # Consul traffic + ingress { + from_port = 8300 + to_port = 8302 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8301 + to_port = 8302 + protocol = "udp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8500 + to_port = 8503 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8600 + to_port = 8600 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8600 + to_port = 8600 + protocol = "udp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + # Internal traffic + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + self = true + } + + # External traffic + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = merge( + var.common_tags, + { + Name = "${local.name_prefix}-sg" + }, + ) +} + +resource "aws_launch_template" "target" { + name = "${local.name_prefix}-target" + image_id = var.ami_id + instance_type = null + key_name = var.ssh_keypair + + iam_instance_profile { + name = aws_iam_instance_profile.target.name + } + + 
instance_requirements { + burstable_performance = "included" + + memory_mib { + min = var.instance_mem_min + max = var.instance_mem_max + } + + vcpu_count { + min = var.instance_cpu_min + max = var.instance_cpu_max + } + } + + network_interfaces { + associate_public_ip_address = true + delete_on_termination = true + security_groups = [aws_security_group.target.id] + } + + tag_specifications { + resource_type = "instance" + + tags = merge( + var.common_tags, + local.fleet_tags, + ) + } +} + +# There are three primary knobs we can turn to try and optimize our costs by +# using a spot fleet: our min and max instance requirements, our max bid +# price, and the allocation strategy to use when fulfilling the spot request. +# We've currently configured our instance requirements to allow for anywhere +# from 2-4 vCPUs and 4-16GB of RAM. We intentionally have a wide range +# to allow for a large instance size pool to be considered. Our next knob is our +# max bid price. As we're using spot fleets to save on instance cost, we never +# want to pay more for an instance than we were on-demand. We've set the max price +# to equal what we pay for t3.medium instances on-demand, which are the smallest +# reliable size for Vault scenarios. The final knob is the allocation strategy +# that AWS will use when looking for instances that meet our resource and cost +# requirements. We're using the "lowestPrice" strategy to get the absolute +# cheapest machines that will fit the requirements, but it comes with a slightly +# higher capacity risk than say, "capacityOptimized" or "priceCapacityOptimized". +# Unless we see capacity issues or instances being shut down then we ought to +# stick with that strategy. +resource "aws_spot_fleet_request" "targets" { + allocation_strategy = local.allocation_strategy + fleet_type = "request" + iam_fleet_role = aws_iam_role.fleet.arn + // The instance_pools_to_use_count is only valid for the allocation_strategy + // lowestPrice. 
When we are using that strategy we'll want to always set it + // to 1 to avoid rebuilding the fleet on a re-run. For any other strategy + // set it to zero to avoid rebuilding the fleet on a re-run. + instance_pools_to_use_count = local.allocation_strategy == "lowestPrice" ? 1 : 0 + spot_price = var.max_price + target_capacity = var.instance_count + terminate_instances_on_delete = true + wait_for_fulfillment = true + + launch_template_config { + launch_template_specification { + id = aws_launch_template.target.id + version = aws_launch_template.target.latest_version + } + + // We cannot currently use more than one subnet[0]. Until the bug has been resolved + // we'll choose a random subnet. It would be ideal to bid across all subnets to get + // the absolute cheapest available at the time of bidding. + // + // [0] https://github.com/hashicorp/terraform-provider-aws/issues/30505 + + /* + dynamic "overrides" { + for_each = random_shuffle.subnets.result + + content { + subnet_id = overrides.value + } + } + */ + + overrides { + subnet_id = random_shuffle.subnets.result[0] + } + } + + tags = merge( + var.common_tags, + local.fleet_tags, + ) +} + +resource "time_sleep" "wait_for_fulfillment" { + depends_on = [aws_spot_fleet_request.targets] + create_duration = "2s" +} + +data "aws_instances" "targets" { + depends_on = [ + time_sleep.wait_for_fulfillment, + aws_spot_fleet_request.targets, + ] + + instance_tags = local.fleet_tags + instance_state_names = [ + "pending", + "running", + ] + + filter { + name = "image-id" + values = [var.ami_id] + } + + filter { + name = "iam-instance-profile.arn" + values = [aws_iam_instance_profile.target.arn] + } +} + +data "aws_instance" "targets" { + depends_on = [ + aws_spot_fleet_request.targets, + data.aws_instances.targets + ] + for_each = local.instances + + instance_id = data.aws_instances.targets.ids[each.key] +} + +module "disable_selinux" { + source = "../disable_selinux" + count = var.disable_selinux == true ? 
1 : 0 + + hosts = { for idx in range(var.instance_count) : idx => { + public_ip = data.aws_instance.targets[idx].public_ip + private_ip = data.aws_instance.targets[idx].private_ip + } } } diff --git a/enos/modules/target_ec2_spot_fleet/outputs.tf b/enos/modules/target_ec2_spot_fleet/outputs.tf new file mode 100644 index 0000000..505db0e --- /dev/null +++ b/enos/modules/target_ec2_spot_fleet/outputs.tf @@ -0,0 +1,15 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +output "cluster_name" { + value = local.cluster_name +} + +output "hosts" { + description = "The ec2 fleet target hosts" + value = { for idx in range(var.instance_count) : idx => { + public_ip = data.aws_instance.targets[idx].public_ip + private_ip = data.aws_instance.targets[idx].private_ip + ipv6 = try(data.aws_instance.targets[idx].ipv6_addresses[0], null) + } } +} diff --git a/enos/modules/target_ec2_spot_fleet/variables.tf b/enos/modules/target_ec2_spot_fleet/variables.tf new file mode 100644 index 0000000..af6c0dc --- /dev/null +++ b/enos/modules/target_ec2_spot_fleet/variables.tf @@ -0,0 +1,96 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +variable "ami_id" { + description = "The machine image identifier" + type = string +} + +variable "cluster_name" { + type = string + description = "A unique cluster identifier" + default = null +} + +variable "cluster_tag_key" { + type = string + description = "The key name for the cluster tag" + default = "TargetCluster" +} + +variable "common_tags" { + description = "Common tags for cloud resources" + type = map(string) + default = { + Project = "Vault" + } +} + +variable "disable_selinux" { + description = "Optionally disable SELinux for certain distros/versions" + type = bool + default = true +} + +variable "instance_mem_min" { + description = "The minimum amount of memory in mebibytes for each instance in the fleet.
(1 MiB = 1024 bytes)" + type = number + default = 4096 // ~4 GB +} + +variable "instance_mem_max" { + description = "The maximum amount of memory in mebibytes for each instance in the fleet. (1 MiB = 1024 bytes)" + type = number + default = 16385 // ~16 GB +} + +variable "instance_cpu_min" { + description = "The minimum number of vCPU's for each instance in the fleet" + type = number + default = 2 +} + +variable "instance_cpu_max" { + description = "The maximum number of vCPU's for each instance in the fleet" + type = number + default = 8 // Unlikely we'll ever get that high due to spot price bid protection +} + +variable "instance_count" { + description = "The number of target instances to create" + type = number + default = 3 +} + +variable "project_name" { + description = "A unique project name" + type = string +} + +variable "max_price" { + description = "The maximum hourly price to pay for each target instance" + type = string + default = "0.0416" +} + +variable "seal_key_names" { + type = list(string) + description = "The key management seal key names" + default = null +} + +variable "ssh_allow_ips" { + description = "Allowlisted IP addresses for SSH access to target nodes. The IP address of the machine running Enos will automatically be allowlisted" + type = list(string) + default = [] +} + +variable "ssh_keypair" { + description = "SSH keypair used to connect to EC2 instances" + type = string +} + +variable "vpc_id" { + description = "The identifier of the VPC where the target instances will be created" + type = string +} diff --git a/enos/modules/vault_agent/main.tf b/enos/modules/vault_agent/main.tf new file mode 100644 index 0000000..e5d1966 --- /dev/null +++ b/enos/modules/vault_agent/main.tf @@ -0,0 +1,91 @@ +# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + } + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "ip_version" { + type = number + default = 4 + description = "The IP version to use for the Vault TCP listeners" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_agent_port" { + type = number + description = "The listener port number for the Vault Agent" +} + +variable "vault_agent_template_destination" { + type = string + description = "The destination of the template rendered by Agent" +} + +variable "vault_agent_template_contents" { + type = string + description = "The template contents to be rendered by Agent" +} + +variable "vault_root_token" { + type = string + description = "The Vault root token" +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The Vault cluster instances that were created" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +locals { + agent_listen_addr = "${var.ip_version == 4 ? 
"127.0.0.1" : "[::1]"}:${var.vault_agent_port}" +} + +resource "enos_remote_exec" "set_up_approle_auth_and_agent" { + environment = { + AGENT_LISTEN_ADDR = local.agent_listen_addr, + VAULT_ADDR = var.vault_addr, + VAULT_INSTALL_DIR = var.vault_install_dir, + VAULT_TOKEN = var.vault_root_token, + VAULT_AGENT_TEMPLATE_DESTINATION = var.vault_agent_template_destination, + VAULT_AGENT_TEMPLATE_CONTENTS = var.vault_agent_template_contents, + } + + scripts = [abspath("${path.module}/scripts/set-up-approle-and-agent.sh")] + + transport = { + ssh = { + host = var.hosts[0].public_ip + } + } +} + +output "vault_agent_listen_addr" { + description = "The vault agent listen address" + value = local.agent_listen_addr +} diff --git a/enos/modules/vault_agent/scripts/set-up-approle-and-agent.sh b/enos/modules/vault_agent/scripts/set-up-approle-and-agent.sh new file mode 100644 index 0000000..6af219a --- /dev/null +++ b/enos/modules/vault_agent/scripts/set-up-approle-and-agent.sh @@ -0,0 +1,99 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + return 1 +} + +[[ -z "$AGENT_LISTEN_ADDR" ]] && fail "AGENT_LISTEN_ADDR env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_AGENT_TEMPLATE_CONTENTS" ]] && fail "VAULT_AGENT_TEMPLATE_CONTENTS env variable has not been set" +[[ -z "$VAULT_AGENT_TEMPLATE_DESTINATION" ]] && fail "VAULT_AGENT_TEMPLATE_DESTINATION env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +# If approle was already enabled, disable it as we're about to re-enable it (the || true is so we don't fail if it doesn't already exist) +$binpath auth disable approle || true + +$binpath auth enable approle + +$binpath write auth/approle/role/agent-role secret_id_ttl=700h token_num_uses=1000 token_ttl=600h token_max_ttl=700h secret_id_num_uses=1000 + +ROLEID=$($binpath read --format=json auth/approle/role/agent-role/role-id | jq -r '.data.role_id') + +if [[ "$ROLEID" == '' ]]; then + fail "expected ROLEID to be nonempty, but it is empty" +fi + +SECRETID=$($binpath write -f --format=json auth/approle/role/agent-role/secret-id | jq -r '.data.secret_id') + +if [[ "$SECRETID" == '' ]]; then + fail "expected SECRETID to be nonempty, but it is empty" +fi + +echo "$ROLEID" > /tmp/role-id +echo "$SECRETID" > /tmp/secret-id + +cat > /tmp/vault-agent.hcl <<- EOM +pid_file = "/tmp/pidfile" + +vault { + address = "${VAULT_ADDR}" + tls_skip_verify = true + retry { + num_retries = 10 + } +} + +cache { + enforce_consistency = "always" + use_auto_auth_token = true +} + +listener "tcp" { + address = "${AGENT_LISTEN_ADDR}" + tls_disable = true +} + +template { + destination = "${VAULT_AGENT_TEMPLATE_DESTINATION}" + contents = 
"${VAULT_AGENT_TEMPLATE_CONTENTS}" + exec { + command = "pkill -F /tmp/pidfile" + } +} + +auto_auth { + method { + type = "approle" + config = { + role_id_file_path = "/tmp/role-id" + secret_id_file_path = "/tmp/secret-id" + } + } + sink { + type = "file" + config = { + path = "/tmp/token" + } + } +} +EOM + +# If Agent is still running from a previous run, kill it +pkill -F /tmp/pidfile || true + +# If the template file already exists, remove it +rm "${VAULT_AGENT_TEMPLATE_DESTINATION}" || true + +# Run agent (it will kill itself when it finishes rendering the template) +if ! $binpath agent -config=/tmp/vault-agent.hcl > /tmp/agent-logs.txt 2>&1; then + fail "failed to run vault agent: $(cat /tmp/agent-logs.txt)" +fi diff --git a/enos/modules/vault_cluster/main.tf b/enos/modules/vault_cluster/main.tf new file mode 100644 index 0000000..a70ab69 --- /dev/null +++ b/enos/modules/vault_cluster/main.tf @@ -0,0 +1,414 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + # We need to specify the provider source in each module until we publish it + # to the public registry + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.4.0" + } + } +} + +data "enos_environment" "localhost" {} + +locals { + audit_device_file_path = "/var/log/vault/vault_audit.log" + audit_socket_port = "9090" + bin_path = "${var.install_dir}/vault" + consul_bin_path = "${var.consul_install_dir}/consul" + enable_audit_devices = var.enable_audit_devices && var.initialize_cluster + disable_mlock = false + // In order to get Terraform to plan we have to use collections with keys + // that are known at plan time. In order for our module to work our var.hosts + // must be a map with known keys at plan time. Here we're creating locals + // that keep track of index values that point to our target hosts. 
+ followers = toset(slice(local.instances, 1, length(local.instances))) + instances = [for idx in range(length(var.hosts)) : tostring(idx)] + key_shares = { + "awskms" = null + "shamir" = 5 + "pkcs11" = null + } + key_threshold = { + "awskms" = null + "shamir" = 3 + "pkcs11" = null + } + leader = toset(slice(local.instances, 0, 1)) + netcat_command = { + amzn = "nc" + opensuse-leap = "netcat" + rhel = "nc" + sles = "nc" + ubuntu = "netcat" + } + recovery_shares = { + "awskms" = 5 + "shamir" = null + "pkcs11" = 5 + } + recovery_threshold = { + "awskms" = 3 + "shamir" = null + "pkcs11" = 3 + } + vault_service_user = "vault" +} + +resource "enos_host_info" "hosts" { + for_each = var.hosts + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +resource "enos_bundle_install" "consul" { + for_each = { + for idx, host in var.hosts : idx => var.hosts[idx] + if var.storage_backend == "consul" + } + + destination = var.consul_install_dir + release = merge(var.consul_release, { product = "consul" }) + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +# We run install_packages before we install Vault because for some combinations of +# certain Linux distros and artifact types (e.g. SLES and RPM packages), there may +# be packages that are required to perform Vault installation (e.g. openssl). +module "install_packages" { + source = "../install_packages" + + hosts = var.hosts + packages = var.packages +} + +resource "enos_bundle_install" "vault" { + for_each = var.hosts + depends_on = [ + module.install_packages, // Don't race for the package manager locks with install_packages + ] + + destination = var.install_dir + release = var.release == null ? 
var.release : merge({ product = "vault" }, var.release) + artifactory = var.artifactory_release + path = var.local_artifact_path + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +resource "enos_consul_start" "consul" { + for_each = enos_bundle_install.consul + + bin_path = local.consul_bin_path + data_dir = var.consul_data_dir + config = { + # GetPrivateInterfaces is a go-sockaddr template that helps Consul get the correct + # addr in all of our default cases. This is required in the case of Amazon Linux, + # because amzn has a default docker listener that will make Consul try to use the + # incorrect addr. + bind_addr = "{{ GetPrivateInterfaces | include \"type\" \"IP\" | sort \"default\" | limit 1 | attr \"address\"}}" + data_dir = var.consul_data_dir + datacenter = "dc1" + retry_join = ["provider=aws tag_key=${var.backend_cluster_tag_key} tag_value=${var.backend_cluster_name}"] + server = false + bootstrap_expect = 0 + license = var.consul_license + log_level = var.consul_log_level + log_file = var.consul_log_file + } + license = var.consul_license + unit_name = "consul" + username = "consul" + + transport = { + ssh = { + host = var.hosts[each.key].public_ip + } + } +} + +module "start_vault" { + source = "../start_vault" + + depends_on = [ + enos_consul_start.consul, + module.install_packages, + enos_bundle_install.vault, + ] + + cluster_name = var.cluster_name + cluster_port = var.cluster_port + cluster_tag_key = var.cluster_tag_key + config_dir = var.config_dir + config_mode = var.config_mode + disable_mlock = local.disable_mlock + external_storage_port = var.external_storage_port + hosts = var.hosts + install_dir = var.install_dir + ip_version = var.ip_version + license = var.license + listener_port = var.listener_port + log_level = var.log_level + manage_service = var.manage_service + seal_attributes = var.seal_attributes + seal_attributes_secondary = var.seal_attributes_secondary + seal_type = var.seal_type + seal_type_secondary = 
var.seal_type_secondary + service_username = local.vault_service_user + storage_backend = var.storage_backend + storage_backend_attrs = var.storage_backend_addl_config + storage_node_prefix = var.storage_node_prefix +} + +resource "enos_vault_init" "leader" { + depends_on = [ + module.start_vault, + ] + for_each = toset([ + for idx, leader in local.leader : leader + if var.initialize_cluster + ]) + + bin_path = local.bin_path + vault_addr = module.start_vault.leader[0].config.api_addr + + key_shares = local.key_shares[var.seal_type] + key_threshold = local.key_threshold[var.seal_type] + + recovery_shares = local.recovery_shares[var.seal_type] + recovery_threshold = local.recovery_threshold[var.seal_type] + + transport = { + ssh = { + host = var.hosts[each.value].public_ip + } + } +} + +resource "enos_vault_unseal" "leader" { + depends_on = [ + module.start_vault, + enos_vault_init.leader, + ] + for_each = enos_vault_init.leader // only unseal the leader if we initialized it + + bin_path = local.bin_path + vault_addr = module.start_vault.leader[each.key].config.api_addr + seal_type = var.seal_type + unseal_keys = var.seal_type != "shamir" ? null : coalesce(var.shamir_unseal_keys, enos_vault_init.leader[0].unseal_keys_hex) + + transport = { + ssh = { + host = var.hosts[tolist(local.leader)[0]].public_ip + } + } +} + +resource "enos_vault_unseal" "followers" { + depends_on = [ + enos_vault_init.leader, + enos_vault_unseal.leader, + ] + // Only unseal followers if we're not using an auto-unseal method and we've + // initialized the cluster + for_each = toset([ + for idx, follower in local.followers : follower + if var.seal_type == "shamir" && var.initialize_cluster + ]) + + bin_path = local.bin_path + vault_addr = module.start_vault.followers[each.key].config.api_addr + seal_type = var.seal_type + unseal_keys = var.seal_type != "shamir" ? 
null : coalesce(var.shamir_unseal_keys, enos_vault_init.leader[0].unseal_keys_hex) + + transport = { + ssh = { + host = var.hosts[each.value].public_ip + } + } +} + +// Force unseal the cluster. This is used if the vault-cluster module is used +// to add additional nodes to a cluster via auto-pilot, or some other means. +// When that happens we'll want to set initialize_cluster to false and +// force_unseal to true. +resource "enos_vault_unseal" "maybe_force_unseal" { + depends_on = [ + module.start_vault.followers, + ] + for_each = { + for idx, host in var.hosts : idx => host + if var.force_unseal && !var.initialize_cluster + } + + bin_path = local.bin_path + vault_addr = module.start_vault.api_addr_localhost + seal_type = var.seal_type + unseal_keys = coalesce( + var.shamir_unseal_keys, + try(enos_vault_init.leader[0].unseal_keys_hex, null), + ) + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +# Add the vault install location to the PATH and set up VAULT_ADDR and VAULT_TOKEN environement +# variables in the login shell so we don't have to do it if/when we login in to a cluster node. +resource "enos_remote_exec" "configure_login_shell_profile" { + depends_on = [ + enos_vault_init.leader, + enos_vault_unseal.leader, + ] + for_each = var.hosts + + environment = { + VAULT_ADDR = module.start_vault.api_addr_localhost + VAULT_TOKEN = var.root_token != null ? var.root_token : try(enos_vault_init.leader[0].root_token, "_") + VAULT_INSTALL_DIR = var.install_dir + } + + scripts = [abspath("${path.module}/scripts/set-up-login-shell-profile.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +# Add a motd to assist people that might be logging in. 
+resource "enos_file" "motd" { + depends_on = [ + enos_remote_exec.configure_login_shell_profile + ] + for_each = var.hosts + + destination = "/etc/motd" + content = <&2 + exit 1 +} + +[[ -z "$LOG_FILE_PATH" ]] && fail "LOG_FILE_PATH env variable has not been set" +[[ -z "$SERVICE_USER" ]] && fail "SERVICE_USER env variable has not been set" + +LOG_DIR=$(dirname "$LOG_FILE_PATH") + +function retry { + local retries=$1 + shift + local count=0 + + until "$@"; do + exit=$? + wait=10 + count=$((count + 1)) + + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + return "$exit" + fi + done + + return 0 +} + +retry 7 id -a "$SERVICE_USER" + +sudo mkdir -p "$LOG_DIR" +sudo chown -R "$SERVICE_USER":"$SERVICE_USER" "$LOG_DIR" diff --git a/enos/modules/vault_cluster/scripts/enable-audit-devices.sh b/enos/modules/vault_cluster/scripts/enable-audit-devices.sh new file mode 100644 index 0000000..a93bd55 --- /dev/null +++ b/enos/modules/vault_cluster/scripts/enable-audit-devices.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +set -exo pipefail + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$IP_VERSION" ]] && fail "IP_VERSION env variable has not been set" +[[ -z "$LOG_FILE_PATH" ]] && fail "LOG_FILE_PATH env variable has not been set" +[[ -z "$SOCKET_PORT" ]] && fail "SOCKET_PORT env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_BIN_PATH" ]] && fail "VAULT_BIN_PATH env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +enable_file_audit_device() { + $VAULT_BIN_PATH audit enable file file_path="$LOG_FILE_PATH" +} + +enable_syslog_audit_device() { + $VAULT_BIN_PATH audit enable syslog tag="vault" facility="AUTH" +} + +enable_socket_audit_device() { + if [ "$IP_VERSION" = "4" ]; then + "$VAULT_BIN_PATH" audit enable socket address="127.0.0.1:$SOCKET_PORT" + else + "$VAULT_BIN_PATH" audit enable socket address="[::1]:$SOCKET_PORT" + fi +} + +main() { + if ! enable_file_audit_device; then + fail "Failed to enable vault file audit device" + fi + + if ! enable_syslog_audit_device; then + fail "Failed to enable vault syslog audit device" + fi + + if ! enable_socket_audit_device; then + local log + log=$(cat /tmp/vault-socket.log) + fail "Failed to enable vault socket audit device: listener log: $log" + fi + + return 0 +} + +main diff --git a/enos/modules/vault_cluster/scripts/set-up-login-shell-profile.sh b/enos/modules/vault_cluster/scripts/set-up-login-shell-profile.sh new file mode 100644 index 0000000..f3a42d2 --- /dev/null +++ b/enos/modules/vault_cluster/scripts/set-up-login-shell-profile.sh @@ -0,0 +1,57 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +# Determine the profile file we should write to. We only want to affect login shells and bash will +# only read one of these in ordered of precendence. +determineProfileFile() { + if [ -f "$HOME/.bash_profile" ]; then + printf "%s/.bash_profile\n" "$HOME" + return 0 + fi + + if [ -f "$HOME/.bash_login" ]; then + printf "%s/.bash_login\n" "$HOME" + return 0 + fi + + printf "%s/.profile\n" "$HOME" +} + +appendVaultProfileInformation() { + tee -a "$1" <<< "export PATH=$PATH:$VAULT_INSTALL_DIR +export VAULT_ADDR=$VAULT_ADDR +export VAULT_TOKEN=$VAULT_TOKEN" +} + +main() { + local profile_file + if ! profile_file=$(determineProfileFile); then + fail "failed to determine login shell profile file location" + fi + + # If vault_cluster is used more than once, eg: autopilot or replication, this module can + # be called more than once. Short ciruit here if our profile is already set up. + if grep VAULT_ADDR < "$profile_file"; then + exit 0 + fi + + if ! appendVaultProfileInformation "$profile_file"; then + fail "failed to write vault configuration to login shell profile" + fi + + exit 0 +} + +main diff --git a/enos/modules/vault_cluster/scripts/start-audit-socket-listener.sh b/enos/modules/vault_cluster/scripts/start-audit-socket-listener.sh new file mode 100644 index 0000000..9c714a3 --- /dev/null +++ b/enos/modules/vault_cluster/scripts/start-audit-socket-listener.sh @@ -0,0 +1,92 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +set -exo pipefail + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$IP_VERSION" ]] && fail "IP_VERSION env variable has not been set" +[[ -z "$NETCAT_COMMAND" ]] && fail "NETCAT_COMMAND env variable has not been set" +[[ -z "$SOCKET_PORT" ]] && fail "SOCKET_PORT env variable has not been set" + +if [ "$IP_VERSION" = "4" ]; then + export SOCKET_ADDR="127.0.0.1" +else + export SOCKET_ADDR="::1" +fi + +socket_listener_procs() { + pgrep -x "${NETCAT_COMMAND}" +} + +kill_socket_listener() { + pkill "${NETCAT_COMMAND}" +} + +test_socket_listener() { + case $IP_VERSION in + 4) + "${NETCAT_COMMAND}" -zvw 2 "${SOCKET_ADDR}" "$SOCKET_PORT" < /dev/null + ;; + 6) + "${NETCAT_COMMAND}" -6 -zvw 2 "${SOCKET_ADDR}" "$SOCKET_PORT" < /dev/null + ;; + *) + fail "unknown IP_VERSION: $IP_VERSION" + ;; + esac +} + +start_socket_listener() { + if socket_listener_procs; then + test_socket_listener + return $? + fi + + # Run nc to listen on port 9090 for the socket auditor. We spawn nc + # with nohup to ensure that the listener doesn't expect a SIGHUP and + # thus block the SSH session from exiting or terminating on exit. + case $IP_VERSION in + 4) + nohup nc -kl "$SOCKET_PORT" >> /tmp/vault-socket.log 2>&1 < /dev/null & + ;; + 6) + nohup nc -6 -kl "$SOCKET_PORT" >> /tmp/vault-socket.log 2>&1 < /dev/null & + ;; + *) + fail "unknown IP_VERSION: $IP_VERSION" + ;; + esac +} + +read_log() { + local f + f=/tmp/vault-socket.log + [[ -f "$f" ]] && cat "$f" +} + +main() { + if socket_listener_procs; then + # Clean up old nc's that might not be working + kill_socket_listener + fi + + if ! start_socket_listener; then + fail "Failed to start audit socket listener: socket listener log: $(read_log)" + fi + + # wait for nc to listen + sleep 1 + + if ! 
test_socket_listener; then + fail "Error testing socket listener: socket listener log: $(read_log)" + fi + + return 0 +} + +main diff --git a/enos/modules/vault_cluster/variables.tf b/enos/modules/vault_cluster/variables.tf new file mode 100644 index 0000000..1e4de12 --- /dev/null +++ b/enos/modules/vault_cluster/variables.tf @@ -0,0 +1,291 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +variable "artifactory_release" { + type = object({ + username = string + token = string + url = string + sha256 = string + }) + description = "The Artifactory release information to install Vault artifacts from Artifactory" + default = null +} + +variable "backend_cluster_name" { + type = string + description = "The name of the backend cluster" + default = null +} + +variable "backend_cluster_tag_key" { + type = string + description = "The tag key for searching for backend nodes" + default = null +} + +variable "cluster_name" { + type = string + description = "The Vault cluster name" + default = null +} + +variable "cluster_port" { + type = number + description = "The cluster port for Vault to listen on" + default = 8201 +} + +variable "cluster_tag_key" { + type = string + description = "The Vault cluster tag key" + default = "retry_join" +} + +variable "config_dir" { + type = string + description = "The directory to use for Vault configuration" + default = "/etc/vault.d" +} + +variable "config_mode" { + description = "The method to use when configuring Vault. When set to 'env' we will configure Vault using VAULT_ style environment variables if possible. When 'file' we'll use the HCL configuration file for all configuration options." + default = "file" + + validation { + condition = contains(["env", "file"], var.config_mode) + error_message = "The config_mode must be either 'env' or 'file'. No other configuration modes are supported." 
+ } +} + +variable "config_env_vars" { + description = "Optional Vault configuration environment variables to set starting Vault" + type = map(string) + default = null +} + +variable "consul_data_dir" { + type = string + description = "The directory where the consul will store data" + default = "/opt/consul/data" +} + +variable "consul_install_dir" { + type = string + description = "The directory where the consul binary will be installed" + default = "/opt/consul/bin" +} + +variable "consul_license" { + type = string + sensitive = true + description = "The consul enterprise license" + default = null +} + +variable "consul_log_file" { + type = string + description = "The file where the consul will write log output" + default = "/var/log/consul.log" +} + +variable "consul_log_level" { + type = string + description = "The consul service log level" + default = "info" + + validation { + condition = contains(["trace", "debug", "info", "warn", "error"], var.consul_log_level) + error_message = "The consul_log_level must be one of 'trace', 'debug', 'info', 'warn', or 'error'." 
+ } +} + +variable "consul_release" { + type = object({ + version = string + edition = string + }) + description = "Consul release version and edition to install from releases.hashicorp.com" + default = { + version = "1.15.1" + edition = "ce" + } +} + +variable "distro_version" { + type = string + description = "The Linux distro version" + default = null +} + +variable "enable_audit_devices" { + description = "If true every audit device will be enabled" + type = bool + default = true +} + +variable "external_storage_port" { + type = number + description = "The port to connect to when using external storage" + default = 8500 +} + +variable "force_unseal" { + type = bool + description = "Always unseal the Vault cluster even if we're not initializing it" + default = false +} + +variable "hosts" { + description = "The target machines host addresses to use for the Vault cluster" + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) +} + +variable "initialize_cluster" { + type = bool + description = "Initialize the Vault cluster" + default = true +} + +variable "install_dir" { + type = string + description = "The directory where the Vault binary will be installed" + default = "/opt/vault/bin" +} + +variable "ip_version" { + type = number + description = "The IP version to use for the Vault TCP listeners" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } +} + +variable "license" { + type = string + sensitive = true + description = "The value of the Vault license" + default = null +} + +variable "listener_port" { + type = number + description = "The port for Vault to listen on" + default = 8200 +} + +variable "local_artifact_path" { + type = string + description = "The path to a locally built vault artifact to install. 
It can be a zip archive, RPM, or Debian package" + default = null +} + +variable "log_level" { + type = string + description = "The vault service log level" + default = "info" + + validation { + condition = contains(["trace", "debug", "info", "warn", "error"], var.log_level) + error_message = "The log_level must be one of 'trace', 'debug', 'info', 'warn', or 'error'." + } +} + +variable "manage_service" { + type = bool + description = "Manage the Vault service users and systemd unit. Disable this to use configuration in RPM and Debian packages" + default = true +} + +variable "packages" { + type = list(string) + description = "A list of packages to install via the target host package manager" + default = [] +} + +variable "release" { + type = object({ + version = string + edition = string + }) + description = "Vault release version and edition to install from releases.hashicorp.com" + default = null +} + +variable "root_token" { + type = string + description = "The Vault root token that we can use to initialize and configure the cluster" + default = null +} + +variable "seal_ha_beta" { + description = "Enable using Seal HA on clusters that meet minimum version requirements and are enterprise editions" + default = true +} + +variable "seal_attributes" { + description = "The auto-unseal device attributes" + default = null +} + +variable "seal_attributes_secondary" { + description = "The secondary auto-unseal device attributes" + default = null +} + +variable "seal_type" { + type = string + description = "The primary seal device type" + default = "awskms" + + validation { + condition = contains(["awskms", "pkcs11", "shamir"], var.seal_type) + error_message = "The seal_type must be either 'awskms', 'pkcs11', or 'shamir'. No other seal types are supported." + } +} + +variable "seal_type_secondary" { + type = string + description = "A secondary HA seal device type. 
Only supported in Vault Enterprise >= 1.15" + default = "none" + + validation { + condition = contains(["awskms", "none", "pkcs11"], var.seal_type_secondary) + error_message = "The secondary_seal_type must be 'awskms', 'none', or 'pkcs11'. No other secondary seal types are supported." + } +} + +variable "shamir_unseal_keys" { + type = list(string) + description = "Shamir unseal keys. Often only used adding additional nodes to an already initialized cluster." + default = null +} + +variable "storage_backend" { + type = string + description = "The storage backend to use" + default = "raft" + + validation { + condition = contains(["raft", "consul"], var.storage_backend) + error_message = "The storage_backend must be either raft or consul. No other storage backends are supported." + } +} + +variable "storage_backend_addl_config" { + type = map(any) + description = "An optional set of key value pairs to inject into the storage block" + default = {} +} + +variable "storage_node_prefix" { + type = string + description = "A prefix to use for each node in the Vault storage configuration" + default = "node" +} diff --git a/enos/modules/vault_failover_demote_dr_primary/main.tf b/enos/modules/vault_failover_demote_dr_primary/main.tf new file mode 100644 index 0000000..8193370 --- /dev/null +++ b/enos/modules/vault_failover_demote_dr_primary/main.tf @@ -0,0 +1,63 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "ip_version" { + type = number + description = "The IP version used for the Vault TCP listener" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } +} + +variable "primary_leader_host" { + type = object({ + ipv6 = string + private_ip = string + public_ip = string + }) + description = "The primary cluster leader host" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +locals { +} + +resource "enos_remote_exec" "demote_dr_primary" { + environment = { + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + } + + inline = ["${var.vault_install_dir}/vault write -f sys/replication/dr/primary/demote"] + + + transport = { + ssh = { + host = var.primary_leader_host.public_ip + } + } +} diff --git a/enos/modules/vault_failover_promote_dr_secondary/main.tf b/enos/modules/vault_failover_promote_dr_secondary/main.tf new file mode 100644 index 0000000..8538253 --- /dev/null +++ b/enos/modules/vault_failover_promote_dr_secondary/main.tf @@ -0,0 +1,69 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "ip_version" { + type = number + description = "The IP version used for the Vault TCP listener" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } +} + +variable "secondary_leader_host" { + type = object({ + ipv6 = string + private_ip = string + public_ip = string + }) + description = "The secondary cluster leader host" +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +variable "dr_operation_token" { + type = string + description = "The wrapping token created on primary cluster" +} + +locals { + dr_operation_token = var.dr_operation_token +} + +resource "enos_remote_exec" "promote_dr_secondary" { + environment = { + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + } + + inline = ["${var.vault_install_dir}/vault write -f sys/replication/dr/secondary/promote dr_operation_token=${local.dr_operation_token}"] + + + transport = { + ssh = { + host = var.secondary_leader_host.public_ip + } + } +} diff --git a/enos/modules/vault_failover_update_dr_primary/main.tf b/enos/modules/vault_failover_update_dr_primary/main.tf new file mode 100644 index 0000000..cc159f2 --- /dev/null +++ b/enos/modules/vault_failover_update_dr_primary/main.tf @@ -0,0 +1,76 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "ip_version" { + type = number + description = "The IP version used for the Vault TCP listener" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } +} + +variable "secondary_leader_host" { + type = object({ + ipv6 = string + private_ip = string + public_ip = string + }) + description = "The secondary cluster leader host" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" + +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +variable "dr_operation_token" { + type = string + description = "The wrapping token created on primary cluster" +} + +variable "wrapping_token" { + type = string + description = "The wrapping token created on primary cluster" +} + +locals { + dr_operation_token = var.dr_operation_token + wrapping_token = var.wrapping_token +} + +resource "enos_remote_exec" "update_dr_primary" { + environment = { + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + } + + inline = ["${var.vault_install_dir}/vault write sys/replication/dr/secondary/update-primary dr_operation_token=${local.dr_operation_token} token=${local.wrapping_token}"] + + + transport = { + ssh = { + host = var.secondary_leader_host.public_ip + } + } +} diff --git a/enos/modules/vault_get_cluster_ips/main.tf b/enos/modules/vault_get_cluster_ips/main.tf new file mode 100644 index 0000000..ef31018 --- /dev/null +++ b/enos/modules/vault_get_cluster_ips/main.tf @@ -0,0 +1,185 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +/* + +Given our expected hosts, determine which is currently the leader and verify that all expected +nodes are either the leader or a follower. + +*/ + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The Vault cluster hosts that are expected to be in the cluster" +} + +variable "ip_version" { + type = number + description = "The IP version used for the Vault TCP listener" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +locals { + follower_hosts_list = [ + for idx in range(length(var.hosts)) : var.hosts[idx] if var.ip_version == 6 ? + contains(tolist(local.follower_ipv6s), var.hosts[idx].ipv6) : + contains(tolist(local.follower_private_ips), var.hosts[idx].private_ip) + ] + follower_hosts = { + for idx in range(local.host_count - 1) : idx => try(local.follower_hosts_list[idx], null) + } + follower_ipv6s = jsondecode(enos_remote_exec.follower_ipv6s.stdout) + follower_private_ips = jsondecode(enos_remote_exec.follower_private_ipv4s.stdout) + follower_public_ips = [for host in local.follower_hosts : host.public_ip] + host_count = length(var.hosts) + ipv6s = [for k, v in values(tomap(var.hosts)) : tostring(v["ipv6"])] + leader_host_list = [ + for idx in range(length(var.hosts)) : var.hosts[idx] if var.ip_version == 6 ? 
+ var.hosts[idx].ipv6 == local.leader_ipv6 : + var.hosts[idx].private_ip == local.leader_private_ip + ] + leader_host = try(local.leader_host_list[0], null) + leader_ipv6 = trimspace(enos_remote_exec.leader_ipv6.stdout) + leader_private_ip = trimspace(enos_remote_exec.leader_private_ipv4.stdout) + leader_public_ip = try(local.leader_host.public_ip, null) + private_ips = [for k, v in values(tomap(var.hosts)) : tostring(v["private_ip"])] +} + +resource "enos_remote_exec" "leader_private_ipv4" { + environment = { + IP_VERSION = var.ip_version + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_TOKEN = var.vault_root_token + } + + scripts = [abspath("${path.module}/scripts/get-leader-ipv4.sh")] + + transport = { + ssh = { + host = var.hosts[0].public_ip + } + } +} + +resource "enos_remote_exec" "leader_ipv6" { + environment = { + IP_VERSION = var.ip_version + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_TOKEN = var.vault_root_token + } + + scripts = [abspath("${path.module}/scripts/get-leader-ipv6.sh")] + + transport = { + ssh = { + host = var.hosts[0].public_ip + } + } +} + +resource "enos_remote_exec" "follower_private_ipv4s" { + environment = { + IP_VERSION = var.ip_version + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_LEADER_PRIVATE_IP = local.leader_private_ip + VAULT_PRIVATE_IPS = jsonencode(local.private_ips) + VAULT_TOKEN = var.vault_root_token + } + + scripts = [abspath("${path.module}/scripts/get-follower-ipv4s.sh")] + + transport = { + ssh = { + host = var.hosts[0].public_ip + } + } +} + +resource "enos_remote_exec" "follower_ipv6s" { + environment = { + IP_VERSION = var.ip_version + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_IPV6S = jsonencode(local.ipv6s) + VAULT_LEADER_IPV6 = local.leader_ipv6 + VAULT_TOKEN = var.vault_root_token + } + + scripts = [abspath("${path.module}/scripts/get-follower-ipv6s.sh")] + + transport = 
{ + ssh = { + host = var.hosts[0].public_ip + } + } +} + +output "follower_hosts" { + value = local.follower_hosts +} + +output "follower_ipv6s" { + value = local.follower_ipv6s +} + +output "follower_private_ips" { + value = local.follower_private_ips +} + +output "follower_public_ips" { + value = local.follower_public_ips +} + +output "leader_host" { + value = local.leader_host +} + +output "leader_hosts" { + value = { 0 : local.leader_host } +} + +output "leader_ipv6" { + value = local.leader_ipv6 +} + +output "leader_private_ip" { + value = local.leader_private_ip +} + +output "leader_public_ip" { + value = local.leader_public_ip +} diff --git a/enos/modules/vault_get_cluster_ips/scripts/get-follower-ipv4s.sh b/enos/modules/vault_get_cluster_ips/scripts/get-follower-ipv4s.sh new file mode 100644 index 0000000..51f3b76 --- /dev/null +++ b/enos/modules/vault_get_cluster_ips/scripts/get-follower-ipv4s.sh @@ -0,0 +1,85 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +function fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$IP_VERSION" ]] && fail "IP_VERSION env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "Unable to locate vault binary at $binpath" + +getFollowerPrivateIPsFromOperatorMembers() { + if members=$($binpath operator members -format json); then + if followers=$(echo "$members" | jq -e --argjson expected "$VAULT_PRIVATE_IPS" -c '.Nodes | map(select(any(.; .active_node==false)) | .api_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")) as $followers | $expected - ($expected - $followers)'); then + # Make sure that we got all the followers + if jq -e --argjson expected "$VAULT_PRIVATE_IPS" --argjson followers 
"$followers" -ne '$expected | length as $el | $followers | length as $fl | $fl == $el-1' > /dev/null; then
+        echo "$followers"
+        return 0
+      fi
+    fi
+  fi
+
+  return 1
+}
+
+removeIP() {
+  local needle
+  local haystack
+  needle=$1
+  haystack=$2
+  if remain=$(jq -e --arg ip "$needle" -c '. | map(select(.!=$ip))' <<< "$haystack"); then
+    if [[ -n "$remain" ]]; then
+      echo "$remain"
+      return 0
+    fi
+  fi
+
+  return 1
+}
+
+count=0
+retries=10
+while :; do
+  case $IP_VERSION in
+    4)
+      [[ -z "$VAULT_PRIVATE_IPS" ]] && fail "VAULT_PRIVATE_IPS env variable has not been set"
+      [[ -z "$VAULT_LEADER_PRIVATE_IP" ]] && fail "VAULT_LEADER_PRIVATE_IP env variable has not been set"
+
+      # Vault >= 1.10.x has the operator members. If we have that then we'll use it.
+      if $binpath operator -h 2>&1 | grep members &> /dev/null; then
+        if followers=$(getFollowerPrivateIPsFromOperatorMembers); then
+          echo "$followers"
+          exit 0
+        fi
+      else
+        removeIP "$VAULT_LEADER_PRIVATE_IP" "$VAULT_PRIVATE_IPS"
+
+        # We're at the top level of the script, not inside a function, so
+        # 'return' is invalid here ('exit' matches get-follower-ipv6s.sh).
+        exit $?
+      fi
+      ;;
+    6)
+      echo '[]'
+      exit 0
+      ;;
+    *)
+      fail "unknown IP_VERSION: $IP_VERSION"
+      ;;
+  esac
+
+  wait=$((2 ** count))
+  count=$((count + 1))
+  if [ "$count" -lt "$retries" ]; then
+    sleep "$wait"
+  else
+    fail "Timed out trying to obtain the cluster followers"
+  fi
+done
diff --git a/enos/modules/vault_get_cluster_ips/scripts/get-follower-ipv6s.sh b/enos/modules/vault_get_cluster_ips/scripts/get-follower-ipv6s.sh
new file mode 100644
index 0000000..f51247b
--- /dev/null
+++ b/enos/modules/vault_get_cluster_ips/scripts/get-follower-ipv6s.sh
@@ -0,0 +1,87 @@
+#!/usr/bin/env bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1 + +set -e + +function fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$IP_VERSION" ]] && fail "IP_VERSION env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +echo "$VAULT_IPV6S" > /tmp/vaultipv6s + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "Unable to locate vault binary at $binpath" + +getFollowerIPV6sFromOperatorMembers() { + if members=$($binpath operator members -format json); then + if followers=$(echo "$members" | jq -e --argjson expected "$VAULT_IPV6S" -c '.Nodes | map(select(any(.; .active_node==false)) | .api_address | scan("\\[(.+)\\]") | .[0]) as $followers | $expected - ($expected - $followers)'); then + # Make sure that we got all the followers + if jq -e --argjson expected "$VAULT_IPV6S" --argjson followers "$followers" -ne '$expected | length as $el | $followers | length as $fl | $fl == $el-1' > /dev/null; then + echo "$followers" + return 0 + fi + fi + fi + + return 1 +} + +removeIP() { + local needle + local haystack + needle=$1 + haystack=$2 + if remain=$(jq -e --arg ip "$needle" -c '. | map(select(.!=$ip))' <<< "$haystack"); then + if [[ -n "$remain" ]]; then + echo "$remain" + return 0 + fi + fi + + return 1 +} + +count=0 +retries=10 +while :; do + case $IP_VERSION in + 4) + echo "[]" + exit 0 + ;; + 6) + [[ -z "$VAULT_IPV6S" ]] && fail "VAULT_IPV6S env variable has not been set" + [[ -z "$VAULT_LEADER_IPV6" ]] && fail "VAULT_LEADER_IPV6 env variable has not been set" + + # Vault >= 1.10.x has the operator members. If we have that then we'll use it. 
+ if $binpath operator -h 2>&1 | grep members &> /dev/null; then + if followers=$(getFollowerIPV6sFromOperatorMembers); then + echo "$followers" + exit 0 + fi + else + [[ -z "$VAULT_LEADER_IPV6" ]] && fail "VAULT_LEADER_IPV6 env variable has not been set" + removeIP "$VAULT_LEADER_IPV6" "$VAULT_IPV6S" + exit $? + fi + ;; + *) + fail "unknown IP_VERSION: $IP_VERSION" + ;; + esac + + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + fail "Timed out trying to obtain the cluster followers" + fi +done diff --git a/enos/modules/vault_get_cluster_ips/scripts/get-leader-ipv4.sh b/enos/modules/vault_get_cluster_ips/scripts/get-leader-ipv4.sh new file mode 100644 index 0000000..f5697a9 --- /dev/null +++ b/enos/modules/vault_get_cluster_ips/scripts/get-leader-ipv4.sh @@ -0,0 +1,66 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +function fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$IP_VERSION" ]] && fail "IP_VERSION env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "Unable to locate vault binary at $binpath" + +findLeaderPrivateIP() { + # Find the leader private IP address + if ip=$($binpath read sys/leader -format=json | jq -r '.data.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")'); then + if [[ -n "$ip" ]]; then + echo "$ip" + return 0 + fi + fi + + # Some older versions of vault don't support reading sys/leader. Try falling back to the cli status. 
+ if ip=$($binpath status -format json | jq -r '.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")'); then + if [[ -n "$ip" ]]; then + echo "$ip" + return 0 + fi + fi + + return 1 +} + +count=0 +retries=5 +while :; do + case $IP_VERSION in + 4) + # Find the leader private IP address + if ip=$(findLeaderPrivateIP); then + echo "$ip" + exit 0 + fi + ;; + 6) + exit 0 + ;; + *) + fail "unknown IP_VERSION: $IP_VERSION" + ;; + esac + + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + fail "Timed out trying to obtain the cluster leader" + fi +done diff --git a/enos/modules/vault_get_cluster_ips/scripts/get-leader-ipv6.sh b/enos/modules/vault_get_cluster_ips/scripts/get-leader-ipv6.sh new file mode 100644 index 0000000..d5d5a45 --- /dev/null +++ b/enos/modules/vault_get_cluster_ips/scripts/get-leader-ipv6.sh @@ -0,0 +1,66 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +function fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$IP_VERSION" ]] && fail "IP_VERSION env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "Unable to locate vault binary at $binpath" + +findLeaderIPV6() { + # Find the leader private IP address + if ip=$($binpath read sys/leader -format=json | jq -r '.data.leader_address | scan("\\[(.+)\\]") | .[0]'); then + if [[ -n "$ip" ]]; then + echo "$ip" + return 0 + fi + fi + + # Some older versions of vault don't support reading sys/leader. Try falling back to the cli status. 
+ if ip=$($binpath status -format json | jq -r '.leader_address | scan("\\[(.+)\\]") | .[0]'); then + if [[ -n "$ip" ]]; then + echo "$ip" + return 0 + fi + fi + + return 1 +} + +count=0 +retries=5 +while :; do + # Find the leader private IP address + case $IP_VERSION in + 4) + exit 0 + ;; + 6) + if ip=$(findLeaderIPV6); then + echo "$ip" + exit 0 + fi + ;; + *) + fail "unknown IP_VERSION: $IP_VERSION" + ;; + esac + + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + fail "Timed out trying to obtain the cluster leader" + fi +done diff --git a/enos/modules/vault_proxy/main.tf b/enos/modules/vault_proxy/main.tf new file mode 100644 index 0000000..b69b052 --- /dev/null +++ b/enos/modules/vault_proxy/main.tf @@ -0,0 +1,100 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + } + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The Vault cluster instances that were created" +} + +variable "ip_version" { + type = number + description = "The IP version to use for the Vault TCP listeners" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_proxy_pidfile" { + type = string + description = "The filepath where the Vault Proxy pid file is kept" + default = "/tmp/pidfile" +} + +variable "vault_proxy_port" { + type = number + description = "The Vault Proxy listener port" +} + +variable "vault_root_token" { + type = string + description = "The Vault root token" +} 
+ +locals { + vault_proxy_address = "${var.ip_version == 4 ? "127.0.0.1" : "[::1]"}:${var.vault_proxy_port}" +} + +resource "enos_remote_exec" "set_up_approle_auth_and_proxy" { + environment = { + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_PROXY_ADDRESS = local.vault_proxy_address + VAULT_PROXY_PIDFILE = var.vault_proxy_pidfile + VAULT_TOKEN = var.vault_root_token + } + + scripts = [abspath("${path.module}/scripts/set-up-approle-and-proxy.sh")] + + transport = { + ssh = { + host = var.hosts[0].public_ip + } + } +} + +resource "enos_remote_exec" "use_proxy" { + environment = { + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_PROXY_PIDFILE = var.vault_proxy_pidfile + VAULT_PROXY_ADDRESS = local.vault_proxy_address + } + + scripts = [abspath("${path.module}/scripts/use-proxy.sh")] + + transport = { + ssh = { + host = var.hosts[0].public_ip + } + } + + depends_on = [ + enos_remote_exec.set_up_approle_auth_and_proxy + ] +} diff --git a/enos/modules/vault_proxy/scripts/set-up-approle-and-proxy.sh b/enos/modules/vault_proxy/scripts/set-up-approle-and-proxy.sh new file mode 100644 index 0000000..a4be7e8 --- /dev/null +++ b/enos/modules/vault_proxy/scripts/set-up-approle-and-proxy.sh @@ -0,0 +1,86 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +set -e + +binpath=${VAULT_INSTALL_DIR}/vault + +fail() { + echo "$1" 1>&2 + return 1 +} + +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +# If approle was already enabled, disable it as we're about to re-enable it (the || true is so we don't fail if it doesn't already exist) +$binpath auth disable approle || true + +$binpath auth enable approle + +$binpath write auth/approle/role/proxy-role secret_id_ttl=700h token_num_uses=1000 token_ttl=600h token_max_ttl=700h secret_id_num_uses=1000 + +ROLEID=$($binpath read --format=json auth/approle/role/proxy-role/role-id | jq -r '.data.role_id') + +if [[ "$ROLEID" == '' ]]; then + fail "expected ROLEID to be nonempty, but it is empty" +fi + +SECRETID=$($binpath write -f --format=json auth/approle/role/proxy-role/secret-id | jq -r '.data.secret_id') + +if [[ "$SECRETID" == '' ]]; then + fail "vault write -f --format=json auth/approle/role/proxy-role/secret-id did not return a .data.secret_id" +fi + +echo "$ROLEID" > /tmp/role-id +echo "$SECRETID" > /tmp/secret-id + +# Write the Vault Proxy's configuration to /tmp/vault-proxy.hcl +# The Proxy references the Vault server address passed in as $VAULT_ADDR +# The Proxy itself listens at the address passed in as $VAULT_PROXY_ADDRESS +cat > /tmp/vault-proxy.hcl <<- EOM +pid_file = "${VAULT_PROXY_PIDFILE}" + +vault { + address = "${VAULT_ADDR}" + tls_skip_verify = true + retry { + num_retries = 10 + } +} + +api_proxy { + enforce_consistency = "always" + use_auto_auth_token = true +} + +listener "tcp" { + address = "${VAULT_PROXY_ADDRESS}" + tls_disable = true +} + +auto_auth { + method { + type = "approle" + config = { + role_id_file_path = "/tmp/role-id" + secret_id_file_path = "/tmp/secret-id" + } + } + sink { + type = "file" + config = { + path = "/tmp/token" + } 
+  }
+}
+EOM
+
+# If Proxy is still running from a previous run, kill it
+pkill -F "${VAULT_PROXY_PIDFILE}" || true
+
+# Run proxy in the background
+$binpath proxy -config=/tmp/vault-proxy.hcl > /tmp/proxy-logs.txt 2>&1 &
diff --git a/enos/modules/vault_proxy/scripts/use-proxy.sh b/enos/modules/vault_proxy/scripts/use-proxy.sh
new file mode 100644
index 0000000..23a62e0
--- /dev/null
+++ b/enos/modules/vault_proxy/scripts/use-proxy.sh
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+set -e
+
+fail() {
+  echo "$1" 1>&2
+  return 1
+}
+
+[[ -z "$VAULT_PROXY_ADDRESS" ]] && fail "VAULT_PROXY_ADDRESS env variable has not been set"
+[[ -z "$VAULT_PROXY_PIDFILE" ]] && fail "VAULT_PROXY_PIDFILE env variable has not been set"
+[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"
+
+binpath=${VAULT_INSTALL_DIR}/vault
+test -x "$binpath" || fail "unable to locate vault binary at $binpath"
+
+# Will cause the Vault CLI to communicate with the Vault Proxy, since it
+# is listening at VAULT_PROXY_ADDRESS.
+export VAULT_ADDR="http://${VAULT_PROXY_ADDRESS}"
+
+# Explicitly unsetting VAULT_TOKEN to make sure that the Vault Proxy's token
+# is used.
+unset VAULT_TOKEN
+
+# Use the Vault CLI to communicate with the Vault Proxy (via the VAULT_ADDR env
+# var) to lookup the details of the Proxy's token and make sure that the
+# .data.path field contains 'auth/approle/login', thus confirming that the Proxy
+# automatically authenticated itself.
+if !
$binpath token lookup -format=json | jq -Mer --arg expected "auth/approle/login" '.data.path == $expected'; then + fail "expected proxy to automatically authenticate using 'auth/approle/login', got: '$($binpath token lookup -format=json | jq -r '.data.path')'" +fi + +# Now that we're done, kill the proxy +pkill -F "${VAULT_PROXY_PIDFILE}" || true diff --git a/enos/modules/vault_raft_remove_node_and_verify/main.tf b/enos/modules/vault_raft_remove_node_and_verify/main.tf new file mode 100644 index 0000000..ed6842c --- /dev/null +++ b/enos/modules/vault_raft_remove_node_and_verify/main.tf @@ -0,0 +1,125 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + } + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The vault cluster followers" +} + + +variable "retry_interval" { + type = number + description = "How many seconds to wait between each retry" + default = 2 +} + +variable "timeout" { + type = number + description = "The max number of seconds to wait before timing out" + default = 60 +} + +variable "listener_port" { + type = number + description = "The listener port for vault" +} +variable "vault_leader_host" { + type = object({ + ipv6 = string + private_ip = string + public_ip = string + }) + description = "The leader's host information" +} +variable "vault_addr" { + type = string + description = "The local address to use to query vault" +} +variable "cluster_port" { + type = number + description = "The cluster port for vault" +} +variable "ip_version" { + type = number + description = "The IP version to use for the Vault TCP listeners" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } +} +variable "vault_root_token" { + type = string + 
description = "The vault root token" +} +variable "vault_seal_type" { + type = string + description = "The Vault seal type" +} + +variable "add_back_nodes" { + type = bool + description = "whether to add the nodes back" +} + +variable "vault_unseal_keys" {} + +variable "vault_install_dir" { + type = string + description = "The directory where the vault binary is installed" +} + + +module "choose_follower_to_remove" { + source = "../choose_follower_host" + followers = var.hosts +} + +module "remove_raft_node" { + source = "../vault_raft_remove_peer" + depends_on = [module.choose_follower_to_remove] + + + hosts = module.choose_follower_to_remove.chosen_follower + ip_version = var.ip_version + is_voter = true + operator_instance = var.vault_leader_host.public_ip + vault_addr = var.vault_addr + vault_cluster_addr_port = var.cluster_port + vault_install_dir = var.vault_install_dir + vault_root_token = var.vault_root_token +} + +module "verify_removed" { + source = "../vault_verify_removed_node" + depends_on = [ + module.remove_raft_node + ] + + add_back_nodes = true + cluster_port = var.cluster_port + hosts = module.choose_follower_to_remove.chosen_follower + ip_version = var.ip_version + listener_port = var.listener_port + vault_addr = var.vault_addr + vault_install_dir = var.vault_install_dir + vault_leader_host = var.vault_leader_host + vault_root_token = var.vault_root_token + vault_seal_type = var.vault_seal_type + vault_unseal_keys = var.vault_seal_type == "shamir" ? var.vault_unseal_keys : null +} diff --git a/enos/modules/vault_raft_remove_peer/main.tf b/enos/modules/vault_raft_remove_peer/main.tf new file mode 100644 index 0000000..cbadd3f --- /dev/null +++ b/enos/modules/vault_raft_remove_peer/main.tf @@ -0,0 +1,80 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The old vault nodes to be removed" +} + +variable "ip_version" { + type = number + description = "The IP version used for the Vault TCP listener" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } +} + +variable "operator_instance" { + type = string + description = "The ip address of the operator (Voter) node" +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_cluster_addr_port" { + description = "The Raft cluster address port" + type = string +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +variable "is_voter" { + type = bool + default = false + description = "Whether the nodes that are going to be removed are voters" +} + +resource "enos_remote_exec" "vault_raft_remove_peer" { + for_each = var.hosts + + environment = { + REMOVE_VAULT_CLUSTER_ADDR = "${var.ip_version == 4 ? 
"${each.value.private_ip}" : "[${each.value.ipv6}]"}:${var.vault_cluster_addr_port}" + VAULT_TOKEN = var.vault_root_token + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + REMOVE_NODE_IS_VOTER = var.is_voter + } + + scripts = [abspath("${path.module}/scripts/raft-remove-peer.sh")] + + transport = { + ssh = { + host = var.operator_instance + } + } +} diff --git a/enos/modules/vault_raft_remove_peer/scripts/raft-remove-peer.sh b/enos/modules/vault_raft_remove_peer/scripts/raft-remove-peer.sh new file mode 100644 index 0000000..9fdd40b --- /dev/null +++ b/enos/modules/vault_raft_remove_peer/scripts/raft-remove-peer.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +binpath=${VAULT_INSTALL_DIR}/vault +node_addr=${REMOVE_VAULT_CLUSTER_ADDR} + +fail() { + echo "$1" 2>&1 + return 1 +} + +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +retry() { + local retries=$1 + shift + local count=0 + + until "$@"; do + exit=$? + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + echo "retry $count" + else + return "$exit" + fi + done + + return 0 +} + +remove_peer() { + if ! 
node_id=$("$binpath" operator raft list-peers -format json | jq -Mr --argjson expected "${REMOVE_NODE_IS_VOTER}" '.data.config.servers[] | select(.address=='\""$node_addr"\"') | select(.voter==$expected) | .node_id'); then + fail "failed to get node id of a node with voter status ${REMOVE_NODE_IS_VOTER}" + fi + + $binpath operator raft remove-peer "$node_id" +} + +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +# Retry a few times because it can take some time for things to settle after autopilot upgrade +retry 5 remove_peer diff --git a/enos/modules/vault_setup_dr_primary/main.tf b/enos/modules/vault_setup_dr_primary/main.tf new file mode 100644 index 0000000..69e29e6 --- /dev/null +++ b/enos/modules/vault_setup_dr_primary/main.tf @@ -0,0 +1,61 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "ip_version" { + type = number + description = "The IP version used for the Vault TCP listener" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } +} + +variable "primary_leader_host" { + type = object({ + ipv6 = string + private_ip = string + public_ip = string + }) + description = "The primary cluster leader host" +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +// Enable DR replication on the primary. This will immediately clear all data in the secondary. 
+resource "enos_remote_exec" "enable_dr_replication" { + environment = { + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/scripts/enable.sh")] + + transport = { + ssh = { + host = var.primary_leader_host.public_ip + } + } +} diff --git a/enos/modules/vault_setup_dr_primary/scripts/enable.sh b/enos/modules/vault_setup_dr_primary/scripts/enable.sh new file mode 100644 index 0000000..b8c987b --- /dev/null +++ b/enos/modules/vault_setup_dr_primary/scripts/enable.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +binpath=${VAULT_INSTALL_DIR}/vault + +fail() { + echo "$1" 1>&2 + return 1 +} + +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +# Activate the primary +$binpath write -f sys/replication/dr/primary/enable diff --git a/enos/modules/vault_setup_perf_primary/main.tf b/enos/modules/vault_setup_perf_primary/main.tf new file mode 100644 index 0000000..155ab20 --- /dev/null +++ b/enos/modules/vault_setup_perf_primary/main.tf @@ -0,0 +1,60 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "ip_version" { + type = number + description = "The IP version used for the Vault TCP listener" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } +} + +variable "primary_leader_host" { + type = object({ + ipv6 = string + private_ip = string + public_ip = string + }) + description = "The primary cluster leader host" +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +resource "enos_remote_exec" "configure_pr_primary" { + environment = { + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/scripts/configure-vault-pr-primary.sh")] + + transport = { + ssh = { + host = var.primary_leader_host.public_ip + } + } +} diff --git a/enos/modules/vault_setup_perf_primary/scripts/configure-vault-pr-primary.sh b/enos/modules/vault_setup_perf_primary/scripts/configure-vault-pr-primary.sh new file mode 100644 index 0000000..10398b8 --- /dev/null +++ b/enos/modules/vault_setup_perf_primary/scripts/configure-vault-pr-primary.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +set -e + +binpath=${VAULT_INSTALL_DIR}/vault + +fail() { + echo "$1" 1>&2 + return 1 +} + +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +# Activate the primary +$binpath write -f sys/replication/performance/primary/enable diff --git a/enos/modules/vault_setup_replication_secondary/main.tf b/enos/modules/vault_setup_replication_secondary/main.tf new file mode 100644 index 0000000..ec1ae64 --- /dev/null +++ b/enos/modules/vault_setup_replication_secondary/main.tf @@ -0,0 +1,114 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "ip_version" { + type = number + description = "The IP version used for the Vault TCP listener" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } +} + +variable "secondary_leader_host" { + type = object({ + ipv6 = string + private_ip = string + public_ip = string + }) + description = "The secondary cluster leader host" +} + +variable "replication_type" { + type = string + description = "The type of replication to perform" + + validation { + condition = contains(["dr", "performance"], var.replication_type) + error_message = "The replication_type must be either dr or performance" + } +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +variable "wrapping_token" { + type = string + description = "The wrapping token created on primary cluster" +} + +resource "enos_remote_exec" "enable_replication" { + environment = { + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + } + + 
inline = ["${var.vault_install_dir}/vault write sys/replication/${var.replication_type}/secondary/enable token=${var.wrapping_token}"] + + transport = { + ssh = { + host = var.secondary_leader_host.public_ip + } + } +} + +// Wait for our primary host to be the "leader", which means it's running and all "setup" tasks +// have been completed. We'll have to unseal our follower nodes after this has occurred. +module "wait_for_leader" { + source = "../vault_wait_for_leader" + + depends_on = [ + enos_remote_exec.enable_replication + ] + + hosts = { "0" : var.secondary_leader_host } + ip_version = var.ip_version + vault_addr = var.vault_addr + vault_install_dir = var.vault_install_dir + vault_root_token = var.vault_root_token +} + +// Ensure that our leader is ready to for us to unseal follower nodes. +resource "enos_remote_exec" "wait_for_leader_ready" { + depends_on = [ + module.wait_for_leader, + ] + + environment = { + REPLICATION_TYPE = var.replication_type + RETRY_INTERVAL = 3 // seconds + TIMEOUT_SECONDS = 60 // seconds + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/scripts/wait-for-leader-ready.sh")] + + transport = { + ssh = { + host = var.secondary_leader_host.public_ip + } + } +} diff --git a/enos/modules/vault_setup_replication_secondary/scripts/wait-for-leader-ready.sh b/enos/modules/vault_setup_replication_secondary/scripts/wait-for-leader-ready.sh new file mode 100644 index 0000000..09837c6 --- /dev/null +++ b/enos/modules/vault_setup_replication_secondary/scripts/wait-for-leader-ready.sh @@ -0,0 +1,65 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1
+
+set -e
+
+fail() {
+  echo "$1" 1>&2
+  return 1
+}
+
+[[ -z "$REPLICATION_TYPE" ]] && fail "REPLICATION_TYPE env variable has not been set"
+[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set"
+[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set"
+[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
+[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"
+[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
+
+binpath=${VAULT_INSTALL_DIR}/vault
+test -x "$binpath" || fail "unable to locate vault binary at $binpath"
+
+export VAULT_FORMAT=json
+
+replicationStatus() {
+  $binpath read "sys/replication/${REPLICATION_TYPE}/status" | jq .data
+}
+
+isReady() {
+  # Ready means the node reports mode=secondary, state=stream-wals, and an intact merkle tree
+  local status
+  if ! status=$(replicationStatus); then
+    return 1
+  fi
+
+  if ! jq -eMc '.state == "stream-wals"' &> /dev/null <<< "$status"; then
+    echo "${REPLICATION_TYPE} replication state is not yet stream-wals" 1>&2
+    echo "${REPLICATION_TYPE} replication is not yet streaming WALs, got: $(jq '.state' <<< "$status")" 1>&2
+    return 1
+  fi
+
+  if ! jq -eMc '.mode == "secondary"' &> /dev/null <<< "$status"; then
+    echo "${REPLICATION_TYPE} replication mode is not yet secondary, got: $(jq '.mode' <<< "$status")" 1>&2
+    return 1
+  fi
+
+  if ! jq -eMc '.corrupted_merkle_tree == false' &> /dev/null <<< "$status"; then
+    echo "${REPLICATION_TYPE} replication merkle tree is corrupted" 1>&2
+    return 1
+  fi
+
+  echo "${REPLICATION_TYPE} primary is ready for followers to be unsealed!" 
1>&2 + return 0 +} + +begin_time=$(date +%s) +end_time=$((begin_time + TIMEOUT_SECONDS)) +while [ "$(date +%s)" -lt "$end_time" ]; do + if isReady; then + exit 0 + fi + + sleep "$RETRY_INTERVAL" +done + +fail "Timed out waiting for ${REPLICATION_TYPE} primary to ready: $(replicationStatus)" diff --git a/enos/modules/vault_step_down/main.tf b/enos/modules/vault_step_down/main.tf new file mode 100644 index 0000000..4074969 --- /dev/null +++ b/enos/modules/vault_step_down/main.tf @@ -0,0 +1,50 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "leader_host" { + type = object({ + private_ip = string + public_ip = string + }) + + description = "The vault cluster host that can be expected as a leader" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +resource "enos_remote_exec" "vault_operator_step_down" { + environment = { + VAULT_TOKEN = var.vault_root_token + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/scripts/operator-step-down.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} diff --git a/enos/modules/vault_step_down/scripts/operator-step-down.sh b/enos/modules/vault_step_down/scripts/operator-step-down.sh new file mode 100644 index 0000000..07f2c38 --- /dev/null +++ b/enos/modules/vault_step_down/scripts/operator-step-down.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +set -eou pipefail + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +eval "$binpath" operator step-down diff --git a/enos/modules/vault_test_ui/main.tf b/enos/modules/vault_test_ui/main.tf new file mode 100644 index 0000000..9fc16a7 --- /dev/null +++ b/enos/modules/vault_test_ui/main.tf @@ -0,0 +1,34 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +locals { + # base test environment excludes the filter argument + ui_test_environment_base = { + VAULT_ADDR = "http://${var.vault_addr}:8200" + VAULT_TOKEN = var.vault_root_token + VAULT_UNSEAL_KEYS = jsonencode(slice(var.vault_unseal_keys, 0, var.vault_recovery_threshold)) + } + ui_test_environment = var.ui_test_filter == null || try(length(trimspace(var.ui_test_filter)) == 0, true) ? local.ui_test_environment_base : merge(local.ui_test_environment_base, { + TEST_FILTER = var.ui_test_filter + }) + # The environment variables need to be double escaped since the process of rendering them to the + # outputs eats the escaping. Therefore double escaping ensures that the values are rendered as + # properly escaped json, i.e. "[\"value\"]" suitable to be parsed as json. + escaped_ui_test_environment = [ + for key, value in local.ui_test_environment : "export ${key}='${value}'" + ] +} + +resource "enos_local_exec" "test_ui" { + count = var.ui_run_tests ? 
1 : 0
+  environment = local.ui_test_environment
+  scripts     = ["${path.module}/scripts/test_ui.sh"]
+}
diff --git a/enos/modules/vault_test_ui/outputs.tf b/enos/modules/vault_test_ui/outputs.tf
new file mode 100644
index 0000000..ae4f926
--- /dev/null
+++ b/enos/modules/vault_test_ui/outputs.tf
@@ -0,0 +1,15 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+output "ui_test_stderr" {
+  value = var.ui_run_tests ? enos_local_exec.test_ui[0].stderr : "Tests were not run; no stderr captured"
+}
+
+output "ui_test_stdout" {
+  value = var.ui_run_tests ? enos_local_exec.test_ui[0].stdout : "Tests were not run; no stdout captured"
+}
+
+output "ui_test_environment" {
+  value       = join(" \\ \n", local.escaped_ui_test_environment)
+  description = "The environment variables that are required in order to run the test:enos yarn target"
+}
diff --git a/enos/modules/vault_test_ui/scripts/test_ui.sh b/enos/modules/vault_test_ui/scripts/test_ui.sh
new file mode 100755
index 0000000..9a98243
--- /dev/null
+++ b/enos/modules/vault_test_ui/scripts/test_ui.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+set -eux -o pipefail
+
+project_root=$(git rev-parse --show-toplevel)
+pushd "$project_root" > /dev/null
+
+echo "running test-ember-enos"
+make test-ember-enos
+popd > /dev/null
diff --git a/enos/modules/vault_test_ui/variables.tf b/enos/modules/vault_test_ui/variables.tf
new file mode 100644
index 0000000..99625b2
--- /dev/null
+++ b/enos/modules/vault_test_ui/variables.tf
@@ -0,0 +1,34 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+variable "vault_addr" {
+  description = "The local vault API listen address"
+  type        = string
+}
+
+variable "vault_root_token" {
+  description = "The vault root token"
+  type        = string
+}
+
+variable "ui_test_filter" {
+  type        = string
+  description = "A test filter to limit the ui tests to execute. 
Will be appended to the ember test command as '-f='" + default = null +} + +variable "vault_unseal_keys" { + description = "Base64 encoded recovery keys to use for the seal/unseal test" + type = list(string) +} + +variable "vault_recovery_threshold" { + description = "The number of recovery keys to require when unsealing Vault" + type = string +} + +variable "ui_run_tests" { + type = bool + description = "Whether to run the UI tests or not. If set to false a cluster will be created but no tests will be run" + default = true +} diff --git a/enos/modules/vault_unseal_replication_followers/main.tf b/enos/modules/vault_unseal_replication_followers/main.tf new file mode 100644 index 0000000..59d34a7 --- /dev/null +++ b/enos/modules/vault_unseal_replication_followers/main.tf @@ -0,0 +1,129 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +# This module unseals the replication secondary follower nodes +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The Vault cluster hosts to unseal" +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_seal_type" { + type = string + description = "The Vault seal type" +} + +variable "vault_unseal_keys" {} + +locals { + vault_bin_path = "${var.vault_install_dir}/vault" +} + +# After replication is enabled the secondary follower nodes are expected to be sealed, +# so we wait for the secondary follower nodes to update the seal status +resource "enos_remote_exec" "wait_until_sealed" { + for_each = var.hosts + environment = { + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = 
[abspath("${path.module}/scripts/wait-until-sealed.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +# The follower nodes on secondary replication cluster incorrectly report +# unseal progress 2/3 (Issue: https://hashicorp.atlassian.net/browse/VAULT-12309), +# so we restart the followers to allow them to auto-unseal +resource "enos_remote_exec" "restart_followers" { + depends_on = [enos_remote_exec.wait_until_sealed] + for_each = { + for idx, host in var.hosts : idx => host + if var.vault_seal_type != "shamir" + } + + inline = ["sudo systemctl restart vault"] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +# We cannot use the vault_unseal resouce due to the known issue +# (https://hashicorp.atlassian.net/browse/VAULT-12311). We use a custom +# script to allow retry for unsealing the secondary followers +resource "enos_remote_exec" "unseal_followers" { + depends_on = [enos_remote_exec.restart_followers] + # The unseal keys are required only for seal_type shamir + for_each = { + for idx, host in var.hosts : idx => host + if var.vault_seal_type == "shamir" + } + + environment = { + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + UNSEAL_KEYS = join(",", var.vault_unseal_keys) + } + + scripts = [abspath("${path.module}/scripts/unseal-node.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +# This is a second attempt needed to unseal the secondary followers +# using a custom script due to get past the known issue +# (https://hashicorp.atlassian.net/browse/VAULT-12311) +resource "enos_remote_exec" "unseal_followers_again" { + depends_on = [enos_remote_exec.unseal_followers] + for_each = { + for idx, host in var.hosts : idx => host + if var.vault_seal_type == "shamir" + } + + environment = { + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + UNSEAL_KEYS = join(",", var.vault_unseal_keys) + } + + scripts = 
[abspath("${path.module}/scripts/unseal-node.sh")]
+
+  transport = {
+    ssh = {
+      host = each.value.public_ip
+    }
+  }
+}
diff --git a/enos/modules/vault_unseal_replication_followers/scripts/unseal-node.sh b/enos/modules/vault_unseal_replication_followers/scripts/unseal-node.sh
new file mode 100755
index 0000000..c6dafb0
--- /dev/null
+++ b/enos/modules/vault_unseal_replication_followers/scripts/unseal-node.sh
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+binpath=${VAULT_INSTALL_DIR}/vault
+
+IFS="," read -r -a keys <<< "${UNSEAL_KEYS}"
+
+function fail() {
+  echo "$1" 1>&2
+  exit 1
+}
+
+count=0
+retries=5
+while :; do
+  for key in "${keys[@]}"; do
+
+    # Check the Vault seal status
+    seal_status=$($binpath status -format json | jq '.sealed')
+
+    if [[ "$seal_status" == "true" ]]; then
+      echo "running unseal with $key count $count with retry $retries" >> /tmp/unseal_script.out
+      "$binpath" operator unseal "$key" > /dev/null 2>&1
+    else
+      exit 0
+    fi
+  done
+
+  wait=$((2 ** count)) # exponential backoff; 1 ** count was always 1, which never backed off
+  count=$((count + 1))
+  if [ "$count" -lt "$retries" ]; then
+    sleep "$wait"
+  else
+    fail "failed to unseal node"
+  fi
+done
diff --git a/enos/modules/vault_unseal_replication_followers/scripts/wait-until-sealed.sh b/enos/modules/vault_unseal_replication_followers/scripts/wait-until-sealed.sh
new file mode 100644
index 0000000..a507228
--- /dev/null
+++ b/enos/modules/vault_unseal_replication_followers/scripts/wait-until-sealed.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +binpath=${VAULT_INSTALL_DIR}/vault + +function fail() { + echo "$1" 1>&2 + exit 1 +} + +count=0 +retries=5 +while :; do + # Check the Vault seal status + seal_status=$($binpath status -format json | jq '.sealed') + + if [[ "$seal_status" == "true" ]]; then + exit 0 + fi + + wait=$((3 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + fail "Expected node to be sealed" + fi +done diff --git a/enos/modules/vault_upgrade/main.tf b/enos/modules/vault_upgrade/main.tf new file mode 100644 index 0000000..7f9dec0 --- /dev/null +++ b/enos/modules/vault_upgrade/main.tf @@ -0,0 +1,195 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + } + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.5.4" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The vault cluster instances that were created" +} + + +variable "ip_version" { + type = number + description = "The IP version used for the Vault TCP listener" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_artifactory_release" { + type = object({ + username = string + token = string + url = string + sha256 = string + }) + description = "Vault release version and edition to install from artifactory.hashicorp.engineering" + default = null +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_local_artifact_path" { + type = string + description = "The path to a locally built vault artifact to install" + default = null +} + +variable "vault_root_token" { 
+ type = string + description = "The vault root token" +} + +variable "vault_seal_type" { + type = string + description = "The Vault seal type" +} + +variable "vault_unseal_keys" { + type = list(string) + description = "The keys to use to unseal Vault when not using auto-unseal" + default = null +} + +locals { + vault_bin_path = "${var.vault_install_dir}/vault" +} + +// Upgrade the Vault artifact in-place. With zip bundles we must use the same path of the original +// installation so that we can re-use the systemd unit that enos_vault_start created at +// /etc/systemd/system/vault.service. The path does not matter for package types as the systemd +// unit for the bianry is included and will be installed. +resource "enos_bundle_install" "upgrade_vault_binary" { + for_each = var.hosts + + destination = var.vault_install_dir + artifactory = var.vault_artifactory_release + path = var.vault_local_artifact_path + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +// We assume that our original Vault cluster used a zip bundle from releases.hashicorp.com and as +// such enos_vault_start will have created a systemd unit for it at /etc/systemd/systemd/vault.service. +// If we're upgrading to a package that contains its own systemd unit we'll need to remove the +// old unit file so that when we restart vault we pick up the new unit that points to the updated +// binary. 
+resource "enos_remote_exec" "maybe_remove_old_unit_file" { + for_each = var.hosts + depends_on = [enos_bundle_install.upgrade_vault_binary] + + environment = { + ARTIFACT_NAME = enos_bundle_install.upgrade_vault_binary[each.key].name + } + + scripts = [abspath("${path.module}/scripts/maybe-remove-old-unit-file.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +module "get_ip_addresses" { + source = "../vault_get_cluster_ips" + + depends_on = [enos_remote_exec.maybe_remove_old_unit_file] + + hosts = var.hosts + ip_version = var.ip_version + vault_addr = var.vault_addr + vault_install_dir = var.vault_install_dir + vault_root_token = var.vault_root_token +} + +module "restart_followers" { + source = "../restart_vault" + hosts = module.get_ip_addresses.follower_hosts + vault_addr = var.vault_addr + vault_install_dir = var.vault_install_dir +} + +resource "enos_vault_unseal" "followers" { + for_each = { + for idx, host in module.get_ip_addresses.follower_hosts : idx => host + if var.vault_seal_type == "shamir" + } + depends_on = [module.restart_followers] + + bin_path = local.vault_bin_path + vault_addr = var.vault_addr + seal_type = var.vault_seal_type + unseal_keys = var.vault_unseal_keys + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +module "wait_for_followers_unsealed" { + source = "../vault_wait_for_cluster_unsealed" + depends_on = [ + module.restart_followers, + enos_vault_unseal.followers, + ] + + hosts = module.get_ip_addresses.follower_hosts + vault_addr = var.vault_addr + vault_install_dir = var.vault_install_dir +} + +module "restart_leader" { + depends_on = [module.wait_for_followers_unsealed] + source = "../restart_vault" + hosts = module.get_ip_addresses.leader_hosts + vault_addr = var.vault_addr + vault_install_dir = var.vault_install_dir +} + +resource "enos_vault_unseal" "leader" { + count = var.vault_seal_type == "shamir" ? 
1 : 0 + depends_on = [module.restart_leader] + + bin_path = local.vault_bin_path + vault_addr = var.vault_addr + seal_type = var.vault_seal_type + unseal_keys = var.vault_unseal_keys + + transport = { + ssh = { + host = module.get_ip_addresses.leader_public_ip + } + } +} diff --git a/enos/modules/vault_upgrade/scripts/maybe-remove-old-unit-file.sh b/enos/modules/vault_upgrade/scripts/maybe-remove-old-unit-file.sh new file mode 100644 index 0000000..e5c673a --- /dev/null +++ b/enos/modules/vault_upgrade/scripts/maybe-remove-old-unit-file.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$ARTIFACT_NAME" ]] && fail "ARTIFACT_NAME env variable has not been set" + +if [ "${ARTIFACT_NAME##*.}" == "zip" ]; then + echo "Skipped removing unit file because new artifact is a zip bundle" + exit 0 +fi + +# Get the unit file for the vault.service that is running. If it's not in /etc/systemd then it +# should be a package provided unit file so we don't need to delete anything. +# +# Note that we use -p instead of -P so that we support ancient amzn 2 systemctl. +if ! unit_path=$(systemctl show -p FragmentPath vault | cut -d = -f2 2>&1); then + echo "Skipped removing unit file because and existing path could not be found: $unit_path" + exit 0 +fi + +if [[ "$unit_path" == *"/etc/systemd"* ]]; then + if [ -f "$unit_path" ]; then + echo "Removing old systemd unit file: $unit_path" + if ! 
out=$(sudo rm "$unit_path" 2>&1); then + fail "Failed to remove old unit file: $unit_path: $out" + fi + else + echo "Skipped removing old systemd unit file because it no longer exists: $unit_path" + fi +else + echo "Skipped removing old systemd unit file because it was not created in /etc/systemd/: $unit_path" +fi diff --git a/enos/modules/vault_verify_agent_output/main.tf b/enos/modules/vault_verify_agent_output/main.tf new file mode 100644 index 0000000..68e0484 --- /dev/null +++ b/enos/modules/vault_verify_agent_output/main.tf @@ -0,0 +1,44 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The vault cluster instances that were created" +} + +variable "vault_agent_expected_output" { + type = string + description = "The output that's expected in the rendered template at vault_agent_template_destination" +} + +variable "vault_agent_template_destination" { + type = string + description = "The destination of the template rendered by Agent" +} + +resource "enos_remote_exec" "verify_vault_agent_output" { + environment = { + VAULT_AGENT_TEMPLATE_DESTINATION = var.vault_agent_template_destination + VAULT_AGENT_EXPECTED_OUTPUT = var.vault_agent_expected_output + } + + scripts = [abspath("${path.module}/scripts/verify-vault-agent-output.sh")] + + transport = { + ssh = { + host = var.hosts[0].public_ip + } + } +} diff --git a/enos/modules/vault_verify_agent_output/scripts/verify-vault-agent-output.sh b/enos/modules/vault_verify_agent_output/scripts/verify-vault-agent-output.sh new file mode 100644 index 0000000..7924e17 --- /dev/null +++ b/enos/modules/vault_verify_agent_output/scripts/verify-vault-agent-output.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + return 1 +} + +actual_output=$(cat "${VAULT_AGENT_TEMPLATE_DESTINATION}") +if [[ "$actual_output" != "${VAULT_AGENT_EXPECTED_OUTPUT}" ]]; then + fail "expected '${VAULT_AGENT_EXPECTED_OUTPUT}' to be the Agent output, but got: '$actual_output'" +fi diff --git a/enos/modules/vault_verify_autopilot/main.tf b/enos/modules/vault_verify_autopilot/main.tf new file mode 100644 index 0000000..236acf7 --- /dev/null +++ b/enos/modules/vault_verify_autopilot/main.tf @@ -0,0 +1,64 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The vault cluster instances that were created" +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_autopilot_upgrade_status" { + type = string + description = "The autopilot upgrade expected status" +} + +variable "vault_autopilot_upgrade_version" { + type = string + description = "The Vault upgraded version" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +resource "enos_remote_exec" "smoke-verify-autopilot" { + for_each = var.hosts + + environment = { + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir, + VAULT_TOKEN = var.vault_root_token, + VAULT_AUTOPILOT_UPGRADE_STATUS = var.vault_autopilot_upgrade_status, + VAULT_AUTOPILOT_UPGRADE_VERSION = var.vault_autopilot_upgrade_version, + } + + scripts = [abspath("${path.module}/scripts/smoke-verify-autopilot.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git 
a/enos/modules/vault_verify_autopilot/scripts/smoke-verify-autopilot.sh b/enos/modules/vault_verify_autopilot/scripts/smoke-verify-autopilot.sh new file mode 100755 index 0000000..6408c76 --- /dev/null +++ b/enos/modules/vault_verify_autopilot/scripts/smoke-verify-autopilot.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_AUTOPILOT_UPGRADE_STATUS" ]] && fail "VAULT_AUTOPILOT_UPGRADE_STATUS env variable has not been set" +[[ -z "$VAULT_AUTOPILOT_UPGRADE_VERSION" ]] && fail "VAULT_AUTOPILOT_UPGRADE_VERSION env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +count=0 +retries=8 +while :; do + state=$($binpath read -format=json sys/storage/raft/autopilot/state) + status="$(jq -r '.data.upgrade_info.status' <<< "$state")" + target_version="$(jq -r '.data.upgrade_info.target_version' <<< "$state")" + + if [ "$status" = "$VAULT_AUTOPILOT_UPGRADE_STATUS" ] && [ "$target_version" = "$VAULT_AUTOPILOT_UPGRADE_VERSION" ]; then + exit 0 + fi + + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + echo "Expected autopilot status to be $VAULT_AUTOPILOT_UPGRADE_STATUS, got $status" + echo "Expected autopilot target_version to be $VAULT_AUTOPILOT_UPGRADE_VERSION, got $target_version" + sleep "$wait" + else + echo "$state" + echo "Expected autopilot status to be $VAULT_AUTOPILOT_UPGRADE_STATUS, got $status" + echo "Expected autopilot target_version to be $VAULT_AUTOPILOT_UPGRADE_VERSION, got $target_version" + fail "Autopilot did not get into the correct status" + fi +done diff --git 
a/enos/modules/vault_verify_billing_start_date/main.tf b/enos/modules/vault_verify_billing_start_date/main.tf new file mode 100644 index 0000000..0d72fa7 --- /dev/null +++ b/enos/modules/vault_verify_billing_start_date/main.tf @@ -0,0 +1,64 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_cluster_addr_port" { + description = "The Raft cluster address port" + type = string + default = "8201" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_instance_count" { + type = number + description = "How many vault instances are in the cluster" +} + +variable "hosts" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The vault cluster instances that were created" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +resource "enos_remote_exec" "vault_verify_billing_start_date" { + for_each = var.hosts + + environment = { + VAULT_ADDR = var.vault_addr + VAULT_CLUSTER_ADDR = "${each.value.private_ip}:${var.vault_cluster_addr_port}" + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_LOCAL_BINARY_PATH = "${var.vault_install_dir}/vault" + VAULT_TOKEN = var.vault_root_token + } + + scripts = [abspath("${path.module}/scripts/verify-billing-start.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/vault_verify_billing_start_date/scripts/verify-billing-start.sh b/enos/modules/vault_verify_billing_start_date/scripts/verify-billing-start.sh new file mode 100644 index 0000000..c4334cc --- /dev/null +++ b/enos/modules/vault_verify_billing_start_date/scripts/verify-billing-start.sh @@ -0,0 +1,98 @@ 
+#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +retry() { + local retries=$1 + shift + local count=0 + + until "$@"; do + exit=$? + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep 30 + else + return "$exit" + fi + done + + return 0 +} + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +enable_debugging() { + echo "Turning debugging on.." + export PS4='+(${BASH_SOURCE}:${LINENO})> ${FUNCNAME[0]:+${FUNCNAME[0]}(): }' + set -x +} + +get_billing_start_date() { + "$binpath" read -format=json sys/internal/counters/config | jq -r ".data.billing_start_timestamp" +} + +get_target_platform() { + uname -s +} + +# Given the date as ARGV 1, return 1 year as a unix date +verify_date_is_in_current_year() { + local billing_start_unix + local one_year_ago_unix + + # Verify if the billing start date is in the latest billing year + case $(get_target_platform) in + Linux) + billing_start_unix=$(TZ=UTC date -d "$1" +'%s') # For "now", use $(date +'%s') + one_year_ago_unix=$(TZ=UTC date -d "1 year ago" +'%s') + ;; + Darwin) + one_year_ago_unix=$(TZ=UTC date -v -1y +'%s') + billing_start_unix=$(TZ=UTC date -j -f "%Y-%m-%dT%H:%M:%SZ" "${1}" +'%s') + ;; + *) + fail "Unsupported target host operating system: $(get_target_platform)" 1>&2 + ;; + esac + + if [ "$billing_start_unix" -gt "$one_year_ago_unix" ]; then + echo "Billing start date $1 has successfully rolled over to current year." 
+ exit 0 + else + local vault_ps + vault_ps=$(pgrep vault | xargs) + echo "On version $version, pid $vault_ps, addr $VAULT_ADDR, Billing start date $1 did not roll over to current year" 1>&2 + fi +} + +verify_billing_start_date() { + local billing_start + billing_start=$(get_billing_start_date) + + if verify_date_is_in_current_year "$billing_start"; then + return 0 + fi + + local version + local vault_ps + version=$("$binpath" status -format=json | jq .version) + vault_ps=$(pgrep vault | xargs) + echo "On version $version, pid $vault_ps, addr $VAULT_ADDR, Billing start date $billing_start did not roll over to current year" 1>&2 + return 1 +} + +enable_debugging + +retry 10 verify_billing_start_date diff --git a/enos/modules/vault_verify_default_lcq/main.tf b/enos/modules/vault_verify_default_lcq/main.tf new file mode 100644 index 0000000..bb05726 --- /dev/null +++ b/enos/modules/vault_verify_default_lcq/main.tf @@ -0,0 +1,66 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The vault cluster instances that were created" +} + +variable "retry_interval" { + type = number + description = "How many seconds to wait between each retry" + default = 2 +} + +variable "timeout" { + type = number + description = "The max number of seconds to wait before timing out" + default = 60 +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_autopilot_default_max_leases" { + type = string + description = "The autopilot upgrade expected max_leases" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +resource "enos_remote_exec" "smoke_verify_default_lcq" { + for_each = var.hosts + + environment = { + RETRY_INTERVAL = 
var.retry_interval + TIMEOUT_SECONDS = var.timeout + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + DEFAULT_LCQ = var.vault_autopilot_default_max_leases + } + + scripts = [abspath("${path.module}/scripts/smoke-verify-default-lcq.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/vault_verify_default_lcq/scripts/smoke-verify-default-lcq.sh b/enos/modules/vault_verify_default_lcq/scripts/smoke-verify-default-lcq.sh new file mode 100755 index 0000000..64e8e0f --- /dev/null +++ b/enos/modules/vault_verify_default_lcq/scripts/smoke-verify-default-lcq.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +function fail() { + echo "$1" 1>&2 + exit 1 +} + +# Exit early if we haven't been given an expected DEFAULT_LCQ +[[ -z "$DEFAULT_LCQ" ]] && exit 0 + +[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set" +[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +getMaxLeases() { + curl --request GET --header "X-Vault-Token: $VAULT_TOKEN" \ + "$VAULT_ADDR/v1/sys/quotas/lease-count/default" | jq '.data.max_leases // empty' +} + +waitForMaxLeases() { + local max_leases + if ! max_leases=$(getMaxLeases); then + echo "failed getting /v1/sys/quotas/lease-count/default data" 1>&2 + return 1 + fi + + if [[ "$max_leases" == "$DEFAULT_LCQ" ]]; then + echo "$max_leases" + return 0 + else + echo "Expected Default LCQ $DEFAULT_LCQ but got $max_leases" + return 1 + fi +} + +begin_time=$(date +%s) +end_time=$((begin_time + TIMEOUT_SECONDS)) +while [ "$(date +%s)" -lt "$end_time" ]; do + if waitForMaxLeases; then + exit 0 + fi + + sleep "$RETRY_INTERVAL" +done + +fail "Timed out waiting for Default LCQ verification to complete. 
Data:\n\t$(getMaxLeases)" diff --git a/enos/modules/vault_verify_dr_replication/main.tf b/enos/modules/vault_verify_dr_replication/main.tf new file mode 100644 index 0000000..f7f99fd --- /dev/null +++ b/enos/modules/vault_verify_dr_replication/main.tf @@ -0,0 +1,117 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "ip_version" { + type = number + description = "The IP version used for the Vault TCP listener" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } +} + +variable "primary_leader_host" { + type = object({ + ipv6 = string + private_ip = string + public_ip = string + }) + description = "The primary cluster leader host" +} + +variable "secondary_leader_host" { + type = object({ + ipv6 = string + private_ip = string + public_ip = string + }) + description = "The secondary cluster leader host" +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "wrapping_token" { + type = string + description = "The wrapping token created on primary cluster" + default = null +} + +locals { + primary_leader_addr = var.ip_version == 6 ? var.primary_leader_host.ipv6 : var.primary_leader_host.private_ip + secondary_leader_addr = var.ip_version == 6 ? 
var.secondary_leader_host.ipv6 : var.secondary_leader_host.private_ip + primary_replication_status = jsondecode(enos_remote_exec.verify_replication_status_on_primary.stdout) + secondary_replication_status = jsondecode(enos_remote_exec.verify_replication_status_on_secondary.stdout) +} + +resource "enos_remote_exec" "verify_replication_status_on_primary" { + environment = { + IP_VERSION = var.ip_version + PRIMARY_LEADER_ADDR = local.primary_leader_addr + SECONDARY_LEADER_ADDR = local.secondary_leader_addr + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/scripts/verify-replication-status.sh")] + + transport = { + ssh = { + host = var.primary_leader_host.public_ip + } + } +} + +resource "enos_remote_exec" "verify_replication_status_on_secondary" { + environment = { + IP_VERSION = var.ip_version + PRIMARY_LEADER_ADDR = local.primary_leader_addr + SECONDARY_LEADER_ADDR = local.secondary_leader_addr + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/scripts/verify-replication-status.sh")] + + transport = { + ssh = { + host = var.secondary_leader_host.public_ip + } + } +} + +output "primary_replication_status" { + value = local.primary_replication_status +} + +output "known_primary_cluster_addrs" { + value = local.secondary_replication_status.data.known_primary_cluster_addrs +} + +output "secondary_replication_status" { + value = local.secondary_replication_status +} + +output "primary_replication_data_secondaries" { + value = local.primary_replication_status.data.secondaries +} + +output "secondary_replication_data_primaries" { + value = local.secondary_replication_status.data.primaries +} diff --git a/enos/modules/vault_verify_dr_replication/scripts/verify-replication-status.sh b/enos/modules/vault_verify_dr_replication/scripts/verify-replication-status.sh new file mode 100644 index 0000000..f01a9cd --- /dev/null +++ 
b/enos/modules/vault_verify_dr_replication/scripts/verify-replication-status.sh @@ -0,0 +1,89 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +# This script waits for the replication status to be established +# then verifies the dr replication between primary and +# secondary clusters + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$IP_VERSION" ]] && fail "IP_VERSION env variable has not been set" +[[ -z "$PRIMARY_LEADER_ADDR" ]] && fail "PRIMARY_LEADER_ADDR env variable has not been set" +[[ -z "$SECONDARY_LEADER_ADDR" ]] && fail "SECONDARY_LEADER_ADDR env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +retry() { + local retries=$1 + shift + local count=0 + + until "$@"; do + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + fail "$($binpath read -format=json sys/replication/dr/status)" + fi + done +} + +check_dr_status() { + dr_status=$($binpath read -format=json sys/replication/dr/status) + cluster_state=$(jq -r '.data.state' <<< "$dr_status") + connection_mode=$(jq -r '.data.mode' <<< "$dr_status") + + if [[ "$cluster_state" == 'idle' ]]; then + echo "replication cluster state is idle" 1>&2 + return 1 + fi + + if [[ "$connection_mode" == "primary" ]]; then + connection_status=$(jq -r '.data.secondaries[0].connection_status' <<< "$dr_status") + if [[ "$connection_status" == 'disconnected' ]]; then + echo ".data.secondaries[0].connection_status from primary node is 'disconnected'" 1>&2 + return 1 + fi + # Confirm we are in a "running" state for the primary + if [[ "$cluster_state" != "running" ]]; then + echo "replication cluster primary state is not running" 1>&2 + return 1 + fi + else + 
connection_status=$(jq -r '.data.primaries[0].connection_status' <<< "$dr_status") + if [[ "$connection_status" == 'disconnected' ]]; then + echo ".data.primaries[0].connection_status from secondary node is 'disconnected'" 1>&2 + return 1 + fi + # Confirm we are in a "stream-wals" state for the secondary + if [[ "$cluster_state" != "stream-wals" ]]; then + echo "replication cluster primary state is not stream-wals" 1>&2 + return 1 + fi + known_primary_cluster_addrs=$(jq -r '.data.known_primary_cluster_addrs' <<< "$dr_status") + if ! echo "$known_primary_cluster_addrs" | grep -q "$PRIMARY_LEADER_ADDR"; then + echo "$PRIMARY_LEADER_ADDR is not in .data.known_primary_cluster_addrs: $known_primary_cluster_addrs" 1>&2 + return 1 + fi + fi + + echo "$dr_status" + return 0 +} + +if [ "$IP_VERSION" != 4 ] && [ "$IP_VERSION" != 6 ]; then + fail "unsupported IP_VERSION: $IP_VERSION" +fi + +# Retry for a while because it can take some time for replication to sync +retry 10 check_dr_status diff --git a/enos/modules/vault_verify_performance_replication/main.tf b/enos/modules/vault_verify_performance_replication/main.tf new file mode 100644 index 0000000..f7f99fd --- /dev/null +++ b/enos/modules/vault_verify_performance_replication/main.tf @@ -0,0 +1,117 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "ip_version" { + type = number + description = "The IP version used for the Vault TCP listener" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } +} + +variable "primary_leader_host" { + type = object({ + ipv6 = string + private_ip = string + public_ip = string + }) + description = "The primary cluster leader host" +} + +variable "secondary_leader_host" { + type = object({ + ipv6 = string + private_ip = string + public_ip = string + }) + description = "The secondary cluster leader host" +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "wrapping_token" { + type = string + description = "The wrapping token created on primary cluster" + default = null +} + +locals { + primary_leader_addr = var.ip_version == 6 ? var.primary_leader_host.ipv6 : var.primary_leader_host.private_ip + secondary_leader_addr = var.ip_version == 6 ? 
var.secondary_leader_host.ipv6 : var.secondary_leader_host.private_ip + primary_replication_status = jsondecode(enos_remote_exec.verify_replication_status_on_primary.stdout) + secondary_replication_status = jsondecode(enos_remote_exec.verify_replication_status_on_secondary.stdout) +} + +resource "enos_remote_exec" "verify_replication_status_on_primary" { + environment = { + IP_VERSION = var.ip_version + PRIMARY_LEADER_ADDR = local.primary_leader_addr + SECONDARY_LEADER_ADDR = local.secondary_leader_addr + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/scripts/verify-replication-status.sh")] + + transport = { + ssh = { + host = var.primary_leader_host.public_ip + } + } +} + +resource "enos_remote_exec" "verify_replication_status_on_secondary" { + environment = { + IP_VERSION = var.ip_version + PRIMARY_LEADER_ADDR = local.primary_leader_addr + SECONDARY_LEADER_ADDR = local.secondary_leader_addr + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/scripts/verify-replication-status.sh")] + + transport = { + ssh = { + host = var.secondary_leader_host.public_ip + } + } +} + +output "primary_replication_status" { + value = local.primary_replication_status +} + +output "known_primary_cluster_addrs" { + value = local.secondary_replication_status.data.known_primary_cluster_addrs +} + +output "secondary_replication_status" { + value = local.secondary_replication_status +} + +output "primary_replication_data_secondaries" { + value = local.primary_replication_status.data.secondaries +} + +output "secondary_replication_data_primaries" { + value = local.secondary_replication_status.data.primaries +} diff --git a/enos/modules/vault_verify_performance_replication/scripts/verify-replication-status.sh b/enos/modules/vault_verify_performance_replication/scripts/verify-replication-status.sh new file mode 100644 index 0000000..57b1b43 --- /dev/null +++ 
b/enos/modules/vault_verify_performance_replication/scripts/verify-replication-status.sh @@ -0,0 +1,97 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +# This script waits for the replication status to be established +# then verifies the performance replication between primary and +# secondary clusters + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$IP_VERSION" ]] && fail "IP_VERSION env variable has not been set" +[[ -z "$PRIMARY_LEADER_ADDR" ]] && fail "PRIMARY_LEADER_ADDR env variable has not been set" +[[ -z "$SECONDARY_LEADER_ADDR" ]] && fail "SECONDARY_LEADER_ADDR env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +retry() { + local retries=$1 + shift + local count=0 + + until "$@"; do + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + fail "$($binpath read -format=json sys/replication/performance/status)" + fi + done +} + +check_pr_status() { + pr_status=$($binpath read -format=json sys/replication/performance/status) + cluster_state=$(jq -r '.data.state' <<< "$pr_status") + connection_mode=$(jq -r '.data.mode' <<< "$pr_status") + + if [[ "$cluster_state" == 'idle' ]]; then + echo "replication cluster state is idle" 1>&2 + return 1 + fi + + if [[ "$connection_mode" == "primary" ]]; then + connection_status=$(jq -r '.data.secondaries[0].connection_status' <<< "$pr_status") + if [[ "$connection_status" == 'disconnected' ]]; then + echo ".data.secondaries[0].connection_status from primary node is 'disconnected'" 1>&2 + return 1 + fi + if [ "$IP_VERSION" == 4 ]; then + secondary_cluster_addr=$(jq -r '.data.secondaries[0].cluster_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")' <<< "$pr_status") 
+ else + secondary_cluster_addr=$(jq -r '.data.secondaries[0].cluster_address | scan("\\[(.+)\\]") | .[0]' <<< "$pr_status") + fi + if [[ "$secondary_cluster_addr" != "$SECONDARY_LEADER_ADDR" ]]; then + echo ".data.secondaries[0].cluster_address should have an IP address of $SECONDARY_LEADER_ADDR, got: $secondary_cluster_addr" 1>&2 + return 1 + fi + else + connection_status=$(jq -r '.data.primaries[0].connection_status' <<< "$pr_status") + if [[ "$connection_status" == 'disconnected' ]]; then + echo ".data.primaries[0].connection_status from secondary node is 'disconnected'" 1>&2 + return 1 + fi + if [ "$IP_VERSION" == 4 ]; then + primary_cluster_addr=$(jq -r '.data.primaries[0].cluster_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")' <<< "$pr_status") + else + primary_cluster_addr=$(jq -r '.data.primaries[0].cluster_address | scan("\\[(.+)\\]") | .[0]' <<< "$pr_status") + fi + if [[ "$primary_cluster_addr" != "$PRIMARY_LEADER_ADDR" ]]; then + echo ".data.primaries[0].cluster_address should have an IP address of $PRIMARY_LEADER_ADDR, got: $primary_cluster_addr" 1>&2 + return 1 + fi + known_primary_cluster_addrs=$(jq -r '.data.known_primary_cluster_addrs' <<< "$pr_status") + if ! echo "$known_primary_cluster_addrs" | grep -q "$PRIMARY_LEADER_ADDR"; then + echo "$PRIMARY_LEADER_ADDR is not in .data.known_primary_cluster_addrs: $known_primary_cluster_addrs" 1>&2 + return 1 + fi + fi + + echo "$pr_status" + return 0 +} + +if [ "$IP_VERSION" != 4 ] && [ "$IP_VERSION" != 6 ]; then + fail "unsupported IP_VERSION: $IP_VERSION" +fi + +# Retry for a while because it can take some time for replication to sync +retry 10 check_pr_status diff --git a/enos/modules/vault_verify_raft_auto_join_voter/main.tf b/enos/modules/vault_verify_raft_auto_join_voter/main.tf new file mode 100644 index 0000000..826b00b --- /dev/null +++ b/enos/modules/vault_verify_raft_auto_join_voter/main.tf @@ -0,0 +1,76 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The vault cluster instances that were created" +} + +variable "ip_version" { + type = number + description = "The IP version to use for the Vault TCP listeners" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_cluster_addr_port" { + description = "The Raft cluster address port" + type = string +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +locals { + cluster_addrs = { + 4 : { for k, v in var.hosts : k => "${v.private_ip}:${var.vault_cluster_addr_port}" }, + 6 : { for k, v in var.hosts : k => "[${v.ipv6}]:${var.vault_cluster_addr_port}" }, + } +} + +resource "enos_remote_exec" "verify_raft_auto_join_voter" { + for_each = var.hosts + + environment = { + VAULT_ADDR = var.vault_addr + VAULT_CLUSTER_ADDR = local.cluster_addrs[var.ip_version][each.key] + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_LOCAL_BINARY_PATH = "${var.vault_install_dir}/vault" + VAULT_TOKEN = var.vault_root_token + } + + scripts = [abspath("${path.module}/scripts/verify-raft-auto-join-voter.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/vault_verify_raft_auto_join_voter/scripts/verify-raft-auto-join-voter.sh b/enos/modules/vault_verify_raft_auto_join_voter/scripts/verify-raft-auto-join-voter.sh new file mode 100644 index 0000000..c20aade --- /dev/null +++ 
b/enos/modules/vault_verify_raft_auto_join_voter/scripts/verify-raft-auto-join-voter.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +binpath=${VAULT_INSTALL_DIR}/vault + +fail() { + echo "$1" 1>&2 + return 1 +} + +retry() { + local retries=$1 + shift + local count=0 + + until "$@"; do + exit=$? + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + echo "retry $count" + else + return "$exit" + fi + done + + return 0 +} + +check_voter_status() { + voter_status=$($binpath operator raft list-peers -format json | jq -Mr --argjson expected "true" --arg ADDR "$VAULT_CLUSTER_ADDR" '.data.config.servers[] | select(.address==$ADDR) | .voter == $expected') + + if [[ "$voter_status" != 'true' ]]; then + fail "expected $VAULT_CLUSTER_ADDR to be raft voter, got raft status for node: $($binpath operator raft list-peers -format json | jq -Mr --arg ADDR "$VAULT_CLUSTER_ADDR" '.data.config.servers[] | select(.address==$ADDR)')" + fi +} + +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +# Retry a few times because it can take some time for things to settle after +# all the nodes are unsealed +retry 10 check_voter_status diff --git a/enos/modules/vault_verify_removed_node/main.tf b/enos/modules/vault_verify_removed_node/main.tf new file mode 100644 index 0000000..ecbb0ef --- /dev/null +++ b/enos/modules/vault_verify_removed_node/main.tf @@ -0,0 +1,246 @@ +# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The vault cluster instances that were removed" +} + +variable "retry_interval" { + type = number + description = "How many seconds to wait between each retry" + default = 2 +} + +variable "timeout" { + type = number + description = "The max number of seconds to wait before timing out" + default = 60 +} + +variable "listener_port" { + type = number + description = "The listener port for vault" +} +variable "vault_leader_host" { + type = object({ + ipv6 = string + private_ip = string + public_ip = string + }) + description = "The leader's host information" +} +variable "vault_addr" { + type = string + description = "The local address to use to query vault" +} +variable "cluster_port" { + type = number + description = "The cluster port for vault" +} + + +variable "ip_version" { + type = number + description = "The IP version to use for the Vault TCP listeners" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } +} +variable "vault_root_token" { + type = string + description = "The vault root token" +} +variable "vault_seal_type" { + type = string + description = "The Vault seal type" +} + +variable "add_back_nodes" { + type = bool + description = "whether to add the nodes back" +} + +variable "vault_unseal_keys" {} + +variable "vault_install_dir" { + type = string + description = "The directory where the vault binary is installed" +} + +resource "enos_remote_exec" "verify_raft_peer_removed" { + for_each = var.hosts + + environment = { + RETRY_INTERVAL = var.retry_interval + TIMEOUT_SECONDS = var.timeout + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + 
scripts = [abspath("${path.module}/scripts/verify_raft_remove_peer.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +resource "enos_remote_exec" "verify_unseal_fails" { + for_each = { + for idx, host in var.hosts : idx => host + if var.vault_seal_type == "shamir" + } + + environment = { + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + UNSEAL_KEYS = join(",", var.vault_unseal_keys) + } + + scripts = [abspath("${path.module}/scripts/verify_unseal_fails.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +resource "enos_remote_exec" "verify_rejoin_fails" { + for_each = var.hosts + + environment = { + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + RETRY_INTERVAL = var.retry_interval + TIMEOUT_SECONDS = var.timeout + VAULT_LEADER_ADDR = "${var.ip_version == 4 ? "${var.vault_leader_host.private_ip}" : "[${var.vault_leader_host.ipv6}]"}:${var.listener_port}" + } + + scripts = [abspath("${path.module}/scripts/verify_manual_rejoin_fails.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} +module "restart" { + depends_on = [enos_remote_exec.verify_rejoin_fails, enos_remote_exec.verify_raft_peer_removed] + source = "../restart_vault" + hosts = var.hosts + vault_addr = var.vault_addr + vault_install_dir = var.vault_install_dir +} + +resource "enos_remote_exec" "verify_removed_after_restart" { + depends_on = [module.restart] + for_each = var.hosts + + environment = { + RETRY_INTERVAL = var.retry_interval + TIMEOUT_SECONDS = var.timeout + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + } + + scripts = [abspath("${path.module}/scripts/verify_raft_remove_peer.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +module "stop" { + depends_on = [enos_remote_exec.verify_removed_after_restart] + 
source = "../stop_vault" + count = var.add_back_nodes ? 1 : 0 + + hosts = var.hosts +} + +resource "enos_remote_exec" "delete_data" { + depends_on = [module.stop] + for_each = { + for idx, host in var.hosts : idx => host + if var.add_back_nodes + } + + inline = ["sudo rm -rf /opt/raft/data/*"] + + transport = { + ssh = { + host = each.value.public_ip + } + } + +} + +resource "enos_remote_exec" "start" { + depends_on = [enos_remote_exec.delete_data] + for_each = { + for idx, host in var.hosts : idx => host + if var.add_back_nodes + } + inline = ["sudo systemctl start vault; sleep 5"] + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +resource "enos_vault_unseal" "unseal" { + depends_on = [ + enos_remote_exec.start + ] + for_each = { + for idx, host in var.hosts : idx => host + if var.vault_seal_type == "shamir" && var.add_back_nodes + } + + bin_path = "${var.vault_install_dir}/vault" + vault_addr = var.vault_addr + seal_type = var.vault_seal_type + unseal_keys = var.vault_seal_type != "shamir" ? null : var.vault_unseal_keys + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +module "verify_rejoin_succeeds" { + source = "../vault_verify_raft_auto_join_voter" + depends_on = [enos_vault_unseal.unseal] + count = var.add_back_nodes ? 1 : 0 + hosts = var.hosts + ip_version = var.ip_version + vault_root_token = var.vault_root_token + vault_install_dir = var.vault_install_dir + vault_addr = var.vault_addr + vault_cluster_addr_port = var.cluster_port +} diff --git a/enos/modules/vault_verify_removed_node/scripts/verify_manual_rejoin_fails.sh b/enos/modules/vault_verify_removed_node/scripts/verify_manual_rejoin_fails.sh new file mode 100644 index 0000000..ed3c359 --- /dev/null +++ b/enos/modules/vault_verify_removed_node/scripts/verify_manual_rejoin_fails.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +function fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" +[[ -z "$VAULT_LEADER_ADDR" ]] && fail "VAULT_LEADER_ADDR env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +result=$($binpath operator raft join "$VAULT_LEADER_ADDR") +output=$? +if [ $output -ne 2 ]; then + fail "Joining did not return code 2, instead $output: $result" +fi diff --git a/enos/modules/vault_verify_removed_node/scripts/verify_raft_remove_peer.sh b/enos/modules/vault_verify_removed_node/scripts/verify_raft_remove_peer.sh new file mode 100755 index 0000000..b853512 --- /dev/null +++ b/enos/modules/vault_verify_removed_node/scripts/verify_raft_remove_peer.sh @@ -0,0 +1,60 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +function fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set" +[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +getSysHealth() { + $binpath read -format=json sys/health sealedcode=299 haunhealthycode=299 removedcode=299 | jq -eMc '.data.removed_from_cluster' +} + +getStatus() { + $binpath status --format=json | jq -eMc '.removed_from_cluster' +} + +expectRemoved() { + local status + if ! 
status=$(getStatus); then + echo "failed to get vault status: $status" + return 1 + fi + if [[ "$status" != "true" ]]; then + echo "unexpected status $status" + return 1 + fi + + local health + # health is fetched and checked immediately below + if ! health=$(getSysHealth); then + echo "failed to get health: $health" + return 1 + fi + if [[ "$health" != "true" ]]; then + echo "unexpected health $health"; return 1 + fi + + return 0 +} + +begin_time=$(date +%s) +end_time=$((begin_time + TIMEOUT_SECONDS)) +while [ "$(date +%s)" -lt "$end_time" ]; do + if expectRemoved; then + exit 0 + fi + + sleep "$RETRY_INTERVAL" +done + +fail "Timed out waiting for raft removed status" diff --git a/enos/modules/vault_verify_removed_node/scripts/verify_unseal_fails.sh b/enos/modules/vault_verify_removed_node/scripts/verify_unseal_fails.sh new file mode 100644 index 0000000..c2eded9 --- /dev/null +++ b/enos/modules/vault_verify_removed_node/scripts/verify_unseal_fails.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +function fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +IFS="," read -r -a keys <<< "${UNSEAL_KEYS}" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +result=$($binpath operator unseal "${keys[0]}") +code=$? +if [ $code -eq 0 ]; then + fail "expected unseal to fail but got exit code $code: $result" +fi diff --git a/enos/modules/vault_verify_removed_node_shim/main.tf b/enos/modules/vault_verify_removed_node_shim/main.tf new file mode 100644 index 0000000..fae7944 --- /dev/null +++ b/enos/modules/vault_verify_removed_node_shim/main.tf @@ -0,0 +1,89 @@ +# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + } + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The vault cluster followers" +} + + +variable "retry_interval" { + type = number + description = "How many seconds to wait between each retry" + default = 2 +} + +variable "timeout" { + type = number + description = "The max number of seconds to wait before timing out" + default = 60 +} + +variable "listener_port" { + type = number + description = "The listener port for vault" +} + +variable "vault_leader_host" { + type = object({ + ipv6 = string + private_ip = string + public_ip = string + }) + description = "The leader's host information" +} + +variable "vault_addr" { + type = string + description = "The local address to use to query vault" +} + +variable "cluster_port" { + type = number + description = "The cluster port for vault" +} + +variable "ip_version" { + type = number + description = "The IP version to use for the Vault TCP listeners" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } +} +variable "vault_root_token" { + type = string + description = "The vault root token" +} +variable "vault_seal_type" { + type = string + description = "The Vault seal type" +} + +variable "add_back_nodes" { + type = bool + description = "whether to add the nodes back" +} + +variable "vault_unseal_keys" {} + +variable "vault_install_dir" { + type = string + description = "The directory where the vault binary is installed" +} \ No newline at end of file diff --git a/enos/modules/vault_verify_replication/main.tf b/enos/modules/vault_verify_replication/main.tf new file mode 100644 index 0000000..f9377d8 --- /dev/null +++ b/enos/modules/vault_verify_replication/main.tf @@ -0,0 +1,48 @@ +# 
Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The vault cluster instances that were created" +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_edition" { + type = string + description = "The vault product edition" + default = null +} + +resource "enos_remote_exec" "smoke-verify-replication" { + for_each = var.hosts + + environment = { + VAULT_ADDR = var.vault_addr + VAULT_EDITION = var.vault_edition + } + + scripts = [abspath("${path.module}/scripts/smoke-verify-replication.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/vault_verify_replication/scripts/smoke-verify-replication.sh b/enos/modules/vault_verify_replication/scripts/smoke-verify-replication.sh new file mode 100644 index 0000000..72ecbd2 --- /dev/null +++ b/enos/modules/vault_verify_replication/scripts/smoke-verify-replication.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +function fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_EDITION" ]] && fail "VAULT_EDITION env variable has not been set" + +# Replication status endpoint should have data.mode disabled for CE release +status=$(curl "${VAULT_ADDR}/v1/sys/replication/status") +if [ "$VAULT_EDITION" == "ce" ]; then + if [ "$(jq -r '.data.mode' <<< "$status")" != "disabled" ]; then + fail "replication data mode is not disabled for CE release!" + fi +else + if [ "$(jq -r '.data.dr' <<< "$status")" == "" ]; then + fail "DR replication should be available for an ENT release!" 
+ fi + if [ "$(jq -r '.data.performance' <<< "$status")" == "" ]; then + fail "Performance replication should be available for an ENT release!" + fi +fi diff --git a/enos/modules/vault_verify_ui/main.tf b/enos/modules/vault_verify_ui/main.tf new file mode 100644 index 0000000..61d7361 --- /dev/null +++ b/enos/modules/vault_verify_ui/main.tf @@ -0,0 +1,41 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The vault cluster instances that were created" +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +resource "enos_remote_exec" "smoke-verify-ui" { + for_each = var.hosts + + environment = { + VAULT_ADDR = var.vault_addr, + } + + scripts = [abspath("${path.module}/scripts/smoke-verify-ui.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/vault_verify_ui/scripts/smoke-verify-ui.sh b/enos/modules/vault_verify_ui/scripts/smoke-verify-ui.sh new file mode 100644 index 0000000..7500788 --- /dev/null +++ b/enos/modules/vault_verify_ui/scripts/smoke-verify-ui.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +url_effective=$(curl -w "%{url_effective}\n" -I -L -s -S "${VAULT_ADDR}" -o /dev/null) +expected="${VAULT_ADDR}/ui/" +if [ "${url_effective}" != "${expected}" ]; then + fail "Expecting Vault to redirect to UI.\nExpected: ${expected}\nGot: ${url_effective}" +fi + +if curl -s "${VAULT_ADDR}/ui/" | grep -q 'Vault UI is not available'; then + fail "Vault UI is not available" +fi diff --git a/enos/modules/vault_verify_undo_logs/main.tf b/enos/modules/vault_verify_undo_logs/main.tf new file mode 100644 index 0000000..5547321 --- /dev/null +++ b/enos/modules/vault_verify_undo_logs/main.tf @@ -0,0 +1,77 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "expected_state" { + type = number + description = "The expected state to have in vault.core.replication.write_undo_logs telemetry. Must be either 1 for enabled or 0 for disabled." 
+ + validation { + condition = contains([0, 1], var.expected_state) + error_message = "The expected_state must be either 0 or 1" + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The vault cluster target hosts to check" +} + +variable "retry_interval" { + type = number + description = "How many seconds to wait between each retry" + default = 2 +} + +variable "timeout" { + type = number + description = "The max number of seconds to wait before timing out" + default = 60 +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +resource "enos_remote_exec" "smoke-verify-undo-logs" { + for_each = var.hosts + + environment = { + EXPECTED_STATE = var.expected_state + RETRY_INTERVAL = var.retry_interval + TIMEOUT_SECONDS = var.timeout + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_TOKEN = var.vault_root_token + } + + scripts = [abspath("${path.module}/scripts/smoke-verify-undo-logs.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/vault_verify_undo_logs/scripts/smoke-verify-undo-logs.sh b/enos/modules/vault_verify_undo_logs/scripts/smoke-verify-undo-logs.sh new file mode 100644 index 0000000..7736331 --- /dev/null +++ b/enos/modules/vault_verify_undo_logs/scripts/smoke-verify-undo-logs.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +function fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$EXPECTED_STATE" ]] && fail "EXPECTED_STATE env variable has not been set" +[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set" +[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +begin_time=$(date +%s) +end_time=$((begin_time + TIMEOUT_SECONDS)) +while [ "$(date +%s)" -lt "$end_time" ]; do + state=$($binpath read sys/metrics -format=json | jq -r '.data.Gauges[] | select(.Name == "vault.core.replication.write_undo_logs")') + target_undo_logs_status="$(jq -r '.Value' <<< "$state")" + + if [ "$target_undo_logs_status" == "$EXPECTED_STATE" ]; then + echo "vault.core.replication.write_undo_logs has expected Value: \"${EXPECTED_STATE}\"" + exit 0 + fi + + echo "Waiting for vault.core.replication.write_undo_logs to have Value: \"${EXPECTED_STATE}\"" + sleep "$RETRY_INTERVAL" +done + +fail "Timed out waiting for vault.core.replication.write_undo_logs to have Value: \"${EXPECTED_STATE}\"" diff --git a/enos/modules/vault_verify_version/main.tf b/enos/modules/vault_verify_version/main.tf new file mode 100644 index 0000000..9c992bb --- /dev/null +++ b/enos/modules/vault_verify_version/main.tf @@ -0,0 +1,100 @@ +# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The Vault cluster instances that were created" +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_build_date" { + type = string + description = "The Vault artifact build date" + default = null +} + +variable "vault_edition" { + type = string + description = "The Vault product edition" + default = null +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_product_version" { + type = string + description = "The Vault product version" + default = null +} + +variable "vault_revision" { + type = string + description = "The Vault product revision" + default = null +} + +variable "vault_root_token" { + type = string + description = "The Vault root token" + default = null +} + +resource "enos_remote_exec" "verify_cli_version" { + for_each = var.hosts + + environment = { + VAULT_ADDR = var.vault_addr, + VAULT_BUILD_DATE = var.vault_build_date, + VAULT_EDITION = var.vault_edition, + VAULT_INSTALL_DIR = var.vault_install_dir, + VAULT_REVISION = var.vault_revision, + VAULT_TOKEN = var.vault_root_token, + VAULT_VERSION = var.vault_product_version, + } + + scripts = [abspath("${path.module}/scripts/verify-cli-version.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +resource "enos_remote_exec" "verify_cluster_version" { + for_each = var.hosts + + environment = { + VAULT_ADDR = var.vault_addr, + VAULT_BUILD_DATE = var.vault_build_date, + VAULT_TOKEN = var.vault_root_token, + VAULT_VERSION = var.vault_product_version, + } + + scripts = [abspath("${path.module}/scripts/verify-cluster-version.sh")] + + transport = { + 
ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/vault_verify_version/scripts/verify-cli-version.sh b/enos/modules/vault_verify_version/scripts/verify-cli-version.sh new file mode 100644 index 0000000..ee8be4a --- /dev/null +++ b/enos/modules/vault_verify_version/scripts/verify-cli-version.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +# Verify the Vault "version" includes the correct base version, build date, +# revision SHA, and edition metadata. +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_BUILD_DATE" ]] && fail "VAULT_BUILD_DATE env variable has not been set" +[[ -z "$VAULT_EDITION" ]] && fail "VAULT_EDITION env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_REVISION" ]] && fail "VAULT_REVISION env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" +[[ -z "$VAULT_VERSION" ]] && fail "VAULT_VERSION env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +edition=${VAULT_EDITION} +version=${VAULT_VERSION} +sha=${VAULT_REVISION} +build_date=${VAULT_BUILD_DATE} + +test -x "$binpath" || fail "unable to locate vault binary at $binpath" +version_expected="Vault v$version ($sha), built $build_date" + +case "$edition" in + *ce) ;; + *ent) ;; + *ent.hsm) version_expected="$version_expected (cgo)" ;; + *ent.fips1403) version_expected="$version_expected (cgo)" ;; + *ent.hsm.fips1403) version_expected="$version_expected (cgo)" ;; + *) fail "Unknown Vault edition: ($edition)" ;; +esac + +version_expected_nosha=$(echo "$version_expected" | awk '!($3="")' | sed 's/ / /' | sed -e 's/[[:space:]]*$//') +version_output=$("$binpath" version) + +if [[ "$version_output" == "$version_expected_nosha" ]] || [[ "$version_output" == "$version_expected" ]]; 
then + echo "Version verification succeeded!" +else + msg="$(printf "\nThe Vault cluster did not match the expected version, expected:\n%s\nor\n%s\ngot:\n%s" "$version_expected" "$version_expected_nosha" "$version_output")" + if type diff &> /dev/null; then + # Diff exits non-zero if we have a diff, which we want, so we'll guard against failing early. + if ! version_diff=$(diff <(echo "$version_expected") <(echo "$version_output") -u -L expected -L got); then + msg="$(printf "\nThe Vault cluster did not match the expected version:\n%s" "$version_diff")" + fi + fi + + fail "$msg" +fi diff --git a/enos/modules/vault_verify_version/scripts/verify-cluster-version.sh b/enos/modules/vault_verify_version/scripts/verify-cluster-version.sh new file mode 100644 index 0000000..f0afee6 --- /dev/null +++ b/enos/modules/vault_verify_version/scripts/verify-cluster-version.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +# Verify the Vault "version" includes the correct base version, build date, +# revision SHA, and edition metadata. +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_BUILD_DATE" ]] && fail "VAULT_BUILD_DATE env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" +[[ -z "$VAULT_VERSION" ]] && fail "VAULT_VERSION env variable has not been set" + +# The sys/version-history endpoint only includes major.minor.patch, any other semver fields need to +# be stripped out. +if ! version=$(cut -d + -f1 <<< "$VAULT_VERSION" | cut -d - -f1); then + fail "failed to parse the expected version: $version" +fi + +if ! vh=$(curl -s -X LIST -H "X-Vault-Token: $VAULT_TOKEN" "${VAULT_ADDR}/v1/sys/version-history" | jq -eMc '.data'); then + fail "failed to read Vault cluster version history: $vh" +fi + +if ! 
out=$(jq -eMc --arg version "$version" '.keys | contains([$version])' <<< "$vh"); then + fail "cluster version history does not include our expected version: expected: $version, versions: $(jq -eMc '.keys' <<< "$vh"): output: $out" +fi + +if ! out=$(jq -eMc --arg version "$version" --arg bd "$VAULT_BUILD_DATE" '.key_info[$version].build_date == $bd' <<< "$vh"); then + fail "cluster version history build date is not the expected date: expected: true, expected date: $VAULT_BUILD_DATE, key_info: $(jq -eMc '.key_info' <<< "$vh"), output: $out" +fi + +printf "Cluster version information is valid!: %s\n" "$vh" diff --git a/enos/modules/vault_wait_for_cluster_unsealed/main.tf b/enos/modules/vault_wait_for_cluster_unsealed/main.tf new file mode 100644 index 0000000..ce9ee25 --- /dev/null +++ b/enos/modules/vault_wait_for_cluster_unsealed/main.tf @@ -0,0 +1,62 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The vault cluster instances that were created" +} + +variable "retry_interval" { + type = number + description = "How many seconds to wait between each retry" + default = 2 +} + +variable "timeout" { + type = number + description = "The max number of seconds to wait before timing out" + default = 60 +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +resource "enos_remote_exec" "verify_node_unsealed" { + for_each = var.hosts + + scripts = [abspath("${path.module}/scripts/verify-vault-node-unsealed.sh")] + + environment = { + HOST_IPV4 = each.value.public_ip + HOST_IPV6 = each.value.ipv6 + RETRY_INTERVAL = var.retry_interval + 
TIMEOUT_SECONDS = var.timeout + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + } + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/vault_wait_for_cluster_unsealed/scripts/verify-vault-node-unsealed.sh b/enos/modules/vault_wait_for_cluster_unsealed/scripts/verify-vault-node-unsealed.sh new file mode 100644 index 0000000..1bce520 --- /dev/null +++ b/enos/modules/vault_wait_for_cluster_unsealed/scripts/verify-vault-node-unsealed.sh @@ -0,0 +1,58 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set" +[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +getStatus() { + $binpath status -format json +} + +isUnsealed() { + local status + if ! status=$(getStatus); then + echo "failed to get vault status" 1>&2 + return 1 + fi + + if status=$(jq -Mre --argjson expected "false" '.sealed == $expected' <<< "$status"); then + echo "vault is unsealed: $status" + return 0 + fi + + echo "vault is sealed" 1>&2 + return 1 +} + +begin_time=$(date +%s) +end_time=$((begin_time + TIMEOUT_SECONDS)) +while [ "$(date +%s)" -lt "$end_time" ]; do + echo "waiting for vault to be unsealed..." 
+ + if isUnsealed; then + exit 0 + fi + + sleep "$RETRY_INTERVAL" +done + +if [ -n "$HOST_IPV6" ]; then + fail "timed out waiting for Vault cluster on ${HOST_IPV6} to be unsealed" +fi +if [ -n "$HOST_IPV4" ]; then + fail "timed out waiting for Vault cluster on ${HOST_IPV4} to be unsealed" +fi +fail "timed out waiting for Vault cluster to be unsealed" diff --git a/enos/modules/vault_wait_for_leader/main.tf b/enos/modules/vault_wait_for_leader/main.tf new file mode 100644 index 0000000..7c29280 --- /dev/null +++ b/enos/modules/vault_wait_for_leader/main.tf @@ -0,0 +1,82 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The vault cluster hosts that can be expected as a leader" +} + +variable "ip_version" { + type = number + description = "The IP version used for the Vault TCP listener" + + validation { + condition = contains([4, 6], var.ip_version) + error_message = "The ip_version must be either 4 or 6" + } +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +variable "timeout" { + type = number + description = "The max number of seconds to wait before timing out" + default = 60 +} + +variable "retry_interval" { + type = number + description = "How many seconds to wait between each retry" + default = 2 +} + +locals { + ipv6s = [for k, v in values(tomap(var.hosts)) : tostring(v["ipv6"])] + private_ips = [for k, v in values(tomap(var.hosts)) : tostring(v["private_ip"])] +} + +resource "enos_remote_exec" "wait_for_leader_in_hosts" { + environment 
= { + IP_VERSION = var.ip_version + TIMEOUT_SECONDS = var.timeout + RETRY_INTERVAL = var.retry_interval + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTANCE_IPV6S = jsonencode(local.ipv6s) + VAULT_INSTANCE_PRIVATE_IPS = jsonencode(local.private_ips) + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/scripts/wait-for-leader.sh")] + + transport = { + ssh = { + host = var.hosts[0].public_ip + } + } +} diff --git a/enos/modules/vault_wait_for_leader/scripts/wait-for-leader.sh b/enos/modules/vault_wait_for_leader/scripts/wait-for-leader.sh new file mode 100644 index 0000000..dc97cb6 --- /dev/null +++ b/enos/modules/vault_wait_for_leader/scripts/wait-for-leader.sh @@ -0,0 +1,96 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set" +[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +findLeaderInPrivateIPs() { + # Find the leader private IP address + local leader_private_ip + if ! leader_private_ip=$($binpath read sys/leader -format=json | jq -er '.data.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")'); then + # Some older versions of vault don't support reading sys/leader. Fallback to the cli status. + if ! leader_private_ip=$($binpath status -format json | jq -er '.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")'); then + return 1 + fi + fi + + if isIn=$(jq -er --arg ip "$leader_private_ip" 'map(select(. 
== $ip)) | length == 1' <<< "$VAULT_INSTANCE_PRIVATE_IPS"); then + if [[ "$isIn" == "true" ]]; then + echo "$leader_private_ip" + return 0 + fi + fi + + return 1 +} + +findLeaderInIPV6s() { + # Find the leader IPv6 address + local leader_ipv6 + if ! leader_ipv6=$($binpath read sys/leader -format=json | jq -er '.data.leader_address | scan("\\[(.+)\\]") | .[0]'); then + # Some older versions of vault don't support reading sys/leader. Fallback to the cli status. + if ! leader_ipv6=$($binpath status -format json | jq -er '.leader_address | scan("\\[(.+)\\]") | .[0]'); then + return 1 + fi + fi + + if isIn=$(jq -er --arg ip "$leader_ipv6" 'map(select(. == $ip)) | length == 1' <<< "$VAULT_INSTANCE_IPV6S"); then + if [[ "$isIn" == "true" ]]; then + echo "$leader_ipv6" + return 0 + fi + fi + + return 1 +} + +begin_time=$(date +%s) +end_time=$((begin_time + TIMEOUT_SECONDS)) +while [ "$(date +%s)" -lt "$end_time" ]; do + # Look for the leader among our expected hosts using the configured IP version + case $IP_VERSION in + 4) + [[ -z "$VAULT_INSTANCE_PRIVATE_IPS" ]] && fail "VAULT_INSTANCE_PRIVATE_IPS env variable has not been set" + if findLeaderInPrivateIPs; then + exit 0 + fi + ;; + 6) + [[ -z "$VAULT_INSTANCE_IPV6S" ]] && fail "VAULT_INSTANCE_IPV6S env variable has not been set" + if findLeaderInIPV6s; then + exit 0 + fi + ;; + *) + fail "Unsupported IP_VERSION: $IP_VERSION" + ;; + esac + + sleep "$RETRY_INTERVAL" +done + +case $IP_VERSION in + 4) + fail "Timed out waiting for one of $VAULT_INSTANCE_PRIVATE_IPS to be leader." + ;; + 6) + fail "Timed out waiting for one of $VAULT_INSTANCE_IPV6S to be leader." + ;; + *) + fail "Timed out waiting for leader" + ;; + esac diff --git a/enos/modules/vault_wait_for_seal_rewrap/main.tf b/enos/modules/vault_wait_for_seal_rewrap/main.tf new file mode 100644 index 0000000..920672a --- /dev/null +++ b/enos/modules/vault_wait_for_seal_rewrap/main.tf @@ -0,0 +1,78 @@ +# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The vault cluster hosts that can be expected as a leader" +} + +variable "retry_interval" { + type = number + description = "How many seconds to wait between each retry" + default = 2 +} + +variable "timeout" { + type = number + description = "The max number of seconds to wait before timing out" + default = 60 +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +locals { + private_ips = [for k, v in values(tomap(var.hosts)) : tostring(v["private_ip"])] + first_key = element(keys(enos_remote_exec.wait_for_seal_rewrap_to_be_completed), 0) +} + +resource "enos_remote_exec" "wait_for_seal_rewrap_to_be_completed" { + for_each = var.hosts + environment = { + RETRY_INTERVAL = var.retry_interval + TIMEOUT_SECONDS = var.timeout + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/scripts/wait-for-seal-rewrap.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +output "stdout" { + value = enos_remote_exec.wait_for_seal_rewrap_to_be_completed[local.first_key].stdout +} + +output "stderr" { + value = enos_remote_exec.wait_for_seal_rewrap_to_be_completed[local.first_key].stderr +} diff --git a/enos/modules/vault_wait_for_seal_rewrap/scripts/wait-for-seal-rewrap.sh b/enos/modules/vault_wait_for_seal_rewrap/scripts/wait-for-seal-rewrap.sh new file mode 100644 index 0000000..67bc144 --- 
b/enos/modules/vault_wait_for_seal_rewrap/scripts/wait-for-seal-rewrap.sh @@ -0,0 +1,72 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set" +[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +getRewrapData() { + $binpath read sys/sealwrap/rewrap -format=json | jq -eMc '.data' +} + +waitForRewrap() { + local data + if ! data=$(getRewrapData); then + echo "failed getting /v1/sys/sealwrap/rewrap data" 1>&2 + return 1 + fi + + if ! jq -e '.is_running == false' <<< "$data" &> /dev/null; then + echo "rewrap is running" 1>&2 + return 1 + fi + + if ! jq -e '.entries.failed == 0' <<< "$data" &> /dev/null; then + local entries + entries=$(jq -Mc '.entries.failed' <<< "$data") + echo "rewrap has $entries failed entries" 1>&2 + return 1 + fi + + if ! jq -e '.entries.processed == .entries.succeeded' <<< "$data" &> /dev/null; then + local processed + local succeeded + processed=$(jq -Mc '.entries.processed' <<< "$data") + succeeded=$(jq -Mc '.entries.succeeded' <<< "$data") + echo "the number of processed entries ($processed) does not equal then number of succeeded ($succeeded)" 1>&2 + return 1 + fi + + if jq -e '.entries.processed == 0' <<< "$data" &> /dev/null; then + echo "A seal rewrap has not been started yet. Number of processed entries is zero and a rewrap is not yet running." 
+ return 1 + fi + + echo "$data" + return 0 +} + +begin_time=$(date +%s) +end_time=$((begin_time + TIMEOUT_SECONDS)) +while [ "$(date +%s)" -lt "$end_time" ]; do + if waitForRewrap; then + exit 0 + fi + + sleep "$RETRY_INTERVAL" +done + +fail "Timed out waiting for seal rewrap to be completed. Data:\n\t$(getRewrapData)" diff --git a/enos/modules/verify_log_secrets/main.tf b/enos/modules/verify_log_secrets/main.tf new file mode 100644 index 0000000..ef53bf0 --- /dev/null +++ b/enos/modules/verify_log_secrets/main.tf @@ -0,0 +1,96 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "audit_log_file_path" { + type = string +} + +variable "leader_host" { + type = object({ + ipv6 = string + private_ip = string + public_ip = string + }) + description = "The cluster leader host. Only the leader write to the audit log" +} + +variable "radar_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" + default = "/opt/vault-radar/bin" +} + +variable "radar_license_path" { + description = "The path to a vault-radar license file" +} + +variable "radar_version" { + description = "The version of Vault Radar to install" + default = "0.24.0" # must be >= 0.17.0 + // NOTE: A `semverconstraint` validation condition would be very useful here + // when we get around to exporting our custom enos funcs in the provider. 
+} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +variable "vault_unit_name" { + type = string + description = "The vault unit name" + default = "vault" +} + +resource "enos_bundle_install" "radar" { + destination = var.radar_install_dir + + release = { + product = "vault-radar" + version = var.radar_version + // Radar doesn't have CE/Ent editions. CE is equivalent to no edition metadata. + edition = "ce" + } + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +resource "enos_remote_exec" "scan_logs_for_secrets" { + depends_on = [ + enos_bundle_install.radar, + ] + + environment = { + AUDIT_LOG_FILE_PATH = var.audit_log_file_path + VAULT_ADDR = var.vault_addr + VAULT_RADAR_INSTALL_DIR = var.radar_install_dir + VAULT_RADAR_LICENSE = file(var.radar_license_path) + VAULT_TOKEN = var.vault_root_token + VAULT_UNIT_NAME = var.vault_unit_name + } + + scripts = [abspath("${path.module}/scripts/scan_logs_for_secrets.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} diff --git a/enos/modules/verify_log_secrets/scripts/scan_logs_for_secrets.sh b/enos/modules/verify_log_secrets/scripts/scan_logs_for_secrets.sh new file mode 100644 index 0000000..f1b3b83 --- /dev/null +++ b/enos/modules/verify_log_secrets/scripts/scan_logs_for_secrets.sh @@ -0,0 +1,72 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +fail() { + echo "$1" 1>&2 + exit 1 +} + +verify_radar_scan_output_file() { + # Given a file with a radar scan output, filter out tagged false positives and verify that no + # other secrets remain. + if ! 
jq -eMcn '[inputs] | [.[] | select(.type != "aws_access_key_id") | select((.tags == null) or (.tags | contains(["ignore_rule"]) | not ))] | length == 0' < "$2"; then + found=$(jq -eMn '[inputs] | [.[] | select(.type != "aws_access_key_id") | select((.tags == null) or (.tags | contains(["ignore_rule"]) | not ))]' < "$2") + fail "failed to verify radar secrets output: vault radar detected secrets in $1!: $found" +} + +set -e + +[[ -z "$AUDIT_LOG_FILE_PATH" ]] && fail "AUDIT_LOG_FILE_PATH env variable has not been set" +[[ -z "$VAULT_RADAR_INSTALL_DIR" ]] && fail "VAULT_RADAR_INSTALL_DIR env variable has not been set" +# Radar implicitly requires the following for creating the index and running radar itself +[[ -z "$VAULT_RADAR_LICENSE" ]] && fail "VAULT_RADAR_LICENSE env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" +[[ -z "$VAULT_UNIT_NAME" ]] && fail "VAULT_UNIT_NAME env variable has not been set" + +radar_bin_path=${VAULT_RADAR_INSTALL_DIR}/vault-radar +test -x "$radar_bin_path" || fail "failed to scan vault audit log: unable to locate radar binary at $radar_bin_path" + +# Make sure our audit log file exists. +if [ ! -f "$AUDIT_LOG_FILE_PATH" ]; then + fail "failed to scan vault audit log: no audit log file found at $AUDIT_LOG_FILE_PATH" +fi + +# Create a readable copy of the audit log. +if ! sudo cp "$AUDIT_LOG_FILE_PATH" audit.log; then + fail "failed to scan vault audit log: could not copy audit log for scanning" +fi + +if ! sudo chmod +r audit.log; then + fail "failed to scan vault audit log: could not make audit log copy readable" +fi + +# Create a radar index file of our KVv2 secret values. +if ! out=$($radar_bin_path index vault --offline --disable-ui --outfile index.jsonl 2>&1); then + fail "failed to generate vault-radar index of vault cluster: $out" +fi + +# Write our ignore rules to avoid known false positives. 
+mkdir -p "$HOME/.hashicorp/vault-radar" +cat >> "$HOME/.hashicorp/vault-radar/ignore.yaml" << EOF +- secret_values: + - "hmac-sha256:*" +EOF + +# Scan the audit log for known secrets via the audit log and other secrets using radars built-in +# secret types. +if ! out=$("$radar_bin_path" scan file --offline --disable-ui -p audit.log --index-file index.jsonl -f json -o audit-secrets.json 2>&1); then + fail "failed to scan vault audit log: vault-radar scan file failed: $out" +fi + +verify_radar_scan_output_file vault-audit-log audit-secrets.json + +# Scan the vault journal for known secrets via the audit log and other secrets using radars built-in +# secret types. +if ! out=$(sudo journalctl --no-pager -u "$VAULT_UNIT_NAME" -a | "$radar_bin_path" scan file --offline --disable-ui --index-file index.jsonl -f json -o journal-secrets.json 2>&1); then + fail "failed to scan vault journal: vault-radar scan file failed: $out" +fi + +verify_radar_scan_output_file vault-journal journal-secrets.json diff --git a/enos/modules/verify_seal_type/main.tf b/enos/modules/verify_seal_type/main.tf new file mode 100644 index 0000000..e8d8189 --- /dev/null +++ b/enos/modules/verify_seal_type/main.tf @@ -0,0 +1,54 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The vault cluster instances that were created" +} + +variable "seal_type" { + type = string + description = "The expected seal type" + default = "shamir" +} + + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +resource "enos_remote_exec" "verify_seal_type" { + for_each = var.hosts + + scripts = [abspath("${path.module}/scripts/verify-seal-type.sh")] + + environment = { + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + EXPECTED_SEAL_TYPE = var.seal_type + } + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/verify_seal_type/scripts/verify-seal-type.sh b/enos/modules/verify_seal_type/scripts/verify-seal-type.sh new file mode 100644 index 0000000..82a7985 --- /dev/null +++ b/enos/modules/verify_seal_type/scripts/verify-seal-type.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$EXPECTED_SEAL_TYPE" ]] && fail "EXPECTED_SEAL_TYPE env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +count=0 +retries=2 +while :; do + if seal_status=$($binpath read sys/seal-status -format=json); then + if jq -Mer --arg expected "$EXPECTED_SEAL_TYPE" '.data.type == $expected' <<< "$seal_status" &> /dev/null; then + exit 0 + fi + fi + + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + printf "Seal Status: %s\n" "$seal_status" + got=$(jq -Mer '.data.type' <<< "$seal_status") + fail "Expected seal type to be $EXPECTED_SEAL_TYPE, got: $got" + fi +done diff --git a/enos/modules/verify_secrets_engines/modules/create/auth.tf b/enos/modules/verify_secrets_engines/modules/create/auth.tf new file mode 100644 index 0000000..f81f389 --- /dev/null +++ b/enos/modules/verify_secrets_engines/modules/create/auth.tf @@ -0,0 +1,233 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +locals { + // Variables + auth_userpass_path = "userpass" # auth/userpass + user_name = "testuser" # auth/userpass/users/testuser + user_password = "passtestuser1" # auth/userpass/login/passtestuser1 + user_policy_name = "reguser" # sys/policy/reguser + + auth_ldap_path = "ldap" # auth/ldap + + // Response data + user_login_data = jsondecode(enos_remote_exec.auth_login_testuser.stdout) + sys_auth_data = jsondecode(enos_remote_exec.read_sys_auth.stdout).data + + // Output + auth_output = { + sys = local.sys_auth_data + userpass = { + path = local.auth_userpass_path + user = { + name = local.user_name + password = local.user_password + policy_name = local.user_policy_name + login = local.user_login_data + } + } + } +} + +output "auth" { + value = local.auth_output +} + +# Enable userpass auth +resource "enos_remote_exec" "auth_enable_userpass" { + environment = { + AUTH_METHOD = "userpass" + AUTH_PATH = local.auth_userpass_path + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/auth-enable.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +# Get the sys/auth data after enabling our auth method +resource "enos_remote_exec" "read_sys_auth" { + depends_on = [ + enos_remote_exec.auth_enable_userpass, + ] + environment = { + REQPATH = "sys/auth" + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/read.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +# Create a default policy for our users that allows them to read and list. 
+resource "enos_remote_exec" "policy_read_reguser" { + environment = { + POLICY_NAME = local.user_policy_name + POLICY_CONFIG = <<-EOF + path "*" { + capabilities = ["read", "list"] + } + EOF + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/policy-write.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +# Create our user +resource "enos_remote_exec" "auth_create_testuser" { + depends_on = [ + enos_remote_exec.auth_enable_userpass, + enos_remote_exec.policy_read_reguser, + ] + + environment = { + AUTH_PATH = local.auth_userpass_path + PASSWORD = local.user_password + POLICIES = local.user_policy_name + USERNAME = local.user_name + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/auth-userpass-write.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +resource "enos_remote_exec" "auth_login_testuser" { + depends_on = [ + // Don't try to login until created our user and added it to the kv_writers group + enos_remote_exec.auth_create_testuser, + enos_remote_exec.identity_group_kv_writers, + ] + + environment = { + AUTH_PATH = local.auth_userpass_path + PASSWORD = local.user_password + USERNAME = local.user_name + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/auth-userpass-login.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +# Enable ldap auth +resource "enos_remote_exec" "auth_enable_ldap" { + environment = { + AUTH_METHOD = "ldap" + AUTH_PATH = local.auth_ldap_path + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/auth-enable.sh")] + + 
transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +# Write the initial ldap config +# This is a one time write to the leader node. +resource "enos_remote_exec" "auth_write_ldap_config" { + depends_on = [ + enos_remote_exec.auth_enable_ldap + ] + + environment = { + AUTH_PATH = local.auth_ldap_path + GROUPATTR = "memberOf" + GROUPDN = "CN=Users,DC=corp,DC=example,DC=net" + INSECURE_TLS = "true" + POLICIES = local.auth_ldap_path + UPNDOMAIN = "corp.example.net" + URL = "ldaps://ldap.example.com" + USERATTR = "sAMAccountName" + USERDN = "CN=Users,DC=corp,DC=example,DC=net" + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_TOKEN = var.vault_root_token + } + + scripts = [abspath("${path.module}/../../scripts/auth-ldap-write.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +# Update the ldap config. Choose a random node each time to ensure that writes +# to all nodes are forwarded correctly and behave as we expect. 
+resource "random_integer" "auth_update_ldap_config_idx" { + min = 0 + max = length(var.hosts) - 1 +} + +resource "enos_remote_exec" "auth_update_ldap_config" { + depends_on = [ + enos_remote_exec.auth_write_ldap_config + ] + + environment = { + AUTH_PATH = local.auth_ldap_path + GROUPATTR = "memberOf" + GROUPDN = "CN=Users,DC=corp,DC=example,DC=net" + INSECURE_TLS = "true" + POLICIES = local.auth_ldap_path + UPNDOMAIN = "corp.example.net" + URL = "ldaps://ldap2.example.com" + USERATTR = "sAMAccountName" + USERDN = "CN=Users,DC=corp,DC=example,DC=net" + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_TOKEN = var.vault_root_token + } + + scripts = [abspath("${path.module}/../../scripts/auth-ldap-write.sh")] + + transport = { + ssh = { + host = var.hosts[random_integer.auth_update_ldap_config_idx.result].public_ip + } + } +} diff --git a/enos/modules/verify_secrets_engines/modules/create/aws.tf b/enos/modules/verify_secrets_engines/modules/create/aws.tf new file mode 100644 index 0000000..a96902e --- /dev/null +++ b/enos/modules/verify_secrets_engines/modules/create/aws.tf @@ -0,0 +1,21 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +module "create_aws_secrets_engine" { + count = var.create_aws_secrets_engine ? 1 : 0 + source = "./aws" + + hosts = var.hosts + leader_host = var.leader_host + vault_addr = var.vault_addr + vault_root_token = var.vault_root_token + vault_install_dir = var.vault_install_dir +} + +locals { + aws_state = var.create_aws_secrets_engine ? module.create_aws_secrets_engine[0].state : null +} + +output "aws" { + value = local.aws_state +} diff --git a/enos/modules/verify_secrets_engines/modules/create/aws/aws.tf b/enos/modules/verify_secrets_engines/modules/create/aws/aws.tf new file mode 100644 index 0000000..afd9db9 --- /dev/null +++ b/enos/modules/verify_secrets_engines/modules/create/aws/aws.tf @@ -0,0 +1,158 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The Vault cluster instances that were created" +} + +variable "leader_host" { + type = object({ + ipv6 = string + private_ip = string + public_ip = string + }) + + description = "Vault cluster leader host" +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_root_token" { + type = string + description = "The Vault root token" + default = null +} + +locals { + // Variables + aws_mount = "aws" + vault_aws_role = "enos_test_role" + my_email = split("/", data.aws_caller_identity.current.arn)[2] + + // State output + state = { + aws_role = data.aws_iam_role.premade_demo_assumed_role.name + aws_role_arn = data.aws_iam_role.premade_demo_assumed_role.arn + aws_policy_arn = data.aws_iam_policy.premade_demo_user_policy.arn + aws_user_name = aws_iam_user.aws_enos_test_user.name + aws_access_key = aws_iam_access_key.aws_enos_test_user.id + aws_secret_key = aws_iam_access_key.aws_enos_test_user.secret + mount = local.aws_mount + region = data.aws_region.current.name + vault_aws_role = local.vault_aws_role + } +} + +output "state" { + value = local.state +} + +resource "random_id" "unique_suffix" { + byte_length = 4 +} + +data "aws_caller_identity" "current" {} + +data "aws_region" "current" {} + +# The "DemoUser" policy is a predefined policy created by the security team. +# This policy grants the necessary AWS permissions required for role generation via Vault. 
+# Reference: https://github.com/hashicorp/honeybee-templates/blob/main/templates/iam_policy/DemoUser.yaml +data "aws_iam_policy" "premade_demo_user_policy" { + name = "DemoUser" +} + +# This role was provisioned by the security team using the repository referenced below. +# This role includes the necessary policies to enable AWS credential generation and rotation via Vault. +# Reference: https://github.com/hashicorp/honeybee-templates/blob/main/templates/iam_role/vault-assumed-role-credentials-demo.yaml +data "aws_iam_role" "premade_demo_assumed_role" { + name = "vault-assumed-role-credentials-demo" +} + +# Creating new test user +resource "aws_iam_user" "aws_enos_test_user" { + name = "demo-${local.my_email}-${random_id.unique_suffix.hex}" + permissions_boundary = data.aws_iam_policy.premade_demo_user_policy.arn + force_destroy = true +} + +resource "aws_iam_user_policy_attachment" "aws_enos_test_user" { + user = aws_iam_user.aws_enos_test_user.name + policy_arn = data.aws_iam_policy.premade_demo_user_policy.arn +} + +resource "aws_iam_access_key" "aws_enos_test_user" { + user = aws_iam_user.aws_enos_test_user.name + lifecycle { + prevent_destroy = false + } +} + +# Enable AWS secrets engine +resource "enos_remote_exec" "secrets_enable_aws_secret" { + environment = { + ENGINE = local.aws_mount + MOUNT = local.aws_mount + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../../scripts/secrets-enable.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +# Generate AWS Credentials +resource "enos_remote_exec" "aws_generate_roles" { + depends_on = [enos_remote_exec.secrets_enable_aws_secret] + for_each = var.hosts + + environment = { + AWS_REGION = local.state.region + ENGINE = local.aws_mount + MOUNT = local.aws_mount + AWS_USER_NAME = local.state.aws_user_name + AWS_POLICY_ARN = local.state.aws_policy_arn + AWS_ROLE_ARN = 
local.state.aws_role_arn + AWS_ACCESS_KEY_ID = local.state.aws_access_key + AWS_SECRET_ACCESS_KEY = local.state.aws_secret_key + VAULT_AWS_ROLE = local.vault_aws_role + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../../scripts/aws-generate-roles.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/verify_secrets_engines/modules/create/identity.tf b/enos/modules/verify_secrets_engines/modules/create/identity.tf new file mode 100644 index 0000000..6ee8810 --- /dev/null +++ b/enos/modules/verify_secrets_engines/modules/create/identity.tf @@ -0,0 +1,380 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +locals { + // Variables + identity_entity_metadata = { + "organization" = "vault", + "team" = "qt", + } + group_name_oidc_readers = "oidc_token_readers" // identity/group/name/oidc_token_readers + oidc_config_issuer_url = "https://enos.example.com:1234" // identity/oidc/config + oidc_key_algorithms = ["RS256", "RS384", "RS512", "ES256", "ES384", "ES512", "EdDSA"] + oidc_key_algorithm = local.oidc_key_algorithms[random_integer.oidc_key_algorithm_idx.result] + oidc_key_name = "reguser" // identity/oidc/key/reguser + oidc_key_rotation_period = 86400 // 24h + oidc_key_verification_ttl = 21600 // 6h + oidc_role_name = "reguser" // identity/oidc/role/reguser + oidc_role_ttl = 3600 // 1h + oidc_client_id = "reguser" // optional client ID but required if we want to scope a key and role together without a * + oidc_token_read_policy_name = "oidc_token_reader" + + // Response data + oidc_token_data = jsondecode(enos_remote_exec.oidc_token.stdout).data + group_oidc_token_readers_data = jsondecode(enos_remote_exec.identity_group_oidc_token_readers.stdout).data + initial_oidc_token_data = jsondecode(enos_remote_exec.initial_oidc_token.stdout).data + user_entity_data = 
jsondecode(enos_remote_exec.identity_entity_testuser.stdout).data + user_entity_alias_data = jsondecode(enos_remote_exec.identity_entity_alias_testuser.stdout).data + + // Output + identity_output = { + oidc = { + reader_group_name = local.group_name_oidc_readers + reader_policy_name = local.oidc_token_read_policy_name + issuer_url = local.oidc_config_issuer_url + key_algorithm = local.oidc_key_algorithm + key_name = local.oidc_key_name + key_rotation_period = local.oidc_key_rotation_period + key_verification_ttl = local.oidc_key_verification_ttl + role_name = local.oidc_role_name + role_ttl = local.oidc_role_ttl + client_id = local.oidc_client_id + } + identity_entity_metadata = local.identity_entity_metadata + data = { + entity = local.user_entity_data + entity_alias = local.user_entity_alias_data + oidc_token = local.oidc_token_data + group_oidc_token_readers = local.group_oidc_token_readers_data + } + } +} + +output "identity" { + value = local.identity_output +} + +// Get a random index for our algorithms so that we can randomly rotate through the various algorithms +resource "random_integer" "oidc_key_algorithm_idx" { + min = 0 + max = length(local.oidc_key_algorithms) - 1 +} + +// Create identity entity for our user +resource "enos_remote_exec" "identity_entity_testuser" { + depends_on = [ + enos_remote_exec.auth_create_testuser, + ] + + environment = { + REQPATH = "identity/entity" + PAYLOAD = jsonencode({ + name = local.user_name, + metadata = local.identity_entity_metadata, + policies = [local.user_policy_name], + }) + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/write-payload.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +// Create identity entity alias for our user +resource "enos_remote_exec" "identity_entity_alias_testuser" { + environment = { + REQPATH = "identity/entity-alias" + PAYLOAD = 
jsonencode({ + name = local.user_name, + canonical_id = local.user_entity_data.id + mount_accessor = local.sys_auth_data["${local.auth_userpass_path}/"].accessor + policies = [local.user_policy_name], + }) + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/write-payload.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +// Configure our the oidc token backend +resource "enos_remote_exec" "oidc_config" { + environment = { + REQPATH = "identity/oidc/config" + PAYLOAD = jsonencode({ + issuer = local.oidc_config_issuer_url, + }) + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/write-payload.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +// Create a named key that can sign OIDC identity token +resource "enos_remote_exec" "oidc_key" { + environment = { + REQPATH = "identity/oidc/key/${local.oidc_key_name}" + PAYLOAD = jsonencode({ + allowed_client_ids = [local.oidc_client_id], + algorithm = local.oidc_key_algorithm, + rotation_period = local.oidc_key_rotation_period, + verification_ttl = local.oidc_key_verification_ttl, + }) + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/write-payload.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +// Create a role with custom template and that uses the named key +resource "enos_remote_exec" "oidc_role" { + depends_on = [ + enos_remote_exec.oidc_key, + ] + + environment = { + REQPATH = "identity/oidc/role/${local.oidc_role_name}" + PAYLOAD = jsonencode({ + client_id = local.oidc_client_id, + key = local.oidc_key_name, + ttl = local.oidc_role_ttl + template = base64encode(<<-EOF + { + 
"team": {{identity.entity.metadata.team}}, + "organization": {{identity.entity.metadata.organization}}, + "groups": {{identity.entity.groups.names}} + } + EOF + ), + }) + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/write-payload.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +// Create a group policy that allows "reading" a new signed OIDC token +resource "enos_remote_exec" "policy_write_oidc_token" { + depends_on = [ + enos_remote_exec.secrets_enable_kv_secret, + ] + environment = { + POLICY_NAME = local.oidc_token_read_policy_name + POLICY_CONFIG = <<-EOF + path "identity/oidc/token/*" { + capabilities = ["read"] + } + EOF + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/policy-write.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +// Create oidc_token_readers group and add our testuser to it +resource "enos_remote_exec" "identity_group_oidc_token_readers" { + environment = { + REQPATH = "identity/group" + PAYLOAD = jsonencode({ + member_entity_ids = [local.user_entity_data.id], + name = local.group_name_oidc_readers, + policies = [local.oidc_token_read_policy_name], + }) + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/write-payload.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +// Generate a signed ID token with our test user +resource "enos_remote_exec" "initial_oidc_token" { + depends_on = [ + enos_remote_exec.oidc_role, + ] + + environment = { + REQPATH = "identity/oidc/token/${local.oidc_role_name}" + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = local.user_login_data.auth.client_token + 
VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/read.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +// Introspect the signed ID and verify it +resource "enos_remote_exec" "oidc_introspect_initial_token" { + environment = { + ASSERT_ACTIVE = true // Our token should be "active" + PAYLOAD = jsonencode({ + token = local.initial_oidc_token_data.token, + client_id = local.initial_oidc_token_data.client_id + }) + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/identity-oidc-introspect-token.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +// Rotate the key with a zero TTL to force expiration +resource "enos_remote_exec" "oidc_key_rotate" { + depends_on = [ + enos_remote_exec.oidc_introspect_initial_token, + ] + + environment = { + REQPATH = "identity/oidc/key/${local.oidc_key_name}/rotate" + PAYLOAD = jsonencode({ + verification_ttl = 0, + }) + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/write-payload.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +// Introspect it again to make sure it's no longer active +resource "enos_remote_exec" "oidc_introspect_initial_token_post_rotate" { + depends_on = [ + enos_remote_exec.oidc_key_rotate, + ] + + environment = { + ASSERT_ACTIVE = false // Our token should not be "active" + PAYLOAD = jsonencode({ + token = local.initial_oidc_token_data.token, + client_id = local.initial_oidc_token_data.client_id + }) + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/identity-oidc-introspect-token.sh")] + + transport = { + ssh = { + host = 
var.leader_host.public_ip + } + } +} + +// Generate a new token that we can use later +resource "enos_remote_exec" "oidc_token" { + depends_on = [ + enos_remote_exec.oidc_introspect_initial_token_post_rotate, + ] + + environment = { + REQPATH = "identity/oidc/token/${local.oidc_role_name}" + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = local.user_login_data.auth.client_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/read.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +// Introspect the new token to ensure it's active before we export it for user later via outputs +resource "enos_remote_exec" "oidc_introspect_token" { + environment = { + ASSERT_ACTIVE = true // Our token should be "active" + PAYLOAD = jsonencode({ + token = local.oidc_token_data.token, + client_id = local.oidc_token_data.client_id + }) + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/identity-oidc-introspect-token.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} diff --git a/enos/modules/verify_secrets_engines/modules/create/kv.tf b/enos/modules/verify_secrets_engines/modules/create/kv.tf new file mode 100644 index 0000000..e2174c6 --- /dev/null +++ b/enos/modules/verify_secrets_engines/modules/create/kv.tf @@ -0,0 +1,131 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +locals { + // Variables + group_name_kv_writers = "kv_writers" # identity/group/name/kv_writers + kv_mount = "secret" # secret + kv_write_policy_name = "kv_writer" # sys/policy/kv_writer + kv_test_data_path_prefix = "smoke" + kv_test_data_value_prefix = "fire" + kv_version = 2 + + // Response data + identity_group_kv_writers_data = jsondecode(enos_remote_exec.identity_group_kv_writers.stdout).data + + // Output + kv_output = { + reader_group_name = local.group_name_kv_writers + writer_policy_name = local.kv_write_policy_name + mount = local.kv_mount + version = local.kv_version + test = { + path_prefix = local.kv_test_data_path_prefix + value_prefix = local.kv_test_data_value_prefix + } + data = { + identity_group_kv_writers = local.identity_group_kv_writers_data + } + } +} + +output "kv" { + value = local.kv_output +} + +# Enable kv secrets engine +resource "enos_remote_exec" "secrets_enable_kv_secret" { + environment = { + ENGINE = "kv" + MOUNT = local.kv_mount + SECRETS_META = "-version=${local.kv_version}" + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/secrets-enable.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +# Create a group policy that allows writing to our kv store +resource "enos_remote_exec" "policy_write_kv_writer" { + depends_on = [ + enos_remote_exec.secrets_enable_kv_secret, + ] + environment = { + POLICY_NAME = local.kv_write_policy_name + POLICY_CONFIG = <<-EOF + path "${local.kv_mount}/*" { + capabilities = ["create", "update", "read", "delete", "list"] + } + EOF + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/policy-write.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +# Create kv_writers group and 
add our testuser to it +resource "enos_remote_exec" "identity_group_kv_writers" { + environment = { + REQPATH = "identity/group" + PAYLOAD = jsonencode({ + member_entity_ids = [local.user_entity_data.id], // Created in identity.tf + name = local.group_name_kv_writers, + policies = [local.kv_write_policy_name], + }) + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/write-payload.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +// Write test data as our user. +resource "enos_remote_exec" "kv_put_secret_test" { + depends_on = [ + enos_remote_exec.secrets_enable_kv_secret, + enos_remote_exec.policy_write_kv_writer, + enos_remote_exec.identity_group_kv_writers + ] + for_each = var.hosts + + environment = { + MOUNT = local.kv_mount + SECRET_PATH = "${local.kv_test_data_path_prefix}-${each.key}" + KEY = "${local.kv_test_data_path_prefix}-${each.key}" + VALUE = "${local.kv_test_data_value_prefix}-${each.key}" + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = local.user_login_data.auth.client_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/kv-put.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/verify_secrets_engines/modules/create/main.tf b/enos/modules/verify_secrets_engines/modules/create/main.tf new file mode 100644 index 0000000..265d738 --- /dev/null +++ b/enos/modules/verify_secrets_engines/modules/create/main.tf @@ -0,0 +1,68 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "create_aws_secrets_engine" { + type = bool + description = <<-EOF + Whether or not we'll verify the AWS secrets engine. 
Due to the various security requirements in + Doormat managed AWS accounts, our implementation of the verification requires us to use a + an external 'DemoUser' role and associated policy in order to create additional users. This is + configured in vault_ci and vault_enterprise_ci but does not exist in all AWS accounts. As such, + it's disabled by default. + See: https://github.com/hashicorp/honeybee-templates/blob/main/templates/iam_policy/DemoUser.yaml + EOF + default = false +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The Vault cluster instances that were created" +} + +variable "leader_host" { + type = object({ + ipv6 = string + private_ip = string + public_ip = string + }) + + description = "Vault cluster leader host" +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_root_token" { + type = string + description = "The Vault root token" + default = null +} + +output "state" { + value = { + auth = local.auth_output + identity = local.identity_output + kv = local.kv_output + pki = local.pki_output + aws = local.aws_state + } +} diff --git a/enos/modules/verify_secrets_engines/modules/create/pki.tf b/enos/modules/verify_secrets_engines/modules/create/pki.tf new file mode 100644 index 0000000..1a69ca4 --- /dev/null +++ b/enos/modules/verify_secrets_engines/modules/create/pki.tf @@ -0,0 +1,69 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +locals { + // Variables + pki_mount = "pki" # secret + pki_issuer_name = "issuer" + pki_common_name = "common" + pki_default_ttl = "72h" + pki_test_dir = "tmp-test-results" + + // Output + pki_output = { + common_name = local.pki_common_name + issuer_name = local.pki_issuer_name + mount = local.pki_mount + ttl = local.pki_default_ttl + test_dir = local.pki_test_dir + } + +} + +output "pki" { + value = local.pki_output +} + +# Enable pki secrets engine +resource "enos_remote_exec" "secrets_enable_pki_secret" { + environment = { + ENGINE = local.pki_mount + MOUNT = local.pki_mount + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/secrets-enable.sh")] + + transport = { + ssh = { + host = var.leader_host.public_ip + } + } +} + +# Issue RSA Certificate +resource "enos_remote_exec" "pki_issue_certificates" { + depends_on = [enos_remote_exec.secrets_enable_pki_secret] + for_each = var.hosts + + environment = { + MOUNT = local.pki_mount + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_TOKEN = var.vault_root_token + COMMON_NAME = local.pki_common_name + ISSUER_NAME = local.pki_issuer_name + TTL = local.pki_default_ttl + TEST_DIR = local.pki_test_dir + } + + scripts = [abspath("${path.module}/../../scripts/pki-issue-certificates.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/verify_secrets_engines/modules/read/auth.tf b/enos/modules/verify_secrets_engines/modules/read/auth.tf new file mode 100644 index 0000000..2ea06de --- /dev/null +++ b/enos/modules/verify_secrets_engines/modules/read/auth.tf @@ -0,0 +1,24 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +locals { + user_login_data = jsondecode(enos_remote_exec.auth_login_testuser.stdout) +} + +resource "enos_remote_exec" "auth_login_testuser" { + environment = { + AUTH_PATH = var.create_state.auth.userpass.path + PASSWORD = var.create_state.auth.userpass.user.password + USERNAME = var.create_state.auth.userpass.user.name + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/auth-userpass-login.sh")] + + transport = { + ssh = { + host = var.hosts[0].public_ip + } + } +} diff --git a/enos/modules/verify_secrets_engines/modules/read/aws.tf b/enos/modules/verify_secrets_engines/modules/read/aws.tf new file mode 100644 index 0000000..e2e9a8a --- /dev/null +++ b/enos/modules/verify_secrets_engines/modules/read/aws.tf @@ -0,0 +1,15 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +module "verify_aws_secrets_engine" { + count = var.verify_aws_secrets_engine ? 1 : 0 + source = "./aws" + + create_state = var.create_state + vault_addr = var.vault_addr + vault_root_token = var.vault_root_token + vault_install_dir = var.vault_install_dir + verify_aws_engine_creds = var.verify_aws_engine_creds + + hosts = var.hosts +} diff --git a/enos/modules/verify_secrets_engines/modules/read/aws/aws.tf b/enos/modules/verify_secrets_engines/modules/read/aws/aws.tf new file mode 100644 index 0000000..5a3dbe4 --- /dev/null +++ b/enos/modules/verify_secrets_engines/modules/read/aws/aws.tf @@ -0,0 +1,69 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The Vault cluster instances that were created" +} + +variable "create_state" { + description = "The state of the secrets engines from the 'create' module" +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_root_token" { + type = string + description = "The Vault root token" + default = null +} + +variable "verify_aws_engine_creds" { + type = bool +} + +# Verify AWS Engine +resource "enos_remote_exec" "aws_verify_new_creds" { + for_each = var.hosts + + environment = { + AWS_REGION = "${var.create_state.aws.region}" + MOUNT = "${var.create_state.aws.mount}" + AWS_USER_NAME = "${var.create_state.aws.aws_user_name}" + AWS_ACCESS_KEY_ID = "${var.create_state.aws.aws_access_key}" + AWS_SECRET_ACCESS_KEY = "${var.create_state.aws.aws_secret_key}" + VAULT_AWS_ROLE = "${var.create_state.aws.vault_aws_role}" + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + VERIFY_AWS_ENGINE_CERTS = var.verify_aws_engine_creds + } + + scripts = [abspath("${path.module}/../../../scripts/aws-verify-new-creds.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/verify_secrets_engines/modules/read/identity.tf b/enos/modules/verify_secrets_engines/modules/read/identity.tf new file mode 100644 index 0000000..0f34796 --- /dev/null +++ b/enos/modules/verify_secrets_engines/modules/read/identity.tf @@ -0,0 +1,56 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +// Read our testuser identity entity and verify that it matches our expected alias, groups, policy, +// and metadata. +resource "enos_remote_exec" "identity_verify_entity" { + for_each = var.hosts + + environment = { + ENTITY_ALIAS_ID = var.create_state.identity.data.entity_alias.id + ENTITY_GROUP_IDS = jsonencode([ + var.create_state.kv.data.identity_group_kv_writers.id, + var.create_state.identity.data.group_oidc_token_readers.id, + ]) + ENTITY_METADATA = jsonencode(var.create_state.identity.identity_entity_metadata) + ENTITY_NAME = var.create_state.identity.data.entity.name + ENTITY_POLICIES = jsonencode([var.create_state.auth.userpass.user.policy_name]) + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = local.user_login_data.auth.client_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/identity-verify-entity.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +// Read our OIDC key and role and verify that they have the correct configuration, TTLs, and algorithms. 
+resource "enos_remote_exec" "identity_verify_oidc" { + for_each = var.hosts + + environment = { + OIDC_ISSUER_URL = var.create_state.identity.oidc.issuer_url + OIDC_KEY_NAME = var.create_state.identity.oidc.key_name + OIDC_KEY_ROTATION_PERIOD = var.create_state.identity.oidc.key_rotation_period + OIDC_KEY_VERIFICATION_TTL = var.create_state.identity.oidc.key_verification_ttl + OIDC_KEY_ALGORITHM = var.create_state.identity.oidc.key_algorithm + OIDC_ROLE_NAME = var.create_state.identity.oidc.role_name + OIDC_ROLE_TTL = var.create_state.identity.oidc.role_ttl + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = local.user_login_data.auth.client_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/identity-verify-oidc.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/verify_secrets_engines/modules/read/kv.tf b/enos/modules/verify_secrets_engines/modules/read/kv.tf new file mode 100644 index 0000000..6983e74 --- /dev/null +++ b/enos/modules/verify_secrets_engines/modules/read/kv.tf @@ -0,0 +1,25 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +resource "enos_remote_exec" "kv_get_verify_test_data" { + for_each = var.hosts + + environment = { + MOUNT = var.create_state.kv.mount + SECRET_PATH = "${var.create_state.kv.test.path_prefix}-${each.key}" + KEY = "${var.create_state.kv.test.path_prefix}-${each.key}" + KV_VERSION = var.create_state.kv.version + VALUE = "${var.create_state.kv.test.value_prefix}-${each.key}" + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = local.user_login_data.auth.client_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/../../scripts/kv-verify-value.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/verify_secrets_engines/modules/read/main.tf b/enos/modules/verify_secrets_engines/modules/read/main.tf new file mode 100644 index 0000000..66a3c29 --- /dev/null +++ b/enos/modules/verify_secrets_engines/modules/read/main.tf @@ -0,0 +1,67 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "hosts" { + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) + description = "The Vault cluster instances that were created" +} + +variable "create_state" { + description = "The state of the secrets engines from the 'create' module" +} + +variable "vault_addr" { + type = string + description = "The local vault API listen address" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_root_token" { + type = string + description = "The Vault root token" + default = null +} + +variable "verify_aws_secrets_engine" { + type = bool + description = <<-EOF + Whether or not we'll verify the AWS secrets engine. 
Due to the various security requirements in + Doormat managed AWS accounts, our implementation of the verification requires us to use + an external 'DemoUser' role and associated policy in order to create additional users. This is + configured in vault_ci and vault_enterprise_ci but does not exist in all AWS accounts. As such, + it's disabled by default. + See: https://github.com/hashicorp/honeybee-templates/blob/main/templates/iam_policy/DemoUser.yaml + EOF + default = false +} + +variable "verify_aws_engine_creds" { + type = bool + default = true +} + +variable "verify_pki_certs" { + type = bool + description = "Flag to verify pki certificates" + default = true +} + +locals { + vault_bin_path = "${var.vault_install_dir}/vault" +} diff --git a/enos/modules/verify_secrets_engines/modules/read/pki.tf b/enos/modules/verify_secrets_engines/modules/read/pki.tf new file mode 100644 index 0000000..cde0cc9 --- /dev/null +++ b/enos/modules/verify_secrets_engines/modules/read/pki.tf @@ -0,0 +1,31 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +# Verify PKI Certificate +resource "enos_remote_exec" "pki_verify_certificates" { + for_each = var.hosts + + environment = { + MOUNT = var.create_state.pki.mount + AUTH_PATH = "${var.create_state.auth.userpass.path}" + USERNAME = "${var.create_state.auth.userpass.user.name}" + PASSWORD = "${var.create_state.auth.userpass.user.password}" + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_TOKEN = var.vault_root_token + COMMON_NAME = var.create_state.pki.common_name + ISSUER_NAME = var.create_state.pki.issuer_name + TTL = var.create_state.pki.ttl + TEST_DIR = var.create_state.pki.test_dir + VERIFY_PKI_CERTS = var.verify_pki_certs + } + + scripts = [abspath("${path.module}/../../scripts/pki-verify-certificates.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + diff --git a/enos/modules/verify_secrets_engines/scripts/auth-enable.sh b/enos/modules/verify_secrets_engines/scripts/auth-enable.sh new file mode 100644 index 0000000..5601715 --- /dev/null +++ b/enos/modules/verify_secrets_engines/scripts/auth-enable.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$AUTH_METHOD" ]] && fail "AUTH_METHOD env variable has not been set" +[[ -z "$AUTH_PATH" ]] && fail "AUTH_PATH env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_FORMAT=json +"$binpath" auth enable -path="$AUTH_PATH" "$AUTH_METHOD" diff --git a/enos/modules/verify_secrets_engines/scripts/auth-ldap-write.sh b/enos/modules/verify_secrets_engines/scripts/auth-ldap-write.sh new file mode 100644 index 0000000..dac712b --- /dev/null +++ b/enos/modules/verify_secrets_engines/scripts/auth-ldap-write.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$AUTH_PATH" ]] && fail "AUTH_PATH env variable has not been set" +[[ -z "$GROUPATTR" ]] && fail "GROUPATTR env variable has not been set" +[[ -z "$GROUPDN" ]] && fail "GROUPDN env variable has not been set" +[[ -z "$INSECURE_TLS" ]] && fail "INSECURE_TLS env variable has not been set" +[[ -z "$UPNDOMAIN" ]] && fail "UPNDOMAIN env variable has not been set" +[[ -z "$URL" ]] && fail "URL env variable has not been set" +[[ -z "$USERATTR" ]] && fail "USERATTR env variable has not been set" +[[ -z "$USERDN" ]] && fail "USERDN env variable has not been set" + +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_FORMAT=json +"$binpath" write "auth/$AUTH_PATH/config" \ + url="$URL" \ + userdn="$USERDN" \ + userattr="$USERATTR" \ + groupdn="$GROUPDN" \ + groupattr="$GROUPATTR" \ + upndomain="$UPNDOMAIN" \ + insecure_tls="$INSECURE_TLS" diff --git a/enos/modules/verify_secrets_engines/scripts/auth-userpass-login.sh b/enos/modules/verify_secrets_engines/scripts/auth-userpass-login.sh new file mode 100644 index 0000000..31b756f --- /dev/null +++ b/enos/modules/verify_secrets_engines/scripts/auth-userpass-login.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$AUTH_PATH" ]] && fail "AUTH_PATH env variable has not been set" +[[ -z "$PASSWORD" ]] && fail "PASSWORD env variable has not been set" +[[ -z "$USERNAME" ]] && fail "USERNAME env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_FORMAT=json +"$binpath" write "auth/$AUTH_PATH/login/$USERNAME" password="$PASSWORD" diff --git a/enos/modules/verify_secrets_engines/scripts/auth-userpass-write.sh b/enos/modules/verify_secrets_engines/scripts/auth-userpass-write.sh new file mode 100644 index 0000000..b8cca8b --- /dev/null +++ b/enos/modules/verify_secrets_engines/scripts/auth-userpass-write.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$AUTH_PATH" ]] && fail "AUTH_PATH env variable has not been set" +[[ -z "$PASSWORD" ]] && fail "PASSWORD env variable has not been set" +[[ -z "$POLICIES" ]] && fail "POLICIES env variable has not been set" +[[ -z "$USERNAME" ]] && fail "USERNAME env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_FORMAT=json +"$binpath" write "auth/$AUTH_PATH/users/$USERNAME" password="$PASSWORD" policies="$POLICIES" diff --git a/enos/modules/verify_secrets_engines/scripts/aws-generate-roles.sh b/enos/modules/verify_secrets_engines/scripts/aws-generate-roles.sh new file mode 100755 index 0000000..806d45c --- /dev/null +++ b/enos/modules/verify_secrets_engines/scripts/aws-generate-roles.sh @@ -0,0 +1,63 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$MOUNT" ]] && fail "MOUNT env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" +[[ -z "$VAULT_AWS_ROLE" ]] && fail "VAULT_AWS_ROLE env variable has not been set" +[[ -z "$AWS_REGION" ]] && fail "AWS_REGION env variable has not been set" +[[ -z "$AWS_POLICY_ARN" ]] && fail "AWS_POLICY_ARN env variable has not been set" +[[ -z "$AWS_ROLE_ARN" ]] && fail "AWS_ROLE_ARN env variable has not been set" +[[ -z "$AWS_USER_NAME" ]] && fail "AWS_USER_NAME env variable has not been set" +[[ -z "$AWS_ACCESS_KEY_ID" ]] && fail "AWS_ACCESS_KEY_ID env variable has not been set" +[[ -z "$AWS_SECRET_ACCESS_KEY" ]] && fail "AWS_SECRET_ACCESS_KEY env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_FORMAT=json + +while true; do + echo -e "Waiting for IAM user to be done setting up...\n" + # Fetch the IAM user creation date and convert it to a Unix timestamp + create_timestamp=$(aws iam get-user --user-name "${AWS_USER_NAME}" --query 'User.CreateDate' --output text | sed 's/\([+-][0-9]\{2\}:[0-9]\{2\}\)$//' | date -f - "+%s") + if (($(date +%s) - create_timestamp > 75)); then + break + fi + sleep 2 +done + +echo -e "Configuring Vault AWS \n" +USERNAME_TEMPLATE="{{ if (eq .Type \"STS\") }}{{ printf \"${AWS_USER_NAME}-%s-%s\" (random 20) (unix_time) | truncate 32 }}{{ else }}{{ printf \"${AWS_USER_NAME}-%s-%s\" (unix_time) (random 20) | truncate 60 }}{{ end }}" +"$binpath" write "${MOUNT}/config/root" access_key="${AWS_ACCESS_KEY_ID}" secret_key="${AWS_SECRET_ACCESS_KEY}" region="${AWS_REGION}" username_template="${USERNAME_TEMPLATE}" + +echo -e "Creating Role to create user 
 \n"
+"$binpath" write "aws/roles/${VAULT_AWS_ROLE}" \
+  credential_type=iam_user \
+  permissions_boundary_arn="${AWS_POLICY_ARN}" \
+  policy_document=- << EOF
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": ["ec2:DescribeRegions"],
+      "Resource": ["*"]
+    }
+  ]
+}
+EOF
+
+echo -e "Verifying root config \n"
+"$binpath" read "${MOUNT}/config/root"
+ROOT_USERNAME_TEMPLATE=$("$binpath" read "${MOUNT}/config/root" | jq -r '.data.username_template')
+[[ "$ROOT_USERNAME_TEMPLATE" == *"$AWS_USER_NAME"* ]] || fail "Username Template does not include the current role" diff --git a/enos/modules/verify_secrets_engines/scripts/aws-verify-new-creds.sh b/enos/modules/verify_secrets_engines/scripts/aws-verify-new-creds.sh new file mode 100755 index 0000000..6484704 --- /dev/null +++ b/enos/modules/verify_secrets_engines/scripts/aws-verify-new-creds.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+set -e
+
+fail() {
+  echo "$1" 1>&2
+  exit 1
+}
+
+[[ -z "$MOUNT" ]] && fail "MOUNT env variable has not been set"
+[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
+[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"
+[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
+[[ -z "$VAULT_AWS_ROLE" ]] && fail "VAULT_AWS_ROLE env variable has not been set"
+[[ -z "$VERIFY_AWS_ENGINE_CERTS" ]] && fail "VERIFY_AWS_ENGINE_CERTS env variable has not been set"
+[[ -z "$AWS_REGION" ]] && fail "AWS_REGION env variable has not been set"
+[[ -z "$AWS_USER_NAME" ]] && fail "AWS_USER_NAME env variable has not been set"
+[[ -z "$AWS_ACCESS_KEY_ID" ]] && fail "AWS_ACCESS_KEY_ID env variable has not been set"
+[[ -z "$AWS_SECRET_ACCESS_KEY" ]] && fail "AWS_SECRET_ACCESS_KEY env variable has not been set"
+
+binpath=${VAULT_INSTALL_DIR}/vault
+test -x "$binpath" || fail "unable to locate vault binary at $binpath"
+
+export 
VAULT_FORMAT=json
+
+if [ "${VERIFY_AWS_ENGINE_CERTS}" = false ]; then
+  echo "AWS Engine certificate verification is disabled. Skipping verification."
+  exit 0
+fi
+
+echo -e "Configuring Vault AWS \n"
+USERNAME_TEMPLATE="{{ if (eq .Type \"STS\") }}{{ printf \"${AWS_USER_NAME}-%s-%s\" (random 20) (unix_time) | truncate 32 }}{{ else }}{{ printf \"${AWS_USER_NAME}-%s-%s\" (unix_time) (random 20) | truncate 60 }}{{ end }}"
+"$binpath" write "${MOUNT}/config/root" access_key="${AWS_ACCESS_KEY_ID}" secret_key="${AWS_SECRET_ACCESS_KEY}" username_template="${USERNAME_TEMPLATE}"
+
+echo -e "Verifying root config \n"
+"$binpath" read "${MOUNT}/config/root"
+ROOT_USERNAME_TEMPLATE=$("$binpath" read "${MOUNT}/config/root" | jq -r '.data.username_template')
+[[ "$ROOT_USERNAME_TEMPLATE" == *"$AWS_USER_NAME"* ]] || fail "Username Template does not include the current role"
+
+echo -e "Verifying roles list \n"
+"$binpath" list "${MOUNT}/roles"
+ROLE=$("$binpath" list "${MOUNT}/roles" | jq -r '.[]')
+[[ -z "$ROLE" ]] && fail "No AWS roles created!"
+
+echo -e "Generate New Credentials \n"
+TEMP_IAM_USER=$("$binpath" read "${MOUNT}/creds/${VAULT_AWS_ROLE}") || fail "Failed to generate new credentials for iam user: ${VAULT_AWS_ROLE}"
+TEMP_ACCESS_KEY=$(echo "${TEMP_IAM_USER}" | jq -r '.data.access_key') || fail "Failed to get access key from: ${VAULT_AWS_ROLE}"
+# Fail when the dynamic key is empty or identical to the root access key (fixes: 'failed' was
+# undefined, and the old '-z && != $AWS_USER_NAME' condition could never detect a matching key).
+if [[ -z "$TEMP_ACCESS_KEY" || "$TEMP_ACCESS_KEY" == "$AWS_ACCESS_KEY_ID" ]]; then
+  fail "The new access key is empty or is matching the old one: ${TEMP_ACCESS_KEY}"
+fi diff --git a/enos/modules/verify_secrets_engines/scripts/identity-oidc-introspect-token.sh b/enos/modules/verify_secrets_engines/scripts/identity-oidc-introspect-token.sh new file mode 100644 index 0000000..0e6e1ea --- /dev/null +++ b/enos/modules/verify_secrets_engines/scripts/identity-oidc-introspect-token.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash
+# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$PAYLOAD" ]] && fail "PAYLOAD env variable has not been set" +[[ -z "$ASSERT_ACTIVE" ]] && fail "ASSERT_ACTIVE env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_FORMAT=json +if ! output=$("$binpath" write identity/oidc/introspect - <<< "$PAYLOAD" 2>&1); then + # Attempt to write our error on stdout as JSON as our consumers of the script expect it to be JSON + printf '{"data":{"error":"%s"}}' "$output" + # Fail on stderr with a human readable message + fail "failed to write payload to identity/oidc/introspect: payload=$PAYLOAD output=$output" +fi + +printf "%s\n" "$output" # Write our response output JSON to stdout +if ! jq -Me --argjson ACTIVE "$ASSERT_ACTIVE" '.data.active == $ACTIVE' <<< "$output" &> /dev/null; then + # Write a failure message on STDERR + fail "token active state is invalid, expected .data.active='$ASSERT_ACTIVE'" +fi diff --git a/enos/modules/verify_secrets_engines/scripts/identity-verify-entity.sh b/enos/modules/verify_secrets_engines/scripts/identity-verify-entity.sh new file mode 100644 index 0000000..2ee9503 --- /dev/null +++ b/enos/modules/verify_secrets_engines/scripts/identity-verify-entity.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$ENTITY_ALIAS_ID" ]] && fail "ENTITY_ALIAS_ID env variable has not been set" +[[ -z "$ENTITY_GROUP_IDS" ]] && fail "ENTITY_GROUP_IDS env variable has not been set" +[[ -z "$ENTITY_METADATA" ]] && fail "ENTITY_METADATA env variable has not been set" +[[ -z "$ENTITY_NAME" ]] && fail "ENTITY_NAME env variable has not been set" +[[ -z "$ENTITY_POLICIES" ]] && fail "ENTITY_POLICIES env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_FORMAT=json +if ! output=$("$binpath" read "identity/entity/name/$ENTITY_NAME" 2>&1); then + fail "failed to read identity/entity/name/$ENTITY_NAME: $output" +fi + +if ! jq -Mec --arg ALIAS "$ENTITY_ALIAS_ID" '.data.aliases[0].id == $ALIAS' <<< "$output"; then + fail "entity alias ID does not match, expected: $ENTITY_ALIAS_ID, got: $(jq -Mrc '.data.aliases' <<< "$output")" +fi + +if ! jq -Mec --argjson GROUPS "$ENTITY_GROUP_IDS" '.data.group_ids | sort as $have | $GROUPS | sort as $want | $have == $want' <<< "$output"; then + fail "entity group ID's do not match, expected: $ENTITY_GROUP_IDS, got: $(jq -Mrc '.data.group_ids' <<< "$output")" +fi + +if ! jq -Mec --argjson METADATA "$ENTITY_METADATA" '.data.metadata == $METADATA' <<< "$output"; then + fail "entity metadata does not match, expected: $ENTITY_METADATA, got: $(jq -Mrc '.data.metadata' <<< "$output")" +fi + +if ! 
jq -Mec --argjson POLICIES "$ENTITY_POLICIES" '.data.policies == $POLICIES' <<< "$output"; then + fail "entity policies do not match, expected: $ENTITY_POLICIES, got: $(jq -Mrc '.data.policies' <<< "$output")" +fi diff --git a/enos/modules/verify_secrets_engines/scripts/identity-verify-oidc.sh b/enos/modules/verify_secrets_engines/scripts/identity-verify-oidc.sh new file mode 100644 index 0000000..3b09557 --- /dev/null +++ b/enos/modules/verify_secrets_engines/scripts/identity-verify-oidc.sh @@ -0,0 +1,63 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$OIDC_ISSUER_URL" ]] && fail "OIDC_ISSUER_URL env variable has not been set" +[[ -z "$OIDC_KEY_NAME" ]] && fail "OIDC_KEY_NAME env variable has not been set" +[[ -z "$OIDC_KEY_ROTATION_PERIOD" ]] && fail "OIDC_KEY_ROTATION_PERIOD env variable has not been set" +[[ -z "$OIDC_KEY_VERIFICATION_TTL" ]] && fail "OIDC_KEY_VERIFICATION_TTL env variable has not been set" +[[ -z "$OIDC_KEY_ALGORITHM" ]] && fail "OIDC_KEY_ALGORITHM env variable has not been set" +[[ -z "$OIDC_ROLE_NAME" ]] && fail "OIDC_ROLE_NAME env variable has not been set" +[[ -z "$OIDC_ROLE_TTL" ]] && fail "OIDC_ROLE_TTL env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_FORMAT=json + +# Verify that we have the correct issuer URL +if ! cfg=$("$binpath" read identity/oidc/config); then + fail "failed to read identity/oidc/config: $cfg" +elif ! 
jq -Merc --arg URL "$OIDC_ISSUER_URL" '.data.issuer == $URL' <<< "$cfg"; then + fail "oidc issuer URL is incorrect, expected: $OIDC_ISSUER_URL, got $(jq -Mrc '.data.issuer' <<< "$cfg")" +fi + +# Verify that our token algorithm, rotation period and verification TTL are correct +if ! key_res=$("$binpath" read "identity/oidc/key/$OIDC_KEY_NAME"); then + fail "failed to read identity/oidc/key/$OIDC_KEY_NAME: $key_res" +fi + +if ! jq -Merc --arg ALG "$OIDC_KEY_ALGORITHM" '.data.algorithm == $ALG' <<< "$key_res"; then + fail "oidc token algorithm is incorrect, expected: $OIDC_KEY_ALGORITHM, got $(jq -Mrc '.data.algorithm' <<< "$key_res")" +fi + +if ! jq -Merc --argjson RP "$OIDC_KEY_ROTATION_PERIOD" '.data.rotation_period == $RP' <<< "$key_res"; then + fail "oidc token rotation_period is incorrect, expected: $OIDC_KEY_ROTATION_PERIOD, got $(jq -Mrc '.data.rotation_period' <<< "$key_res")" +fi + +if ! jq -Merc --argjson TTL "$OIDC_KEY_VERIFICATION_TTL" '.data.verification_ttl == $TTL' <<< "$key_res"; then + fail "oidc token verification_ttl is incorrect, expected: $OIDC_KEY_VERIFICATION_TTL, got $(jq -Mrc '.data.verification_ttl' <<< "$key_res")" +fi + +# Verify that our role key and TTL are correct. +if ! role_res=$("$binpath" read "identity/oidc/role/$OIDC_ROLE_NAME"); then + fail "failed to read identity/oidc/role/$OIDC_ROLE_NAME: $role_res" +fi + +if ! jq -Merc --arg KEY "$OIDC_KEY_NAME" '.data.key == $KEY' <<< "$role_res"; then + fail "oidc role key is incorrect, expected: $OIDC_KEY_NAME, got $(jq -Mrc '.data.key' <<< "$role_res")" +fi + +if ! 
jq -Merc --argjson TTL "$OIDC_ROLE_TTL" '.data.ttl == $TTL' <<< "$role_res"; then + fail "oidc role ttl is incorrect, expected: $OIDC_ROLE_TTL, got $(jq -Mrc '.data.ttl' <<< "$role_res")" +fi diff --git a/enos/modules/verify_secrets_engines/scripts/kv-put.sh b/enos/modules/verify_secrets_engines/scripts/kv-put.sh new file mode 100644 index 0000000..46e858f --- /dev/null +++ b/enos/modules/verify_secrets_engines/scripts/kv-put.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$KEY" ]] && fail "KEY env variable has not been set" +[[ -z "$MOUNT" ]] && fail "MOUNT env variable has not been set" +[[ -z "$SECRET_PATH" ]] && fail "SECRET_PATH env variable has not been set" +[[ -z "$VALUE" ]] && fail "VALUE env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_FORMAT=json + +"$binpath" kv put -mount="$MOUNT" "$SECRET_PATH" "$KEY=$VALUE" diff --git a/enos/modules/verify_secrets_engines/scripts/kv-verify-value.sh b/enos/modules/verify_secrets_engines/scripts/kv-verify-value.sh new file mode 100644 index 0000000..64d6f29 --- /dev/null +++ b/enos/modules/verify_secrets_engines/scripts/kv-verify-value.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$MOUNT" ]] && fail "MOUNT env variable has not been set" +[[ -z "$SECRET_PATH" ]] && fail "SECRET_PATH env variable has not been set" +[[ -z "$KEY" ]] && fail "KEY env variable has not been set" +[[ -z "$VALUE" ]] && fail "VALUE env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_FORMAT=json +if res=$("$binpath" kv get -mount="$MOUNT" "$SECRET_PATH"); then + # Note that this expects KVv2 response payloads. KVv1 does not include doubly nested .data + if jq -Merc --arg VALUE "$VALUE" --arg KEY "$KEY" '.data.data[$KEY] == $VALUE' <<< "$res"; then + printf "kv %s/%s %s=%s is valid\n" "$MOUNT" "$SECRET_PATH" "$KEY" "$VALUE" + exit 0 + fi + fail "kv $MOUNT/$SECRET_PATH $KEY=$VALUE invalid! Got: $(jq -Mrc --arg KEY "$KEY" '.data[$KEY]' <<< "$res")" +else + fail "failed to read kv data for $MOUNT/$SECRET_PATH: $res" +fi diff --git a/enos/modules/verify_secrets_engines/scripts/pki-issue-certificates.sh b/enos/modules/verify_secrets_engines/scripts/pki-issue-certificates.sh new file mode 100755 index 0000000..f4592eb --- /dev/null +++ b/enos/modules/verify_secrets_engines/scripts/pki-issue-certificates.sh @@ -0,0 +1,61 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$MOUNT" ]] && fail "MOUNT env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" +[[ -z "$COMMON_NAME" ]] && fail "COMMON_NAME env variable has not been set" +[[ -z "$ISSUER_NAME" ]] && fail "ISSUER_NAME env variable has not been set" +[[ -z "$TTL" ]] && fail "TTL env variable has not been set" +[[ -z "$TEST_DIR" ]] && fail "TEST_DIR env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" +export VAULT_FORMAT=json + +# ------ Generate and sign certificate ------ +CA_NAME="${MOUNT}-ca.pem" +ISSUED_CERT_NAME="${MOUNT}-issued.pem" +ROLE_NAME="${COMMON_NAME}-role" +SUBJECT="test.${COMMON_NAME}" +TMP_TTL="1h" +rm -rf "${TEST_DIR}" +mkdir "${TEST_DIR}" + +## Setting AIA fields for Certificate +"$binpath" write "${MOUNT}/config/urls" issuing_certificates="${VAULT_ADDR}/v1/pki/ca" crl_distribution_points="${VAULT_ADDR}/v1/pki/crl" + +# Generating CA Certificate +"$binpath" write "${MOUNT}/root/generate/internal" common_name="${COMMON_NAME}.com" issuer_name="${ISSUER_NAME}" ttl="${TTL}" | jq -r '.data.issuing_ca' > "${TEST_DIR}/${CA_NAME}" +# Creating a role +"$binpath" write "${MOUNT}/roles/${ROLE_NAME}" allowed_domains="${COMMON_NAME}.com" allow_subdomains=true max_ttl="${TMP_TTL}" +# Issuing Signed Certificate +"$binpath" write "${MOUNT}/issue/${ROLE_NAME}" common_name="${SUBJECT}.com" ttl="${TMP_TTL}" | jq -r '.data.certificate' > "${TEST_DIR}/${ISSUED_CERT_NAME}" + +# ------ Generate and sign intermediate ------ +INTERMEDIATE_COMMON_NAME="intermediate-${COMMON_NAME}" +INTERMEDIATE_ISSUER_NAME="intermediate-${ISSUER_NAME}" 
+INTERMEDIATE_ROLE_NAME="intermediate-${COMMON_NAME}-role" +INTERMEDIATE_CA_NAME="${MOUNT}-${INTERMEDIATE_COMMON_NAME}.pem" +INTERMEDIATE_SIGNED_NAME="${MOUNT}-${INTERMEDIATE_COMMON_NAME}-ca.pem" +INTERMEDIATE_ISSUED_NAME="${MOUNT}-${INTERMEDIATE_COMMON_NAME}-issued.pem" + +# Generate Intermediate CSR +"$binpath" write "${MOUNT}/intermediate/generate/internal" common_name="${INTERMEDIATE_COMMON_NAME}.com" issuer_name="${INTERMEDIATE_ISSUER_NAME}" ttl="${TTL}" | jq -r '.data.csr' > "${TEST_DIR}/${INTERMEDIATE_CA_NAME}" +# Creating a intermediate role +"$binpath" write "${MOUNT}/roles/${INTERMEDIATE_ROLE_NAME}" allowed_domains="${INTERMEDIATE_COMMON_NAME}.com" allow_subdomains=true max_ttl="${TMP_TTL}" +# Sign Intermediate Certificate +"$binpath" write "${MOUNT}/root/sign-intermediate" csr="@${TEST_DIR}/${INTERMEDIATE_CA_NAME}" format=pem_bundle ttl="${TMP_TTL}" | jq -r '.data.certificate' > "${TEST_DIR}/${INTERMEDIATE_SIGNED_NAME}" +# Import Signed Intermediate Certificate into Vault +"$binpath" write "${MOUNT}/intermediate/set-signed" certificate="@${TEST_DIR}/${INTERMEDIATE_SIGNED_NAME}" +# Issuing Signed Certificate with the intermediate role +"$binpath" write "${MOUNT}/issue/${INTERMEDIATE_ROLE_NAME}" common_name="www.${INTERMEDIATE_COMMON_NAME}.com" ttl="${TMP_TTL}" | jq -r '.data.certificate' > "${TEST_DIR}/${INTERMEDIATE_ISSUED_NAME}" diff --git a/enos/modules/verify_secrets_engines/scripts/pki-verify-certificates.sh b/enos/modules/verify_secrets_engines/scripts/pki-verify-certificates.sh new file mode 100755 index 0000000..e738bd7 --- /dev/null +++ b/enos/modules/verify_secrets_engines/scripts/pki-verify-certificates.sh @@ -0,0 +1,109 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1
+
+set -e
+
+fail() {
+  echo "$1" 1>&2
+  exit 1
+}
+
+[[ -z "$AUTH_PATH" ]] && fail "AUTH_PATH env variable has not been set"
+[[ -z "$USERNAME" ]] && fail "USERNAME env variable has not been set"
+[[ -z "$PASSWORD" ]] && fail "PASSWORD env variable has not been set"
+[[ -z "$VERIFY_PKI_CERTS" ]] && fail "VERIFY_PKI_CERTS env variable has not been set"
+[[ -z "$MOUNT" ]] && fail "MOUNT env variable has not been set"
+[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
+[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"
+[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
+[[ -z "$COMMON_NAME" ]] && fail "COMMON_NAME env variable has not been set"
+[[ -z "$ISSUER_NAME" ]] && fail "ISSUER_NAME env variable has not been set"
+[[ -z "$TTL" ]] && fail "TTL env variable has not been set"
+[[ -z "$TEST_DIR" ]] && fail "TEST_DIR env variable has not been set"
+
+binpath=${VAULT_INSTALL_DIR}/vault
+test -x "$binpath" || fail "unable to locate vault binary at $binpath"
+export VAULT_FORMAT=json
+
+# Log in so this vault instance has access to the primary pki roles, issuers, etc.
+if [ "${VERIFY_PKI_CERTS}" = false ]; then
+  echo "Logging in Vault with username and password: ${USERNAME}"
+  VAULT_TOKEN=$("$binpath" write "auth/$AUTH_PATH/login/$USERNAME" password="$PASSWORD" | jq -r '.auth.client_token')
+fi
+
+# Verifying List Roles
+ROLE=$("$binpath" list "${MOUNT}/roles" | jq -r '.[]')
+[[ -z "$ROLE" ]] && fail "No roles created!"
+
+# Verifying List Issuer
+ISSUER=$("$binpath" list "${MOUNT}/issuers" | jq -r '.[]')
+[[ -z "$ISSUER" ]] && fail "No issuers created!"
+ +# Verifying Root CA Certificate +ROOT_CA_CERT=$("$binpath" read pki/cert/ca | jq -r '.data.certificate') +[[ -z "$ROOT_CA_CERT" ]] && fail "No root ca certificate generated" + +# Verifying Certificates +if [ "${VERIFY_PKI_CERTS}" = true ]; then + if [ ! -d "${TEST_DIR}" ]; then + echo "Directory does not exist. Creating it now." + mkdir -p "${TEST_DIR}" # Need to create this directory for Enterprise test + fi + TMP_FILE="tmp-vault-cert.pem" + + # Verify List Certificate + VAULT_CERTS=$("$binpath" list "${MOUNT}/certs" | jq -r '.[]') + [[ -z "$VAULT_CERTS" ]] && fail "VAULT_CERTS should include vault certificates" + for CERT in $VAULT_CERTS; do + echo "Getting certificate from Vault PKI: ${CERT}" + "$binpath" read "${MOUNT}/cert/${CERT}" | jq -r '.data.certificate' > "${TEST_DIR}/${TMP_FILE}" + echo "Verifying certificate contents..." + openssl x509 -in "${TEST_DIR}/${TMP_FILE}" -text -noout || fail "The certificate appears to be improperly configured or contains errors" + CURR_CERT_SERIAL=$(echo "${CERT}" | tr -d ':' | tr '[:lower:]' '[:upper:]') + if ! TMP_CERT_SUBJECT=$(openssl x509 -in "${TEST_DIR}/${TMP_FILE}" -noout -subject | cut -d '=' -f2-); then + fail "failed to read certificate subject: $TMP_CERT_SUBJECT" + fi + TMP_CERT_ISSUER=$(openssl x509 -in "${TEST_DIR}/${TMP_FILE}" -noout -issuer | cut -d '=' -f2-) + TMP_CERT_SERIAL=$(openssl x509 -in "${TEST_DIR}/${TMP_FILE}" -noout -serial | cut -d '=' -f2-) + [[ "${TMP_CERT_SUBJECT}" == *"${COMMON_NAME}.com"* ]] || fail "Subject is incorrect. Actual Subject: ${TMP_CERT_SUBJECT}" + [[ "${TMP_CERT_ISSUER}" == *"${COMMON_NAME}.com"* ]] || fail "Issuer is incorrect. Actual Issuer: ${TMP_CERT_ISSUER}" + [[ "${TMP_CERT_SERIAL}" == *"${CURR_CERT_SERIAL}"* ]] || fail "Certificate Serial is incorrect. Actual certificate Serial: ${CURR_CERT_SERIAL},${TMP_CERT_SERIAL}" + echo "Successfully verified certificate contents." 
+ + # Setting up variables for types of certificates + IS_CA=$(openssl x509 -in "${TEST_DIR}/${TMP_FILE}" -text -noout | grep -q "CA:TRUE" && echo "TRUE" || echo "FALSE") + if [[ "${IS_CA}" == "TRUE" ]]; then + if [[ "${COMMON_NAME}.com" == "${TMP_CERT_SUBJECT}" ]]; then + CA_CERT=${CERT} + elif [[ "intermediate-${COMMON_NAME}.com" == "${TMP_CERT_SUBJECT}" ]]; then + INTERMEDIATE_CA_CERT=${CERT} + fi + elif [[ "${IS_CA}" == "FALSE" ]]; then + INTERMEDIATE_ISSUED_CERT=${CERT} + fi + + done + + echo "Verifying that Vault PKI has successfully generated valid certificates for the CA, Intermediate CA, and issued certificates..." + if [[ -n "${CA_CERT}" ]] && [[ -n "${INTERMEDIATE_CA_CERT}" ]] && [[ -n "${INTERMEDIATE_ISSUED_CERT}" ]]; then + CA_NAME="ca.pem" + INTERMEDIATE_CA_NAME="intermediate-ca.pem" + ISSUED_NAME="issued.pem" + "$binpath" read "${MOUNT}/cert/${CA_CERT}" | jq -r '.data.certificate' > "${TEST_DIR}/${CA_NAME}" + "$binpath" read "${MOUNT}/cert/${INTERMEDIATE_CA_CERT}" | jq -r '.data.certificate' > "${TEST_DIR}/${INTERMEDIATE_CA_NAME}" + "$binpath" read "${MOUNT}/cert/${INTERMEDIATE_ISSUED_CERT}" | jq -r '.data.certificate' > "${TEST_DIR}/${ISSUED_NAME}" + openssl verify --CAfile "${TEST_DIR}/${CA_NAME}" -untrusted "${TEST_DIR}/${INTERMEDIATE_CA_NAME}" "${TEST_DIR}/${ISSUED_NAME}" || fail "One or more Certificate is not valid." 
+  else
+    echo "CA Cert: ${CA_CERT}, Intermediate Cert: ${INTERMEDIATE_CA_CERT}, Issued Cert: ${INTERMEDIATE_ISSUED_CERT}"
+  fi
+
+  echo "Revoking certificate: ${INTERMEDIATE_ISSUED_CERT}"
+  "$binpath" write "${MOUNT}/revoke" serial_number="${INTERMEDIATE_ISSUED_CERT}" || fail "Could not revoke certificate ${INTERMEDIATE_ISSUED_CERT}"
+  echo "Verifying Revoked Certificate"
+  REVOKED_CERT_FROM_LIST=$("$binpath" list "${MOUNT}/certs/revoked" | jq -r '.[0]')
+  [[ "${INTERMEDIATE_ISSUED_CERT}" == "${REVOKED_CERT_FROM_LIST}" ]] || fail "Expected: ${INTERMEDIATE_ISSUED_CERT}, actual: ${REVOKED_CERT_FROM_LIST}"
+  echo "Successfully verified revoked certificate"
+else
+  echo "Skipping verify certificates!"
+fi
diff --git a/enos/modules/verify_secrets_engines/scripts/policy-write.sh b/enos/modules/verify_secrets_engines/scripts/policy-write.sh
new file mode 100644
index 0000000..18e011c
--- /dev/null
+++ b/enos/modules/verify_secrets_engines/scripts/policy-write.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$POLICY_NAME" ]] && fail "POLICY_NAME env variable has not been set" +[[ -z "$POLICY_CONFIG" ]] && fail "POLICY_CONFIG env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_FORMAT=json +"$binpath" policy write "$POLICY_NAME" - <<< "$POLICY_CONFIG" diff --git a/enos/modules/verify_secrets_engines/scripts/read.sh b/enos/modules/verify_secrets_engines/scripts/read.sh new file mode 100644 index 0000000..b522c6f --- /dev/null +++ b/enos/modules/verify_secrets_engines/scripts/read.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$REQPATH" ]] && fail "REQPATH env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_FORMAT=json +"$binpath" read "$REQPATH" diff --git a/enos/modules/verify_secrets_engines/scripts/secrets-enable.sh b/enos/modules/verify_secrets_engines/scripts/secrets-enable.sh new file mode 100644 index 0000000..0e8174a --- /dev/null +++ b/enos/modules/verify_secrets_engines/scripts/secrets-enable.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1
+
+set -e
+
+fail() {
+  echo "$1" 1>&2
+  exit 1
+}
+
+[[ -z "$MOUNT" ]] && fail "MOUNT env variable has not been set"
+[[ -z "$ENGINE" ]] && fail "ENGINE env variable has not been set"
+[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
+[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"
+[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
+
+binpath=${VAULT_INSTALL_DIR}/vault
+test -x "$binpath" || fail "unable to locate vault binary at $binpath"
+
+export VAULT_FORMAT=json
+eval "$binpath" secrets enable -path="$MOUNT" "$SECRETS_META" "$ENGINE"
diff --git a/enos/modules/verify_secrets_engines/scripts/write-payload.sh b/enos/modules/verify_secrets_engines/scripts/write-payload.sh
new file mode 100644
index 0000000..922fb2e
--- /dev/null
+++ b/enos/modules/verify_secrets_engines/scripts/write-payload.sh
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$REQPATH" ]] && fail "REQPATH env variable has not been set" +[[ -z "$PAYLOAD" ]] && fail "PAYLOAD env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_FORMAT=json +if output=$("$binpath" write "$REQPATH" - <<< "$PAYLOAD" 2>&1); then + printf "%s\n" "$output" +else + fail "failed to write payload: path=$REQPATH payload=$PAYLOAD out=$output" +fi From ffff0e5a1fa142c7285a0f2219bc61eba150537b Mon Sep 17 00:00:00 2001 From: Hamza Shili Date: Wed, 25 Jun 2025 15:29:03 -0700 Subject: [PATCH 02/26] ignore the .enos dir --- .gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index dd5b666..40ea26c 100644 --- a/.gitignore +++ b/.gitignore @@ -81,3 +81,6 @@ tmp/ scripts/custom.sh +# enos +/enos/.enos/* + From c2a4127ff967e542a58fc4f99b30a249d67d0828 Mon Sep 17 00:00:00 2001 From: HamzaShili65 Date: Wed, 25 Jun 2025 15:58:10 -0700 Subject: [PATCH 03/26] copy target_ec2_instances module from enos folder in vault --- enos/modules/target_ec2_instances/locals.tf | 11 + enos/modules/target_ec2_instances/main.tf | 223 ++++++++++++++++++ enos/modules/target_ec2_instances/outputs.tf | 11 + .../modules/target_ec2_instances/variables.tf | 85 +++++++ 4 files changed, 330 insertions(+) create mode 100644 enos/modules/target_ec2_instances/locals.tf create mode 100644 enos/modules/target_ec2_instances/main.tf create mode 100644 enos/modules/target_ec2_instances/outputs.tf create mode 100644 enos/modules/target_ec2_instances/variables.tf diff --git a/enos/modules/target_ec2_instances/locals.tf 
b/enos/modules/target_ec2_instances/locals.tf new file mode 100644 index 0000000..8831b7e --- /dev/null +++ b/enos/modules/target_ec2_instances/locals.tf @@ -0,0 +1,11 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +locals { + hosts = { for idx in range(var.instance_count) : idx => { + ipv6 = try(aws_instance.targets[idx].ipv6_addresses[0], "") + public_ip = aws_instance.targets[idx].public_ip + private_ip = aws_instance.targets[idx].private_ip + } + } +} diff --git a/enos/modules/target_ec2_instances/main.tf b/enos/modules/target_ec2_instances/main.tf new file mode 100644 index 0000000..649b871 --- /dev/null +++ b/enos/modules/target_ec2_instances/main.tf @@ -0,0 +1,223 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + # We need to specify the provider source in each module until we publish it + # to the public registry + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.3.24" + } + } +} + +data "aws_vpc" "vpc" { + id = var.vpc_id +} + +data "aws_ami" "ami" { + filter { + name = "image-id" + values = [var.ami_id] + } +} + +data "aws_ec2_instance_type_offerings" "instance" { + filter { + name = "instance-type" + values = [local.instance_type] + } + + location_type = "availability-zone" +} + +data "aws_availability_zones" "available" { + state = "available" + + filter { + name = "zone-name" + values = data.aws_ec2_instance_type_offerings.instance.locations + } +} + +data "aws_subnets" "vpc" { + filter { + name = "availability-zone" + values = data.aws_availability_zones.available.names + } + + filter { + name = "vpc-id" + values = [var.vpc_id] + } +} + +data "aws_iam_policy_document" "target" { + statement { + resources = ["*"] + + actions = [ + "ec2:DescribeInstances", + "secretsmanager:*" + ] + } + + dynamic "statement" { + for_each = var.seal_key_names + + content { + resources = [statement.value] + + actions = [ + "kms:DescribeKey", + 
"kms:ListKeys", + "kms:Encrypt", + "kms:Decrypt", + "kms:GenerateDataKey" + ] + } + } +} + +data "aws_iam_policy_document" "target_instance_role" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["ec2.amazonaws.com"] + } + } +} + +data "enos_environment" "localhost" {} + +locals { + cluster_name = coalesce(var.cluster_name, random_string.cluster_name.result) + instance_type = local.instance_types[data.aws_ami.ami.architecture] + instance_types = { + "arm64" = var.instance_types["arm64"] + "x86_64" = var.instance_types["amd64"] + } + instances = toset([for idx in range(var.instance_count) : tostring(idx)]) + name_prefix = "${var.project_name}-${local.cluster_name}-${random_string.unique_id.result}" +} + +resource "random_string" "cluster_name" { + length = 8 + lower = true + upper = false + numeric = false + special = false +} + +resource "random_string" "unique_id" { + length = 4 + lower = true + upper = false + numeric = false + special = false +} + +resource "aws_iam_role" "target_instance_role" { + name = "${local.name_prefix}-instance-role" + assume_role_policy = data.aws_iam_policy_document.target_instance_role.json +} + +resource "aws_iam_instance_profile" "target" { + name = "${local.name_prefix}-instance-profile" + role = aws_iam_role.target_instance_role.name +} + +resource "aws_iam_role_policy" "target" { + name = "${local.name_prefix}-role-policy" + role = aws_iam_role.target_instance_role.id + policy = data.aws_iam_policy_document.target.json +} + +resource "aws_security_group" "target" { + name = "${local.name_prefix}-sg" + description = "Target instance security group" + vpc_id = var.vpc_id + + # External ingress + dynamic "ingress" { + for_each = var.ports_ingress + + content { + from_port = ingress.value.port + to_port = ingress.value.port + protocol = ingress.value.protocol + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", 
data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + formatlist("%s/32", var.ssh_allow_ips) + ]) + ipv6_cidr_blocks = data.aws_vpc.vpc.ipv6_cidr_block != "" ? [data.aws_vpc.vpc.ipv6_cidr_block] : null + } + } + + # Internal traffic + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + self = true + } + + # External traffic + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + ipv6_cidr_blocks = ["::/0"] + } + + tags = merge( + var.common_tags, + { + Name = "${local.name_prefix}-sg" + }, + ) +} + +resource "aws_instance" "targets" { + for_each = local.instances + + ami = var.ami_id + iam_instance_profile = aws_iam_instance_profile.target.name + // Some scenarios (autopilot, pr_replication) shutdown instances to simulate failure. In those + // cases we should terminate the instance entirely rather than get stuck in stopped limbo. + instance_initiated_shutdown_behavior = "terminate" + instance_type = local.instance_type + key_name = var.ssh_keypair + subnet_id = data.aws_subnets.vpc.ids[tonumber(each.key) % length(data.aws_subnets.vpc.ids)] + vpc_security_group_ids = [aws_security_group.target.id] + + root_block_device { + encrypted = true + } + + metadata_options { + http_tokens = "required" + http_endpoint = "enabled" + } + + tags = merge( + var.common_tags, + { + Name = "${local.name_prefix}-${var.cluster_tag_key}-instance-target" + "${var.cluster_tag_key}" = local.cluster_name + }, + ) +} + +module "disable_selinux" { + depends_on = [aws_instance.targets] + source = "../disable_selinux" + count = var.disable_selinux == true ? 1 : 0 + + hosts = local.hosts +} diff --git a/enos/modules/target_ec2_instances/outputs.tf b/enos/modules/target_ec2_instances/outputs.tf new file mode 100644 index 0000000..674c5cf --- /dev/null +++ b/enos/modules/target_ec2_instances/outputs.tf @@ -0,0 +1,11 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +output "cluster_name" { + value = local.cluster_name +} + +output "hosts" { + description = "The ec2 instance target hosts" + value = local.hosts +} diff --git a/enos/modules/target_ec2_instances/variables.tf b/enos/modules/target_ec2_instances/variables.tf new file mode 100644 index 0000000..9718f2f --- /dev/null +++ b/enos/modules/target_ec2_instances/variables.tf @@ -0,0 +1,85 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +variable "ami_id" { + description = "The machine image identifier" + type = string +} + +variable "cluster_name" { + type = string + description = "A unique cluster identifier" + default = null +} + +variable "cluster_tag_key" { + type = string + description = "The key name for the cluster tag" + default = "TargetCluster" +} + +variable "common_tags" { + description = "Common tags for cloud resources" + type = map(string) + default = { "Project" : "vault-ci" } +} + +variable "ports_ingress" { + description = "Ports mappings to allow for ingress" + type = list(object({ + description = string + port = number + protocol = string + })) +} + +variable "disable_selinux" { + description = "Optionally disable SELinux for certain distros/versions" + type = bool + default = true +} + +variable "instance_count" { + description = "The number of target instances to create" + type = number + default = 3 +} + +variable "instance_types" { + description = "The instance types to use depending on architecture" + type = object({ + amd64 = string + arm64 = string + }) + default = { + amd64 = "t3a.medium" + arm64 = "t4g.medium" + } +} + +variable "project_name" { + description = "A unique project name" + type = string +} + +variable "seal_key_names" { + type = list(string) + description = "The key management seal key names" + default = [] +} + +variable "ssh_allow_ips" { + description = "Allowlisted IP addresses for SSH access to target nodes. 
The IP address of the machine running Enos will automatically allowlisted" + type = list(string) + default = [] +} + +variable "ssh_keypair" { + description = "SSH keypair used to connect to EC2 instances" + type = string +} + +variable "vpc_id" { + description = "The identifier of the VPC where the target instances will be created" + type = string +} From 93ab1f703179de64962e49a0e35eabc83119322b Mon Sep 17 00:00:00 2001 From: HamzaShili65 Date: Fri, 18 Jul 2025 09:21:21 -0700 Subject: [PATCH 04/26] add remote reference of tf moduled borrowed from Vault --- enos/enos-modules.hcl | 399 +++++++++++++++ enos/modules/artifact/metadata/main.tf | 229 --------- .../autopilot_upgrade_storageconfig/main.tf | 10 - enos/modules/backend_consul/main.tf | 56 --- enos/modules/backend_consul/outputs.tf | 18 - enos/modules/backend_consul/variables.tf | 77 --- enos/modules/backend_raft/main.tf | 70 --- .../build_artifactory_artifact/main.tf | 101 ---- .../modules/build_artifactory_package/main.tf | 115 ----- enos/modules/build_crt/main.tf | 37 -- enos/modules/build_local/main.tf | 69 --- enos/modules/build_local/scripts/build.sh | 24 - enos/modules/choose_follower_host/main.tf | 17 - enos/modules/create_vpc/main.tf | 114 ----- enos/modules/create_vpc/outputs.tf | 22 - enos/modules/create_vpc/variables.tf | 37 -- enos/modules/disable_selinux/main.tf | 31 -- .../scripts/make-selinux-permissive.sh | 18 - enos/modules/ec2_info/main.tf | 264 ---------- .../generate_dr_operation_token/main.tf | 82 --- .../scripts/configure-vault-dr-primary.sh | 50 -- .../generate_failover_secondary_token/main.tf | 98 ---- .../generate-failover-secondary-token.sh | 33 -- .../generate_secondary_public_key/main.tf | 77 --- enos/modules/generate_secondary_token/main.tf | 86 ---- enos/modules/get_local_metadata/main.tf | 58 --- .../get_local_metadata/scripts/build_date.sh | 9 - .../get_local_metadata/scripts/version.sh | 97 ---- enos/modules/install_packages/main.tf | 136 ----- 
.../install_packages/scripts/add-repos.sh | 84 ---- .../scripts/install-packages.sh | 105 ---- .../scripts/synchronize-repos.sh | 151 ------ enos/modules/k8s_deploy_vault/main.tf | 165 ------- enos/modules/k8s_deploy_vault/variables.tf | 42 -- .../k8s_vault_verify_replication/main.tf | 42 -- .../scripts/smoke-verify-replication.sh | 27 - .../k8s_vault_verify_replication/variables.tf | 30 -- enos/modules/k8s_vault_verify_ui/main.tf | 45 -- .../scripts/smoke-verify-ui.sh | 17 - enos/modules/k8s_vault_verify_ui/variables.tf | 25 - enos/modules/k8s_vault_verify_version/main.tf | 51 -- .../scripts/get-status.sh | 10 - .../scripts/smoke-verify-version.sh | 45 -- .../k8s_vault_verify_version/variables.tf | 62 --- .../k8s_vault_verify_write_data/main.tf | 53 -- .../k8s_vault_verify_write_data/variables.tf | 36 -- enos/modules/load_docker_image/main.tf | 53 -- enos/modules/local_kind_cluster/main.tf | 53 -- enos/modules/read_license/main.tf | 8 - enos/modules/replication_data/main.tf | 51 -- enos/modules/restart_vault/main.tf | 51 -- .../restart_vault/scripts/restart-vault.sh | 48 -- enos/modules/seal_awskms/main.tf | 68 --- enos/modules/seal_pkcs11/main.tf | 133 ----- enos/modules/seal_shamir/main.tf | 27 - enos/modules/shutdown_multiple_nodes/main.tf | 29 -- enos/modules/shutdown_node/main.tf | 29 -- .../modules/softhsm_create_vault_keys/main.tf | 129 ----- .../scripts/create-keys.sh | 82 --- .../scripts/get-keys.sh | 20 - .../softhsm_distribute_vault_keys/main.tf | 110 ----- .../scripts/distribute-token.sh | 31 -- enos/modules/softhsm_init/main.tf | 83 ---- .../softhsm_init/scripts/init-softhsm.sh | 30 -- enos/modules/softhsm_install/main.tf | 116 ----- .../scripts/find-shared-object.sh | 26 - enos/modules/start_vault/main.tf | 276 ----------- enos/modules/start_vault/outputs.tf | 63 --- enos/modules/start_vault/variables.tf | 193 -------- enos/modules/stop_vault/main.tf | 39 -- enos/modules/target_ec2_fleet/main.tf | 339 ------------- 
enos/modules/target_ec2_fleet/outputs.tf | 15 - enos/modules/target_ec2_fleet/variables.tf | 107 ---- enos/modules/target_ec2_shim/main.tf | 52 -- enos/modules/target_ec2_spot_fleet/main.tf | 466 ------------------ enos/modules/target_ec2_spot_fleet/outputs.tf | 15 - .../target_ec2_spot_fleet/variables.tf | 96 ---- enos/modules/vault_agent/main.tf | 91 ---- .../scripts/set-up-approle-and-agent.sh | 99 ---- enos/modules/vault_cluster/main.tf | 414 ---------------- enos/modules/vault_cluster/outputs.tf | 102 ---- .../scripts/create-audit-log-dir.sh | 40 -- .../scripts/enable-audit-devices.sh | 53 -- .../scripts/set-up-login-shell-profile.sh | 57 --- .../scripts/start-audit-socket-listener.sh | 92 ---- enos/modules/vault_cluster/variables.tf | 291 ----------- .../vault_failover_demote_dr_primary/main.tf | 63 --- .../main.tf | 69 --- .../vault_failover_update_dr_primary/main.tf | 76 --- enos/modules/vault_get_cluster_ips/main.tf | 185 ------- .../scripts/get-follower-ipv4s.sh | 85 ---- .../scripts/get-follower-ipv6s.sh | 87 ---- .../scripts/get-leader-ipv4.sh | 66 --- .../scripts/get-leader-ipv6.sh | 66 --- enos/modules/vault_proxy/main.tf | 100 ---- .../scripts/set-up-approle-and-proxy.sh | 86 ---- enos/modules/vault_proxy/scripts/use-proxy.sh | 36 -- .../vault_raft_remove_node_and_verify/main.tf | 125 ----- enos/modules/vault_raft_remove_peer/main.tf | 80 --- .../scripts/raft-remove-peer.sh | 48 -- enos/modules/vault_setup_dr_primary/main.tf | 61 --- .../vault_setup_dr_primary/scripts/enable.sh | 17 - enos/modules/vault_setup_perf_primary/main.tf | 60 --- .../scripts/configure-vault-pr-primary.sh | 17 - .../vault_setup_replication_secondary/main.tf | 114 ----- .../scripts/wait-for-leader-ready.sh | 65 --- enos/modules/vault_step_down/main.tf | 50 -- .../scripts/operator-step-down.sh | 19 - enos/modules/vault_test_ui/main.tf | 34 -- enos/modules/vault_test_ui/outputs.tf | 15 - enos/modules/vault_test_ui/scripts/test_ui.sh | 12 - enos/modules/vault_test_ui/variables.tf 
| 34 -- .../main.tf | 129 ----- .../scripts/unseal-node.sh | 37 -- .../scripts/wait-until-sealed.sh | 29 -- enos/modules/vault_upgrade/main.tf | 195 -------- .../scripts/maybe-remove-old-unit-file.sh | 39 -- .../modules/vault_verify_agent_output/main.tf | 44 -- .../scripts/verify-vault-agent-output.sh | 15 - enos/modules/vault_verify_autopilot/main.tf | 64 --- .../scripts/smoke-verify-autopilot.sh | 42 -- .../vault_verify_billing_start_date/main.tf | 64 --- .../scripts/verify-billing-start.sh | 98 ---- enos/modules/vault_verify_default_lcq/main.tf | 66 --- .../scripts/smoke-verify-default-lcq.sh | 49 -- .../vault_verify_dr_replication/main.tf | 117 ----- .../scripts/verify-replication-status.sh | 89 ---- .../main.tf | 117 ----- .../scripts/verify-replication-status.sh | 97 ---- .../vault_verify_raft_auto_join_voter/main.tf | 76 --- .../scripts/verify-raft-auto-join-voter.sh | 49 -- .../modules/vault_verify_removed_node/main.tf | 246 --------- .../scripts/verify_manual_rejoin_fails.sh | 21 - .../scripts/verify_raft_remove_peer.sh | 60 --- .../scripts/verify_unseal_fails.sh | 22 - .../vault_verify_removed_node_shim/main.tf | 89 ---- enos/modules/vault_verify_replication/main.tf | 48 -- .../scripts/smoke-verify-replication.sh | 28 -- enos/modules/vault_verify_ui/main.tf | 41 -- .../scripts/smoke-verify-ui.sh | 20 - enos/modules/vault_verify_undo_logs/main.tf | 77 --- .../scripts/smoke-verify-undo-logs.sh | 35 -- enos/modules/vault_verify_version/main.tf | 100 ---- .../scripts/verify-cli-version.sh | 55 --- .../scripts/verify-cluster-version.sh | 37 -- .../vault_wait_for_cluster_unsealed/main.tf | 62 --- .../scripts/verify-vault-node-unsealed.sh | 58 --- enos/modules/vault_wait_for_leader/main.tf | 82 --- .../scripts/wait-for-leader.sh | 96 ---- .../vault_wait_for_seal_rewrap/main.tf | 78 --- .../scripts/wait-for-seal-rewrap.sh | 72 --- enos/modules/verify_log_secrets/main.tf | 96 ---- .../scripts/scan_logs_for_secrets.sh | 72 --- enos/modules/verify_seal_type/main.tf 
| 54 -- .../scripts/verify-seal-type.sh | 37 -- .../modules/create/auth.tf | 233 --------- .../modules/create/aws.tf | 21 - .../modules/create/aws/aws.tf | 158 ------ .../modules/create/identity.tf | 380 -------------- .../modules/create/kv.tf | 131 ----- .../modules/create/main.tf | 68 --- .../modules/create/pki.tf | 69 --- .../modules/read/auth.tf | 24 - .../modules/read/aws.tf | 15 - .../modules/read/aws/aws.tf | 69 --- .../modules/read/identity.tf | 56 --- .../verify_secrets_engines/modules/read/kv.tf | 25 - .../modules/read/main.tf | 67 --- .../modules/read/pki.tf | 31 -- .../scripts/auth-enable.sh | 22 - .../scripts/auth-ldap-write.sh | 36 -- .../scripts/auth-userpass-login.sh | 22 - .../scripts/auth-userpass-write.sh | 24 - .../scripts/aws-generate-roles.sh | 63 --- .../scripts/aws-verify-new-creds.sh | 52 -- .../scripts/identity-oidc-introspect-token.sh | 33 -- .../scripts/identity-verify-entity.sh | 43 -- .../scripts/identity-verify-oidc.sh | 63 --- .../verify_secrets_engines/scripts/kv-put.sh | 25 - .../scripts/kv-verify-value.sh | 33 -- .../scripts/pki-issue-certificates.sh | 61 --- .../scripts/pki-verify-certificates.sh | 109 ---- .../scripts/policy-write.sh | 22 - .../verify_secrets_engines/scripts/read.sh | 21 - .../scripts/secrets-enable.sh | 22 - .../scripts/write-payload.sh | 26 - 186 files changed, 399 insertions(+), 13798 deletions(-) create mode 100644 enos/enos-modules.hcl delete mode 100644 enos/modules/artifact/metadata/main.tf delete mode 100644 enos/modules/autopilot_upgrade_storageconfig/main.tf delete mode 100644 enos/modules/backend_consul/main.tf delete mode 100644 enos/modules/backend_consul/outputs.tf delete mode 100644 enos/modules/backend_consul/variables.tf delete mode 100644 enos/modules/backend_raft/main.tf delete mode 100644 enos/modules/build_artifactory_artifact/main.tf delete mode 100644 enos/modules/build_artifactory_package/main.tf delete mode 100644 enos/modules/build_crt/main.tf delete mode 100644 
enos/modules/build_local/main.tf delete mode 100755 enos/modules/build_local/scripts/build.sh delete mode 100644 enos/modules/choose_follower_host/main.tf delete mode 100644 enos/modules/create_vpc/main.tf delete mode 100644 enos/modules/create_vpc/outputs.tf delete mode 100644 enos/modules/create_vpc/variables.tf delete mode 100644 enos/modules/disable_selinux/main.tf delete mode 100644 enos/modules/disable_selinux/scripts/make-selinux-permissive.sh delete mode 100644 enos/modules/ec2_info/main.tf delete mode 100644 enos/modules/generate_dr_operation_token/main.tf delete mode 100755 enos/modules/generate_dr_operation_token/scripts/configure-vault-dr-primary.sh delete mode 100644 enos/modules/generate_failover_secondary_token/main.tf delete mode 100644 enos/modules/generate_failover_secondary_token/scripts/generate-failover-secondary-token.sh delete mode 100644 enos/modules/generate_secondary_public_key/main.tf delete mode 100644 enos/modules/generate_secondary_token/main.tf delete mode 100644 enos/modules/get_local_metadata/main.tf delete mode 100755 enos/modules/get_local_metadata/scripts/build_date.sh delete mode 100755 enos/modules/get_local_metadata/scripts/version.sh delete mode 100644 enos/modules/install_packages/main.tf delete mode 100644 enos/modules/install_packages/scripts/add-repos.sh delete mode 100644 enos/modules/install_packages/scripts/install-packages.sh delete mode 100644 enos/modules/install_packages/scripts/synchronize-repos.sh delete mode 100644 enos/modules/k8s_deploy_vault/main.tf delete mode 100644 enos/modules/k8s_deploy_vault/variables.tf delete mode 100644 enos/modules/k8s_vault_verify_replication/main.tf delete mode 100755 enos/modules/k8s_vault_verify_replication/scripts/smoke-verify-replication.sh delete mode 100644 enos/modules/k8s_vault_verify_replication/variables.tf delete mode 100644 enos/modules/k8s_vault_verify_ui/main.tf delete mode 100755 enos/modules/k8s_vault_verify_ui/scripts/smoke-verify-ui.sh delete mode 100644 
enos/modules/k8s_vault_verify_ui/variables.tf delete mode 100644 enos/modules/k8s_vault_verify_version/main.tf delete mode 100755 enos/modules/k8s_vault_verify_version/scripts/get-status.sh delete mode 100755 enos/modules/k8s_vault_verify_version/scripts/smoke-verify-version.sh delete mode 100644 enos/modules/k8s_vault_verify_version/variables.tf delete mode 100644 enos/modules/k8s_vault_verify_write_data/main.tf delete mode 100644 enos/modules/k8s_vault_verify_write_data/variables.tf delete mode 100644 enos/modules/load_docker_image/main.tf delete mode 100644 enos/modules/local_kind_cluster/main.tf delete mode 100644 enos/modules/read_license/main.tf delete mode 100644 enos/modules/replication_data/main.tf delete mode 100644 enos/modules/restart_vault/main.tf delete mode 100644 enos/modules/restart_vault/scripts/restart-vault.sh delete mode 100644 enos/modules/seal_awskms/main.tf delete mode 100644 enos/modules/seal_pkcs11/main.tf delete mode 100644 enos/modules/seal_shamir/main.tf delete mode 100644 enos/modules/shutdown_multiple_nodes/main.tf delete mode 100644 enos/modules/shutdown_node/main.tf delete mode 100644 enos/modules/softhsm_create_vault_keys/main.tf delete mode 100644 enos/modules/softhsm_create_vault_keys/scripts/create-keys.sh delete mode 100644 enos/modules/softhsm_create_vault_keys/scripts/get-keys.sh delete mode 100644 enos/modules/softhsm_distribute_vault_keys/main.tf delete mode 100644 enos/modules/softhsm_distribute_vault_keys/scripts/distribute-token.sh delete mode 100644 enos/modules/softhsm_init/main.tf delete mode 100644 enos/modules/softhsm_init/scripts/init-softhsm.sh delete mode 100644 enos/modules/softhsm_install/main.tf delete mode 100644 enos/modules/softhsm_install/scripts/find-shared-object.sh delete mode 100644 enos/modules/start_vault/main.tf delete mode 100644 enos/modules/start_vault/outputs.tf delete mode 100644 enos/modules/start_vault/variables.tf delete mode 100644 enos/modules/stop_vault/main.tf delete mode 100644 
enos/modules/target_ec2_fleet/main.tf delete mode 100644 enos/modules/target_ec2_fleet/outputs.tf delete mode 100644 enos/modules/target_ec2_fleet/variables.tf delete mode 100644 enos/modules/target_ec2_shim/main.tf delete mode 100644 enos/modules/target_ec2_spot_fleet/main.tf delete mode 100644 enos/modules/target_ec2_spot_fleet/outputs.tf delete mode 100644 enos/modules/target_ec2_spot_fleet/variables.tf delete mode 100644 enos/modules/vault_agent/main.tf delete mode 100644 enos/modules/vault_agent/scripts/set-up-approle-and-agent.sh delete mode 100644 enos/modules/vault_cluster/main.tf delete mode 100644 enos/modules/vault_cluster/outputs.tf delete mode 100755 enos/modules/vault_cluster/scripts/create-audit-log-dir.sh delete mode 100644 enos/modules/vault_cluster/scripts/enable-audit-devices.sh delete mode 100644 enos/modules/vault_cluster/scripts/set-up-login-shell-profile.sh delete mode 100644 enos/modules/vault_cluster/scripts/start-audit-socket-listener.sh delete mode 100644 enos/modules/vault_cluster/variables.tf delete mode 100644 enos/modules/vault_failover_demote_dr_primary/main.tf delete mode 100644 enos/modules/vault_failover_promote_dr_secondary/main.tf delete mode 100644 enos/modules/vault_failover_update_dr_primary/main.tf delete mode 100644 enos/modules/vault_get_cluster_ips/main.tf delete mode 100644 enos/modules/vault_get_cluster_ips/scripts/get-follower-ipv4s.sh delete mode 100644 enos/modules/vault_get_cluster_ips/scripts/get-follower-ipv6s.sh delete mode 100644 enos/modules/vault_get_cluster_ips/scripts/get-leader-ipv4.sh delete mode 100644 enos/modules/vault_get_cluster_ips/scripts/get-leader-ipv6.sh delete mode 100644 enos/modules/vault_proxy/main.tf delete mode 100644 enos/modules/vault_proxy/scripts/set-up-approle-and-proxy.sh delete mode 100644 enos/modules/vault_proxy/scripts/use-proxy.sh delete mode 100644 enos/modules/vault_raft_remove_node_and_verify/main.tf delete mode 100644 enos/modules/vault_raft_remove_peer/main.tf delete mode 
100644 enos/modules/vault_raft_remove_peer/scripts/raft-remove-peer.sh delete mode 100644 enos/modules/vault_setup_dr_primary/main.tf delete mode 100644 enos/modules/vault_setup_dr_primary/scripts/enable.sh delete mode 100644 enos/modules/vault_setup_perf_primary/main.tf delete mode 100644 enos/modules/vault_setup_perf_primary/scripts/configure-vault-pr-primary.sh delete mode 100644 enos/modules/vault_setup_replication_secondary/main.tf delete mode 100644 enos/modules/vault_setup_replication_secondary/scripts/wait-for-leader-ready.sh delete mode 100644 enos/modules/vault_step_down/main.tf delete mode 100644 enos/modules/vault_step_down/scripts/operator-step-down.sh delete mode 100644 enos/modules/vault_test_ui/main.tf delete mode 100644 enos/modules/vault_test_ui/outputs.tf delete mode 100755 enos/modules/vault_test_ui/scripts/test_ui.sh delete mode 100644 enos/modules/vault_test_ui/variables.tf delete mode 100644 enos/modules/vault_unseal_replication_followers/main.tf delete mode 100755 enos/modules/vault_unseal_replication_followers/scripts/unseal-node.sh delete mode 100644 enos/modules/vault_unseal_replication_followers/scripts/wait-until-sealed.sh delete mode 100644 enos/modules/vault_upgrade/main.tf delete mode 100644 enos/modules/vault_upgrade/scripts/maybe-remove-old-unit-file.sh delete mode 100644 enos/modules/vault_verify_agent_output/main.tf delete mode 100644 enos/modules/vault_verify_agent_output/scripts/verify-vault-agent-output.sh delete mode 100644 enos/modules/vault_verify_autopilot/main.tf delete mode 100755 enos/modules/vault_verify_autopilot/scripts/smoke-verify-autopilot.sh delete mode 100644 enos/modules/vault_verify_billing_start_date/main.tf delete mode 100644 enos/modules/vault_verify_billing_start_date/scripts/verify-billing-start.sh delete mode 100644 enos/modules/vault_verify_default_lcq/main.tf delete mode 100755 enos/modules/vault_verify_default_lcq/scripts/smoke-verify-default-lcq.sh delete mode 100644 
enos/modules/vault_verify_dr_replication/main.tf delete mode 100644 enos/modules/vault_verify_dr_replication/scripts/verify-replication-status.sh delete mode 100644 enos/modules/vault_verify_performance_replication/main.tf delete mode 100644 enos/modules/vault_verify_performance_replication/scripts/verify-replication-status.sh delete mode 100644 enos/modules/vault_verify_raft_auto_join_voter/main.tf delete mode 100644 enos/modules/vault_verify_raft_auto_join_voter/scripts/verify-raft-auto-join-voter.sh delete mode 100644 enos/modules/vault_verify_removed_node/main.tf delete mode 100644 enos/modules/vault_verify_removed_node/scripts/verify_manual_rejoin_fails.sh delete mode 100755 enos/modules/vault_verify_removed_node/scripts/verify_raft_remove_peer.sh delete mode 100644 enos/modules/vault_verify_removed_node/scripts/verify_unseal_fails.sh delete mode 100644 enos/modules/vault_verify_removed_node_shim/main.tf delete mode 100644 enos/modules/vault_verify_replication/main.tf delete mode 100644 enos/modules/vault_verify_replication/scripts/smoke-verify-replication.sh delete mode 100644 enos/modules/vault_verify_ui/main.tf delete mode 100644 enos/modules/vault_verify_ui/scripts/smoke-verify-ui.sh delete mode 100644 enos/modules/vault_verify_undo_logs/main.tf delete mode 100644 enos/modules/vault_verify_undo_logs/scripts/smoke-verify-undo-logs.sh delete mode 100644 enos/modules/vault_verify_version/main.tf delete mode 100644 enos/modules/vault_verify_version/scripts/verify-cli-version.sh delete mode 100644 enos/modules/vault_verify_version/scripts/verify-cluster-version.sh delete mode 100644 enos/modules/vault_wait_for_cluster_unsealed/main.tf delete mode 100644 enos/modules/vault_wait_for_cluster_unsealed/scripts/verify-vault-node-unsealed.sh delete mode 100644 enos/modules/vault_wait_for_leader/main.tf delete mode 100644 enos/modules/vault_wait_for_leader/scripts/wait-for-leader.sh delete mode 100644 enos/modules/vault_wait_for_seal_rewrap/main.tf delete mode 100644 
enos/modules/vault_wait_for_seal_rewrap/scripts/wait-for-seal-rewrap.sh delete mode 100644 enos/modules/verify_log_secrets/main.tf delete mode 100644 enos/modules/verify_log_secrets/scripts/scan_logs_for_secrets.sh delete mode 100644 enos/modules/verify_seal_type/main.tf delete mode 100644 enos/modules/verify_seal_type/scripts/verify-seal-type.sh delete mode 100644 enos/modules/verify_secrets_engines/modules/create/auth.tf delete mode 100644 enos/modules/verify_secrets_engines/modules/create/aws.tf delete mode 100644 enos/modules/verify_secrets_engines/modules/create/aws/aws.tf delete mode 100644 enos/modules/verify_secrets_engines/modules/create/identity.tf delete mode 100644 enos/modules/verify_secrets_engines/modules/create/kv.tf delete mode 100644 enos/modules/verify_secrets_engines/modules/create/main.tf delete mode 100644 enos/modules/verify_secrets_engines/modules/create/pki.tf delete mode 100644 enos/modules/verify_secrets_engines/modules/read/auth.tf delete mode 100644 enos/modules/verify_secrets_engines/modules/read/aws.tf delete mode 100644 enos/modules/verify_secrets_engines/modules/read/aws/aws.tf delete mode 100644 enos/modules/verify_secrets_engines/modules/read/identity.tf delete mode 100644 enos/modules/verify_secrets_engines/modules/read/kv.tf delete mode 100644 enos/modules/verify_secrets_engines/modules/read/main.tf delete mode 100644 enos/modules/verify_secrets_engines/modules/read/pki.tf delete mode 100644 enos/modules/verify_secrets_engines/scripts/auth-enable.sh delete mode 100644 enos/modules/verify_secrets_engines/scripts/auth-ldap-write.sh delete mode 100644 enos/modules/verify_secrets_engines/scripts/auth-userpass-login.sh delete mode 100644 enos/modules/verify_secrets_engines/scripts/auth-userpass-write.sh delete mode 100755 enos/modules/verify_secrets_engines/scripts/aws-generate-roles.sh delete mode 100755 enos/modules/verify_secrets_engines/scripts/aws-verify-new-creds.sh delete mode 100644 
enos/modules/verify_secrets_engines/scripts/identity-oidc-introspect-token.sh delete mode 100644 enos/modules/verify_secrets_engines/scripts/identity-verify-entity.sh delete mode 100644 enos/modules/verify_secrets_engines/scripts/identity-verify-oidc.sh delete mode 100644 enos/modules/verify_secrets_engines/scripts/kv-put.sh delete mode 100644 enos/modules/verify_secrets_engines/scripts/kv-verify-value.sh delete mode 100755 enos/modules/verify_secrets_engines/scripts/pki-issue-certificates.sh delete mode 100755 enos/modules/verify_secrets_engines/scripts/pki-verify-certificates.sh delete mode 100644 enos/modules/verify_secrets_engines/scripts/policy-write.sh delete mode 100644 enos/modules/verify_secrets_engines/scripts/read.sh delete mode 100644 enos/modules/verify_secrets_engines/scripts/secrets-enable.sh delete mode 100644 enos/modules/verify_secrets_engines/scripts/write-payload.sh diff --git a/enos/enos-modules.hcl b/enos/enos-modules.hcl new file mode 100644 index 0000000..9ea9f12 --- /dev/null +++ b/enos/enos-modules.hcl @@ -0,0 +1,399 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +module "autopilot_upgrade_storageconfig" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/autopilot_upgrade_storageconfig?ref=main" +} + +module "backend_consul" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/backend_consul?ref=main" + + license = var.backend_license_path == null ? null : file(abspath(var.backend_license_path)) + log_level = var.backend_log_level +} + +module "backend_raft" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/backend_raft?ref=main" +} + +// Find any artifact in Artifactory. Requires the version, revision, and edition. +module "build_artifactory" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/build_artifactory_artifact?ref=main" +} + +// Find any released RPM or Deb in Artifactory. 
Requires the version, edition, distro, and distro +// version. +module "build_artifactory_package" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/build_artifactory_package?ref=main" +} + +// A shim "build module" suitable for use when using locally pre-built artifacts or a zip bundle +// from releases.hashicorp.com. When using a local pre-built artifact it requires the local +// artifact path. When using a release zip it does nothing as you'll need to configure the +// vault_cluster module with release info instead. +module "build_crt" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/build_crt?ref=main" +} + +// Build the local branch and package it into a zip artifact. Requires the goarch, goos, build tags, +// and bundle path. +module "build_local" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/build_local?ref=main" +} + +module "create_vpc" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/create_vpc?ref=main" + + environment = "ci" + common_tags = var.tags +} + +module "choose_follower_host" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/choose_follower_host?ref=main" +} + +module "ec2_info" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/ec2_info?ref=main" +} + +module "get_local_metadata" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/get_local_metadata?ref=main" +} + +module "generate_dr_operation_token" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/generate_dr_operation_token?ref=main" + + vault_install_dir = var.vault_install_dir +} + +module "generate_failover_secondary_token" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/generate_failover_secondary_token?ref=main" + + vault_install_dir = var.vault_install_dir +} + +module "generate_secondary_public_key" { + source = 
"git::https://github.com/hashicorp/vault.git//enos/modules/generate_secondary_public_key?ref=main" + + vault_install_dir = var.vault_install_dir +} + +module "generate_secondary_token" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/generate_secondary_token?ref=main" + + vault_install_dir = var.vault_install_dir +} + +module "install_packages" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/install_packages?ref=main" +} + +module "read_license" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/read_license?ref=main" +} + +module "replication_data" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/replication_data?ref=main" +} + +module "restart_vault" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/restart_vault?ref=main" + vault_install_dir = var.vault_install_dir +} + +module "seal_awskms" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/seal_awskms?ref=main" + + cluster_ssh_keypair = var.aws_ssh_keypair_name + common_tags = var.tags +} + +module "seal_shamir" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/seal_shamir?ref=main" + + cluster_ssh_keypair = var.aws_ssh_keypair_name + common_tags = var.tags +} + +module "seal_pkcs11" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/seal_pkcs11?ref=main" + + cluster_ssh_keypair = var.aws_ssh_keypair_name + common_tags = var.tags +} + +module "shutdown_node" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/shutdown_node?ref=main" +} + +module "shutdown_multiple_nodes" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/shutdown_multiple_nodes?ref=main" +} + +module "start_vault" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/start_vault?ref=main" + + install_dir = var.vault_install_dir + log_level = var.vault_log_level +} + +module "stop_vault" { + source = 
"git::https://github.com/hashicorp/vault.git//enos/modules/stop_vault?ref=main" +} + +// create target instances using ec2:CreateFleet +module "target_ec2_fleet" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/target_ec2_fleet?ref=main" + + common_tags = var.tags + project_name = var.project_name + ssh_keypair = var.aws_ssh_keypair_name +} + +// create target instances using ec2:RunInstances +module "target_ec2_instances" { + source = "./modules/target_ec2_instances" + + common_tags = var.tags + ports_ingress = values(global.ports) + project_name = var.project_name + ssh_keypair = var.aws_ssh_keypair_name +} + +// don't create instances but satisfy the module interface +module "target_ec2_shim" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/target_ec2_shim?ref=main" + + common_tags = var.tags + ports_ingress = values(global.ports) + project_name = var.project_name + ssh_keypair = var.aws_ssh_keypair_name +} + +// create target instances using ec2:RequestSpotFleet +module "target_ec2_spot_fleet" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/target_ec2_spot_fleet?ref=main" + + common_tags = var.tags + project_name = var.project_name + ssh_keypair = var.aws_ssh_keypair_name +} + +module "vault_agent" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_agent?ref=main" + + vault_install_dir = var.vault_install_dir + vault_agent_port = global.ports["vault_agent"]["port"] +} + +module "vault_proxy" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_proxy?ref=main" + + vault_install_dir = var.vault_install_dir + vault_proxy_port = global.ports["vault_proxy"]["port"] +} + +module "vault_verify_agent_output" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_verify_agent_output?ref=main" +} + +module "vault_cluster" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_cluster?ref=main" + + 
install_dir = var.vault_install_dir + consul_license = var.backend_license_path == null ? null : file(abspath(var.backend_license_path)) + cluster_tag_key = global.vault_tag_key + log_level = var.vault_log_level +} + +module "vault_get_cluster_ips" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_get_cluster_ips?ref=main" + + vault_install_dir = var.vault_install_dir +} + +module "vault_failover_demote_dr_primary" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_failover_demote_dr_primary?ref=main" + + vault_install_dir = var.vault_install_dir +} + +module "vault_failover_promote_dr_secondary" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_failover_promote_dr_secondary?ref=main" + + vault_install_dir = var.vault_install_dir +} + +module "vault_failover_update_dr_primary" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_failover_update_dr_primary?ref=main" + + vault_install_dir = var.vault_install_dir +} + +module "vault_raft_remove_node_and_verify" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_raft_remove_node_and_verify?ref=main" + vault_install_dir = var.vault_install_dir +} + +module "vault_raft_remove_peer" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_raft_remove_peer?ref=main" + vault_install_dir = var.vault_install_dir +} + +module "vault_setup_dr_primary" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_setup_dr_primary?ref=main" + + vault_install_dir = var.vault_install_dir +} + +module "vault_setup_perf_primary" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_setup_perf_primary?ref=main" + + vault_install_dir = var.vault_install_dir +} + +module "vault_setup_replication_secondary" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_setup_replication_secondary?ref=main" + + vault_install_dir = 
var.vault_install_dir +} + +module "vault_step_down" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_step_down?ref=main" + + vault_install_dir = var.vault_install_dir +} + +module "vault_test_ui" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_test_ui?ref=main" + + ui_run_tests = var.ui_run_tests +} + +module "vault_unseal_replication_followers" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_unseal_replication_followers?ref=main" + + vault_install_dir = var.vault_install_dir +} + +module "vault_upgrade" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_upgrade?ref=main" + + vault_install_dir = var.vault_install_dir +} + +module "vault_verify_autopilot" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_verify_autopilot?ref=main" + + vault_autopilot_upgrade_status = "await-server-removal" + vault_install_dir = var.vault_install_dir +} + +module "vault_verify_dr_replication" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_verify_dr_replication?ref=main" + + vault_install_dir = var.vault_install_dir +} + +module "vault_verify_removed_node" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_verify_removed_node?ref=main" + + vault_install_dir = var.vault_install_dir +} + +module "vault_verify_removed_node_shim" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_verify_removed_node_shim?ref=main" + vault_install_dir = var.vault_install_dir +} + +module "vault_verify_secrets_engines_create" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/verify_secrets_engines/modules/create?ref=main" + + create_aws_secrets_engine = var.verify_aws_secrets_engine + vault_install_dir = var.vault_install_dir +} + +module "vault_verify_secrets_engines_read" { + source = 
"git::https://github.com/hashicorp/vault.git//enos/modules/verify_secrets_engines/modules/read?ref=main" + + verify_aws_secrets_engine = var.verify_aws_secrets_engine + vault_install_dir = var.vault_install_dir +} + +module "vault_verify_default_lcq" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_verify_default_lcq?ref=main" + + vault_autopilot_default_max_leases = "300000" +} + +module "vault_verify_performance_replication" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_verify_performance_replication?ref=main" + + vault_install_dir = var.vault_install_dir +} + +module "vault_verify_raft_auto_join_voter" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_verify_raft_auto_join_voter?ref=main" + + vault_install_dir = var.vault_install_dir + vault_cluster_addr_port = global.ports["vault_cluster"]["port"] +} + +module "vault_verify_replication" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_verify_replication?ref=main" +} + +module "vault_verify_ui" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_verify_ui?ref=main" +} + +module "vault_verify_undo_logs" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_verify_undo_logs?ref=main" + + vault_install_dir = var.vault_install_dir +} + +module "vault_wait_for_cluster_unsealed" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_wait_for_cluster_unsealed?ref=main" + + vault_install_dir = var.vault_install_dir +} + +module "vault_verify_version" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_verify_version?ref=main" + + vault_install_dir = var.vault_install_dir +} + +module "vault_wait_for_leader" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_wait_for_leader?ref=main" + + vault_install_dir = var.vault_install_dir +} + +module "vault_wait_for_seal_rewrap" { + source 
= "git::https://github.com/hashicorp/vault.git//enos/modules/vault_wait_for_seal_rewrap?ref=main" + + vault_install_dir = var.vault_install_dir +} + +module "verify_log_secrets" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/verify_log_secrets?ref=main" + + radar_license_path = var.vault_radar_license_path != null ? abspath(var.vault_radar_license_path) : null +} + +module "verify_seal_type" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/verify_seal_type?ref=main" + + vault_install_dir = var.vault_install_dir +} + +module "vault_verify_billing_start_date" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_verify_billing_start_date" + + vault_install_dir = var.vault_install_dir + vault_instance_count = var.vault_instance_count + vault_cluster_addr_port = global.ports["vault_cluster"]["port"] +} + diff --git a/enos/modules/artifact/metadata/main.tf b/enos/modules/artifact/metadata/main.tf deleted file mode 100644 index 111ed3a..0000000 --- a/enos/modules/artifact/metadata/main.tf +++ /dev/null @@ -1,229 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -// Given the architecture, distro, version, edition, and desired package type, -// return the metadata for an artifact. - -variable "arch" { - description = "The artifact platform architecture" - type = string - - validation { - condition = contains(["amd64", "arm64", "s390x"], var.arch) - error_message = <<-EOF - distro must be one of "amd64", "arm64", "s390x" - EOF - } -} - -variable "distro" { - description = "The target operating system distro" - type = string - - validation { - condition = contains(["amzn", "leap", "rhel", "sles", "ubuntu"], var.distro) - error_message = <<-EOF - distro must be one of "amzn", "leap", "rhel", "sles", "ubuntu" - EOF - } -} - -variable "edition" { - description = "The Vault edition. E.g. 
ent or ent.hsm.fips1403" - type = string - - validation { - condition = contains(["oss", "ce", "ent", "ent.fips1402", "ent.fips1403", "ent.hsm", "ent.hsm.fips1402", "ent.hsm.fips1403"], var.edition) - error_message = <<-EOF - edition must be one of "oss", "ce", "ent", "ent.fips1402", "ent.fips1403", "ent.hsm", "ent.hsm.fips1402", "ent.hsm.fips1403" - EOF - } -} - -variable "package_type" { - description = "The artifact packaging type" - type = string - - validation { - condition = contains(["package", "rpm", "deb", "zip", "bundle"], var.package_type) - error_message = <<-EOF - package_type must be one of "package", "rpm", "deb", "zip", "bundle" - EOF - } -} - -variable "vault_version" { - description = "The version of Vault or Vault Enterprise. E.g 1.18.2, 1.19.0-rc1, 1.18.5+ent.hsm" - type = string -} - -locals { - package_extension_amd64_deb = "-1_amd64.deb" - package_extension_amd64_rpm = "-1.x86_64.rpm" - package_extension_arm64_deb = "-1_arm64.deb" - package_extension_arm64_rpm = "-1.aarch64.rpm" - package_extension_s390x_deb = "-1_s390x.deb" - package_extension_s390x_rpm = "-1.s390x.rpm" - - // file name extensions for the install packages of vault for the various architectures, distributions and editions - package_extensions = { - amd64 = { - amzn = local.package_extension_amd64_rpm - leap = local.package_extension_amd64_rpm - rhel = local.package_extension_amd64_rpm - sles = local.package_extension_amd64_rpm - ubuntu = local.package_extension_amd64_deb - } - arm64 = { - amzn = local.package_extension_arm64_rpm - leap = local.package_extension_arm64_rpm - rhel = local.package_extension_arm64_rpm - sles = local.package_extension_arm64_rpm - ubuntu = local.package_extension_arm64_deb - } - s390x = { - amzn = null - leap = local.package_extension_s390x_rpm - rhel = local.package_extension_s390x_rpm - sles = local.package_extension_s390x_rpm - ubuntu = local.package_extension_s390x_deb - } - } - - package_prefixes_rpm = { - "ce" = "vault-" - "ent" = 
"vault-enterprise-", - "ent.fips1402" = "vault-enterprise-fips1402-", - "ent.fips1403" = "vault-enterprise-fips1403-", - "ent.hsm" = "vault-enterprise-hsm-", - "ent.hsm.fips1402" = "vault-enterprise-hsm-fips1402-", - "ent.hsm.fips1403" = "vault-enterprise-hsm-fips1403-", - "oss" = "vault-" - } - - package_prefixes_deb = { - "ce" = "vault_" - "ent" = "vault-enterprise_", - "ent.fips1402" = "vault-enterprise-fips1402_", - "ent.fips1403" = "vault-enterprise-fips1403_", - "ent.hsm" = "vault-enterprise-hsm_", - "ent.hsm.fips1402" = "vault-enterprise-hsm-fips1402_", - "ent.hsm.fips1403" = "vault-enterprise-hsm-fips1403_", - "oss" = "vault_" - } - - // file name prefixes for the install packages of vault for the various distributions and artifact types (package or bundle) - package_prefixes = { - amzn = local.package_prefixes_rpm, - leap = local.package_prefixes_rpm, - rhel = local.package_prefixes_rpm, - sles = local.package_prefixes_rpm, - ubuntu = local.package_prefixes_deb, - } - - // Stable release Artifactory repos for packages - release_repo_rpm = "hashicorp-rpm-release-local*" - release_repo_apt = "hashicorp-apt-release-local*" - release_repos = { - amzn = local.release_repo_rpm - leap = local.release_repo_rpm - rhel = local.release_repo_rpm - sles = local.release_repo_rpm - ubuntu = local.release_repo_apt - } - release_repo = local.release_repos[var.distro] - - // Stable release Artifactory paths for packages - release_package_rpm_arch = { - "amd64" = "x86_64", - "arm64" = "aarch64", - "s390x" = "s390x", - } - release_path_deb = "pool/${var.arch}/main" - release_sub_path_rpm = "${local.release_package_rpm_arch[var.arch]}/stable" - release_path_distro = { - amzn = { - "2" = "AmazonLinux/2/${local.release_sub_path_rpm}" - "2023" = "AmazonLinux/latest/${local.release_sub_path_rpm}" - "latest" = "AmazonLinux/latest/${local.release_sub_path_rpm}" - } - leap = { - "15.6" = "RHEL/9/${local.release_sub_path_rpm}" - } - rhel = { - "8.10" = 
"RHEL/8/${local.release_sub_path_rpm}" - "9.5" = "RHEL/9/${local.release_sub_path_rpm}" - } - sles = { - "15.6" = "RHEL/9/${local.release_sub_path_rpm}" - } - ubuntu = { - "20.04" = local.release_path_deb, - "22.04" = local.release_path_deb, - "24.04" = local.release_path_deb, - } - } - release_paths = local.release_path_distro[var.distro] - - // Reduce our supported inputs into two classes: system packages or a binary bundled into a zip archive. - package_type = contains(["package", "deb", "rpm"], var.package_type) ? "package" : "bundle" - - // Get the base version. This might still include pre-release metadata - // E.g. 1.18.2 => 1.18.2, 1.18.0-rc1 => 1.18.0-rc1, 1.18.0+ent.hsm => 1.18.0 - semverish_version = try(split("+", var.vault_version)[0], var.vault_version) - - // Determine the "product name". This corresponds properties on the artifactory artifact. - product_name = strcontains(var.edition, "ent") ? "vault-enterprise" : "vault" - - // Create the "product version", which is corresponds to properties on the artifactory artifact. - // It's the version along with edition metadata. We normalize all enterprise editions to .ent. - // E.g. 1.16.0-beta1+ent.hsm.fips1403 -> 1.16.0-beta+ent - product_version = strcontains(var.edition, "ent") ? "${local.semverish_version}+ent" : local.semverish_version - - // Convert product version strings to a syntax that matches deb and rpm packaging. - // E.g. 1.16.0-beta+ent -> 1.16.0~beta+ent - package_version = replace(local.product_version, "-", "~") - - // Get the bundle version. If the vault_version includes metadata, use it. Otherwise add the edition to it. - bundle_version = strcontains(var.vault_version, "+") ? var.vault_version : strcontains(var.edition, "ent") ? "${var.vault_version}+${var.edition}" : var.vault_version - - // Prefix for the artifact name. E.g.: vault_, vault-, vault-enterprise_, vault-enterprise-hsm-fips1402-, etc - artifact_name_prefix = local.package_type == "package" ? 
local.package_prefixes[var.distro][var.edition] : "vault_" - - // The version for the artifact name. - artifact_version = local.package_type == "package" ? local.package_version : local.bundle_version - - // Suffix and extension for the artifact name. E.g.: _linux_.zip, - artifact_name_extension = local.package_type == "package" ? local.package_extensions[var.arch][var.distro] : "_linux_${var.arch}.zip" - - // Combine prefix/suffix/extension together to form the artifact name - artifact_name = "${local.artifact_name_prefix}${local.artifact_version}${local.artifact_name_extension}" - -} -output "artifact_name" { - value = local.artifact_name -} - -output "package_type" { - value = local.package_type -} - -output "package_version" { - value = local.package_version -} - -output "product_name" { - value = local.product_name -} - -output "product_version" { - value = local.product_version -} - -output "release_repo" { - value = local.release_repo -} - -output "release_paths" { - value = local.release_paths -} diff --git a/enos/modules/autopilot_upgrade_storageconfig/main.tf b/enos/modules/autopilot_upgrade_storageconfig/main.tf deleted file mode 100644 index 3fcb77a..0000000 --- a/enos/modules/autopilot_upgrade_storageconfig/main.tf +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -variable "vault_product_version" {} - -output "storage_addl_config" { - value = { - autopilot_upgrade_version = var.vault_product_version - } -} diff --git a/enos/modules/backend_consul/main.tf b/enos/modules/backend_consul/main.tf deleted file mode 100644 index 1d0a514..0000000 --- a/enos/modules/backend_consul/main.tf +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_version = ">= 1.2.0" - - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - version = ">= 0.4.4" - } - } -} - -locals { - bin_path = "${var.install_dir}/consul" -} - -resource "enos_bundle_install" "consul" { - for_each = var.hosts - - destination = var.install_dir - release = merge(var.release, { product = "consul" }) - - transport = { - ssh = { - host = each.value.public_ip - } - } -} - -resource "enos_consul_start" "consul" { - for_each = enos_bundle_install.consul - - bin_path = local.bin_path - data_dir = var.data_dir - config_dir = var.config_dir - config = { - data_dir = var.data_dir - datacenter = "dc1" - retry_join = ["provider=aws tag_key=${var.cluster_tag_key} tag_value=${var.cluster_name}"] - server = true - bootstrap_expect = length(var.hosts) - log_level = var.log_level - log_file = var.log_dir - } - license = var.license - unit_name = "consul" - username = "consul" - - transport = { - ssh = { - host = var.hosts[each.key].public_ip - } - } -} diff --git a/enos/modules/backend_consul/outputs.tf b/enos/modules/backend_consul/outputs.tf deleted file mode 100644 index 5f78e3f..0000000 --- a/enos/modules/backend_consul/outputs.tf +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -output "private_ips" { - description = "Consul cluster target host private_ips" - value = [for host in var.hosts : host.private_ip] -} - -output "public_ips" { - description = "Consul cluster target host public_ips" - value = [for host in var.hosts : host.public_ip] -} - -output "hosts" { - description = "The Consul cluster instances that were created" - - value = var.hosts -} diff --git a/enos/modules/backend_consul/variables.tf b/enos/modules/backend_consul/variables.tf deleted file mode 100644 index c404c0f..0000000 --- a/enos/modules/backend_consul/variables.tf +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -variable "cluster_name" { - type = string - description = "The name of the Consul cluster" - default = null -} - -variable "cluster_tag_key" { - type = string - description = "The tag key for searching for Consul nodes" - default = null -} - -variable "config_dir" { - type = string - description = "The directory where the consul will write config files" - default = "/etc/consul.d" -} - -variable "data_dir" { - type = string - description = "The directory where the consul will store data" - default = "/opt/consul/data" -} - -variable "hosts" { - description = "The target machines host addresses to use for the consul cluster" - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) -} - -variable "install_dir" { - type = string - description = "The directory where the consul binary will be installed" - default = "/opt/consul/bin" -} - -variable "license" { - type = string - sensitive = true - description = "The consul enterprise license" - default = null -} - -variable "log_dir" { - type = string - description = "The directory where the consul will write log files" - default = "/var/log/consul.d" -} - -variable "log_level" { - type = string - description = "The consul service log level" - default = "info" - - validation { - condition = contains(["trace", "debug", "info", "warn", "error"], var.log_level) - error_message = "The log_level must be one of 'trace', 'debug', 'info', 'warn', or 'error'." - } -} - -variable "release" { - type = object({ - version = string - edition = string - }) - description = "Consul release version and edition to install from releases.hashicorp.com" - default = { - version = "1.15.3" - edition = "ce" - } -} diff --git a/enos/modules/backend_raft/main.tf b/enos/modules/backend_raft/main.tf deleted file mode 100644 index 415b058..0000000 --- a/enos/modules/backend_raft/main.tf +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -// Shim module to handle the fact that Vault doesn't actually need a backend module when we use raft. -terraform { - required_version = ">= 1.2.0" - - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - version = ">= 0.4.0" - } - } -} - -variable "cluster_name" { - default = null -} - -variable "cluster_tag_key" { - default = null -} - -variable "config_dir" { - default = null -} - -variable "consul_log_level" { - default = null -} - -variable "data_dir" { - default = null -} - -variable "install_dir" { - default = null -} - -variable "license" { - default = null -} - -variable "log_dir" { - default = null -} - -variable "log_level" { - default = null -} - -variable "release" { - default = null -} - -variable "hosts" { - default = null -} - -output "private_ips" { - value = [for host in var.hosts : host.private_ip] -} - -output "public_ips" { - value = [for host in var.hosts : host.public_ip] -} - -output "hosts" { - value = var.hosts -} diff --git a/enos/modules/build_artifactory_artifact/main.tf b/enos/modules/build_artifactory_artifact/main.tf deleted file mode 100644 index 97d4e04..0000000 --- a/enos/modules/build_artifactory_artifact/main.tf +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - version = ">= 0.2.3" - } - } -} - -variable "artifactory_username" { - type = string - description = "The username to use when connecting to artifactory" - default = null -} - -variable "artifactory_token" { - type = string - description = "The token to use when connecting to artifactory" - default = null - sensitive = true -} - -variable "artifactory_host" { - type = string - description = "The artifactory host to search for vault artifacts" - default = "https://artifactory.hashicorp.engineering/artifactory" -} - -variable "artifactory_repo" { - type = string - description = "The artifactory repo to search for vault artifacts" - default = "hashicorp-crt-stable-local*" -} - -variable "arch" {} -variable "artifact_type" {} -variable "artifact_path" {} -variable "distro" {} -variable "edition" {} -variable "revision" {} -variable "product_version" {} -variable "build_tags" { default = null } -variable "bundle_path" { default = null } -variable "goarch" { default = null } -variable "goos" { default = null } - -module "artifact_metadata" { - source = "../artifact/metadata" - - arch = var.arch - distro = var.distro - edition = var.edition - package_type = var.artifact_type - vault_version = var.product_version -} - -data "enos_artifactory_item" "vault" { - username = var.artifactory_username - token = var.artifactory_token - name = module.artifact_metadata.artifact_name - host = var.artifactory_host - repo = var.artifactory_repo - path = "${module.artifact_metadata.product_name}/*" - properties = tomap({ - "commit" = var.revision, - "product-name" = module.artifact_metadata.product_name, - "product-version" = module.artifact_metadata.product_version, - }) -} - -output "url" { - value = data.enos_artifactory_item.vault.results[0].url - description = "The artifactory download url for the artifact" -} - -output "sha256" { - value = 
data.enos_artifactory_item.vault.results[0].sha256 - description = "The sha256 checksum for the artifact" -} - -output "size" { - value = data.enos_artifactory_item.vault.results[0].size - description = "The size in bytes of the artifact" -} - -output "name" { - value = data.enos_artifactory_item.vault.results[0].name - description = "The name of the artifact" -} - -output "vault_artifactory_release" { - value = { - url = data.enos_artifactory_item.vault.results[0].url - sha256 = data.enos_artifactory_item.vault.results[0].sha256 - username = var.artifactory_username - token = var.artifactory_token - } -} diff --git a/enos/modules/build_artifactory_package/main.tf b/enos/modules/build_artifactory_package/main.tf deleted file mode 100644 index 2444b1e..0000000 --- a/enos/modules/build_artifactory_package/main.tf +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "arch" { - type = string - description = "The architecture for the desired artifact" -} - -variable "artifactory_username" { - type = string - description = "The username to use when connecting to Artifactory" -} - -variable "artifactory_token" { - type = string - description = "The token to use when connecting to Artifactory" - sensitive = true -} - -variable "artifactory_host" { - type = string - description = "The Artifactory host to search for Vault artifacts" - default = "https://artifactory.hashicorp.engineering/artifactory" -} - -variable "distro" { - type = string - description = "The distro for the desired artifact (ubuntu or rhel)" -} - -variable "distro_version" { - type = string - description = "The RHEL version for .rpm packages" -} - -variable "edition" { - type = string - description = "The edition of Vault to use" -} - -variable "product_version" { - type = string - description = "The version of Vault to use" -} - 
-// Shim variables that we don't use but include to satisfy the build module "interface" -variable "artifact_path" { default = null } -variable "artifact_type" { default = null } -variable "artifactory_repo" { default = null } -variable "build_tags" { default = null } -variable "build_ui" { default = null } -variable "bundle_path" { default = null } -variable "goarch" { default = null } -variable "goos" { default = null } -variable "revision" { default = null } - -module "artifact_metadata" { - source = "../artifact/metadata" - - arch = var.arch - distro = var.distro - edition = var.edition - package_type = var.artifact_type != null ? var.artifact_type : "package" - vault_version = var.product_version -} - -data "enos_artifactory_item" "vault" { - username = var.artifactory_username - token = var.artifactory_token - name = module.artifact_metadata.artifact_name - host = var.artifactory_host - repo = module.artifact_metadata.release_repo - path = module.artifact_metadata.release_paths[var.distro_version] -} - -output "results" { - value = data.enos_artifactory_item.vault.results -} - -output "url" { - value = data.enos_artifactory_item.vault.results[0].url - description = "The artifactory download url for the artifact" -} - -output "sha256" { - value = data.enos_artifactory_item.vault.results[0].sha256 - description = "The sha256 checksum for the artifact" -} - -output "size" { - value = data.enos_artifactory_item.vault.results[0].size - description = "The size in bytes of the artifact" -} - -output "name" { - value = data.enos_artifactory_item.vault.results[0].name - description = "The name of the artifact" -} - -output "release" { - value = { - url = data.enos_artifactory_item.vault.results[0].url - sha256 = data.enos_artifactory_item.vault.results[0].sha256 - username = var.artifactory_username - token = var.artifactory_token - } -} diff --git a/enos/modules/build_crt/main.tf b/enos/modules/build_crt/main.tf deleted file mode 100644 index d113c9c..0000000 --- 
a/enos/modules/build_crt/main.tf +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -# Shim module since CRT provided things will use the crt_bundle_path variable -variable "bundle_path" { - default = "/tmp/vault.zip" -} - -variable "build_tags" { - default = ["ui"] -} - -variable "goarch" { - type = string - description = "The Go architecture target" - default = "amd64" -} - -variable "goos" { - type = string - description = "The Go OS target" - default = "linux" -} - -variable "artifactory_host" { default = null } -variable "artifactory_repo" { default = null } -variable "artifactory_username" { default = null } -variable "artifactory_token" { default = null } -variable "arch" { default = null } -variable "artifact_path" { default = null } -variable "artifact_type" { default = null } -variable "build_ui" { default = null } -variable "distro" { default = null } -variable "distro_version" { default = null } -variable "edition" { default = null } -variable "revision" { default = null } -variable "product_version" { default = null } diff --git a/enos/modules/build_local/main.tf b/enos/modules/build_local/main.tf deleted file mode 100644 index 1ad1338..0000000 --- a/enos/modules/build_local/main.tf +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "artifact_path" { - description = "Where to create the zip bundle of the Vault build" -} - -variable "build_tags" { - type = list(string) - description = "The build tags to pass to the Go compiler" -} - -variable "build_ui" { - type = bool - description = "Whether or not we should build the UI when creating the local build" - default = true -} - -variable "goarch" { - type = string - description = "The Go architecture target" - default = "amd64" -} - -variable "goos" { - type = string - description = "The Go OS target" - default = "linux" -} - -variable "artifactory_host" { default = null } -variable "artifactory_repo" { default = null } -variable "artifactory_username" { default = null } -variable "artifactory_token" { default = null } -variable "arch" { default = null } -variable "artifact_type" { default = null } -variable "distro" { default = null } -variable "distro_version" { default = null } -variable "edition" { default = null } -variable "revision" { default = null } -variable "product_version" { default = null } - -module "local_metadata" { - source = "../get_local_metadata" -} - -resource "enos_local_exec" "build" { - scripts = [abspath("${path.module}/scripts/build.sh")] - - environment = { - BASE_VERSION = module.local_metadata.version_base - BIN_PATH = abspath("${path.module}/../../../dist") - BUILD_UI = tostring(var.build_ui) - BUNDLE_PATH = abspath(var.artifact_path) - GO_TAGS = join(" ", var.build_tags) - GOARCH = var.goarch - GOOS = var.goos - PRERELEASE_VERSION = module.local_metadata.version_pre - VERSION_METADATA = module.local_metadata.version_meta - } -} diff --git a/enos/modules/build_local/scripts/build.sh b/enos/modules/build_local/scripts/build.sh deleted file mode 100755 index b7b0950..0000000 --- a/enos/modules/build_local/scripts/build.sh +++ /dev/null @@ -1,24 +0,0 @@ 
-#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -eux -o pipefail - -# Install yarn so we can build the UI -npm install --global yarn || true - -export CGO_ENABLED=0 - -root_dir="$(git rev-parse --show-toplevel)" -pushd "$root_dir" > /dev/null - -if [ -n "$BUILD_UI" ] && [ "$BUILD_UI" = "true" ]; then - make ci-build-ui -fi - -make ci-build - -popd > /dev/null - -echo "--> Bundling $BIN_PATH/* to $BUNDLE_PATH" -zip -r -j "$BUNDLE_PATH" "$BIN_PATH/" diff --git a/enos/modules/choose_follower_host/main.tf b/enos/modules/choose_follower_host/main.tf deleted file mode 100644 index 881d5ca..0000000 --- a/enos/modules/choose_follower_host/main.tf +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -variable "followers" { - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) - description = "The vault follower hosts" -} - -output "chosen_follower" { - value = { - 0 : try(var.followers[0], null) - } -} diff --git a/enos/modules/create_vpc/main.tf b/enos/modules/create_vpc/main.tf deleted file mode 100644 index 55cbf01..0000000 --- a/enos/modules/create_vpc/main.tf +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -data "aws_availability_zones" "available" { - state = "available" - - filter { - name = "zone-name" - values = ["*"] - } -} - -resource "random_string" "cluster_id" { - length = 8 - lower = true - upper = false - numeric = false - special = false -} - -resource "aws_vpc" "vpc" { - // Always set the ipv4 cidr block as it's required in "dual-stack" VPCs which we create. 
- cidr_block = var.ipv4_cidr - enable_dns_hostnames = true - enable_dns_support = true - assign_generated_ipv6_cidr_block = var.ip_version == 6 - - tags = merge( - var.common_tags, - { - "Name" = var.name - }, - ) -} - -resource "aws_subnet" "subnet" { - count = length(data.aws_availability_zones.available.names) - vpc_id = aws_vpc.vpc.id - availability_zone = data.aws_availability_zones.available.names[count.index] - - // IPV4, but since we need to support ipv4 connections from the machine running enos, we're - // always going to need ipv4 available. - map_public_ip_on_launch = true - cidr_block = cidrsubnet(var.ipv4_cidr, 8, count.index) - - // IPV6, only set these when we want to run in ipv6 mode. - assign_ipv6_address_on_creation = var.ip_version == 6 - ipv6_cidr_block = var.ip_version == 6 ? cidrsubnet(aws_vpc.vpc.ipv6_cidr_block, 4, count.index) : null - - tags = merge( - var.common_tags, - { - "Name" = "${var.name}-subnet-${data.aws_availability_zones.available.names[count.index]}" - }, - ) -} - -resource "aws_internet_gateway" "ipv4" { - vpc_id = aws_vpc.vpc.id - - tags = merge( - var.common_tags, - { - "Name" = "${var.name}-igw" - }, - ) -} - -resource "aws_egress_only_internet_gateway" "ipv6" { - count = var.ip_version == 6 ? 1 : 0 - vpc_id = aws_vpc.vpc.id -} - -resource "aws_route" "igw_ipv4" { - route_table_id = aws_vpc.vpc.default_route_table_id - destination_cidr_block = "0.0.0.0/0" - gateway_id = aws_internet_gateway.ipv4.id -} - -resource "aws_route" "igw_ipv6" { - count = var.ip_version == 6 ? 1 : 0 - route_table_id = aws_vpc.vpc.default_route_table_id - destination_ipv6_cidr_block = "::/0" - egress_only_gateway_id = aws_egress_only_internet_gateway.ipv6[0].id -} - -resource "aws_security_group" "default" { - vpc_id = aws_vpc.vpc.id - - ingress { - description = "allow_ingress_from_all" - from_port = 0 - to_port = 0 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - ipv6_cidr_blocks = var.ip_version == 6 ? 
["::/0"] : null - } - - egress { - description = "allow_egress_from_all" - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - ipv6_cidr_blocks = var.ip_version == 6 ? ["::/0"] : null - } - - tags = merge( - var.common_tags, - { - "Name" = "${var.name}-default" - }, - ) -} diff --git a/enos/modules/create_vpc/outputs.tf b/enos/modules/create_vpc/outputs.tf deleted file mode 100644 index d54fbd8..0000000 --- a/enos/modules/create_vpc/outputs.tf +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -output "id" { - description = "Created VPC ID" - value = aws_vpc.vpc.id -} - -output "ipv4_cidr" { - description = "The VPC subnet CIDR for ipv4 mode" - value = var.ipv4_cidr -} - -output "ipv6_cidr" { - description = "The VPC subnet CIDR for ipv6 mode" - value = aws_vpc.vpc.ipv6_cidr_block -} - -output "cluster_id" { - description = "A unique string associated with the VPC" - value = random_string.cluster_id.result -} diff --git a/enos/modules/create_vpc/variables.tf b/enos/modules/create_vpc/variables.tf deleted file mode 100644 index 80c64ea..0000000 --- a/enos/modules/create_vpc/variables.tf +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -variable "name" { - type = string - default = "vault-ci" - description = "The name of the VPC" -} - -variable "ip_version" { - type = number - default = 4 - description = "The IP version to use for the default subnet" - - validation { - condition = contains([4, 6], var.ip_version) - error_message = "The ip_version must be either 4 or 6" - } -} - -variable "ipv4_cidr" { - type = string - default = "10.13.0.0/16" - description = "The CIDR block for the VPC when using IPV4 mode" -} - -variable "environment" { - description = "Name of the environment." 
- type = string - default = "vault-ci" -} - -variable "common_tags" { - description = "Tags to set for all resources" - type = map(string) - default = { "Project" : "vault-ci" } -} diff --git a/enos/modules/disable_selinux/main.tf b/enos/modules/disable_selinux/main.tf deleted file mode 100644 index 7ed2f52..0000000 --- a/enos/modules/disable_selinux/main.tf +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "hosts" { - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) - description = "The hosts to install packages on" -} - -resource "enos_remote_exec" "make_selinux_permissive" { - for_each = var.hosts - - scripts = [abspath("${path.module}/scripts/make-selinux-permissive.sh")] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} diff --git a/enos/modules/disable_selinux/scripts/make-selinux-permissive.sh b/enos/modules/disable_selinux/scripts/make-selinux-permissive.sh deleted file mode 100644 index cedc23d..0000000 --- a/enos/modules/disable_selinux/scripts/make-selinux-permissive.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -if ! type getenforce &> /dev/null; then - exit 0 -fi - -if sudo getenforce | grep Enforcing; then - sudo setenforce 0 -fi diff --git a/enos/modules/ec2_info/main.tf b/enos/modules/ec2_info/main.tf deleted file mode 100644 index 12fecf0..0000000 --- a/enos/modules/ec2_info/main.tf +++ /dev/null @@ -1,264 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -# Note: in order to use the openSUSE Leap AMIs, the AWS account in use must "subscribe" -# and accept SUSE's terms of use. You can do this at the links below. 
If the AWS account -# you are using is already subscribed, this confirmation will be displayed on each page. -# openSUSE Leap arm64 subscription: https://aws.amazon.com/marketplace/server/procurement?productId=a516e959-df54-4035-bb1a-63599b7a6df9 -# openSUSE Leap amd64 subscription: https://aws.amazon.com/marketplace/server/procurement?productId=5535c495-72d4-4355-b169-54ffa874f849 - -locals { - architectures = toset(["arm64", "x86_64"]) - amazon_owner_id = "591542846629" - canonical_owner_id = "099720109477" - suse_owner_id = "013907871322" - opensuse_owner_id = "679593333241" - redhat_owner_id = "309956199498" - ids = { - // NOTE: If you modify these versions you'll probably also need to update the `softhsm_install` - // module to match. - "arm64" = { - "amzn" = { - "2" = data.aws_ami.amzn_2["arm64"].id - "2023" = data.aws_ami.amzn_2023["arm64"].id - } - "leap" = { - "15.6" = data.aws_ami.leap_15["arm64"].id - } - "rhel" = { - "8.10" = data.aws_ami.rhel_8["arm64"].id - "9.5" = data.aws_ami.rhel_9["arm64"].id - } - "sles" = { - "15.6" = data.aws_ami.sles_15["arm64"].id - } - "ubuntu" = { - "20.04" = data.aws_ami.ubuntu_2004["arm64"].id - "22.04" = data.aws_ami.ubuntu_2204["arm64"].id - "24.04" = data.aws_ami.ubuntu_2404["arm64"].id - } - } - "amd64" = { - "amzn" = { - "2" = data.aws_ami.amzn_2["x86_64"].id - "2023" = data.aws_ami.amzn_2023["x86_64"].id - } - "leap" = { - "15.6" = data.aws_ami.leap_15["x86_64"].id - } - "rhel" = { - "8.10" = data.aws_ami.rhel_8["x86_64"].id - "9.5" = data.aws_ami.rhel_9["x86_64"].id - } - "sles" = { - "15.6" = data.aws_ami.sles_15["x86_64"].id - } - "ubuntu" = { - "20.04" = data.aws_ami.ubuntu_2004["x86_64"].id - "22.04" = data.aws_ami.ubuntu_2204["x86_64"].id - "24.04" = data.aws_ami.ubuntu_2404["x86_64"].id - } - } - } -} - -data "aws_ami" "amzn_2" { - most_recent = true - for_each = local.architectures - - filter { - name = "name" - values = ["amzn2-ami-ecs-hvm-2.0*"] - } - - filter { - name = "architecture" - values = 
[each.value] - } - - owners = [local.amazon_owner_id] -} - -data "aws_ami" "amzn_2023" { - most_recent = true - for_each = local.architectures - - filter { - name = "name" - values = ["al2023-ami-ecs-hvm*"] - } - - filter { - name = "architecture" - values = [each.value] - } - - owners = [local.amazon_owner_id] -} - -data "aws_ami" "leap_15" { - most_recent = true - for_each = local.architectures - - filter { - name = "name" - values = ["openSUSE-Leap-15-6*"] - } - - filter { - name = "architecture" - values = [each.value] - } - - owners = [local.opensuse_owner_id] -} - -data "aws_ami" "rhel_8" { - most_recent = true - for_each = local.architectures - - filter { - name = "name" - values = ["RHEL-8.10*HVM-20*"] - } - - filter { - name = "virtualization-type" - values = ["hvm"] - } - - filter { - name = "architecture" - values = [each.value] - } - - owners = [local.redhat_owner_id] -} - -data "aws_ami" "rhel_9" { - most_recent = true - for_each = local.architectures - - filter { - name = "name" - values = ["RHEL-9.5*HVM-20*"] - } - - filter { - name = "virtualization-type" - values = ["hvm"] - } - - filter { - name = "architecture" - values = [each.value] - } - - owners = [local.redhat_owner_id] -} - -data "aws_ami" "sles_15" { - most_recent = true - for_each = local.architectures - - filter { - name = "name" - values = ["suse-sles-15-sp6-v*-hvm-*"] - } - - filter { - name = "architecture" - values = [each.value] - } - - owners = [local.suse_owner_id] -} - -data "aws_ami" "ubuntu_2004" { - most_recent = true - for_each = local.architectures - - filter { - name = "name" - values = ["ubuntu/images/hvm-ssd/ubuntu-*-20.04-*-server-*"] - } - - filter { - name = "virtualization-type" - values = ["hvm"] - } - - filter { - name = "architecture" - values = [each.value] - } - - owners = [local.canonical_owner_id] -} - -data "aws_ami" "ubuntu_2204" { - most_recent = true - for_each = local.architectures - - filter { - name = "name" - values = 
["ubuntu/images/hvm-ssd/ubuntu-*-22.04-*-server-*"] - } - - filter { - name = "virtualization-type" - values = ["hvm"] - } - - filter { - name = "architecture" - values = [each.value] - } - - owners = [local.canonical_owner_id] -} - -data "aws_ami" "ubuntu_2404" { - most_recent = true - for_each = local.architectures - - filter { - name = "name" - values = ["ubuntu/images/hvm-ssd-gp3/ubuntu-noble-24.04-*-server-*"] - } - - filter { - name = "virtualization-type" - values = ["hvm"] - } - - filter { - name = "architecture" - values = [each.value] - } - - owners = [local.canonical_owner_id] -} - -data "aws_region" "current" {} - -data "aws_availability_zones" "available" { - state = "available" - - filter { - name = "zone-name" - values = ["*"] - } -} - -output "ami_ids" { - value = local.ids -} - -output "current_region" { - value = data.aws_region.current -} - -output "availability_zones" { - value = data.aws_availability_zones.available -} diff --git a/enos/modules/generate_dr_operation_token/main.tf b/enos/modules/generate_dr_operation_token/main.tf deleted file mode 100644 index c582c0c..0000000 --- a/enos/modules/generate_dr_operation_token/main.tf +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - random = { - source = "hashicorp/random" - version = ">= 3.4.3" - } - } -} - -variable "ip_version" { - type = number - description = "The IP version used for the Vault TCP listener" - - validation { - condition = contains([4, 6], var.ip_version) - error_message = "The ip_version must be either 4 or 6" - } -} - -variable "primary_leader_host" { - type = object({ - ipv6 = string - private_ip = string - public_ip = string - }) - description = "The primary cluster leader host" -} - -variable "vault_addr" { - type = string - description = "The local vault API listen address" -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_root_token" { - type = string - description = "The vault root token" -} - -variable "storage_backend" { - type = string - description = "The storage backend to use for the Vault cluster" -} - -locals { - token_id = random_uuid.token_id.id - dr_operation_token = enos_remote_exec.fetch_dr_operation_token.stdout -} - -resource "random_uuid" "token_id" {} - -resource "enos_remote_exec" "fetch_dr_operation_token" { - depends_on = [random_uuid.token_id] - environment = { - VAULT_ADDR = var.vault_addr - VAULT_INSTALL_DIR = var.vault_install_dir - VAULT_TOKEN = var.vault_root_token - STORAGE_BACKEND = var.storage_backend - } - - scripts = [abspath("${path.module}/scripts/configure-vault-dr-primary.sh")] - - transport = { - ssh = { - host = var.primary_leader_host.public_ip - } - } -} - -output "dr_operation_token" { - value = local.dr_operation_token -} diff --git a/enos/modules/generate_dr_operation_token/scripts/configure-vault-dr-primary.sh b/enos/modules/generate_dr_operation_token/scripts/configure-vault-dr-primary.sh deleted file mode 100755 index eae9b10..0000000 --- 
a/enos/modules/generate_dr_operation_token/scripts/configure-vault-dr-primary.sh +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -e - -binpath="${VAULT_INSTALL_DIR}/vault" - -fail() { - echo "$1" >&2 - exit 1 -} - -# Check required environment variables -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" -[[ -z "$STORAGE_BACKEND" ]] && fail "STORAGE_BACKEND env variable has not been set" - -# Define the policy content -policy_content() { - cat << EOF -path "sys/replication/dr/secondary/promote" { - capabilities = [ "update" ] -} - -path "sys/replication/dr/secondary/update-primary" { - capabilities = [ "update" ] -} -EOF - if [ "$STORAGE_BACKEND" = "raft" ]; then - cat << EOF -path "sys/storage/raft/autopilot/state" { - capabilities = [ "update", "read" ] -} -EOF - fi -} - -# Write the policy -$binpath policy write dr-secondary-promotion - <<< "$(policy_content)" &> /dev/null - -# Configure the failover handler token role -$binpath write auth/token/roles/failover-handler \ - allowed_policies=dr-secondary-promotion \ - orphan=true \ - renewable=false \ - token_type=batch &> /dev/null - -# Create a token for the failover handler role and output the token only -$binpath token create -field=token -role=failover-handler -ttl=8h diff --git a/enos/modules/generate_failover_secondary_token/main.tf b/enos/modules/generate_failover_secondary_token/main.tf deleted file mode 100644 index 537b0af..0000000 --- a/enos/modules/generate_failover_secondary_token/main.tf +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - random = { - source = "hashicorp/random" - version = ">= 3.4.3" - } - } -} - -variable "ip_version" { - type = number - description = "The IP version used for the Vault TCP listener" - - validation { - condition = contains([4, 6], var.ip_version) - error_message = "The ip_version must be either 4 or 6" - } -} - -variable "primary_leader_host" { - type = object({ - ipv6 = string - private_ip = string - public_ip = string - }) - description = "The primary cluster leader host" -} - -variable "retry_interval" { - type = string - default = "2" - description = "How long to wait between retries" -} - -variable "secondary_public_key" { - type = string - description = "The secondary public key" -} - -variable "timeout" { - type = string - default = "15" - description = "How many seconds to wait before timing out" -} - -variable "vault_addr" { - type = string - description = "The local vault API listen address" -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_root_token" { - type = string - description = "The vault root token" -} - -locals { - primary_leader_addr = var.ip_version == 6 ? 
var.primary_leader_host.ipv6 : var.primary_leader_host.private_ip - token_id = random_uuid.token_id.id - secondary_token = enos_remote_exec.fetch_secondary_token.stdout -} - -resource "random_uuid" "token_id" {} - -resource "enos_remote_exec" "fetch_secondary_token" { - depends_on = [random_uuid.token_id] - environment = { - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - RETRY_INTERVAL = var.retry_interval - TIMEOUT_SECONDS = var.timeout - SECONDARY_PUBLIC_KEY = var.secondary_public_key - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/scripts/generate-failover-secondary-token.sh")] - - - transport = { - ssh = { - host = var.primary_leader_host.public_ip - } - } -} - -output "secondary_token" { - value = local.secondary_token -} diff --git a/enos/modules/generate_failover_secondary_token/scripts/generate-failover-secondary-token.sh b/enos/modules/generate_failover_secondary_token/scripts/generate-failover-secondary-token.sh deleted file mode 100644 index 05da4a4..0000000 --- a/enos/modules/generate_failover_secondary_token/scripts/generate-failover-secondary-token.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env bash -## Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -set -e - -[[ -z "${VAULT_INSTALL_DIR}" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" -[[ -z "${RETRY_INTERVAL}" ]] && fail "RETRY_INTERVAL env variable has not been set" -[[ -z "${TIMEOUT_SECONDS}" ]] && fail "TIMEOUT_SECONDS env variable has not been set" -[[ -z "${VAULT_ADDR}" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "${VAULT_TOKEN}" ]] && fail "VAULT_TOKEN env variable has not been set" -[[ -z "${SECONDARY_PUBLIC_KEY}" ]] && fail "SECONDARY_PUBLIC_KEY env variable has not been set" - -fail() { - echo "$1" 1>&2 - exit 1 -} - -binpath="${VAULT_INSTALL_DIR}"/vault -test -x "${binpath}" || fail "unable to locate vault binary at ${binpath}" - -begin_time=$(date +%s) -end_time=$((begin_time + TIMEOUT_SECONDS)) -while [ "$(date +%s)" -lt "${end_time}" ]; do - if secondary_token=$(${binpath} write -field token sys/replication/dr/primary/secondary-token id="${VAULT_TOKEN}" secondary_public_key="${SECONDARY_PUBLIC_KEY}"); then - echo "${secondary_token}" - exit 0 - fi - - sleep "${RETRY_INTERVAL}" -done - -fail "Timed out trying to generate secondary token" diff --git a/enos/modules/generate_secondary_public_key/main.tf b/enos/modules/generate_secondary_public_key/main.tf deleted file mode 100644 index 761972d..0000000 --- a/enos/modules/generate_secondary_public_key/main.tf +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - random = { - source = "hashicorp/random" - version = ">= 3.4.3" - } - } -} - -variable "ip_version" { - type = number - description = "The IP version used for the Vault TCP listener" - - validation { - condition = contains([4, 6], var.ip_version) - error_message = "The ip_version must be either 4 or 6" - } -} - -variable "primary_leader_host" { - type = object({ - ipv6 = string - private_ip = string - public_ip = string - }) - description = "The primary cluster leader host" -} - -variable "vault_addr" { - type = string - description = "The local vault API listen address" -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - - -variable "vault_root_token" { - type = string - description = "The vault root token" -} - -locals { - primary_leader_addr = var.ip_version == 6 ? var.primary_leader_host.ipv6 : var.primary_leader_host.private_ip - token_id = random_uuid.token_id.id - secondary_public_key = enos_remote_exec.fetch_secondary_public_key.stdout -} - -resource "random_uuid" "token_id" {} - -resource "enos_remote_exec" "fetch_secondary_public_key" { - depends_on = [random_uuid.token_id] - environment = { - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - } - - inline = ["${var.vault_install_dir}/vault write -field secondary_public_key -f sys/replication/dr/secondary/generate-public-key"] - - transport = { - ssh = { - host = var.primary_leader_host.public_ip - } - } -} - -output "secondary_public_key" { - value = local.secondary_public_key -} diff --git a/enos/modules/generate_secondary_token/main.tf b/enos/modules/generate_secondary_token/main.tf deleted file mode 100644 index 41b2774..0000000 --- a/enos/modules/generate_secondary_token/main.tf +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - random = { - source = "hashicorp/random" - version = ">= 3.4.3" - } - } -} - -variable "ip_version" { - type = number - description = "The IP version used for the Vault TCP listener" - - validation { - condition = contains([4, 6], var.ip_version) - error_message = "The ip_version must be either 4 or 6" - } -} - -variable "primary_leader_host" { - type = object({ - ipv6 = string - private_ip = string - public_ip = string - }) - description = "The primary cluster leader host" -} - -variable "replication_type" { - type = string - description = "The type of replication to perform" - - validation { - condition = contains(["dr", "performance"], var.replication_type) - error_message = "The replication_type must be either dr or performance" - } -} - -variable "vault_addr" { - type = string - description = "The local vault API listen address" -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_root_token" { - type = string - description = "The vault root token" -} - -locals { - primary_leader_addr = var.ip_version == 6 ? 
var.primary_leader_host.ipv6 : var.primary_leader_host.private_ip - token_id = random_uuid.token_id.id - secondary_token = enos_remote_exec.fetch_secondary_token.stdout -} - -resource "random_uuid" "token_id" {} - -resource "enos_remote_exec" "fetch_secondary_token" { - depends_on = [random_uuid.token_id] - environment = { - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - } - - inline = ["${var.vault_install_dir}/vault write sys/replication/${var.replication_type}/primary/secondary-token id=${local.token_id} |sed -n '/^wrapping_token:/p' |awk '{print $2}'"] - - transport = { - ssh = { - host = var.primary_leader_host.public_ip - } - } -} - -output "secondary_token" { - value = local.secondary_token -} diff --git a/enos/modules/get_local_metadata/main.tf b/enos/modules/get_local_metadata/main.tf deleted file mode 100644 index 2b1ee6d..0000000 --- a/enos/modules/get_local_metadata/main.tf +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -resource "enos_local_exec" "get_build_date" { - scripts = [abspath("${path.module}/scripts/build_date.sh")] -} - -resource "enos_local_exec" "get_revision" { - inline = ["git rev-parse HEAD"] -} - -resource "enos_local_exec" "get_version" { - inline = ["${abspath("${path.module}/scripts/version.sh")} version"] -} - -resource "enos_local_exec" "get_version_base" { - inline = ["${abspath("${path.module}/scripts/version.sh")} version-base"] -} - -resource "enos_local_exec" "get_version_pre" { - inline = ["${abspath("${path.module}/scripts/version.sh")} version-pre"] -} - -resource "enos_local_exec" "get_version_meta" { - inline = ["${abspath("${path.module}/scripts/version.sh")} version-meta"] -} - -output "build_date" { - value = trimspace(enos_local_exec.get_build_date.stdout) -} - -output "revision" { - value = 
trimspace(enos_local_exec.get_revision.stdout) -} - -output "version" { - value = trimspace(enos_local_exec.get_version.stdout) -} - -output "version_base" { - value = trimspace(enos_local_exec.get_version_base.stdout) -} - -output "version_pre" { - value = trimspace(enos_local_exec.get_version_pre.stdout) -} - -output "version_meta" { - value = trimspace(enos_local_exec.get_version_meta.stdout) -} diff --git a/enos/modules/get_local_metadata/scripts/build_date.sh b/enos/modules/get_local_metadata/scripts/build_date.sh deleted file mode 100755 index ea63c74..0000000 --- a/enos/modules/get_local_metadata/scripts/build_date.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -eu -o pipefail - -pushd "$(git rev-parse --show-toplevel)" > /dev/null -make ci-get-date -popd > /dev/null diff --git a/enos/modules/get_local_metadata/scripts/version.sh b/enos/modules/get_local_metadata/scripts/version.sh deleted file mode 100755 index ed1238b..0000000 --- a/enos/modules/get_local_metadata/scripts/version.sh +++ /dev/null @@ -1,97 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -set -euo pipefail - -# Get the full version information -# this is only needed for local enos builds in order to get the default version from version_base.go -# this should match the default version that the binary has been built with -# CRT release builds use the new static version from ./release/VERSION -function version() { - local version - local prerelease - local metadata - - version=$(version_base) - prerelease=$(version_pre) - metadata=$(version_metadata) - - if [ -n "$metadata" ] && [ -n "$prerelease" ]; then - echo "$version-$prerelease+$metadata" - elif [ -n "$metadata" ]; then - echo "$version+$metadata" - elif [ -n "$prerelease" ]; then - echo "$version-$prerelease" - else - echo "$version" - fi -} - -# Get the base version -function version_base() { - : "${VAULT_VERSION:=""}" - - if [ -n "$VAULT_VERSION" ]; then - echo "$VAULT_VERSION" - return - fi - - : "${VERSION_FILE:=$(repo_root)/version/VERSION}" - awk -F- '{ print $1 }' < "$VERSION_FILE" -} - -# Get the version pre-release -function version_pre() { - : "${VAULT_PRERELEASE:=""}" - - if [ -n "$VAULT_PRERELEASE" ]; then - echo "$VAULT_PRERELEASE" - return - fi - - : "${VERSION_FILE:=$(repo_root)/version/VERSION}" - awk -F- '{ print $2 }' < "$VERSION_FILE" -} - -# Get the version metadata, which is commonly the edition -function version_metadata() { - : "${VAULT_METADATA:=""}" - - if [ -n "$VAULT_METADATA" ]; then - echo "$VAULT_METADATA" - return - fi - - : "${VERSION_FILE:=$(repo_root)/version/version_base.go}" - awk '$1 == "VersionMetadata" && $2 == "=" { gsub(/"/, "", $3); print $3 }' < "$VERSION_FILE" -} - -# Determine the root directory of the repository -function repo_root() { - git rev-parse --show-toplevel -} - -# Run Enos local -function main() { - case $1 in - version) - version - ;; - version-base) - version_base - ;; - version-pre) - version_pre - ;; - version-meta) - version_metadata - ;; - *) - echo "unknown sub-command" >&2 - exit 1 - ;; - 
esac -} - -main "$@" diff --git a/enos/modules/install_packages/main.tf b/enos/modules/install_packages/main.tf deleted file mode 100644 index 007012b..0000000 --- a/enos/modules/install_packages/main.tf +++ /dev/null @@ -1,136 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -locals { - arch = { - "amd64" = "x86_64" - "arm64" = "aarch64" - } - package_manager = { - "amzn" = "yum" - "opensuse-leap" = "zypper" - "rhel" = "dnf" - "sles" = "zypper" - "ubuntu" = "apt" - } - distro_repos = { - "sles" = { - "15.6" = "https://download.opensuse.org/repositories/network:utilities/SLE_15_SP6/network:utilities.repo" - } - "rhel" = { - "8.10" = "https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm" - "9.5" = "https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm" - } - } -} - -variable "packages" { - type = list(string) - default = [] -} - -variable "hosts" { - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) - description = "The hosts to install packages on" -} - -variable "timeout" { - type = number - description = "The max number of seconds to wait before timing out. This is applied to each step so total timeout will be longer." - default = 120 -} - -variable "retry_interval" { - type = number - description = "How many seconds to wait between each retry" - default = 2 -} - -resource "enos_host_info" "hosts" { - for_each = var.hosts - - transport = { - ssh = { - host = each.value.public_ip - } - } -} - -# Synchronize repositories on remote machines. This does not update packages but only ensures that -# the remote hosts are configured with default upstream repositories that have been refreshed to -# the latest metedata. 
-resource "enos_remote_exec" "synchronize_repos" { - for_each = var.hosts - - environment = { - DISTRO = enos_host_info.hosts[each.key].distro - PACKAGE_MANAGER = local.package_manager[enos_host_info.hosts[each.key].distro] - RETRY_INTERVAL = var.retry_interval - TIMEOUT_SECONDS = var.timeout - } - - scripts = [abspath("${path.module}/scripts/synchronize-repos.sh")] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} - -# Add any additional repositories. -resource "enos_remote_exec" "add_repos" { - for_each = var.hosts - depends_on = [enos_remote_exec.synchronize_repos] - - environment = { - DISTRO_REPOS = try(local.distro_repos[enos_host_info.hosts[each.key].distro][enos_host_info.hosts[each.key].distro_version], "__none") - PACKAGE_MANAGER = local.package_manager[enos_host_info.hosts[each.key].distro] - RETRY_INTERVAL = var.retry_interval - TIMEOUT_SECONDS = var.timeout - } - - scripts = [abspath("${path.module}/scripts/add-repos.sh")] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} - -# Install any required packages. -resource "enos_remote_exec" "install_packages" { - for_each = var.hosts - depends_on = [ - enos_remote_exec.synchronize_repos, - enos_remote_exec.add_repos, - ] - - environment = { - PACKAGE_MANAGER = local.package_manager[enos_host_info.hosts[each.key].distro] - PACKAGES = length(var.packages) >= 1 ? join(" ", var.packages) : "__skip" - RETRY_INTERVAL = var.retry_interval - TIMEOUT_SECONDS = var.timeout - } - - scripts = [abspath("${path.module}/scripts/install-packages.sh")] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} diff --git a/enos/modules/install_packages/scripts/add-repos.sh b/enos/modules/install_packages/scripts/add-repos.sh deleted file mode 100644 index 47f3279..0000000 --- a/enos/modules/install_packages/scripts/add-repos.sh +++ /dev/null @@ -1,84 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "${PACKAGE_MANAGER}" ]] && fail "PACKAGE_MANAGER env variable has not been set" -[[ -z "${RETRY_INTERVAL}" ]] && fail "RETRY_INTERVAL env variable has not been set" -[[ -z "${TIMEOUT_SECONDS}" ]] && fail "TIMEOUT_SECONDS env variable has not been set" - -# Add any repositories that have have been passed in -add_repos() { - # If we don't have any repos on the list for this distro, no action needed. - if [ ${#DISTRO_REPOS[@]} -lt 1 ]; then - echo "DISTRO_REPOS is empty; No repos required for the packages for this Linux distro." - return 0 - fi - - case $PACKAGE_MANAGER in - apt) - # NOTE: We do not currently add any apt repositories in our scenarios. I suspect if that time - # comes we'll need to add support for apt-key here. - for repo in ${DISTRO_REPOS}; do - if [ "$repo" == "__none" ]; then - continue - fi - sudo add-apt-repository "${repo}" - done - ;; - dnf) - for repo in ${DISTRO_REPOS}; do - if [ "$repo" == "__none" ]; then - continue - fi - sudo dnf install -y "${repo}" - sudo dnf makecache -y - done - ;; - yum) - for repo in ${DISTRO_REPOS}; do - if [ "$repo" == "__none" ]; then - continue - fi - sudo yum install -y "${repo}" - sudo yum makecache -y - done - ;; - zypper) - # Add each repo - for repo in ${DISTRO_REPOS}; do - if [ "$repo" == "__none" ]; then - continue - fi - if sudo zypper lr "${repo}"; then - echo "A repo named ${repo} already exists, skipping..." 
- continue - fi - sudo zypper --gpg-auto-import-keys --non-interactive addrepo "${repo}" - done - sudo zypper --gpg-auto-import-keys ref - sudo zypper --gpg-auto-import-keys refs - ;; - *) - fail "Unsupported package manager: ${PACKAGE_MANAGER}" - ;; - esac -} - -begin_time=$(date +%s) -end_time=$((begin_time + TIMEOUT_SECONDS)) -while [ "$(date +%s)" -lt "$end_time" ]; do - if add_repos; then - exit 0 - fi - - sleep "$RETRY_INTERVAL" -done - -fail "Timed out waiting for distro repos to be set up" diff --git a/enos/modules/install_packages/scripts/install-packages.sh b/enos/modules/install_packages/scripts/install-packages.sh deleted file mode 100644 index 0b9bfde..0000000 --- a/enos/modules/install_packages/scripts/install-packages.sh +++ /dev/null @@ -1,105 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "${RETRY_INTERVAL}" ]] && fail "RETRY_INTERVAL env variable has not been set" -[[ -z "${TIMEOUT_SECONDS}" ]] && fail "TIMEOUT_SECONDS env variable has not been set" -[[ -z "${PACKAGES}" ]] && fail "PACKAGES env variable has not been set" -[[ -z "${PACKAGE_MANAGER}" ]] && fail "PACKAGE_MANAGER env variable has not been set" - -# Install packages based on the provided packages and package manager. We assume that the repositories -# have already been synchronized by the repo setup that is a prerequisite for this script. -install_packages() { - if [[ "${PACKAGES}" = "__skip" ]]; then - return 0 - fi - - set -x - echo "Installing Dependencies: ${PACKAGES}" - - # Use the default package manager of the current Linux distro to install packages - case $PACKAGE_MANAGER in - apt) - for package in ${PACKAGES}; do - if dpkg -s "${package}"; then - echo "Skipping installation of ${package} because it is already installed" - continue - else - echo "Installing ${package}" - local output - if ! 
output=$(sudo apt install -y "${package}" 2>&1); then - echo "Failed to install ${package}: ${output}" 1>&2 - return 1 - fi - fi - done - ;; - dnf) - for package in ${PACKAGES}; do - if rpm -q "${package}"; then - echo "Skipping installation of ${package} because it is already installed" - continue - else - echo "Installing ${package}" - local output - if ! output=$(sudo dnf -y install "${package}" 2>&1); then - echo "Failed to install ${package}: ${output}" 1>&2 - return 1 - fi - fi - done - ;; - yum) - for package in ${PACKAGES}; do - if rpm -q "${package}"; then - echo "Skipping installation of ${package} because it is already installed" - continue - else - echo "Installing ${package}" - local output - if ! output=$(sudo yum -y install "${package}" 2>&1); then - echo "Failed to install ${package}: ${output}" 1>&2 - return 1 - fi - fi - done - ;; - zypper) - for package in ${PACKAGES}; do - if rpm -q "${package}"; then - echo "Skipping installation of ${package} because it is already installed" - continue - else - echo "Installing ${package}" - local output - if ! output=$(sudo zypper --non-interactive install -y -l --force-resolution "${package}" 2>&1); then - echo "Failed to install ${package}: ${output}" 1>&2 - return 1 - fi - fi - done - ;; - *) - fail "No matching package manager provided." - ;; - esac -} - -begin_time=$(date +%s) -end_time=$((begin_time + TIMEOUT_SECONDS)) -while [[ "$(date +%s)" -lt "${end_time}" ]]; do - if install_packages; then - exit 0 - fi - - sleep "${RETRY_INTERVAL}" -done - -fail "Timed out waiting for packages to install" diff --git a/enos/modules/install_packages/scripts/synchronize-repos.sh b/enos/modules/install_packages/scripts/synchronize-repos.sh deleted file mode 100644 index d5890fe..0000000 --- a/enos/modules/install_packages/scripts/synchronize-repos.sh +++ /dev/null @@ -1,151 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "${PACKAGE_MANAGER}" ]] && fail "PACKAGE_MANAGER env variable has not been set" -[[ -z "${RETRY_INTERVAL}" ]] && fail "RETRY_INTERVAL env variable has not been set" -[[ -z "${TIMEOUT_SECONDS}" ]] && fail "TIMEOUT_SECONDS env variable has not been set" - -# The SLES AMI's do not come configured with Zypper repositories by default. To get them you -# have to run SUSEConnect to register the instance with SUSE. On the AMI this is handled -# automatically by a oneshot systemd unit called guestregister.service. This oneshot service needs -# to complete before any other repo or package steps are completed. At the time of writing it's very -# unreliable so we have to ensure that it has correctly executed ourselves or restart it. We do this -# by checking if the guestregister.service has reached the correct "inactive" state that we need. -# If it hasn't reached that state it's usually in some sort of active state, i.e. running, or it has -# failed. If it's in one of the active states we need to let it continue and check the status when -# it completes. If it has completed but is failed we'll restart the service to re-run the script that -# executes SUSEConnect. -sles_check_guestregister_service_and_restart_if_failed() { - local active_state - local failed_state - - # systemctl returns non-zero exit codes. We rely on output here because all states don't have - # their own exit code. 
- set +e - active_state=$(sudo systemctl is-active guestregister.service) - failed_state=$(sudo systemctl is-failed guestregister.service) - set -e - - case "$active_state" in - active | activating | deactivating) - # It's running so we'll return 1 and get retried by the caller - echo "the guestregister.service is still in the ${active_state} state" 1>&2 - return 1 - ;; - *) - if [ "$active_state" == "inactive" ] && [ "$failed_state" == "inactive" ]; then - # The oneshot has completed and hasn't "failed" - echo "the guestregister.service is 'inactive' for both active and failed states" - return 0 - fi - - # Our service is stopped and failed, restart it and hope it works the next time - sudo systemctl restart --wait guestregister.service - ;; - esac -} - -# Check or restart the guestregister service if it has failed. If it passes do another check to make -# sure that the zypper repositories list isn't empty. -sles_ensure_suseconnect() { - local health_output - if ! health_output=$(sles_check_guestregister_service_and_restart_if_failed); then - echo "the guestregister.service failed to reach a healthy state: ${health_output}" 1>&2 - return 1 - fi - - # Make sure Zypper has repositories. - if ! lr_output=$(zypper lr); then - echo "The guestregister.service failed. Unable to SUSEConnect and thus have no Zypper repositories: ${lr_output}: ${health_output}." 1>&2 - return 1 - fi - - return 0 -} - -# Synchronize our repositories so that futher installation steps are working with updated cache -# and repo metadata. -synchronize_repos() { - case $PACKAGE_MANAGER in - apt) - sudo apt update - ;; - dnf) - sudo dnf makecache - ;; - yum) - sudo yum makecache - ;; - zypper) - if [ "$DISTRO" == "sles" ]; then - if ! 
sles_ensure_suseconnect; then - return 1 - fi - fi - sudo zypper --gpg-auto-import-keys --non-interactive ref - sudo zypper --gpg-auto-import-keys --non-interactive refs - ;; - *) - return 0 - ;; - esac -} - -# Function to check cloud-init status and retry on failure -# Before we start to modify repositories and install packages we'll wait for cloud-init to finish -# so it doesn't race with any of our package installations. -# We run as sudo because Amazon Linux 2 throws Python 2.7 errors when running `cloud-init status` as -# non-root user (known bug). -wait_for_cloud_init() { - if output=$(sudo cloud-init status --wait); then - return 0 - else - res=$? - case $res in - 2) - { - echo "WARNING: cloud-init did not complete successfully but recovered." - echo "Exit code: $res" - echo "Output: $output" - echo "Here are the logs for the failure:" - cat /var/log/cloud-init-* - } 1>&2 - return 0 - ;; - *) - { - echo "cloud-init did not complete successfully." - echo "Exit code: $res" - echo "Output: $output" - echo "Here are the logs for the failure:" - cat /var/log/cloud-init-* - } 1>&2 - return 1 - ;; - esac - fi -} - -# Wait for cloud-init if it exists -type cloud-init && wait_for_cloud_init - -# Synchronizing repos -begin_time=$(date +%s) -end_time=$((begin_time + TIMEOUT_SECONDS)) -while [ "$(date +%s)" -lt "$end_time" ]; do - if synchronize_repos; then - exit 0 - fi - - sleep "$RETRY_INTERVAL" -done - -fail "Timed out waiting for distro repos to be set up" diff --git a/enos/modules/k8s_deploy_vault/main.tf b/enos/modules/k8s_deploy_vault/main.tf deleted file mode 100644 index a422be4..0000000 --- a/enos/modules/k8s_deploy_vault/main.tf +++ /dev/null @@ -1,165 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_version = ">= 1.0" - - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - - helm = { - source = "hashicorp/helm" - version = "2.6.0" - } - } -} - -locals { - helm_chart_settings = { - "server.ha.enabled" = "true" - "server.ha.replicas" = var.vault_instance_count - "server.ha.raft.enabled" = "true" - "server.affinity" = "" - "server.image.repository" = var.image_repository - "server.image.tag" = var.image_tag - "server.image.pullPolicy" = "Never" # Forces local image use - "server.resources.requests.cpu" = "50m" - "server.limits.memory" = "200m" - "server.limits.cpu" = "200m" - "server.ha.raft.config" = file("${abspath(path.module)}/raft-config.hcl") - "server.dataStorage.size" = "100m" - "server.logLevel" = var.vault_log_level - } - all_helm_chart_settings = var.ent_license == null ? local.helm_chart_settings : merge(local.helm_chart_settings, { - "server.extraEnvironmentVars.VAULT_LICENSE" = var.ent_license - }) - - vault_address = "http://127.0.0.1:8200" - - instance_indexes = [for idx in range(var.vault_instance_count) : tostring(idx)] - - leader_idx = local.instance_indexes[0] - followers_idx = toset(slice(local.instance_indexes, 1, var.vault_instance_count)) -} - -resource "helm_release" "vault" { - name = "vault" - - repository = "https://helm.releases.hashicorp.com" - chart = "vault" - - dynamic "set" { - for_each = local.all_helm_chart_settings - - content { - name = set.key - value = set.value - } - } -} - -data "enos_kubernetes_pods" "vault_pods" { - kubeconfig_base64 = var.kubeconfig_base64 - context_name = var.context_name - namespace = helm_release.vault.namespace - label_selectors = [ - "app.kubernetes.io/name=vault", - "component=server" - ] - - depends_on = [helm_release.vault] -} - -resource "enos_vault_init" "leader" { - bin_path = "/bin/vault" - vault_addr = local.vault_address - - key_shares = 5 - key_threshold = 3 - - transport = { - 
kubernetes = { - kubeconfig_base64 = var.kubeconfig_base64 - context_name = var.context_name - pod = data.enos_kubernetes_pods.vault_pods.pods[local.leader_idx].name - namespace = data.enos_kubernetes_pods.vault_pods.pods[local.leader_idx].namespace - } - } -} - -resource "enos_vault_unseal" "leader" { - bin_path = "/bin/vault" - vault_addr = local.vault_address - seal_type = "shamir" - unseal_keys = enos_vault_init.leader.unseal_keys_b64 - - transport = { - kubernetes = { - kubeconfig_base64 = var.kubeconfig_base64 - context_name = var.context_name - pod = data.enos_kubernetes_pods.vault_pods.pods[local.leader_idx].name - namespace = data.enos_kubernetes_pods.vault_pods.pods[local.leader_idx].namespace - } - } - - depends_on = [enos_vault_init.leader] -} - -// We need to manually join the followers since the join request must only happen after the leader -// has been initialized. We could use retry join, but in that case we'd need to restart the follower -// pods once the leader is setup. The default helm deployment configuration for an HA cluster as -// documented here: https://learn.hashicorp.com/tutorials/vault/kubernetes-raft-deployment-guide#configure-vault-helm-chart -// uses a liveness probe that automatically restarts nodes that are not healthy. This works well for -// clusters that are configured with auto-unseal as eventually the nodes would join and unseal. 
-resource "enos_remote_exec" "raft_join" { - for_each = local.followers_idx - - inline = [ - // asserts that vault is ready - "for i in 1 2 3 4 5; do vault status > /dev/null 2>&1 && break || sleep 5; done", - // joins the follower to the leader - "vault operator raft join http://vault-0.vault-internal:8200" - ] - - transport = { - kubernetes = { - kubeconfig_base64 = var.kubeconfig_base64 - context_name = var.context_name - pod = data.enos_kubernetes_pods.vault_pods.pods[each.key].name - namespace = data.enos_kubernetes_pods.vault_pods.pods[each.key].namespace - } - } - - depends_on = [enos_vault_unseal.leader] -} - - -resource "enos_vault_unseal" "followers" { - for_each = local.followers_idx - - bin_path = "/bin/vault" - vault_addr = local.vault_address - seal_type = "shamir" - unseal_keys = enos_vault_init.leader.unseal_keys_b64 - - transport = { - kubernetes = { - kubeconfig_base64 = var.kubeconfig_base64 - context_name = var.context_name - pod = data.enos_kubernetes_pods.vault_pods.pods[each.key].name - namespace = data.enos_kubernetes_pods.vault_pods.pods[each.key].namespace - } - } - - depends_on = [enos_remote_exec.raft_join] -} - -output "vault_root_token" { - value = enos_vault_init.leader.root_token -} - -output "vault_pods" { - value = data.enos_kubernetes_pods.vault_pods.pods -} diff --git a/enos/modules/k8s_deploy_vault/variables.tf b/enos/modules/k8s_deploy_vault/variables.tf deleted file mode 100644 index 9730f87..0000000 --- a/enos/modules/k8s_deploy_vault/variables.tf +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -variable "context_name" { - type = string - description = "The name of the k8s context for Vault" -} - -variable "ent_license" { - type = string - description = "The value of a valid Vault Enterprise license" -} - -variable "image_repository" { - type = string - description = "The name of the Vault repository, ie hashicorp/vault or hashicorp/vault-enterprise for the image to deploy" -} - -variable "image_tag" { - type = string - description = "The tag of the vault image to deploy" -} - -variable "kubeconfig_base64" { - type = string - description = "The base64 encoded version of the Kubernetes configuration file" -} - -variable "vault_edition" { - type = string - description = "The Vault product edition" -} - -variable "vault_instance_count" { - type = number - description = "How many vault instances are in the cluster" -} - -variable "vault_log_level" { - description = "The server log level for Vault logs. Supported values (in order of detail) are trace, debug, info, warn, and err." - type = string -} diff --git a/enos/modules/k8s_vault_verify_replication/main.tf b/enos/modules/k8s_vault_verify_replication/main.tf deleted file mode 100644 index 6660673..0000000 --- a/enos/modules/k8s_vault_verify_replication/main.tf +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -locals { - instances = toset([for idx in range(var.vault_instance_count) : tostring(idx)]) -} - -resource "enos_remote_exec" "replication_status" { - for_each = local.instances - - inline = ["vault read -format=json sys/replication/status"] - - transport = { - kubernetes = { - kubeconfig_base64 = var.kubeconfig_base64 - context_name = var.context_name - pod = var.vault_pods[each.key].name - namespace = var.vault_pods[each.key].namespace - } - } -} - -resource "enos_local_exec" "verify_replication_status" { - - for_each = enos_remote_exec.replication_status - - environment = { - STATUS = each.value.stdout - VAULT_EDITION = var.vault_edition - } - - content = abspath("${path.module}/scripts/smoke-verify-replication.sh") -} diff --git a/enos/modules/k8s_vault_verify_replication/scripts/smoke-verify-replication.sh b/enos/modules/k8s_vault_verify_replication/scripts/smoke-verify-replication.sh deleted file mode 100755 index 6987f7c..0000000 --- a/enos/modules/k8s_vault_verify_replication/scripts/smoke-verify-replication.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -# The Vault replication smoke test, documented in -# https://docs.google.com/document/d/16sjIk3hzFDPyY5A9ncxTZV_9gnpYSF1_Vx6UA1iiwgI/edit#heading=h.kgrxf0f1et25 - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -# Replication STATUS endpoint should have data.mode disabled for CE release -if [ "$VAULT_EDITION" == "ce" ]; then - if [ "$(echo "${STATUS}" | jq -r '.data.mode')" != "disabled" ]; then - fail "replication data mode is not disabled for CE release!" - fi -else - if [ "$(echo "${STATUS}" | jq -r '.data.dr')" == "" ]; then - fail "DR replication should be available for an ENT release!" 
- fi - if [ "$(echo "${STATUS}" | jq -r '.data.performance')" == "" ]; then - fail "Performance replication should be available for an ENT release!" - fi -fi diff --git a/enos/modules/k8s_vault_verify_replication/variables.tf b/enos/modules/k8s_vault_verify_replication/variables.tf deleted file mode 100644 index 011ae9c..0000000 --- a/enos/modules/k8s_vault_verify_replication/variables.tf +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -variable "vault_instance_count" { - type = number - description = "How many vault instances are in the cluster" -} - -variable "vault_edition" { - type = string - description = "The vault product edition" -} - -variable "vault_pods" { - type = list(object({ - name = string - namespace = string - })) - description = "The vault instances for the cluster to verify" -} - -variable "kubeconfig_base64" { - type = string - description = "The base64 encoded version of the Kubernetes configuration file" -} - -variable "context_name" { - type = string - description = "The name of the k8s context for Vault" -} diff --git a/enos/modules/k8s_vault_verify_ui/main.tf b/enos/modules/k8s_vault_verify_ui/main.tf deleted file mode 100644 index 4013254..0000000 --- a/enos/modules/k8s_vault_verify_ui/main.tf +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - - -terraform { - required_providers { - enos = { - version = "> 0.4.0" - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -locals { - instances = toset([for idx in range(var.vault_instance_count) : tostring(idx)]) -} - -resource "enos_remote_exec" "curl_ui" { - for_each = local.instances - - inline = [ - "curl -s -o /dev/null -w '%%{redirect_url}' http://localhost:8200/", - "curl -s -o /dev/null -Iw '%%{http_code}\n' http://localhost:8200/ui/" - ] - - transport = { - kubernetes = { - kubeconfig_base64 = var.kubeconfig_base64 - context_name = var.context_name - pod = var.vault_pods[each.key].name - namespace = var.vault_pods[each.key].namespace - } - } -} - -resource "enos_local_exec" "verify_ui" { - for_each = enos_remote_exec.curl_ui - - environment = { - REDIRECT_URL = split("\n", each.value.stdout)[0] - UI_URL_RESULT = split("\n", each.value.stdout)[1] - } - - scripts = [abspath("${path.module}/scripts/smoke-verify-ui.sh")] -} diff --git a/enos/modules/k8s_vault_verify_ui/scripts/smoke-verify-ui.sh b/enos/modules/k8s_vault_verify_ui/scripts/smoke-verify-ui.sh deleted file mode 100755 index 9964df2..0000000 --- a/enos/modules/k8s_vault_verify_ui/scripts/smoke-verify-ui.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -if [ "${REDIRECT_URL}" != "http://localhost:8200/ui/" ]; then - fail "Port 8200 not redirecting to UI" -fi -if [ "${UI_URL_RESULT}" != "200" ]; then - fail "Vault UI is not available" -fi diff --git a/enos/modules/k8s_vault_verify_ui/variables.tf b/enos/modules/k8s_vault_verify_ui/variables.tf deleted file mode 100644 index 3f000c5..0000000 --- a/enos/modules/k8s_vault_verify_ui/variables.tf +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -variable "vault_instance_count" { - type = number - description = "How many vault instances are in the cluster" -} - -variable "vault_pods" { - type = list(object({ - name = string - namespace = string - })) - description = "The vault instances for the cluster to verify" -} - -variable "kubeconfig_base64" { - type = string - description = "The base64 encoded version of the Kubernetes configuration file" -} - -variable "context_name" { - type = string - description = "The name of the k8s context for Vault" -} diff --git a/enos/modules/k8s_vault_verify_version/main.tf b/enos/modules/k8s_vault_verify_version/main.tf deleted file mode 100644 index 3574635..0000000 --- a/enos/modules/k8s_vault_verify_version/main.tf +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -locals { - instances = toset([for idx in range(var.vault_instance_count) : tostring(idx)]) - expected_version = var.vault_edition == "ce" ? 
var.vault_product_version : "${var.vault_product_version}-ent" -} - -resource "enos_remote_exec" "release_info" { - for_each = local.instances - - environment = { - VAULT_BIN_PATH = var.vault_bin_path - } - - scripts = [abspath("${path.module}/scripts/get-status.sh")] - - transport = { - kubernetes = { - kubeconfig_base64 = var.kubeconfig_base64 - context_name = var.context_name - pod = var.vault_pods[each.key].name - namespace = var.vault_pods[each.key].namespace - } - } -} - -resource "enos_local_exec" "smoke-verify-version" { - for_each = enos_remote_exec.release_info - - environment = { - ACTUAL_VERSION = jsondecode(each.value.stdout).version - BUILD_DATE = var.vault_build_date - CHECK_BUILD_DATE = var.check_build_date - EXPECTED_VERSION = var.vault_product_version, - VAULT_EDITION = var.vault_edition, - VAULT_REVISION = var.vault_product_revision, - VAULT_STATUS = jsonencode(jsondecode(each.value.stdout).status) - } - - scripts = [abspath("${path.module}/scripts/smoke-verify-version.sh")] -} diff --git a/enos/modules/k8s_vault_verify_version/scripts/get-status.sh b/enos/modules/k8s_vault_verify_version/scripts/get-status.sh deleted file mode 100755 index b68e0f6..0000000 --- a/enos/modules/k8s_vault_verify_version/scripts/get-status.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/env sh -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -e - -status=$(${VAULT_BIN_PATH} status -format=json) -version=$(${VAULT_BIN_PATH} version) - -echo "{\"status\": ${status}, \"version\": \"${version}\"}" diff --git a/enos/modules/k8s_vault_verify_version/scripts/smoke-verify-version.sh b/enos/modules/k8s_vault_verify_version/scripts/smoke-verify-version.sh deleted file mode 100755 index fc0de96..0000000 --- a/enos/modules/k8s_vault_verify_version/scripts/smoke-verify-version.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -# The Vault smoke test to verify the Vault version installed - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -if [[ "${CHECK_BUILD_DATE}" == "false" ]]; then - expected_build_date="" -else - cfg_build_date="${BUILD_DATE}" - if [[ "${cfg_build_date}" == "" ]]; then - cfg_build_date=$(echo "${VAULT_STATUS}" | jq -Mr .build_date) - fi - expected_build_date=", built $cfg_build_date" -fi - -vault_expected_version="Vault v${EXPECTED_VERSION} (${VAULT_REVISION})" - -case "${VAULT_EDITION}" in - ce) version_expected="${vault_expected_version}${expected_build_date}" ;; - ent) version_expected="${vault_expected_version}${expected_build_date}" ;; - ent.hsm) version_expected="${vault_expected_version}${expected_build_date} (cgo)" ;; - ent.fips1403) version_expected="${vault_expected_version}${expected_build_date} (cgo)" ;; - ent.hsm.fips1403) version_expected="${vault_expected_version}${expected_build_date} (cgo)" ;; - *) fail "(${VAULT_EDITION}) does not match any known Vault editions" ;; -esac - -version_expected_nosha=$(echo "$version_expected" | awk '!($3="")' | sed 's/ / /' | sed -e 's/[[:space:]]*$//') - -if [[ "${ACTUAL_VERSION}" == "$version_expected_nosha" ]] || [[ "${ACTUAL_VERSION}" == "$version_expected" ]]; then - echo "Version verification succeeded!" -else - echo "Version checking enabled: ${CHECK_BUILD_DATE}" 1>&2 - echo "Given build date: ${BUILD_DATE}" 1>&2 - echo "Interpreted build date: ${cfg_build_date}" 1>&2 - - fail "expected Version=$version_expected or $version_expected_nosha, got: ${ACTUAL_VERSION}" -fi diff --git a/enos/modules/k8s_vault_verify_version/variables.tf b/enos/modules/k8s_vault_verify_version/variables.tf deleted file mode 100644 index 05ca660..0000000 --- a/enos/modules/k8s_vault_verify_version/variables.tf +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -variable "vault_instance_count" { - type = number - description = "How many vault instances are in the cluster" -} - -variable "vault_pods" { - type = list(object({ - name = string - namespace = string - })) - description = "The vault instances for the cluster to verify" -} - -variable "vault_bin_path" { - type = string - description = "The path to the vault binary" - default = "/bin/vault" -} - -variable "vault_product_version" { - type = string - description = "The vault product version" -} - -variable "vault_product_revision" { - type = string - description = "The vault product revision" -} - -variable "vault_edition" { - type = string - description = "The vault product edition" -} - -variable "vault_root_token" { - type = string - description = "The vault root token" -} - -variable "kubeconfig_base64" { - type = string - description = "The base64 encoded version of the Kubernetes configuration file" -} - -variable "context_name" { - type = string - description = "The name of the k8s context for Vault" -} - -variable "check_build_date" { - type = bool - description = "Whether or not to verify that the version includes the build date" -} - -variable "vault_build_date" { - type = string - description = "The build date of the vault docker image to check" - default = "" -} diff --git a/enos/modules/k8s_vault_verify_write_data/main.tf b/enos/modules/k8s_vault_verify_write_data/main.tf deleted file mode 100644 index 5227971..0000000 --- a/enos/modules/k8s_vault_verify_write_data/main.tf +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -locals { - instances = toset([for idx in range(var.vault_instance_count) : tostring(idx)]) -} - -resource "enos_remote_exec" "smoke-enable-secrets-kv" { - environment = { - VAULT_TOKEN = var.vault_root_token - } - - inline = ["${var.vault_bin_path} secrets enable -path=\"secret\" kv"] - - transport = { - kubernetes = { - kubeconfig_base64 = var.kubeconfig_base64 - context_name = var.context_name - pod = var.vault_pods[0].name - namespace = var.vault_pods[0].namespace - } - } -} - -# Verify that we can enable the k/v secrets engine and write data to it. -resource "enos_remote_exec" "smoke-write-test-data" { - depends_on = [enos_remote_exec.smoke-enable-secrets-kv] - for_each = local.instances - - environment = { - VAULT_TOKEN = var.vault_root_token - } - - inline = ["${var.vault_bin_path} kv put secret/test smoke${each.key}=fire"] - - transport = { - kubernetes = { - kubeconfig_base64 = var.kubeconfig_base64 - context_name = var.context_name - pod = var.vault_pods[each.key].name - namespace = var.vault_pods[each.key].namespace - } - } -} diff --git a/enos/modules/k8s_vault_verify_write_data/variables.tf b/enos/modules/k8s_vault_verify_write_data/variables.tf deleted file mode 100644 index 4e1754e..0000000 --- a/enos/modules/k8s_vault_verify_write_data/variables.tf +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -variable "vault_instance_count" { - type = number - description = "How many vault instances are in the cluster" -} - -variable "vault_pods" { - type = list(object({ - name = string - namespace = string - })) - description = "The vault instances for the cluster to verify" -} - -variable "vault_bin_path" { - type = string - description = "The path to the vault binary" - default = "/bin/vault" -} - -variable "vault_root_token" { - type = string - description = "The vault root token" -} - -variable "kubeconfig_base64" { - type = string - description = "The base64 encoded version of the Kubernetes configuration file" -} - -variable "context_name" { - type = string - description = "The name of the k8s context for Vault" -} diff --git a/enos/modules/load_docker_image/main.tf b/enos/modules/load_docker_image/main.tf deleted file mode 100644 index 9f5e15c..0000000 --- a/enos/modules/load_docker_image/main.tf +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "cluster_name" { - type = string - description = "The name of the cluster to load the image into" -} - -variable "image" { - type = string - description = "The image name for the image to load, i.e. hashicorp/vault" -} - -variable "tag" { - type = string - description = "The tag for the image to load, i.e. 1.12.0-dev" -} - -variable "archive" { - type = string - description = "The path to the image archive to load" - default = null -} - -resource "enos_local_kind_load_image" "vault" { - cluster_name = var.cluster_name - image = var.image - tag = var.tag - archive = var.archive -} - -output "tag" { - value = var.tag - description = "The tag of the docker image to load without the tag, i.e. 
1.10.0" -} - -output "image" { - value = var.image - description = "The tag of the docker image to load without the tag, i.e. vault" -} - -output "repository" { - value = enos_local_kind_load_image.vault.loaded_images.repository - description = "The name of the image's repository, i.e. hashicorp/vault" -} diff --git a/enos/modules/local_kind_cluster/main.tf b/enos/modules/local_kind_cluster/main.tf deleted file mode 100644 index b21bfe6..0000000 --- a/enos/modules/local_kind_cluster/main.tf +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - random = { - source = "hashicorp/random" - version = ">= 3.4.3" - } - } -} - -resource "random_pet" "cluster_name" {} - -resource "enos_local_kind_cluster" "this" { - name = random_pet.cluster_name.id - kubeconfig_path = var.kubeconfig_path -} - -variable "kubeconfig_path" { - type = string -} - -output "cluster_name" { - value = random_pet.cluster_name.id -} - -output "kubeconfig_base64" { - value = enos_local_kind_cluster.this.kubeconfig_base64 -} - -output "context_name" { - value = enos_local_kind_cluster.this.context_name -} - -output "host" { - value = enos_local_kind_cluster.this.endpoint -} - -output "client_certificate" { - value = enos_local_kind_cluster.this.client_certificate -} - -output "client_key" { - value = enos_local_kind_cluster.this.client_key -} - -output "cluster_ca_certificate" { - value = enos_local_kind_cluster.this.cluster_ca_certificate -} diff --git a/enos/modules/read_license/main.tf b/enos/modules/read_license/main.tf deleted file mode 100644 index 823714f..0000000 --- a/enos/modules/read_license/main.tf +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -variable "file_name" {} - -output "license" { - value = file(var.file_name) -} diff --git a/enos/modules/replication_data/main.tf b/enos/modules/replication_data/main.tf deleted file mode 100644 index 91c89a4..0000000 --- a/enos/modules/replication_data/main.tf +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -// An arithmetic module for calculating inputs and outputs for various replication steps. - -variable "added_hosts" { - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) - default = {} -} - -variable "initial_hosts" { - description = "The initial set of Vault cluster hosts before removing and adding hosts" - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) - default = {} -} - -variable "removed_primary_host" { - type = object({ - ipv6 = string - private_ip = string - public_ip = string - }) - default = null -} - -variable "removed_follower_host" { - type = object({ - ipv6 = string - private_ip = string - public_ip = string - }) - default = null -} - -locals { - remaining_initial = setsubtract(values(var.initial_hosts), [var.removed_primary_host, var.removed_follower_host]) - remaining_hosts_list = tolist(setunion(values(var.added_hosts), local.remaining_initial)) - remaining_hosts = { for idx in range(length(local.remaining_hosts_list)) : idx => local.remaining_hosts_list[idx] } -} - -output "remaining_hosts" { - value = local.remaining_hosts -} diff --git a/enos/modules/restart_vault/main.tf b/enos/modules/restart_vault/main.tf deleted file mode 100644 index 2486671..0000000 --- a/enos/modules/restart_vault/main.tf +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - aws = { - source = "hashicorp/aws" - } - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "hosts" { - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) - description = "The vault hosts" -} - -variable "vault_addr" { - type = string - description = "The local vault api address" -} - -variable "vault_install_dir" { - type = string - description = "The directory where the vault binary is installed" -} - - -resource "enos_remote_exec" "restart" { - for_each = var.hosts - - environment = { - VAULT_ADDR = var.vault_addr - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/scripts/restart-vault.sh")] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} - diff --git a/enos/modules/restart_vault/scripts/restart-vault.sh b/enos/modules/restart_vault/scripts/restart-vault.sh deleted file mode 100644 index 3521994..0000000 --- a/enos/modules/restart_vault/scripts/restart-vault.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -if ! out=$(sudo systemctl stop vault 2>&1); then - fail "failed to stop vault: $out: $(sudo systemctl status vault)" -fi - -if ! out=$(sudo systemctl daemon-reload 2>&1); then - fail "failed to daemon-reload systemd: $out" 1>&2 -fi - -if ! out=$(sudo systemctl start vault 2>&1); then - fail "failed to start vault: $out: $(sudo systemctl status vault)" -fi - -count=0 -retries=5 -while :; do - # Check the Vault seal status - status=$($binpath status) - code=$? 
- - if [ $code == 0 ] || [ $code == 2 ]; then - # 0 is unsealed and 2 is running but sealed - echo "$status" - exit 0 - fi - - printf "Waiting for Vault cluster to be ready: status code: %s, status:\n%s\n" "$code" "$status" 2>&1 - - wait=$((3 ** count)) - count=$((count + 1)) - if [ "$count" -lt "$retries" ]; then - sleep "$wait" - else - fail "Timed out waiting for Vault node to be ready after restart" - fi -done diff --git a/enos/modules/seal_awskms/main.tf b/enos/modules/seal_awskms/main.tf deleted file mode 100644 index e8a1ad3..0000000 --- a/enos/modules/seal_awskms/main.tf +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "cluster_id" { - type = string -} - -variable "cluster_meta" { - type = string - default = null -} - -variable "cluster_ssh_keypair" { - type = string - default = null -} - -variable "common_tags" { - type = map(string) - default = null -} - -variable "other_resources" { - type = list(string) - default = [] -} - -locals { - cluster_name = var.cluster_meta == null ? var.cluster_id : "${var.cluster_id}-${var.cluster_meta}" -} - -resource "aws_kms_key" "key" { - description = "auto-unseal-key-${local.cluster_name}" - deletion_window_in_days = 7 // 7 is the shortest allowed window - tags = var.common_tags -} - -resource "aws_kms_alias" "alias" { - name = "alias/auto-unseal-key-${local.cluster_name}" - target_key_id = aws_kms_key.key.key_id -} - -output "attributes" { - description = "Seal device specific attributes" - value = { - kms_key_id = aws_kms_key.key.arn - } -} - -// We output our resource name and a collection of those passed in to create a full list of key -// resources that might be required for instance roles that are associated with some unseal types. 
-output "resource_name" { - description = "The awskms key name" - value = aws_kms_key.key.arn -} - -output "resource_names" { - description = "The list of awskms key names to associate with a role" - value = compact(concat([aws_kms_key.key.arn], var.other_resources)) -} diff --git a/enos/modules/seal_pkcs11/main.tf b/enos/modules/seal_pkcs11/main.tf deleted file mode 100644 index 084d364..0000000 --- a/enos/modules/seal_pkcs11/main.tf +++ /dev/null @@ -1,133 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -/* - -A seal module that emulates using a real PKCS#11 HSM. For this we'll use softhsm2. You'll -need softhsm2 and opensc installed to get access to the userspace tools and dynamic library that -Vault Enterprise will use. Here we'll take in the vault hosts and use the one of the nodes -to generate the hsm slot and the tokens, and then we'll copy the softhsm tokens to the other nodes. - -Using softhsm2 and opensc is a bit complicated but here's a cheat sheet for getting started. - -$ brew install softhsm opensc -or -$ sudo apt install softhsm2 opensc - -Create a softhsm slot. You can use anything you want for the pin and the supervisor pin. This will -output the slot identifier, which you'll use as the `slot` parameter in the seal config. -$ softhsm2-util --init-token --free --so-pin=1234 --pin=1234 --label="seal" | grep -oE '[0-9]+$' - -You can see the slots: -$ softhsm2-util --show-slots -Or use opensc's pkcs11-tool. Make sure to use your pin for the -p flag. The module that we refer -to is the location of the shared library that we need to provide to Vault Enterprise. Depending on -your platform or installation method this could be different. -$ pkcs11-tool --module /usr/local/Cellar/softhsm/2.6.1/lib/softhsm/libsofthsm2.so -a seal -p 1234 -IL - -Find yours -$ find /usr/local -type f -name libsofthsm2.so -print -quit - -Your tokens will be installed in the default directories.tokendir. See man softhsm2.conf(5) for -more details. 
On macOS from brew this is /usr/local/var/lib/softhsm/tokens/ - -Vault Enterprise supports creating the HSM keys, but for softhsm2 that would require us to -initialize with one node before copying the contents. So instead we'll create an HSM key and HMAC -key that we'll copy everywhere. - -$ pkcs11-tool --module /usr/local/Cellar/softhsm/2.6.1/lib/softhsm/libsofthsm2.so -a seal -p 1234 --token-label seal --keygen --usage-sign --label hsm_hmac --id 1 --key-type GENERIC:32 --private --sensitive -$ pkcs11-tool --module /usr/local/Cellar/softhsm/2.6.1/lib/softhsm/libsofthsm2.so -a seal -p 1234 --token-label seal --keygen --usage-sign --label hsm_aes --id 2 --key-type AES:32 --private --sensitive --usage-wrap - -Now you should be able to configure Vault Enterprise seal stanza. -*/ - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "cluster_id" { - type = string - description = "The VPC ID of the cluster" -} - -variable "cluster_meta" { - type = string - default = null - description = "Any metadata that needs to be passed in. If we're creating multiple softhsm tokens this value could be a prior KEYS_BASE64" -} - -variable "cluster_ssh_keypair" { - type = string - description = "The ssh keypair of the vault cluster. 
We need this to used the inherited provider for our target" -} - -variable "common_tags" { - type = map(string) - default = null -} - -variable "other_resources" { - type = list(string) - default = [] -} - -resource "random_string" "id" { - length = 8 - numeric = false - special = false - upper = false -} - -module "ec2_info" { - source = "../ec2_info" -} - -locals { - id = "${var.cluster_id}-${random_string.id.result}" -} - -module "target" { - source = "../target_ec2_instances" - ami_id = module.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"] - cluster_tag_key = local.id - common_tags = var.common_tags - instance_count = 1 - instance_types = { - amd64 = "t3a.small" - arm64 = "t4g.small" - } - ports_ingress = [ - { - description = "SSH" - port = 22 - protocol = "tcp" - }, - ] - // Make sure it's not too long as we use this for aws resources that size maximums that are easy - // to hit. - project_name = substr("vault-ci-softhsm-${local.id}", 0, 32) - ssh_keypair = var.cluster_ssh_keypair - vpc_id = var.cluster_id -} - -module "create_vault_keys" { - source = "../softhsm_create_vault_keys" - - cluster_id = var.cluster_id - hosts = module.target.hosts -} - -// Our attributes contain all required keys for the seal stanza and our base64 encoded softhsm -// token and keys. -output "attributes" { - description = "Seal device specific attributes" - value = module.create_vault_keys.all_attributes -} - -// Shim for chaining seals that require IAM roles -output "resource_name" { value = null } -output "resource_names" { value = var.other_resources } diff --git a/enos/modules/seal_shamir/main.tf b/enos/modules/seal_shamir/main.tf deleted file mode 100644 index 55e26d1..0000000 --- a/enos/modules/seal_shamir/main.tf +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -# A shim seal module for shamir seals. For Shamir seals the enos_vault_init resource will take care -# of creating our seal. 
- -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "cluster_id" { default = null } -variable "cluster_meta" { default = null } -variable "cluster_ssh_keypair" { default = null } -variable "common_tags" { default = null } -variable "image_id" { default = null } -variable "other_resources" { - type = list(string) - default = [] -} - -output "resource_name" { value = null } -output "resource_names" { value = var.other_resources } -output "attributes" { value = null } diff --git a/enos/modules/shutdown_multiple_nodes/main.tf b/enos/modules/shutdown_multiple_nodes/main.tf deleted file mode 100644 index c2781cd..0000000 --- a/enos/modules/shutdown_multiple_nodes/main.tf +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "old_hosts" { - type = map(object({ - private_ip = string - public_ip = string - })) - description = "The vault cluster instances to be shutdown" -} - -resource "enos_remote_exec" "shutdown_multiple_nodes" { - for_each = var.old_hosts - inline = ["sudo shutdown -P --no-wall; exit 0"] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} diff --git a/enos/modules/shutdown_node/main.tf b/enos/modules/shutdown_node/main.tf deleted file mode 100644 index 0458570..0000000 --- a/enos/modules/shutdown_node/main.tf +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "host" { - type = object({ - ipv6 = string - private_ip = string - public_ip = string - }) - description = "The node to shut down" -} - -resource "enos_remote_exec" "shutdown_node" { - inline = ["sudo shutdown -P --no-wall; exit 0"] - - transport = { - ssh = { - host = var.host.public_ip - } - } -} diff --git a/enos/modules/softhsm_create_vault_keys/main.tf b/enos/modules/softhsm_create_vault_keys/main.tf deleted file mode 100644 index 4132de8..0000000 --- a/enos/modules/softhsm_create_vault_keys/main.tf +++ /dev/null @@ -1,129 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "cluster_id" { - type = string -} - -variable "hosts" { - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) - description = "The hosts that will have access to the softhsm" -} - -locals { - pin = resource.random_string.pin.result - aes_label = "vault_hsm_aes_${local.pin}" - hmac_label = "vault_hsm_hmac_${local.pin}" - seal_attributes = jsondecode(resource.enos_remote_exec.create_keys.stdout) - target = tomap({ "0" = var.hosts[0] }) - token = "${var.cluster_id}_${local.pin}" -} - -resource "random_string" "pin" { - length = 5 - lower = true - upper = false - numeric = true - special = false -} - -module "install" { - source = "../softhsm_install" - - hosts = local.target - include_tools = true # make sure opensc is also installed as we need it to create keys -} - -module "initialize" { - source = "../softhsm_init" - depends_on = [module.install] - - hosts = local.target -} - -// Create our keys. Our stdout contains the requried the values for the pksc11 seal stanza -// as JSON. 
https://developer.hashicorp.com/vault/docs/configuration/seal/pkcs11#pkcs11-parameters -resource "enos_remote_exec" "create_keys" { - depends_on = [ - module.install, - module.initialize, - ] - - environment = { - AES_LABEL = local.aes_label - HMAC_LABEL = local.hmac_label - PIN = resource.random_string.pin.result - TOKEN_DIR = module.initialize.token_dir - TOKEN_LABEL = local.token - SO_PIN = resource.random_string.pin.result - } - - scripts = [abspath("${path.module}/scripts/create-keys.sh")] - - transport = { - ssh = { - host = var.hosts[0].public_ip - } - } -} - -// Get our softhsm token. Stdout is a base64 encoded gzipped tarball of the softhsm token dir. This -// allows us to pass around binary data inside of Terraform's type system. -resource "enos_remote_exec" "get_keys" { - depends_on = [enos_remote_exec.create_keys] - - environment = { - TOKEN_DIR = module.initialize.token_dir - } - - scripts = [abspath("${path.module}/scripts/get-keys.sh")] - - transport = { - ssh = { - host = var.hosts[0].public_ip - } - } -} - -output "seal_attributes" { - description = "Seal device specific attributes. 
Contains all required keys for the seal stanza" - value = local.seal_attributes -} - -output "token_base64" { - description = "The softhsm token and keys gzipped tarball in base64" - value = enos_remote_exec.get_keys.stdout -} - -output "token_dir" { - description = "The softhsm directory where tokens and keys are stored" - value = module.initialize.token_dir -} - -output "token_label" { - description = "The HSM slot token label" - value = local.token -} - -output "all_attributes" { - description = "Seal device specific attributes" - value = merge( - local.seal_attributes, - { - token_base64 = enos_remote_exec.get_keys.stdout, - token_dir = module.initialize.token_dir - }, - ) -} diff --git a/enos/modules/softhsm_create_vault_keys/scripts/create-keys.sh b/enos/modules/softhsm_create_vault_keys/scripts/create-keys.sh deleted file mode 100644 index 6518779..0000000 --- a/enos/modules/softhsm_create_vault_keys/scripts/create-keys.sh +++ /dev/null @@ -1,82 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$AES_LABEL" ]] && fail "AES_LABEL env variable has not been set" -[[ -z "$HMAC_LABEL" ]] && fail "HMAC_LABEL env variable has not been set" -[[ -z "$PIN" ]] && fail "PIN env variable has not been set" -[[ -z "$SO_PIN" ]] && fail "SO_PIN env variable has not been set" -[[ -z "$TOKEN_LABEL" ]] && fail "TOKEN_LABEL env variable has not been set" -[[ -z "$TOKEN_DIR" ]] && fail "TOKEN_DIR env variable has not been set" - -if ! type softhsm2-util &> /dev/null; then - fail "unable to locate softhsm2-util in PATH. Have you installed softhsm?" -fi - -if ! type pkcs11-tool &> /dev/null; then - fail "unable to locate pkcs11-tool in PATH. Have you installed opensc?" -fi - -# Create an HSM slot and return the slot number in decimal value. 
-create_slot() { - sudo softhsm2-util --init-token --free --so-pin="$SO_PIN" --pin="$PIN" --label="$TOKEN_LABEL" | grep -oE '[0-9]+$' -} - -# Find the location of our softhsm shared object. -find_softhsm_so() { - sudo find /usr -type f -name libsofthsm2.so -print -quit -} - -# Create key a key in the slot. Args: module, key label, id number, key type -keygen() { - sudo pkcs11-tool --keygen --usage-sign --private --sensitive --usage-wrap \ - --module "$1" \ - -p "$PIN" \ - --token-label "$TOKEN_LABEL" \ - --label "$2" \ - --id "$3" \ - --key-type "$4" -} - -# Create our softhsm slot and keys -main() { - local slot - if ! slot=$(create_slot); then - fail "failed to create softhsm token slot" - fi - - local so - if ! so=$(find_softhsm_so); then - fail "unable to locate libsofthsm2.so shared object" - fi - - if ! keygen "$so" "$AES_LABEL" 1 'AES:32' 1>&2; then - fail "failed to create AES key" - fi - - if ! keygen "$so" "$HMAC_LABEL" 2 'GENERIC:32' 1>&2; then - fail "failed to create HMAC key" - fi - - # Return our seal configuration attributes as JSON - cat << EOF -{ - "lib": "${so}", - "slot": "${slot}", - "pin": "${PIN}", - "key_label": "${AES_LABEL}", - "hmac_key_label": "${HMAC_LABEL}", - "generate_key": "false" -} -EOF - exit 0 -} - -main diff --git a/enos/modules/softhsm_create_vault_keys/scripts/get-keys.sh b/enos/modules/softhsm_create_vault_keys/scripts/get-keys.sh deleted file mode 100644 index 953880f..0000000 --- a/enos/modules/softhsm_create_vault_keys/scripts/get-keys.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$TOKEN_DIR" ]] && fail "TOKEN_DIR env variable has not been set" - -# Tar up our token. We have to do this as a superuser because softhsm is owned by root. -sudo tar -czf token.tgz -C "$TOKEN_DIR" . 
-me="$(whoami)" -sudo chown "$me:$me" token.tgz - -# Write the value STDOUT as base64 so we can handle binary data as a string -base64 -i token.tgz diff --git a/enos/modules/softhsm_distribute_vault_keys/main.tf b/enos/modules/softhsm_distribute_vault_keys/main.tf deleted file mode 100644 index 0ccebe1..0000000 --- a/enos/modules/softhsm_distribute_vault_keys/main.tf +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - version = ">= 0.4.9" - } - } -} - -variable "hosts" { - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) - description = "The hosts for whom we'll distribute the softhsm tokens and keys" -} - -variable "token_base64" { - type = string - description = "The base64 encoded gzipped tarball of the softhsm token" -} - -locals { - // The user/group name for softhsm - softhsm_groups = { - "amzn" = "ods" - "rhel" = "ods" - "ubuntu" = "softhsm" - } - - // Determine if we should skip distribution. If we haven't been passed in a base64 token tarball - // we should short circuit the rest of the module. - skip = var.token_base64 == null || var.token_base64 == "" ? true : false -} - -module "install" { - // TODO: Should packages take a string instead of array so we can plan with unknown values that could change? - source = "../softhsm_install" - - hosts = var.hosts - include_tools = false # we don't need opensc on machines that did not create the HSM. -} - -module "initialize" { - source = "../softhsm_init" - depends_on = [module.install] - - hosts = var.hosts - skip = local.skip -} - -# In order for the vault service to access our keys we need to deal with ownership of files. Make -# sure we have a vault user on the machine if it doesn't already exist. Our distribution script -# below will handle adding vault to the "softhsm" group and setting ownership of the tokens. 
-resource "enos_user" "vault" { - for_each = var.hosts - - name = "vault" - home_dir = "/etc/vault.d" - shell = "/bin/false" - - transport = { - ssh = { - host = each.value.public_ip - } - } -} - -// Get the host information so we can ensure that the correct user/group is used for softhsm. -resource "enos_host_info" "hosts" { - for_each = var.hosts - - transport = { - ssh = { - host = each.value.public_ip - } - } -} - -// Distribute our softhsm token and keys to the given hosts. -resource "enos_remote_exec" "distribute_token" { - for_each = var.hosts - depends_on = [ - module.initialize, - enos_user.vault, - enos_host_info.hosts, - ] - - environment = { - TOKEN_BASE64 = var.token_base64 - TOKEN_DIR = module.initialize.token_dir - SOFTHSM_GROUP = local.softhsm_groups[enos_host_info.hosts[each.key].distro] - } - - scripts = [abspath("${path.module}/scripts/distribute-token.sh")] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} - -output "lib" { - value = module.install.lib -} diff --git a/enos/modules/softhsm_distribute_vault_keys/scripts/distribute-token.sh b/enos/modules/softhsm_distribute_vault_keys/scripts/distribute-token.sh deleted file mode 100644 index 3427991..0000000 --- a/enos/modules/softhsm_distribute_vault_keys/scripts/distribute-token.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -ex - -fail() { - echo "$1" 1>&2 - exit 1 -} - -# If we're not given keys we'll short circuit. This should only happen if we're skipping distribution -# because we haven't created a token or keys. -if [ -z "$TOKEN_BASE64" ]; then - echo "TOKEN_BASE64 environment variable was unset. 
Assuming we don't need to distribute our token" 1>&2 - exit 0 -fi - -[[ -z "$SOFTHSM_GROUP" ]] && fail "SOFTHSM_GROUP env variable has not been set" -[[ -z "$TOKEN_DIR" ]] && fail "TOKEN_DIR env variable has not been set" - -# Convert our base64 encoded gzipped tarball of the softhsm token back into a tarball. -base64 --decode - > token.tgz <<< "$TOKEN_BASE64" - -# Expand it. We assume it was written with the correct directory metadata. Do this as a superuser -# because the token directory should be owned by root. -sudo tar -xvf token.tgz -C "$TOKEN_DIR" - -# Make sure the vault user is in the softhsm group to get access to the tokens. -sudo usermod -aG "$SOFTHSM_GROUP" vault -sudo chown -R "vault:$SOFTHSM_GROUP" "$TOKEN_DIR" diff --git a/enos/modules/softhsm_init/main.tf b/enos/modules/softhsm_init/main.tf deleted file mode 100644 index edadca8..0000000 --- a/enos/modules/softhsm_init/main.tf +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - version = ">= 0.4.9" - } - } -} - -variable "hosts" { - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) - description = "The hosts for whom default softhsm configuration will be applied" -} - -variable "skip" { - type = bool - default = false - description = "Whether or not to skip initializing softhsm" -} - -locals { - // The location on disk to write the softhsm tokens to - token_dir = "/var/lib/softhsm/tokens" - - // Where the default configuration is - config_paths = { - "amzn" = "/etc/softhsm2.conf" - "rhel" = "/etc/softhsm2.conf" - "ubuntu" = "/etc/softhsm/softhsm2.conf" - } - - host_key = element(keys(enos_host_info.hosts), 0) - config_path = local.config_paths[enos_host_info.hosts[local.host_key].distro] -} - -resource "enos_host_info" "hosts" { - for_each = var.hosts - - transport = { - ssh = { - host = each.value.public_ip - 
} - } -} - -resource "enos_remote_exec" "init_softhsm" { - for_each = var.hosts - depends_on = [enos_host_info.hosts] - - environment = { - CONFIG_PATH = local.config_paths[enos_host_info.hosts[each.key].distro] - TOKEN_DIR = local.token_dir - SKIP = var.skip ? "true" : "false" - } - - scripts = [abspath("${path.module}/scripts/init-softhsm.sh")] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} - -output "config_path" { - // Technically this is actually just the first config path of our hosts. - value = local.config_path -} - -output "token_dir" { - value = local.token_dir -} - -output "skipped" { - value = var.skip -} diff --git a/enos/modules/softhsm_init/scripts/init-softhsm.sh b/enos/modules/softhsm_init/scripts/init-softhsm.sh deleted file mode 100644 index 3181d9e..0000000 --- a/enos/modules/softhsm_init/scripts/init-softhsm.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$CONFIG_PATH" ]] && fail "CONFIG_PATH env variable has not been set" -[[ -z "$TOKEN_DIR" ]] && fail "TOKEN_DIR env variable has not been set" -[[ -z "$SKIP" ]] && fail "SKIP env variable has not been set" - -if [ "$SKIP" == "true" ]; then - exit 0 -fi - -cat << EOF | sudo tee "$CONFIG_PATH" -directories.tokendir = $TOKEN_DIR -objectstore.backend = file -log.level = DEBUG -slots.removable = false -slots.mechanisms = ALL -library.reset_on_fork = false -EOF - -sudo mkdir -p "$TOKEN_DIR" -sudo chmod 0770 "$TOKEN_DIR" diff --git a/enos/modules/softhsm_install/main.tf b/enos/modules/softhsm_install/main.tf deleted file mode 100644 index ff0f497..0000000 --- a/enos/modules/softhsm_install/main.tf +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "hosts" { - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) - description = "The hosts that will have access to the softhsm. We assume they're all the same platform and architecture" -} - -variable "include_tools" { - type = bool - default = false - description = "Install opensc pkcs11-tools along with softhsm" -} - -variable "retry_interval" { - type = string - default = "2" - description = "How long to wait between retries" -} - -variable "timeout" { - type = string - default = "15" - description = "How many seconds to wait before timing out" -} - -locals { - packages = var.include_tools ? { - // These packages match the distros that are currently defined in the `ec2_info` module. - amzn = { - "2023" = ["softhsm", "opensc"] - } - rhel = { - "8.10" = ["softhsm", "opensc"] - "9.5" = ["softhsm", "opensc"] - } - ubuntu = { - "20.04" = ["softhsm", "opensc"] - "22.04" = ["softhsm", "opensc"] - "24.04" = ["softhsm2", "opensc"] - } - } : { - amzn = { - "2023" = ["softhsm"] - } - rhel = { - "8.10" = ["softhsm"] - "9.5" = ["softhsm"] - } - ubuntu = { - "20.04" = ["softhsm"] - "22.04" = ["softhsm"] - "24.04" = ["softhsm2"] - } - } -} - -// Get the host information so we can ensure that we install the correct packages depending on the -// distro and distro version -resource "enos_host_info" "target" { - transport = { - ssh = { - host = var.hosts["0"].public_ip - } - } -} - -module "install_softhsm" { - source = "../install_packages" - - hosts = var.hosts - packages = local.packages[enos_host_info.target.distro][enos_host_info.target.distro_version] -} - -resource "enos_remote_exec" "find_shared_object" { - for_each = var.hosts - depends_on = [module.install_softhsm] - - environment = { - RETRY_INTERVAL = var.retry_interval - TIMEOUT_SECONDS = var.timeout - } - - scripts = 
[abspath("${path.module}/scripts/find-shared-object.sh")] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} - -locals { - object_paths = compact(distinct(values(enos_remote_exec.find_shared_object)[*].stdout)) -} - -output "lib" { - value = local.object_paths[0] - - precondition { - condition = length(local.object_paths) == 1 - error_message = "SoftHSM targets cannot have different libsofthsm2.so shared object paths. Are they all the same Linux distro?" - } -} diff --git a/enos/modules/softhsm_install/scripts/find-shared-object.sh b/enos/modules/softhsm_install/scripts/find-shared-object.sh deleted file mode 100644 index 52b720d..0000000 --- a/enos/modules/softhsm_install/scripts/find-shared-object.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/env bash -## Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set" -[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set" - -begin_time=$(date +%s) -end_time=$((begin_time + TIMEOUT_SECONDS)) -while [ "$(date +%s)" -lt "$end_time" ]; do - if so=$(sudo find /usr -type f -name libsofthsm2.so -print -quit); then - echo "$so" - exit 0 - fi - - sleep "$RETRY_INTERVAL" -done - -fail "Timed out trying to locate libsofthsm2.so shared object" diff --git a/enos/modules/start_vault/main.tf b/enos/modules/start_vault/main.tf deleted file mode 100644 index e2eec5f..0000000 --- a/enos/modules/start_vault/main.tf +++ /dev/null @@ -1,276 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - # We need to specify the provider source in each module until we publish it - # to the public registry - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - version = ">= 0.5.3" - } - } -} - -locals { - api_addr_localhost = var.ip_version == 4 ? 
"http://127.0.0.1:${var.listener_port}" : "http://[::1]:${var.listener_port}" - api_addrs = tolist([for h in var.hosts : { - 4 : "http://${h.public_ip}:${var.listener_port}", - 6 : "http://[${h.ipv6}]:${var.listener_port}", - }]) - api_addrs_internal = tolist([for h in var.hosts : { - 4 : "http://${h.private_ip}:${var.listener_port}", - 6 : "http://[${h.ipv6}]:${var.listener_port}", - }]) - bin_path = "${var.install_dir}/vault" - cluster_addrs = tolist([for h in var.hosts : { - 4 : "http://${h.public_ip}:${var.cluster_port}", - 6 : "http://[${h.ipv6}]:${var.cluster_port}", - }]) - cluster_addrs_internal = tolist([for h in var.hosts : { - 4 : "http://${h.private_ip}:${var.cluster_port}", - 6 : "http://[${h.ipv6}]:${var.cluster_port}", - }]) - // In order to get Terraform to plan we have to use collections with keys that are known at plan - // time. Here we're creating locals that keep track of index values that point to our target hosts. - followers = toset(slice(local.instances, 1, length(local.instances))) - instances = [for idx in range(length(var.hosts)) : tostring(idx)] - leader = toset(slice(local.instances, 0, 1)) - listener_address = var.ip_version == 4 ? "0.0.0.0:${var.listener_port}" : "[::]:${var.listener_port}" - // Handle cases where we might have to distribute HSM tokens for the pkcs11 seal before starting - // vault. - token_base64 = try(lookup(var.seal_attributes, "token_base64", ""), "") - token_base64_secondary = try(lookup(var.seal_attributes_secondary, "token_base64", ""), "") - // This module currently supports up to two defined seals. Most of our locals logic here is for - // creating the correct seal configuration. 
- seals = { - primary = local.seal_primary - secondary = local.seal_secondary - } - seals_primary = { - awskms = { - type = "awskms" - attributes = merge( - { - name = var.seal_alias - priority = var.seal_priority - }, var.seal_attributes - ) - } - pkcs11 = { - type = "pkcs11" - attributes = merge( - { - name = var.seal_alias - priority = var.seal_priority - }, - // Strip out attributes that aren't supposed to be in seal stanza like our base64 encoded - // softhsm blob and the token directory. We'll also inject the shared object library - // location that we detect on the target machines. This allows use to create the token and - // keys on a machines that have different shared object locations. - merge( - try({ for key, val in var.seal_attributes : key => val if key != "token_base64" && key != "token_dir" }, {}), - # Note: the below reference has to point to a specific instance of the maybe_configure_hsm - # module (in this case [0]) due to the maybe_configure_hsm module call using `count` to control whether it runs or not. - try({ lib = module.maybe_configure_hsm[0].lib }, {}) - ), - ) - } - shamir = { - type = "shamir" - attributes = null - } - } - seal_primary = local.seals_primary[var.seal_type] - seals_secondary = { - awskms = { - type = "awskms" - attributes = merge( - { - name = var.seal_alias_secondary - priority = var.seal_priority_secondary - }, var.seal_attributes_secondary - ) - } - pkcs11 = { - type = "pkcs11" - attributes = merge( - { - name = var.seal_alias_secondary - priority = var.seal_priority_secondary - }, - merge( - try({ for key, val in var.seal_attributes_secondary : key => val if key != "token_base64" && key != "token_dir" }, {}), - # Note: the below reference has to point to a specific instance of the maybe_configure_hsm_secondary - # module (in this case [0]) due to the maybe_configure_hsm_secondary module call using `count` to control whether it runs or not. 
- try({ lib = module.maybe_configure_hsm_secondary[0].lib }, {}) - ), - ) - } - none = { - type = "none" - attributes = null - } - } - seal_secondary = local.seals_secondary[var.seal_type_secondary] - storage_address = var.ip_version == 4 ? "0.0.0.0:${var.external_storage_port}" : "[::]:${var.external_storage_port}" - storage_attributes = [for idx, host in var.hosts : (var.storage_backend == "raft" ? - merge( - { - node_id = "${var.storage_node_prefix}_${idx}" - }, - var.storage_backend_attrs - ) : - { - address = local.storage_address - path = "vault" - }) - ] - storage_retry_join = { - "raft" : { - auto_join : "provider=aws addr_type=${var.ip_version == 4 ? "private_v4" : "public_v6"} tag_key=${var.cluster_tag_key} tag_value=${var.cluster_name}", - auto_join_scheme : "http", - }, - } -} - -# You might be wondering why our start_vault module, which supports shamir, awskms, and pkcs11 seal -# types, contains sub-modules that are only used for HSM. Well, each of those seal devices has -# different requirements and as such we have some seal specific requirements before starting Vault. -# -# A Shamir seal key cannot exist until Vault has already started, so this modules responsibility for -# shamir seals is ensuring that the seal type is passed to the enos_vault_start resource. That's it. -# -# Auto-unseal with a KMS requires that we configure the enos_vault_start resource with the correct -# seal type and the attributes necessary to know which KMS key to use. Vault should automatically -# unseal if we've given it the correct configuration. As long as Vault is able to access the key -# in the KMS it should be able to start. That's normally done via roles associated to the target -# machines, which is outside the scope of this module. -# -# Auto-unseal with an HSM and PKCS#11 is more complicated because a shared object library, which is -# how we interface with the HSM, must be present on each node in order to start Vault. 
In the real -# world this means an actual HSM in the same rack or data center as every node in the Vault cluster, -# but in our case we're creating ephemeral infrastructure for these test scenarios and don't have a -# real HSM available. We could use CloudHSM or the like, but at the time of writing CloudHSM -# provisioning takes anywhere from 30 to 60 minutes and costs upwards of $2 dollars an hour. That's -# far too long and expensive for scenarios we'll run fairly frequently. Instead, we test using a -# software HSM. Using a software HSM solves the cost and speed problems but creates new set of -# problems. We need to ensure every node in the cluster has access to the same "HSM" and with -# softhsm that means the same software, configuration, tokens and keys. Our `seal_pkcs11` module -# takes care of creating the token and keys, but that's the end of the road for that module. It's -# our job to ensure that when we're starting Vault with a software HSM that we'll ensure the correct -# software, configuration and data are available on the nodes. That's where the following two -# modules come in. They handle installing the required software, configuring it, and distributing -# the key data that was passed in via seal attributes. -module "maybe_configure_hsm" { - source = "../softhsm_distribute_vault_keys" - count = (var.seal_type == "pkcs11" || var.seal_type_secondary == "pkcs11") ? 1 : 0 - - hosts = var.hosts - token_base64 = local.token_base64 -} - -module "maybe_configure_hsm_secondary" { - source = "../softhsm_distribute_vault_keys" - depends_on = [module.maybe_configure_hsm] - count = (var.seal_type == "pkcs11" || var.seal_type_secondary == "pkcs11") ? 
1 : 0 - - hosts = var.hosts - token_base64 = local.token_base64_secondary -} - -resource "enos_vault_start" "leader" { - for_each = local.leader - depends_on = [ - module.maybe_configure_hsm_secondary, - ] - - bin_path = local.bin_path - config_dir = var.config_dir - config_mode = var.config_mode - environment = merge(var.environment, { - VAULT_DISABLE_MLOCK = var.disable_mlock - }) - config = { - api_addr = local.api_addrs_internal[tonumber(each.value)][var.ip_version] - cluster_addr = local.cluster_addrs_internal[tonumber(each.value)][var.ip_version] - cluster_name = var.cluster_name - listener = { - type = "tcp" - attributes = { - address = local.listener_address - tls_disable = "true" - } - } - log_level = var.log_level - storage = { - type = var.storage_backend - attributes = local.storage_attributes[each.key] - retry_join = try(local.storage_retry_join[var.storage_backend], null) - } - seals = local.seals - ui = true - } - license = var.license - manage_service = var.manage_service - username = var.service_username - unit_name = "vault" - - transport = { - ssh = { - host = var.hosts[each.value].public_ip - } - } -} - -resource "enos_vault_start" "followers" { - depends_on = [ - enos_vault_start.leader, - ] - for_each = local.followers - - bin_path = local.bin_path - config_dir = var.config_dir - config_mode = var.config_mode - environment = merge(var.environment, { - VAULT_DISABLE_MLOCK = var.disable_mlock - }) - config = { - api_addr = local.api_addrs_internal[tonumber(each.value)][var.ip_version] - cluster_addr = local.cluster_addrs_internal[tonumber(each.value)][var.ip_version] - cluster_name = var.cluster_name - listener = { - type = "tcp" - attributes = { - address = local.listener_address - tls_disable = "true" - } - } - log_level = var.log_level - storage = { - type = var.storage_backend - attributes = { for key, value in local.storage_attributes[each.key] : key => value } - retry_join = try(local.storage_retry_join[var.storage_backend], null) - } - 
seals = local.seals - ui = true - } - license = var.license - manage_service = var.manage_service - username = var.service_username - unit_name = "vault" - - transport = { - ssh = { - host = var.hosts[each.value].public_ip - } - } -} - -output "token_base64" { - value = local.token_base64 -} - -output "token_base64_secondary" { - value = local.token_base64_secondary -} diff --git a/enos/modules/start_vault/outputs.tf b/enos/modules/start_vault/outputs.tf deleted file mode 100644 index c20e7b8..0000000 --- a/enos/modules/start_vault/outputs.tf +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -output "api_addr_localhost" { - description = "The localhost API address" - value = local.api_addr_localhost -} - -output "api_addrs" { - description = "The external API addresses of all nodes the cluster" - value = local.api_addrs -} - -output "cluster_name" { - description = "The Vault cluster name" - value = var.cluster_name -} - -output "cluster_port" { - description = "The Vault cluster request forwarding listener port" - value = var.cluster_port -} - -output "external_storage_port" { - description = "The Vault cluster non-raft external storage port" - value = var.external_storage_port -} - -output "followers" { - description = "The follower enos_vault_start resources" - value = enos_vault_start.followers -} - -output "leader" { - description = "The leader enos_vault_start resource" - value = enos_vault_start.leader -} - -output "ipv6s" { - description = "Vault cluster target host ipv6s" - value = [for host in var.hosts : host.ipv6] -} - -output "listener_port" { - description = "The Vault cluster TCP listener port" - value = var.listener_port -} - -output "private_ips" { - description = "Vault cluster target host private_ips" - value = [for host in var.hosts : host.private_ip] -} - -output "public_ips" { - description = "Vault cluster target host public_ips" - value = [for host in var.hosts : host.public_ip] -} - 
-output "hosts" { - description = "The vault cluster instances that were created" - - value = var.hosts -} diff --git a/enos/modules/start_vault/variables.tf b/enos/modules/start_vault/variables.tf deleted file mode 100644 index 21d4a4e..0000000 --- a/enos/modules/start_vault/variables.tf +++ /dev/null @@ -1,193 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -variable "cluster_name" { - type = string - description = "The Vault cluster name" -} - -variable "cluster_port" { - type = number - description = "The cluster port for Vault to listen on" - default = 8201 -} - -variable "cluster_tag_key" { - type = string - description = "The Vault cluster tag key" - default = "retry_join" -} - -variable "config_dir" { - type = string - description = "The directory to use for Vault configuration" - default = "/etc/vault.d" -} - -variable "config_mode" { - description = "The method to use when configuring Vault. When set to 'env' we will configure Vault using VAULT_ style environment variables if possible. When 'file' we'll use the HCL configuration file for all configuration options." - default = "file" - - validation { - condition = contains(["env", "file"], var.config_mode) - error_message = "The config_mode must be either 'env' or 'file'. No other configuration modes are supported." - } -} - -variable "disable_mlock" { - type = bool - description = "Disable mlock for Vault process." 
- default = false -} - -variable "environment" { - description = "Optional Vault configuration environment variables to set starting Vault" - type = map(string) - default = null -} - -variable "external_storage_port" { - type = number - description = "The port to connect to when using external storage" - default = 8500 -} - -variable "hosts" { - description = "The target machines host addresses to use for the Vault cluster" - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) -} - -variable "install_dir" { - type = string - description = "The directory where the vault binary will be installed" - default = "/opt/vault/bin" -} - -variable "ip_version" { - type = number - description = "The IP version to use for the Vault TCP listeners" - - validation { - condition = contains([4, 6], var.ip_version) - error_message = "The ip_version must be either 4 or 6" - } -} - -variable "license" { - type = string - sensitive = true - description = "The value of the Vault license" - default = null -} - -variable "log_level" { - type = string - description = "The vault service log level" - default = "info" - - validation { - condition = contains(["trace", "debug", "info", "warn", "error"], var.log_level) - error_message = "The log_level must be one of 'trace', 'debug', 'info', 'warn', or 'error'." - } -} - -variable "manage_service" { - type = bool - description = "Manage the Vault service users and systemd unit. 
Disable this to use configuration in RPM and Debian packages" - default = true -} - -variable "listener_port" { - type = number - description = "The port for Vault to listen on" - default = 8200 -} - -variable "seal_alias" { - type = string - description = "The primary seal alias name" - default = "primary" -} - -variable "seal_alias_secondary" { - type = string - description = "The secondary seal alias name" - default = "secondary" -} - -variable "seal_attributes" { - description = "The primary auto-unseal attributes" - default = null -} - -variable "seal_attributes_secondary" { - description = "The secondary auto-unseal attributes" - default = null -} - -variable "seal_priority" { - type = string - description = "The primary seal priority" - default = "1" -} - -variable "seal_priority_secondary" { - type = string - description = "The secondary seal priority" - default = "2" -} - -variable "seal_type" { - type = string - description = "The method by which to unseal the Vault cluster" - default = "awskms" - - validation { - condition = contains(["awskms", "pkcs11", "shamir"], var.seal_type) - error_message = "The seal_type must be either 'awskms', 'pkcs11', or 'shamir'. No other seal types are supported." - } -} - -variable "seal_type_secondary" { - type = string - description = "A secondary HA seal method. Only supported in Vault Enterprise >= 1.15" - default = "none" - - validation { - condition = contains(["awskms", "pkcs11", "none"], var.seal_type_secondary) - error_message = "The secondary_seal_type must be 'awskms', 'pkcs11' or 'none'. No other secondary seal types are supported." 
- } -} - -variable "service_username" { - type = string - description = "The host username to own the vault service" - default = "vault" -} - -variable "storage_backend" { - type = string - description = "The storage backend to use" - default = "raft" - - validation { - condition = contains(["raft", "consul"], var.storage_backend) - error_message = "The storage_backend must be either raft or consul. No other storage backends are supported." - } -} - -variable "storage_backend_attrs" { - type = map(any) - description = "An optional set of key value pairs to inject into the storage block" - default = {} -} - -variable "storage_node_prefix" { - type = string - description = "A prefix to use for each node in the Vault storage configuration" - default = "node" -} diff --git a/enos/modules/stop_vault/main.tf b/enos/modules/stop_vault/main.tf deleted file mode 100644 index 6dd477d..0000000 --- a/enos/modules/stop_vault/main.tf +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - # We need to specify the provider source in each module until we publish it - # to the public registry - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - version = ">= 0.4.0" - } - } -} - -variable "service_name" { - type = string - description = "The Vault systemd service name" - default = "vault" -} - -variable "hosts" { - description = "The target machines host addresses to use for the Vault cluster" - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) -} - -resource "enos_remote_exec" "shutdown_multiple_nodes" { - for_each = var.hosts - inline = ["sudo systemctl stop ${var.service_name}.service; sleep 5"] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} diff --git a/enos/modules/target_ec2_fleet/main.tf b/enos/modules/target_ec2_fleet/main.tf deleted file mode 100644 index 411d174..0000000 --- a/enos/modules/target_ec2_fleet/main.tf +++ 
/dev/null @@ -1,339 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - # We need to specify the provider source in each module until we publish it - # to the public registry - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - version = ">= 0.3.24" - } - } -} - -data "aws_vpc" "vpc" { - id = var.vpc_id -} - -data "aws_subnets" "vpc" { - filter { - name = "vpc-id" - values = [var.vpc_id] - } -} - -data "aws_iam_policy_document" "target" { - statement { - resources = ["*"] - - actions = [ - "ec2:DescribeInstances", - "secretsmanager:*" - ] - } - - dynamic "statement" { - for_each = var.seal_key_names - - content { - resources = [statement.value] - - actions = [ - "kms:DescribeKey", - "kms:ListKeys", - "kms:Encrypt", - "kms:Decrypt", - "kms:GenerateDataKey" - ] - } - } -} - -data "aws_iam_policy_document" "target_role" { - statement { - actions = ["sts:AssumeRole"] - - principals { - type = "Service" - identifiers = ["ec2.amazonaws.com"] - } - } -} - -data "enos_environment" "localhost" {} - -resource "random_string" "random_cluster_name" { - length = 8 - lower = true - upper = false - numeric = false - special = false -} - -resource "random_string" "unique_id" { - length = 4 - lower = true - upper = false - numeric = false - special = false -} - -// ec2:CreateFleet only allows up to 4 InstanceRequirements overrides so we can only ever request -// a fleet across 4 or fewer subnets if we want to bid with InstanceRequirements instead of -// weighted instance types. 
-resource "random_shuffle" "subnets" { - input = data.aws_subnets.vpc.ids - result_count = 4 -} - -locals { - spot_allocation_strategy = "lowestPrice" - on_demand_allocation_strategy = "lowestPrice" - instances = toset([for idx in range(var.instance_count) : tostring(idx)]) - cluster_name = coalesce(var.cluster_name, random_string.random_cluster_name.result) - name_prefix = "${var.project_name}-${local.cluster_name}-${random_string.unique_id.result}" - fleet_tag = "${local.name_prefix}-spot-fleet-target" - fleet_tags = { - Name = "${local.name_prefix}-${var.cluster_tag_key}-target" - "${var.cluster_tag_key}" = local.cluster_name - Fleet = local.fleet_tag - } -} - -resource "aws_iam_role" "target" { - name = "${local.name_prefix}-target-role" - assume_role_policy = data.aws_iam_policy_document.target_role.json -} - -resource "aws_iam_instance_profile" "target" { - name = "${local.name_prefix}-target-profile" - role = aws_iam_role.target.name -} - -resource "aws_iam_role_policy" "target" { - name = "${local.name_prefix}-target-policy" - role = aws_iam_role.target.id - policy = data.aws_iam_policy_document.target.json -} - -resource "aws_security_group" "target" { - name = "${local.name_prefix}-target" - description = "Target instance security group" - vpc_id = var.vpc_id - - # SSH traffic - ingress { - from_port = 22 - to_port = 22 - protocol = "tcp" - cidr_blocks = flatten([ - formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), - join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), - ]) - } - - # Vault traffic - ingress { - from_port = 8200 - to_port = 8201 - protocol = "tcp" - cidr_blocks = flatten([ - formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), - join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), - formatlist("%s/32", var.ssh_allow_ips) - ]) - } - - # Consul traffic - ingress { - from_port = 8300 - to_port = 8302 - protocol = "tcp" - cidr_blocks = flatten([ - formatlist("%s/32", 
data.enos_environment.localhost.public_ipv4_addresses), - join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), - ]) - } - - ingress { - from_port = 8301 - to_port = 8302 - protocol = "udp" - cidr_blocks = flatten([ - formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), - join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), - ]) - } - - ingress { - from_port = 8500 - to_port = 8503 - protocol = "tcp" - cidr_blocks = flatten([ - formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), - join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), - ]) - } - - ingress { - from_port = 8600 - to_port = 8600 - protocol = "tcp" - cidr_blocks = flatten([ - formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), - join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), - ]) - } - - ingress { - from_port = 8600 - to_port = 8600 - protocol = "udp" - cidr_blocks = flatten([ - formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), - join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), - ]) - } - - # Internal traffic - ingress { - from_port = 0 - to_port = 0 - protocol = "-1" - self = true - } - - # External traffic - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - tags = merge( - var.common_tags, - { - Name = "${local.name_prefix}-sg" - }, - ) -} - -resource "aws_launch_template" "target" { - name = "${local.name_prefix}-target" - image_id = var.ami_id - key_name = var.ssh_keypair - - iam_instance_profile { - name = aws_iam_instance_profile.target.name - } - - instance_requirements { - burstable_performance = "included" - - memory_mib { - min = var.instance_mem_min - max = var.instance_mem_max - } - - vcpu_count { - min = var.instance_cpu_min - max = var.instance_cpu_max - } - } - - network_interfaces { - associate_public_ip_address = true - delete_on_termination = true - security_groups = 
[aws_security_group.target.id] - } - - tag_specifications { - resource_type = "instance" - - tags = merge( - var.common_tags, - local.fleet_tags, - ) - } -} - -# There are three primary knobs we can turn to try and optimize our costs by -# using a spot fleet: our min and max instance requirements, our max bid -# price, and the allocation strategy to use when fulfilling the spot request. -# We've currently configured our instance requirements to allow for anywhere -# from 2-4 vCPUs and 4-16GB of RAM. We intentionally have a wide range -# to allow for a large instance size pool to be considered. Our next knob is our -# max bid price. As we're using spot fleets to save on instance cost, we never -# want to pay more for an instance than we were on-demand. We've set the max price -# to equal what we pay for t3.medium instances on-demand, which are the smallest -# reliable size for Vault scenarios. The final knob is the allocation strategy -# that AWS will use when looking for instances that meet our resource and cost -# requirements. We're using the "lowestPrice" strategy to get the absolute -# cheapest machines that will fit the requirements, but it comes with a slightly -# higher capacity risk than say, "capacityOptimized" or "priceCapacityOptimized". -# Unless we see capacity issues or instances being shut down then we ought to -# stick with that strategy. 
-resource "aws_ec2_fleet" "targets" { - replace_unhealthy_instances = false - terminate_instances = true // terminate instances when we "delete" the fleet - terminate_instances_with_expiration = false - tags = merge( - var.common_tags, - local.fleet_tags, - ) - type = "instant" // make a synchronous request for the entire fleet - - launch_template_config { - launch_template_specification { - launch_template_id = aws_launch_template.target.id - version = aws_launch_template.target.latest_version - } - - dynamic "override" { - for_each = random_shuffle.subnets.result - - content { - subnet_id = override.value - } - } - } - - on_demand_options { - allocation_strategy = local.on_demand_allocation_strategy - max_total_price = (var.max_price * var.instance_count) - min_target_capacity = var.capacity_type == "on-demand" ? var.instance_count : null - // One of these has to be set to enforce our on-demand target capacity minimum - single_availability_zone = false - single_instance_type = true - } - - spot_options { - allocation_strategy = local.spot_allocation_strategy - // The instance_pools_to_use_count is only valid for the allocation_strategy - // lowestPrice. When we are using that strategy we'll want to always set it - // to non-zero to avoid rebuilding the fleet on a re-run. For any other strategy - // set it to zero to avoid rebuilding the fleet on a re-run. - instance_pools_to_use_count = local.spot_allocation_strategy == "lowestPrice" ? 1 : null - } - - // Try and provision only spot instances and fall back to on-demand. - target_capacity_specification { - default_target_capacity_type = var.capacity_type - spot_target_capacity = var.capacity_type == "spot" ? var.instance_count : 0 - on_demand_target_capacity = var.capacity_type == "on-demand" ? 
var.instance_count : 0 - target_capacity_unit_type = "units" // units == instance count - total_target_capacity = var.instance_count - } -} - -data "aws_instance" "targets" { - depends_on = [ - aws_ec2_fleet.targets, - ] - for_each = local.instances - - instance_id = aws_ec2_fleet.targets.fleet_instance_set[0].instance_ids[each.key] - -} diff --git a/enos/modules/target_ec2_fleet/outputs.tf b/enos/modules/target_ec2_fleet/outputs.tf deleted file mode 100644 index 505db0e..0000000 --- a/enos/modules/target_ec2_fleet/outputs.tf +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -output "cluster_name" { - value = local.cluster_name -} - -output "hosts" { - description = "The ec2 fleet target hosts" - value = { for idx in range(var.instance_count) : idx => { - public_ip = data.aws_instance.targets[idx].public_ip - private_ip = data.aws_instance.targets[idx].private_ip - ipv6 = try(data.aws_instance.targets[idx].ipv6_addresses[0], null) - } } -} diff --git a/enos/modules/target_ec2_fleet/variables.tf b/enos/modules/target_ec2_fleet/variables.tf deleted file mode 100644 index f0eb87b..0000000 --- a/enos/modules/target_ec2_fleet/variables.tf +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -variable "ami_id" { - description = "The machine image identifier" - type = string -} - -variable "cluster_name" { - type = string - description = "A unique cluster identifier" - default = null -} - -variable "cluster_tag_key" { - type = string - description = "The key name for the cluster tag" - default = "TargetCluster" -} - -variable "common_tags" { - description = "Common tags for cloud resources" - type = map(string) - default = { - Project = "vault-ci" - } -} - -variable "disable_selinux" { - description = "Optionally disable SELinux for certain distros/versions" - type = bool - default = true -} - -variable "instance_mem_min" { - description = "The minimum amount of memory in mebibytes for each instance in the fleet. (1 MiB = 1024 bytes)" - type = number - default = 4096 // ~4 GB -} - -variable "instance_mem_max" { - description = "The maximum amount of memory in mebibytes for each instance in the fleet. (1 MiB = 1024 bytes)" - type = number - default = 16385 // ~16 GB -} - -variable "instance_cpu_min" { - description = "The minimum number of vCPU's for each instance in the fleet" - type = number - default = 2 -} - -variable "instance_cpu_max" { - description = "The maximum number of vCPU's for each instance in the fleet" - type = number - default = 8 // Unlikely we'll ever get that high due to spot price bid protection -} - -variable "instance_count" { - description = "The number of target instances to create" - type = number - default = 3 -} - -variable "max_price" { - description = "The maximum hourly price to pay for each target instance" - type = string - default = "0.0416" -} - -variable "project_name" { - description = "A unique project name" - type = string -} - -variable "seal_key_names" { - type = list(string) - description = "The key management seal key names" - default = null -} - -variable "ssh_allow_ips" { - description = "Allowlisted IP addresses for SSH access to target nodes. 
The IP address of the machine running Enos will automatically allowlisted" - type = list(string) - default = [] -} - -variable "ssh_keypair" { - description = "SSH keypair used to connect to EC2 instances" - type = string -} - -variable "capacity_type" { - description = "What capacity type to use for EC2 instances" - type = string - default = "on-demand" - - validation { - condition = contains(["on-demand", "spot"], var.capacity_type) - error_message = "The capacity_type must be either 'on-demand' or 'spot'." - } -} - -variable "vpc_id" { - description = "The identifier of the VPC where the target instances will be created" - type = string -} diff --git a/enos/modules/target_ec2_shim/main.tf b/enos/modules/target_ec2_shim/main.tf deleted file mode 100644 index c755668..0000000 --- a/enos/modules/target_ec2_shim/main.tf +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - # We need to specify the provider source in each module until we publish it - # to the public registry - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - version = ">= 0.3.24" - } - } -} - -variable "ami_id" { default = null } -variable "cluster_name" { default = null } -variable "cluster_tag_key" { default = null } -variable "common_tags" { default = null } -variable "disable_selinux" { default = true } -variable "instance_count" { default = 3 } -variable "instance_cpu_max" { default = null } -variable "instance_cpu_min" { default = null } -variable "instance_mem_max" { default = null } -variable "instance_mem_min" { default = null } -variable "instance_types" { default = null } -variable "max_price" { default = null } -variable "ports_ingress" { default = null } -variable "project_name" { default = null } -variable "seal_key_names" { default = null } -variable "ssh_allow_ips" { default = null } -variable "ssh_keypair" { default = null } -variable "vpc_id" { default = null } - -resource 
"random_string" "cluster_name" { - length = 8 - lower = true - upper = false - numeric = false - special = false -} - -output "cluster_name" { - value = coalesce(var.cluster_name, random_string.cluster_name.result) -} - -output "hosts" { - value = { for idx in range(var.instance_count) : idx => { - public_ip = "null-public-${idx}" - private_ip = "null-private-${idx}" - ipv6 = "null-ipv6-${idx}" - } } -} diff --git a/enos/modules/target_ec2_spot_fleet/main.tf b/enos/modules/target_ec2_spot_fleet/main.tf deleted file mode 100644 index 4a76274..0000000 --- a/enos/modules/target_ec2_spot_fleet/main.tf +++ /dev/null @@ -1,466 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - # We need to specify the provider source in each module until we publish it - # to the public registry - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - version = ">= 0.3.24" - } - } -} - -data "aws_vpc" "vpc" { - id = var.vpc_id -} - -data "aws_subnets" "vpc" { - filter { - name = "vpc-id" - values = [var.vpc_id] - } -} - -data "aws_iam_policy_document" "target" { - statement { - resources = ["*"] - - actions = [ - "ec2:DescribeInstances", - "secretsmanager:*" - ] - } - - dynamic "statement" { - for_each = var.seal_key_names - - content { - resources = [statement.value] - - actions = [ - "kms:DescribeKey", - "kms:ListKeys", - "kms:Encrypt", - "kms:Decrypt", - "kms:GenerateDataKey" - ] - } - } -} - -data "aws_iam_policy_document" "target_role" { - statement { - actions = ["sts:AssumeRole"] - - principals { - type = "Service" - identifiers = ["ec2.amazonaws.com"] - } - } -} - -data "aws_iam_policy_document" "fleet" { - statement { - resources = ["*"] - - actions = [ - "ec2:DescribeImages", - "ec2:DescribeSubnets", - "ec2:RequestSpotInstances", - "ec2:TerminateInstances", - "ec2:DescribeInstanceStatus", - "ec2:CancelSpotFleetRequests", - "ec2:CreateTags", - "ec2:RunInstances", - "ec2:StartInstances", - 
"ec2:StopInstances", - ] - } - - statement { - effect = "Deny" - - resources = [ - "arn:aws:ec2:*:*:instance/*", - ] - - actions = [ - "ec2:RunInstances", - ] - - condition { - test = "StringNotEquals" - variable = "ec2:InstanceMarketType" - values = ["spot"] - } - } - - statement { - resources = ["*"] - - actions = [ - "iam:PassRole", - ] - - condition { - test = "StringEquals" - variable = "iam:PassedToService" - values = [ - "ec2.amazonaws.com", - ] - } - } - - statement { - resources = [ - "arn:aws:elasticloadbalancing:*:*:loadbalancer/*", - ] - - actions = [ - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - ] - } - - statement { - resources = [ - "arn:aws:elasticloadbalancing:*:*:*/*" - ] - - actions = [ - "elasticloadbalancing:RegisterTargets" - ] - } -} - -data "aws_iam_policy_document" "fleet_role" { - statement { - actions = ["sts:AssumeRole"] - - principals { - type = "Service" - identifiers = ["spotfleet.amazonaws.com"] - } - } -} - -data "enos_environment" "localhost" {} - -resource "random_string" "random_cluster_name" { - length = 8 - lower = true - upper = false - numeric = false - special = false -} - -resource "random_string" "unique_id" { - length = 4 - lower = true - upper = false - numeric = false - special = false -} - -// ec2:RequestSpotFleet only allows up to 4 InstanceRequirements overrides so we can only ever -// request a fleet across 4 or fewer subnets if we want to bid with InstanceRequirements instead of -// weighted instance types. 
-resource "random_shuffle" "subnets" { - input = data.aws_subnets.vpc.ids - result_count = 4 -} - -locals { - allocation_strategy = "lowestPrice" - instances = toset([for idx in range(var.instance_count) : tostring(idx)]) - cluster_name = coalesce(var.cluster_name, random_string.random_cluster_name.result) - name_prefix = "${var.project_name}-${local.cluster_name}-${random_string.unique_id.result}" - fleet_tag = "${local.name_prefix}-spot-fleet-target" - fleet_tags = { - Name = "${local.name_prefix}-${var.cluster_tag_key}-target" - "${var.cluster_tag_key}" = local.cluster_name - Fleet = local.fleet_tag - } -} - -resource "aws_iam_role" "target" { - name = "${local.name_prefix}-target-role" - assume_role_policy = data.aws_iam_policy_document.target_role.json -} - -resource "aws_iam_instance_profile" "target" { - name = "${local.name_prefix}-target-profile" - role = aws_iam_role.target.name -} - -resource "aws_iam_role_policy" "target" { - name = "${local.name_prefix}-target-policy" - role = aws_iam_role.target.id - policy = data.aws_iam_policy_document.target.json -} - -resource "aws_iam_role" "fleet" { - name = "${local.name_prefix}-fleet-role" - assume_role_policy = data.aws_iam_policy_document.fleet_role.json -} - -resource "aws_iam_role_policy" "fleet" { - name = "${local.name_prefix}-fleet-policy" - role = aws_iam_role.fleet.id - policy = data.aws_iam_policy_document.fleet.json -} - -resource "aws_security_group" "target" { - name = "${local.name_prefix}-target" - description = "Target instance security group" - vpc_id = var.vpc_id - - # SSH traffic - ingress { - from_port = 22 - to_port = 22 - protocol = "tcp" - cidr_blocks = flatten([ - formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), - join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), - ]) - } - - # Vault traffic - ingress { - from_port = 8200 - to_port = 8201 - protocol = "tcp" - cidr_blocks = flatten([ - formatlist("%s/32", 
data.enos_environment.localhost.public_ipv4_addresses), - join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), - formatlist("%s/32", var.ssh_allow_ips) - ]) - } - - # Consul traffic - ingress { - from_port = 8300 - to_port = 8302 - protocol = "tcp" - cidr_blocks = flatten([ - formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), - join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), - ]) - } - - ingress { - from_port = 8301 - to_port = 8302 - protocol = "udp" - cidr_blocks = flatten([ - formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), - join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), - ]) - } - - ingress { - from_port = 8500 - to_port = 8503 - protocol = "tcp" - cidr_blocks = flatten([ - formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), - join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), - ]) - } - - ingress { - from_port = 8600 - to_port = 8600 - protocol = "tcp" - cidr_blocks = flatten([ - formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), - join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), - ]) - } - - ingress { - from_port = 8600 - to_port = 8600 - protocol = "udp" - cidr_blocks = flatten([ - formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), - join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), - ]) - } - - # Internal traffic - ingress { - from_port = 0 - to_port = 0 - protocol = "-1" - self = true - } - - # External traffic - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - tags = merge( - var.common_tags, - { - Name = "${local.name_prefix}-sg" - }, - ) -} - -resource "aws_launch_template" "target" { - name = "${local.name_prefix}-target" - image_id = var.ami_id - instance_type = null - key_name = var.ssh_keypair - - iam_instance_profile { - name = aws_iam_instance_profile.target.name - } - - 
instance_requirements { - burstable_performance = "included" - - memory_mib { - min = var.instance_mem_min - max = var.instance_mem_max - } - - vcpu_count { - min = var.instance_cpu_min - max = var.instance_cpu_max - } - } - - network_interfaces { - associate_public_ip_address = true - delete_on_termination = true - security_groups = [aws_security_group.target.id] - } - - tag_specifications { - resource_type = "instance" - - tags = merge( - var.common_tags, - local.fleet_tags, - ) - } -} - -# There are three primary knobs we can turn to try and optimize our costs by -# using a spot fleet: our min and max instance requirements, our max bid -# price, and the allocation strategy to use when fulfilling the spot request. -# We've currently configured our instance requirements to allow for anywhere -# from 2-4 vCPUs and 4-16GB of RAM. We intentionally have a wide range -# to allow for a large instance size pool to be considered. Our next knob is our -# max bid price. As we're using spot fleets to save on instance cost, we never -# want to pay more for an instance than we were on-demand. We've set the max price -# to equal what we pay for t3.medium instances on-demand, which are the smallest -# reliable size for Vault scenarios. The final knob is the allocation strategy -# that AWS will use when looking for instances that meet our resource and cost -# requirements. We're using the "lowestPrice" strategy to get the absolute -# cheapest machines that will fit the requirements, but it comes with a slightly -# higher capacity risk than say, "capacityOptimized" or "priceCapacityOptimized". -# Unless we see capacity issues or instances being shut down then we ought to -# stick with that strategy. -resource "aws_spot_fleet_request" "targets" { - allocation_strategy = local.allocation_strategy - fleet_type = "request" - iam_fleet_role = aws_iam_role.fleet.arn - // The instance_pools_to_use_count is only valid for the allocation_strategy - // lowestPrice. 
When we are using that strategy we'll want to always set it - // to 1 to avoid rebuilding the fleet on a re-run. For any other strategy - // set it to zero to avoid rebuilding the fleet on a re-run. - instance_pools_to_use_count = local.allocation_strategy == "lowestPrice" ? 1 : 0 - spot_price = var.max_price - target_capacity = var.instance_count - terminate_instances_on_delete = true - wait_for_fulfillment = true - - launch_template_config { - launch_template_specification { - id = aws_launch_template.target.id - version = aws_launch_template.target.latest_version - } - - // We cannot currently use more than one subnet[0]. Until the bug has been resolved - // we'll choose a random subnet. It would be ideal to bid across all subnets to get - // the absolute cheapest available at the time of bidding. - // - // [0] https://github.com/hashicorp/terraform-provider-aws/issues/30505 - - /* - dynamic "overrides" { - for_each = random_shuffle.subnets.result - - content { - subnet_id = overrides.value - } - } - */ - - overrides { - subnet_id = random_shuffle.subnets.result[0] - } - } - - tags = merge( - var.common_tags, - local.fleet_tags, - ) -} - -resource "time_sleep" "wait_for_fulfillment" { - depends_on = [aws_spot_fleet_request.targets] - create_duration = "2s" -} - -data "aws_instances" "targets" { - depends_on = [ - time_sleep.wait_for_fulfillment, - aws_spot_fleet_request.targets, - ] - - instance_tags = local.fleet_tags - instance_state_names = [ - "pending", - "running", - ] - - filter { - name = "image-id" - values = [var.ami_id] - } - - filter { - name = "iam-instance-profile.arn" - values = [aws_iam_instance_profile.target.arn] - } -} - -data "aws_instance" "targets" { - depends_on = [ - aws_spot_fleet_request.targets, - data.aws_instances.targets - ] - for_each = local.instances - - instance_id = data.aws_instances.targets.ids[each.key] -} - -module "disable_selinux" { - source = "../disable_selinux" - count = var.disable_selinux == true ? 
1 : 0 - - hosts = { for idx in range(var.instance_count) : idx => { - public_ip = aws_instance.targets[idx].public_ip - private_ip = aws_instance.targets[idx].private_ip - } } -} diff --git a/enos/modules/target_ec2_spot_fleet/outputs.tf b/enos/modules/target_ec2_spot_fleet/outputs.tf deleted file mode 100644 index 505db0e..0000000 --- a/enos/modules/target_ec2_spot_fleet/outputs.tf +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -output "cluster_name" { - value = local.cluster_name -} - -output "hosts" { - description = "The ec2 fleet target hosts" - value = { for idx in range(var.instance_count) : idx => { - public_ip = data.aws_instance.targets[idx].public_ip - private_ip = data.aws_instance.targets[idx].private_ip - ipv6 = try(data.aws_instance.targets[idx].ipv6_addresses[0], null) - } } -} diff --git a/enos/modules/target_ec2_spot_fleet/variables.tf b/enos/modules/target_ec2_spot_fleet/variables.tf deleted file mode 100644 index af6c0dc..0000000 --- a/enos/modules/target_ec2_spot_fleet/variables.tf +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -variable "ami_id" { - description = "The machine image identifier" - type = string -} - -variable "cluster_name" { - type = string - description = "A unique cluster identifier" - default = null -} - -variable "cluster_tag_key" { - type = string - description = "The key name for the cluster tag" - default = "TargetCluster" -} - -variable "common_tags" { - description = "Common tags for cloud resources" - type = map(string) - default = { - Project = "Vault" - } -} - -variable "disable_selinux" { - description = "Optionally disable SELinux for certain distros/versions" - type = bool - default = true -} - -variable "instance_mem_min" { - description = "The minimum amount of memory in mebibytes for each instance in the fleet. 
(1 MiB = 1024 bytes)" - type = number - default = 4096 // ~4 GB -} - -variable "instance_mem_max" { - description = "The maximum amount of memory in mebibytes for each instance in the fleet. (1 MiB = 1024 bytes)" - type = number - default = 16385 // ~16 GB -} - -variable "instance_cpu_min" { - description = "The minimum number of vCPU's for each instance in the fleet" - type = number - default = 2 -} - -variable "instance_cpu_max" { - description = "The maximum number of vCPU's for each instance in the fleet" - type = number - default = 8 // Unlikely we'll ever get that high due to spot price bid protection -} - -variable "instance_count" { - description = "The number of target instances to create" - type = number - default = 3 -} - -variable "project_name" { - description = "A unique project name" - type = string -} - -variable "max_price" { - description = "The maximum hourly price to pay for each target instance" - type = string - default = "0.0416" -} - -variable "seal_key_names" { - type = list(string) - description = "The key management seal key names" - default = null -} - -variable "ssh_allow_ips" { - description = "Allowlisted IP addresses for SSH access to target nodes. The IP address of the machine running Enos will automatically allowlisted" - type = list(string) - default = [] -} - -variable "ssh_keypair" { - description = "SSH keypair used to connect to EC2 instances" - type = string -} - -variable "vpc_id" { - description = "The identifier of the VPC where the target instances will be created" - type = string -} diff --git a/enos/modules/vault_agent/main.tf b/enos/modules/vault_agent/main.tf deleted file mode 100644 index e5d1966..0000000 --- a/enos/modules/vault_agent/main.tf +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - aws = { - source = "hashicorp/aws" - } - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "ip_version" { - type = number - default = 4 - description = "The IP version to use for the Vault TCP listeners" - - validation { - condition = contains([4, 6], var.ip_version) - error_message = "The ip_version must be either 4 or 6" - } -} - -variable "vault_addr" { - type = string - description = "The local vault API listen address" -} - -variable "vault_agent_port" { - type = number - description = "The listener port number for the Vault Agent" -} - -variable "vault_agent_template_destination" { - type = string - description = "The destination of the template rendered by Agent" -} - -variable "vault_agent_template_contents" { - type = string - description = "The template contents to be rendered by Agent" -} - -variable "vault_root_token" { - type = string - description = "The Vault root token" -} - -variable "hosts" { - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) - description = "The Vault cluster instances that were created" -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -locals { - agent_listen_addr = "${var.ip_version == 4 ? 
"127.0.0.1" : "[::1]"}:${var.vault_agent_port}" -} - -resource "enos_remote_exec" "set_up_approle_auth_and_agent" { - environment = { - AGENT_LISTEN_ADDR = local.agent_listen_addr, - VAULT_ADDR = var.vault_addr, - VAULT_INSTALL_DIR = var.vault_install_dir, - VAULT_TOKEN = var.vault_root_token, - VAULT_AGENT_TEMPLATE_DESTINATION = var.vault_agent_template_destination, - VAULT_AGENT_TEMPLATE_CONTENTS = var.vault_agent_template_contents, - } - - scripts = [abspath("${path.module}/scripts/set-up-approle-and-agent.sh")] - - transport = { - ssh = { - host = var.hosts[0].public_ip - } - } -} - -output "vault_agent_listen_addr" { - description = "The vault agent listen address" - value = local.agent_listen_addr -} diff --git a/enos/modules/vault_agent/scripts/set-up-approle-and-agent.sh b/enos/modules/vault_agent/scripts/set-up-approle-and-agent.sh deleted file mode 100644 index 6af219a..0000000 --- a/enos/modules/vault_agent/scripts/set-up-approle-and-agent.sh +++ /dev/null @@ -1,99 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -set -e - -fail() { - echo "$1" 1>&2 - return 1 -} - -[[ -z "$AGENT_LISTEN_ADDR" ]] && fail "AGENT_LISTEN_ADDR env variable has not been set" -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" -[[ -z "$VAULT_AGENT_TEMPLATE_CONTENTS" ]] && fail "VAULT_AGENT_TEMPLATE_CONTENTS env variable has not been set" -[[ -z "$VAULT_AGENT_TEMPLATE_DESTINATION" ]] && fail "VAULT_AGENT_TEMPLATE_DESTINATION env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" - -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -# If approle was already enabled, disable it as we're about to re-enable it (the || true is so we don't fail if it doesn't already exist) -$binpath auth disable approle || true - -$binpath auth enable approle - -$binpath write auth/approle/role/agent-role secret_id_ttl=700h token_num_uses=1000 token_ttl=600h token_max_ttl=700h secret_id_num_uses=1000 - -ROLEID=$($binpath read --format=json auth/approle/role/agent-role/role-id | jq -r '.data.role_id') - -if [[ "$ROLEID" == '' ]]; then - fail "expected ROLEID to be nonempty, but it is empty" -fi - -SECRETID=$($binpath write -f --format=json auth/approle/role/agent-role/secret-id | jq -r '.data.secret_id') - -if [[ "$SECRETID" == '' ]]; then - fail "expected SECRETID to be nonempty, but it is empty" -fi - -echo "$ROLEID" > /tmp/role-id -echo "$SECRETID" > /tmp/secret-id - -cat > /tmp/vault-agent.hcl <<- EOM -pid_file = "/tmp/pidfile" - -vault { - address = "${VAULT_ADDR}" - tls_skip_verify = true - retry { - num_retries = 10 - } -} - -cache { - enforce_consistency = "always" - use_auto_auth_token = true -} - -listener "tcp" { - address = "${AGENT_LISTEN_ADDR}" - tls_disable = true -} - -template { - destination = "${VAULT_AGENT_TEMPLATE_DESTINATION}" - contents = 
"${VAULT_AGENT_TEMPLATE_CONTENTS}" - exec { - command = "pkill -F /tmp/pidfile" - } -} - -auto_auth { - method { - type = "approle" - config = { - role_id_file_path = "/tmp/role-id" - secret_id_file_path = "/tmp/secret-id" - } - } - sink { - type = "file" - config = { - path = "/tmp/token" - } - } -} -EOM - -# If Agent is still running from a previous run, kill it -pkill -F /tmp/pidfile || true - -# If the template file already exists, remove it -rm "${VAULT_AGENT_TEMPLATE_DESTINATION}" || true - -# Run agent (it will kill itself when it finishes rendering the template) -if ! $binpath agent -config=/tmp/vault-agent.hcl > /tmp/agent-logs.txt 2>&1; then - fail "failed to run vault agent: $(cat /tmp/agent-logs.txt)" -fi diff --git a/enos/modules/vault_cluster/main.tf b/enos/modules/vault_cluster/main.tf deleted file mode 100644 index a70ab69..0000000 --- a/enos/modules/vault_cluster/main.tf +++ /dev/null @@ -1,414 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - # We need to specify the provider source in each module until we publish it - # to the public registry - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - version = ">= 0.4.0" - } - } -} - -data "enos_environment" "localhost" {} - -locals { - audit_device_file_path = "/var/log/vault/vault_audit.log" - audit_socket_port = "9090" - bin_path = "${var.install_dir}/vault" - consul_bin_path = "${var.consul_install_dir}/consul" - enable_audit_devices = var.enable_audit_devices && var.initialize_cluster - disable_mlock = false - // In order to get Terraform to plan we have to use collections with keys - // that are known at plan time. In order for our module to work our var.hosts - // must be a map with known keys at plan time. Here we're creating locals - // that keep track of index values that point to our target hosts. 
- followers = toset(slice(local.instances, 1, length(local.instances))) - instances = [for idx in range(length(var.hosts)) : tostring(idx)] - key_shares = { - "awskms" = null - "shamir" = 5 - "pkcs11" = null - } - key_threshold = { - "awskms" = null - "shamir" = 3 - "pkcs11" = null - } - leader = toset(slice(local.instances, 0, 1)) - netcat_command = { - amzn = "nc" - opensuse-leap = "netcat" - rhel = "nc" - sles = "nc" - ubuntu = "netcat" - } - recovery_shares = { - "awskms" = 5 - "shamir" = null - "pkcs11" = 5 - } - recovery_threshold = { - "awskms" = 3 - "shamir" = null - "pkcs11" = 3 - } - vault_service_user = "vault" -} - -resource "enos_host_info" "hosts" { - for_each = var.hosts - - transport = { - ssh = { - host = each.value.public_ip - } - } -} - -resource "enos_bundle_install" "consul" { - for_each = { - for idx, host in var.hosts : idx => var.hosts[idx] - if var.storage_backend == "consul" - } - - destination = var.consul_install_dir - release = merge(var.consul_release, { product = "consul" }) - - transport = { - ssh = { - host = each.value.public_ip - } - } -} - -# We run install_packages before we install Vault because for some combinations of -# certain Linux distros and artifact types (e.g. SLES and RPM packages), there may -# be packages that are required to perform Vault installation (e.g. openssl). -module "install_packages" { - source = "../install_packages" - - hosts = var.hosts - packages = var.packages -} - -resource "enos_bundle_install" "vault" { - for_each = var.hosts - depends_on = [ - module.install_packages, // Don't race for the package manager locks with install_packages - ] - - destination = var.install_dir - release = var.release == null ? 
var.release : merge({ product = "vault" }, var.release) - artifactory = var.artifactory_release - path = var.local_artifact_path - - transport = { - ssh = { - host = each.value.public_ip - } - } -} - -resource "enos_consul_start" "consul" { - for_each = enos_bundle_install.consul - - bin_path = local.consul_bin_path - data_dir = var.consul_data_dir - config = { - # GetPrivateInterfaces is a go-sockaddr template that helps Consul get the correct - # addr in all of our default cases. This is required in the case of Amazon Linux, - # because amzn has a default docker listener that will make Consul try to use the - # incorrect addr. - bind_addr = "{{ GetPrivateInterfaces | include \"type\" \"IP\" | sort \"default\" | limit 1 | attr \"address\"}}" - data_dir = var.consul_data_dir - datacenter = "dc1" - retry_join = ["provider=aws tag_key=${var.backend_cluster_tag_key} tag_value=${var.backend_cluster_name}"] - server = false - bootstrap_expect = 0 - license = var.consul_license - log_level = var.consul_log_level - log_file = var.consul_log_file - } - license = var.consul_license - unit_name = "consul" - username = "consul" - - transport = { - ssh = { - host = var.hosts[each.key].public_ip - } - } -} - -module "start_vault" { - source = "../start_vault" - - depends_on = [ - enos_consul_start.consul, - module.install_packages, - enos_bundle_install.vault, - ] - - cluster_name = var.cluster_name - cluster_port = var.cluster_port - cluster_tag_key = var.cluster_tag_key - config_dir = var.config_dir - config_mode = var.config_mode - disable_mlock = local.disable_mlock - external_storage_port = var.external_storage_port - hosts = var.hosts - install_dir = var.install_dir - ip_version = var.ip_version - license = var.license - listener_port = var.listener_port - log_level = var.log_level - manage_service = var.manage_service - seal_attributes = var.seal_attributes - seal_attributes_secondary = var.seal_attributes_secondary - seal_type = var.seal_type - seal_type_secondary = 
var.seal_type_secondary - service_username = local.vault_service_user - storage_backend = var.storage_backend - storage_backend_attrs = var.storage_backend_addl_config - storage_node_prefix = var.storage_node_prefix -} - -resource "enos_vault_init" "leader" { - depends_on = [ - module.start_vault, - ] - for_each = toset([ - for idx, leader in local.leader : leader - if var.initialize_cluster - ]) - - bin_path = local.bin_path - vault_addr = module.start_vault.leader[0].config.api_addr - - key_shares = local.key_shares[var.seal_type] - key_threshold = local.key_threshold[var.seal_type] - - recovery_shares = local.recovery_shares[var.seal_type] - recovery_threshold = local.recovery_threshold[var.seal_type] - - transport = { - ssh = { - host = var.hosts[each.value].public_ip - } - } -} - -resource "enos_vault_unseal" "leader" { - depends_on = [ - module.start_vault, - enos_vault_init.leader, - ] - for_each = enos_vault_init.leader // only unseal the leader if we initialized it - - bin_path = local.bin_path - vault_addr = module.start_vault.leader[each.key].config.api_addr - seal_type = var.seal_type - unseal_keys = var.seal_type != "shamir" ? null : coalesce(var.shamir_unseal_keys, enos_vault_init.leader[0].unseal_keys_hex) - - transport = { - ssh = { - host = var.hosts[tolist(local.leader)[0]].public_ip - } - } -} - -resource "enos_vault_unseal" "followers" { - depends_on = [ - enos_vault_init.leader, - enos_vault_unseal.leader, - ] - // Only unseal followers if we're not using an auto-unseal method and we've - // initialized the cluster - for_each = toset([ - for idx, follower in local.followers : follower - if var.seal_type == "shamir" && var.initialize_cluster - ]) - - bin_path = local.bin_path - vault_addr = module.start_vault.followers[each.key].config.api_addr - seal_type = var.seal_type - unseal_keys = var.seal_type != "shamir" ? 
null : coalesce(var.shamir_unseal_keys, enos_vault_init.leader[0].unseal_keys_hex) - - transport = { - ssh = { - host = var.hosts[each.value].public_ip - } - } -} - -// Force unseal the cluster. This is used if the vault-cluster module is used -// to add additional nodes to a cluster via auto-pilot, or some other means. -// When that happens we'll want to set initialize_cluster to false and -// force_unseal to true. -resource "enos_vault_unseal" "maybe_force_unseal" { - depends_on = [ - module.start_vault.followers, - ] - for_each = { - for idx, host in var.hosts : idx => host - if var.force_unseal && !var.initialize_cluster - } - - bin_path = local.bin_path - vault_addr = module.start_vault.api_addr_localhost - seal_type = var.seal_type - unseal_keys = coalesce( - var.shamir_unseal_keys, - try(enos_vault_init.leader[0].unseal_keys_hex, null), - ) - - transport = { - ssh = { - host = each.value.public_ip - } - } -} - -# Add the vault install location to the PATH and set up VAULT_ADDR and VAULT_TOKEN environement -# variables in the login shell so we don't have to do it if/when we login in to a cluster node. -resource "enos_remote_exec" "configure_login_shell_profile" { - depends_on = [ - enos_vault_init.leader, - enos_vault_unseal.leader, - ] - for_each = var.hosts - - environment = { - VAULT_ADDR = module.start_vault.api_addr_localhost - VAULT_TOKEN = var.root_token != null ? var.root_token : try(enos_vault_init.leader[0].root_token, "_") - VAULT_INSTALL_DIR = var.install_dir - } - - scripts = [abspath("${path.module}/scripts/set-up-login-shell-profile.sh")] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} - -# Add a motd to assist people that might be logging in. 
-resource "enos_file" "motd" { - depends_on = [ - enos_remote_exec.configure_login_shell_profile - ] - for_each = var.hosts - - destination = "/etc/motd" - content = <&2 - exit 1 -} - -[[ -z "$LOG_FILE_PATH" ]] && fail "LOG_FILE_PATH env variable has not been set" -[[ -z "$SERVICE_USER" ]] && fail "SERVICE_USER env variable has not been set" - -LOG_DIR=$(dirname "$LOG_FILE_PATH") - -function retry { - local retries=$1 - shift - local count=0 - - until "$@"; do - exit=$? - wait=10 - count=$((count + 1)) - - if [ "$count" -lt "$retries" ]; then - sleep "$wait" - else - return "$exit" - fi - done - - return 0 -} - -retry 7 id -a "$SERVICE_USER" - -sudo mkdir -p "$LOG_DIR" -sudo chown -R "$SERVICE_USER":"$SERVICE_USER" "$LOG_DIR" diff --git a/enos/modules/vault_cluster/scripts/enable-audit-devices.sh b/enos/modules/vault_cluster/scripts/enable-audit-devices.sh deleted file mode 100644 index a93bd55..0000000 --- a/enos/modules/vault_cluster/scripts/enable-audit-devices.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -set -exo pipefail - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$IP_VERSION" ]] && fail "IP_VERSION env variable has not been set" -[[ -z "$LOG_FILE_PATH" ]] && fail "LOG_FILE_PATH env variable has not been set" -[[ -z "$SOCKET_PORT" ]] && fail "SOCKET_PORT env variable has not been set" -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_BIN_PATH" ]] && fail "VAULT_BIN_PATH env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" - -enable_file_audit_device() { - $VAULT_BIN_PATH audit enable file file_path="$LOG_FILE_PATH" -} - -enable_syslog_audit_device() { - $VAULT_BIN_PATH audit enable syslog tag="vault" facility="AUTH" -} - -enable_socket_audit_device() { - if [ "$IP_VERSION" = "4" ]; then - "$VAULT_BIN_PATH" audit enable socket address="127.0.0.1:$SOCKET_PORT" - else - "$VAULT_BIN_PATH" audit enable socket address="[::1]:$SOCKET_PORT" - fi -} - -main() { - if ! enable_file_audit_device; then - fail "Failed to enable vault file audit device" - fi - - if ! enable_syslog_audit_device; then - fail "Failed to enable vault syslog audit device" - fi - - if ! enable_socket_audit_device; then - local log - log=$(cat /tmp/vault-socket.log) - fail "Failed to enable vault socket audit device: listener log: $log" - fi - - return 0 -} - -main diff --git a/enos/modules/vault_cluster/scripts/set-up-login-shell-profile.sh b/enos/modules/vault_cluster/scripts/set-up-login-shell-profile.sh deleted file mode 100644 index f3a42d2..0000000 --- a/enos/modules/vault_cluster/scripts/set-up-login-shell-profile.sh +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" - -# Determine the profile file we should write to. We only want to affect login shells and bash will -# only read one of these in ordered of precendence. -determineProfileFile() { - if [ -f "$HOME/.bash_profile" ]; then - printf "%s/.bash_profile\n" "$HOME" - return 0 - fi - - if [ -f "$HOME/.bash_login" ]; then - printf "%s/.bash_login\n" "$HOME" - return 0 - fi - - printf "%s/.profile\n" "$HOME" -} - -appendVaultProfileInformation() { - tee -a "$1" <<< "export PATH=$PATH:$VAULT_INSTALL_DIR -export VAULT_ADDR=$VAULT_ADDR -export VAULT_TOKEN=$VAULT_TOKEN" -} - -main() { - local profile_file - if ! profile_file=$(determineProfileFile); then - fail "failed to determine login shell profile file location" - fi - - # If vault_cluster is used more than once, eg: autopilot or replication, this module can - # be called more than once. Short ciruit here if our profile is already set up. - if grep VAULT_ADDR < "$profile_file"; then - exit 0 - fi - - if ! appendVaultProfileInformation "$profile_file"; then - fail "failed to write vault configuration to login shell profile" - fi - - exit 0 -} - -main diff --git a/enos/modules/vault_cluster/scripts/start-audit-socket-listener.sh b/enos/modules/vault_cluster/scripts/start-audit-socket-listener.sh deleted file mode 100644 index 9c714a3..0000000 --- a/enos/modules/vault_cluster/scripts/start-audit-socket-listener.sh +++ /dev/null @@ -1,92 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -set -exo pipefail - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$IP_VERSION" ]] && fail "IP_VERSION env variable has not been set" -[[ -z "$NETCAT_COMMAND" ]] && fail "NETCAT_COMMAND env variable has not been set" -[[ -z "$SOCKET_PORT" ]] && fail "SOCKET_PORT env variable has not been set" - -if [ "$IP_VERSION" = "4" ]; then - export SOCKET_ADDR="127.0.0.1" -else - export SOCKET_ADDR="::1" -fi - -socket_listener_procs() { - pgrep -x "${NETCAT_COMMAND}" -} - -kill_socket_listener() { - pkill "${NETCAT_COMMAND}" -} - -test_socket_listener() { - case $IP_VERSION in - 4) - "${NETCAT_COMMAND}" -zvw 2 "${SOCKET_ADDR}" "$SOCKET_PORT" < /dev/null - ;; - 6) - "${NETCAT_COMMAND}" -6 -zvw 2 "${SOCKET_ADDR}" "$SOCKET_PORT" < /dev/null - ;; - *) - fail "unknown IP_VERSION: $IP_VERSION" - ;; - esac -} - -start_socket_listener() { - if socket_listener_procs; then - test_socket_listener - return $? - fi - - # Run nc to listen on port 9090 for the socket auditor. We spawn nc - # with nohup to ensure that the listener doesn't expect a SIGHUP and - # thus block the SSH session from exiting or terminating on exit. - case $IP_VERSION in - 4) - nohup nc -kl "$SOCKET_PORT" >> /tmp/vault-socket.log 2>&1 < /dev/null & - ;; - 6) - nohup nc -6 -kl "$SOCKET_PORT" >> /tmp/vault-socket.log 2>&1 < /dev/null & - ;; - *) - fail "unknown IP_VERSION: $IP_VERSION" - ;; - esac -} - -read_log() { - local f - f=/tmp/vault-socket.log - [[ -f "$f" ]] && cat "$f" -} - -main() { - if socket_listener_procs; then - # Clean up old nc's that might not be working - kill_socket_listener - fi - - if ! start_socket_listener; then - fail "Failed to start audit socket listener: socket listener log: $(read_log)" - fi - - # wait for nc to listen - sleep 1 - - if ! 
test_socket_listener; then - fail "Error testing socket listener: socket listener log: $(read_log)" - fi - - return 0 -} - -main diff --git a/enos/modules/vault_cluster/variables.tf b/enos/modules/vault_cluster/variables.tf deleted file mode 100644 index 1e4de12..0000000 --- a/enos/modules/vault_cluster/variables.tf +++ /dev/null @@ -1,291 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -variable "artifactory_release" { - type = object({ - username = string - token = string - url = string - sha256 = string - }) - description = "The Artifactory release information to install Vault artifacts from Artifactory" - default = null -} - -variable "backend_cluster_name" { - type = string - description = "The name of the backend cluster" - default = null -} - -variable "backend_cluster_tag_key" { - type = string - description = "The tag key for searching for backend nodes" - default = null -} - -variable "cluster_name" { - type = string - description = "The Vault cluster name" - default = null -} - -variable "cluster_port" { - type = number - description = "The cluster port for Vault to listen on" - default = 8201 -} - -variable "cluster_tag_key" { - type = string - description = "The Vault cluster tag key" - default = "retry_join" -} - -variable "config_dir" { - type = string - description = "The directory to use for Vault configuration" - default = "/etc/vault.d" -} - -variable "config_mode" { - description = "The method to use when configuring Vault. When set to 'env' we will configure Vault using VAULT_ style environment variables if possible. When 'file' we'll use the HCL configuration file for all configuration options." - default = "file" - - validation { - condition = contains(["env", "file"], var.config_mode) - error_message = "The config_mode must be either 'env' or 'file'. No other configuration modes are supported." 
- } -} - -variable "config_env_vars" { - description = "Optional Vault configuration environment variables to set starting Vault" - type = map(string) - default = null -} - -variable "consul_data_dir" { - type = string - description = "The directory where the consul will store data" - default = "/opt/consul/data" -} - -variable "consul_install_dir" { - type = string - description = "The directory where the consul binary will be installed" - default = "/opt/consul/bin" -} - -variable "consul_license" { - type = string - sensitive = true - description = "The consul enterprise license" - default = null -} - -variable "consul_log_file" { - type = string - description = "The file where the consul will write log output" - default = "/var/log/consul.log" -} - -variable "consul_log_level" { - type = string - description = "The consul service log level" - default = "info" - - validation { - condition = contains(["trace", "debug", "info", "warn", "error"], var.consul_log_level) - error_message = "The consul_log_level must be one of 'trace', 'debug', 'info', 'warn', or 'error'." 
- } -} - -variable "consul_release" { - type = object({ - version = string - edition = string - }) - description = "Consul release version and edition to install from releases.hashicorp.com" - default = { - version = "1.15.1" - edition = "ce" - } -} - -variable "distro_version" { - type = string - description = "The Linux distro version" - default = null -} - -variable "enable_audit_devices" { - description = "If true every audit device will be enabled" - type = bool - default = true -} - -variable "external_storage_port" { - type = number - description = "The port to connect to when using external storage" - default = 8500 -} - -variable "force_unseal" { - type = bool - description = "Always unseal the Vault cluster even if we're not initializing it" - default = false -} - -variable "hosts" { - description = "The target machines host addresses to use for the Vault cluster" - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) -} - -variable "initialize_cluster" { - type = bool - description = "Initialize the Vault cluster" - default = true -} - -variable "install_dir" { - type = string - description = "The directory where the Vault binary will be installed" - default = "/opt/vault/bin" -} - -variable "ip_version" { - type = number - description = "The IP version to use for the Vault TCP listeners" - - validation { - condition = contains([4, 6], var.ip_version) - error_message = "The ip_version must be either 4 or 6" - } -} - -variable "license" { - type = string - sensitive = true - description = "The value of the Vault license" - default = null -} - -variable "listener_port" { - type = number - description = "The port for Vault to listen on" - default = 8200 -} - -variable "local_artifact_path" { - type = string - description = "The path to a locally built vault artifact to install. 
It can be a zip archive, RPM, or Debian package" - default = null -} - -variable "log_level" { - type = string - description = "The vault service log level" - default = "info" - - validation { - condition = contains(["trace", "debug", "info", "warn", "error"], var.log_level) - error_message = "The log_level must be one of 'trace', 'debug', 'info', 'warn', or 'error'." - } -} - -variable "manage_service" { - type = bool - description = "Manage the Vault service users and systemd unit. Disable this to use configuration in RPM and Debian packages" - default = true -} - -variable "packages" { - type = list(string) - description = "A list of packages to install via the target host package manager" - default = [] -} - -variable "release" { - type = object({ - version = string - edition = string - }) - description = "Vault release version and edition to install from releases.hashicorp.com" - default = null -} - -variable "root_token" { - type = string - description = "The Vault root token that we can use to initialize and configure the cluster" - default = null -} - -variable "seal_ha_beta" { - description = "Enable using Seal HA on clusters that meet minimum version requirements and are enterprise editions" - default = true -} - -variable "seal_attributes" { - description = "The auto-unseal device attributes" - default = null -} - -variable "seal_attributes_secondary" { - description = "The secondary auto-unseal device attributes" - default = null -} - -variable "seal_type" { - type = string - description = "The primary seal device type" - default = "awskms" - - validation { - condition = contains(["awskms", "pkcs11", "shamir"], var.seal_type) - error_message = "The seal_type must be either 'awskms', 'pkcs11', or 'shamir'. No other seal types are supported." - } -} - -variable "seal_type_secondary" { - type = string - description = "A secondary HA seal device type. 
Only supported in Vault Enterprise >= 1.15" - default = "none" - - validation { - condition = contains(["awskms", "none", "pkcs11"], var.seal_type_secondary) - error_message = "The secondary_seal_type must be 'awskms', 'none', or 'pkcs11'. No other secondary seal types are supported." - } -} - -variable "shamir_unseal_keys" { - type = list(string) - description = "Shamir unseal keys. Often only used adding additional nodes to an already initialized cluster." - default = null -} - -variable "storage_backend" { - type = string - description = "The storage backend to use" - default = "raft" - - validation { - condition = contains(["raft", "consul"], var.storage_backend) - error_message = "The storage_backend must be either raft or consul. No other storage backends are supported." - } -} - -variable "storage_backend_addl_config" { - type = map(any) - description = "An optional set of key value pairs to inject into the storage block" - default = {} -} - -variable "storage_node_prefix" { - type = string - description = "A prefix to use for each node in the Vault storage configuration" - default = "node" -} diff --git a/enos/modules/vault_failover_demote_dr_primary/main.tf b/enos/modules/vault_failover_demote_dr_primary/main.tf deleted file mode 100644 index 8193370..0000000 --- a/enos/modules/vault_failover_demote_dr_primary/main.tf +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "ip_version" { - type = number - description = "The IP version used for the Vault TCP listener" - - validation { - condition = contains([4, 6], var.ip_version) - error_message = "The ip_version must be either 4 or 6" - } -} - -variable "primary_leader_host" { - type = object({ - ipv6 = string - private_ip = string - public_ip = string - }) - description = "The primary cluster leader host" -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_addr" { - type = string - description = "The local vault API listen address" -} - -variable "vault_root_token" { - type = string - description = "The vault root token" -} - -locals { -} - -resource "enos_remote_exec" "demote_dr_primary" { - environment = { - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - } - - inline = ["${var.vault_install_dir}/vault write -f sys/replication/dr/primary/demote"] - - - transport = { - ssh = { - host = var.primary_leader_host.public_ip - } - } -} diff --git a/enos/modules/vault_failover_promote_dr_secondary/main.tf b/enos/modules/vault_failover_promote_dr_secondary/main.tf deleted file mode 100644 index 8538253..0000000 --- a/enos/modules/vault_failover_promote_dr_secondary/main.tf +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "ip_version" { - type = number - description = "The IP version used for the Vault TCP listener" - - validation { - condition = contains([4, 6], var.ip_version) - error_message = "The ip_version must be either 4 or 6" - } -} - -variable "secondary_leader_host" { - type = object({ - ipv6 = string - private_ip = string - public_ip = string - }) - description = "The secondary cluster leader host" -} - -variable "vault_addr" { - type = string - description = "The local vault API listen address" -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_root_token" { - type = string - description = "The vault root token" -} - -variable "dr_operation_token" { - type = string - description = "The wrapping token created on primary cluster" -} - -locals { - dr_operation_token = var.dr_operation_token -} - -resource "enos_remote_exec" "promote_dr_secondary" { - environment = { - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - } - - inline = ["${var.vault_install_dir}/vault write -f sys/replication/dr/secondary/promote dr_operation_token=${local.dr_operation_token}"] - - - transport = { - ssh = { - host = var.secondary_leader_host.public_ip - } - } -} diff --git a/enos/modules/vault_failover_update_dr_primary/main.tf b/enos/modules/vault_failover_update_dr_primary/main.tf deleted file mode 100644 index cc159f2..0000000 --- a/enos/modules/vault_failover_update_dr_primary/main.tf +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "ip_version" { - type = number - description = "The IP version used for the Vault TCP listener" - - validation { - condition = contains([4, 6], var.ip_version) - error_message = "The ip_version must be either 4 or 6" - } -} - -variable "secondary_leader_host" { - type = object({ - ipv6 = string - private_ip = string - public_ip = string - }) - description = "The secondary cluster leader host" -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_addr" { - type = string - description = "The local vault API listen address" - -} - -variable "vault_root_token" { - type = string - description = "The vault root token" -} - -variable "dr_operation_token" { - type = string - description = "The wrapping token created on primary cluster" -} - -variable "wrapping_token" { - type = string - description = "The wrapping token created on primary cluster" -} - -locals { - dr_operation_token = var.dr_operation_token - wrapping_token = var.wrapping_token -} - -resource "enos_remote_exec" "update_dr_primary" { - environment = { - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - } - - inline = ["${var.vault_install_dir}/vault write sys/replication/dr/secondary/update-primary dr_operation_token=${local.dr_operation_token} token=${local.wrapping_token}"] - - - transport = { - ssh = { - host = var.secondary_leader_host.public_ip - } - } -} diff --git a/enos/modules/vault_get_cluster_ips/main.tf b/enos/modules/vault_get_cluster_ips/main.tf deleted file mode 100644 index ef31018..0000000 --- a/enos/modules/vault_get_cluster_ips/main.tf +++ /dev/null @@ -1,185 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -/* - -Given our expected hosts, determine which is currently the leader and verify that all expected -nodes are either the leader or a follower. - -*/ - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "hosts" { - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) - description = "The Vault cluster hosts that are expected to be in the cluster" -} - -variable "ip_version" { - type = number - description = "The IP version used for the Vault TCP listener" - - validation { - condition = contains([4, 6], var.ip_version) - error_message = "The ip_version must be either 4 or 6" - } -} - -variable "vault_addr" { - type = string - description = "The local vault API listen address" -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_root_token" { - type = string - description = "The vault root token" -} - -locals { - follower_hosts_list = [ - for idx in range(length(var.hosts)) : var.hosts[idx] if var.ip_version == 6 ? - contains(tolist(local.follower_ipv6s), var.hosts[idx].ipv6) : - contains(tolist(local.follower_private_ips), var.hosts[idx].private_ip) - ] - follower_hosts = { - for idx in range(local.host_count - 1) : idx => try(local.follower_hosts_list[idx], null) - } - follower_ipv6s = jsondecode(enos_remote_exec.follower_ipv6s.stdout) - follower_private_ips = jsondecode(enos_remote_exec.follower_private_ipv4s.stdout) - follower_public_ips = [for host in local.follower_hosts : host.public_ip] - host_count = length(var.hosts) - ipv6s = [for k, v in values(tomap(var.hosts)) : tostring(v["ipv6"])] - leader_host_list = [ - for idx in range(length(var.hosts)) : var.hosts[idx] if var.ip_version == 6 ? 
- var.hosts[idx].ipv6 == local.leader_ipv6 : - var.hosts[idx].private_ip == local.leader_private_ip - ] - leader_host = try(local.leader_host_list[0], null) - leader_ipv6 = trimspace(enos_remote_exec.leader_ipv6.stdout) - leader_private_ip = trimspace(enos_remote_exec.leader_private_ipv4.stdout) - leader_public_ip = try(local.leader_host.public_ip, null) - private_ips = [for k, v in values(tomap(var.hosts)) : tostring(v["private_ip"])] -} - -resource "enos_remote_exec" "leader_private_ipv4" { - environment = { - IP_VERSION = var.ip_version - VAULT_ADDR = var.vault_addr - VAULT_INSTALL_DIR = var.vault_install_dir - VAULT_TOKEN = var.vault_root_token - } - - scripts = [abspath("${path.module}/scripts/get-leader-ipv4.sh")] - - transport = { - ssh = { - host = var.hosts[0].public_ip - } - } -} - -resource "enos_remote_exec" "leader_ipv6" { - environment = { - IP_VERSION = var.ip_version - VAULT_ADDR = var.vault_addr - VAULT_INSTALL_DIR = var.vault_install_dir - VAULT_TOKEN = var.vault_root_token - } - - scripts = [abspath("${path.module}/scripts/get-leader-ipv6.sh")] - - transport = { - ssh = { - host = var.hosts[0].public_ip - } - } -} - -resource "enos_remote_exec" "follower_private_ipv4s" { - environment = { - IP_VERSION = var.ip_version - VAULT_ADDR = var.vault_addr - VAULT_INSTALL_DIR = var.vault_install_dir - VAULT_LEADER_PRIVATE_IP = local.leader_private_ip - VAULT_PRIVATE_IPS = jsonencode(local.private_ips) - VAULT_TOKEN = var.vault_root_token - } - - scripts = [abspath("${path.module}/scripts/get-follower-ipv4s.sh")] - - transport = { - ssh = { - host = var.hosts[0].public_ip - } - } -} - -resource "enos_remote_exec" "follower_ipv6s" { - environment = { - IP_VERSION = var.ip_version - VAULT_ADDR = var.vault_addr - VAULT_INSTALL_DIR = var.vault_install_dir - VAULT_IPV6S = jsonencode(local.ipv6s) - VAULT_LEADER_IPV6 = local.leader_ipv6 - VAULT_TOKEN = var.vault_root_token - } - - scripts = [abspath("${path.module}/scripts/get-follower-ipv6s.sh")] - - transport = 
{ - ssh = { - host = var.hosts[0].public_ip - } - } -} - -output "follower_hosts" { - value = local.follower_hosts -} - -output "follower_ipv6s" { - value = local.follower_ipv6s -} - -output "follower_private_ips" { - value = local.follower_private_ips -} - -output "follower_public_ips" { - value = local.follower_public_ips -} - -output "leader_host" { - value = local.leader_host -} - -output "leader_hosts" { - value = { 0 : local.leader_host } -} - -output "leader_ipv6" { - value = local.leader_ipv6 -} - -output "leader_private_ip" { - value = local.leader_private_ip -} - -output "leader_public_ip" { - value = local.leader_public_ip -} diff --git a/enos/modules/vault_get_cluster_ips/scripts/get-follower-ipv4s.sh b/enos/modules/vault_get_cluster_ips/scripts/get-follower-ipv4s.sh deleted file mode 100644 index 51f3b76..0000000 --- a/enos/modules/vault_get_cluster_ips/scripts/get-follower-ipv4s.sh +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -e - -function fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$IP_VERSION" ]] && fail "IP_VERSION env variable has not been set" -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" - -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "Unable to locate vault binary at $binpath" - -getFollowerPrivateIPsFromOperatorMembers() { - if members=$($binpath operator members -format json); then - if followers=$(echo "$members" | jq -e --argjson expected "$VAULT_PRIVATE_IPS" -c '.Nodes | map(select(any(.; .active_node==false)) | .api_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")) as $followers | $expected - ($expected - $followers)'); then - # Make sure that we got all the followers - if jq -e --argjson expected "$VAULT_PRIVATE_IPS" --argjson followers 
"$followers" -ne '$expected | length as $el | $followers | length as $fl | $fl == $el-1' > /dev/null; then - echo "$followers" - return 0 - fi - fi - fi - - return 1 -} - -removeIP() { - local needle - local haystack - needle=$1 - haystack=$2 - if remain=$(jq -e --arg ip "$needle" -c '. | map(select(.!=$ip))' <<< "$haystack"); then - if [[ -n "$remain" ]]; then - echo "$remain" - return 0 - fi - fi - - return 1 -} - -count=0 -retries=10 -while :; do - case $IP_VERSION in - 4) - [[ -z "$VAULT_PRIVATE_IPS" ]] && fail "VAULT_PRIVATE_IPS env variable has not been set" - [[ -z "$VAULT_LEADER_PRIVATE_IP" ]] && fail "VAULT_LEADER_PRIVATE_IP env variable has not been set" - - # Vault >= 1.10.x has the operator members. If we have that then we'll use it. - if $binpath operator -h 2>&1 | grep members &> /dev/null; then - if followers=$(getFollowerPrivateIPsFromOperatorMembers); then - echo "$followers" - exit 0 - fi - else - removeIP "$VAULT_LEADER_PRIVATE_IP" "$VAULT_PRIVATE_IPS" - - return $? - fi - ;; - 6) - echo '[]' - exit 0 - ;; - *) - fail "unknown IP_VERSION: $IP_VERSION" - ;; - esac - - wait=$((2 ** count)) - count=$((count + 1)) - if [ "$count" -lt "$retries" ]; then - sleep "$wait" - else - fail "Timed out trying to obtain the cluster followers" - fi -done diff --git a/enos/modules/vault_get_cluster_ips/scripts/get-follower-ipv6s.sh b/enos/modules/vault_get_cluster_ips/scripts/get-follower-ipv6s.sh deleted file mode 100644 index f51247b..0000000 --- a/enos/modules/vault_get_cluster_ips/scripts/get-follower-ipv6s.sh +++ /dev/null @@ -1,87 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -set -e - -function fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$IP_VERSION" ]] && fail "IP_VERSION env variable has not been set" -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" - -echo "$VAULT_IPV6S" > /tmp/vaultipv6s - -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "Unable to locate vault binary at $binpath" - -getFollowerIPV6sFromOperatorMembers() { - if members=$($binpath operator members -format json); then - if followers=$(echo "$members" | jq -e --argjson expected "$VAULT_IPV6S" -c '.Nodes | map(select(any(.; .active_node==false)) | .api_address | scan("\\[(.+)\\]") | .[0]) as $followers | $expected - ($expected - $followers)'); then - # Make sure that we got all the followers - if jq -e --argjson expected "$VAULT_IPV6S" --argjson followers "$followers" -ne '$expected | length as $el | $followers | length as $fl | $fl == $el-1' > /dev/null; then - echo "$followers" - return 0 - fi - fi - fi - - return 1 -} - -removeIP() { - local needle - local haystack - needle=$1 - haystack=$2 - if remain=$(jq -e --arg ip "$needle" -c '. | map(select(.!=$ip))' <<< "$haystack"); then - if [[ -n "$remain" ]]; then - echo "$remain" - return 0 - fi - fi - - return 1 -} - -count=0 -retries=10 -while :; do - case $IP_VERSION in - 4) - echo "[]" - exit 0 - ;; - 6) - [[ -z "$VAULT_IPV6S" ]] && fail "VAULT_IPV6S env variable has not been set" - [[ -z "$VAULT_LEADER_IPV6" ]] && fail "VAULT_LEADER_IPV6 env variable has not been set" - - # Vault >= 1.10.x has the operator members. If we have that then we'll use it. 
- if $binpath operator -h 2>&1 | grep members &> /dev/null; then - if followers=$(getFollowerIPV6sFromOperatorMembers); then - echo "$followers" - exit 0 - fi - else - [[ -z "$VAULT_LEADER_IPV6" ]] && fail "VAULT_LEADER_IPV6 env variable has not been set" - removeIP "$VAULT_LEADER_IPV6" "$VAULT_IPV6S" - exit $? - fi - ;; - *) - fail "unknown IP_VERSION: $IP_VERSION" - ;; - esac - - wait=$((2 ** count)) - count=$((count + 1)) - if [ "$count" -lt "$retries" ]; then - sleep "$wait" - else - fail "Timed out trying to obtain the cluster followers" - fi -done diff --git a/enos/modules/vault_get_cluster_ips/scripts/get-leader-ipv4.sh b/enos/modules/vault_get_cluster_ips/scripts/get-leader-ipv4.sh deleted file mode 100644 index f5697a9..0000000 --- a/enos/modules/vault_get_cluster_ips/scripts/get-leader-ipv4.sh +++ /dev/null @@ -1,66 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -e - -function fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$IP_VERSION" ]] && fail "IP_VERSION env variable has not been set" -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" - -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "Unable to locate vault binary at $binpath" - -findLeaderPrivateIP() { - # Find the leader private IP address - if ip=$($binpath read sys/leader -format=json | jq -r '.data.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")'); then - if [[ -n "$ip" ]]; then - echo "$ip" - return 0 - fi - fi - - # Some older versions of vault don't support reading sys/leader. Try falling back to the cli status. 
- if ip=$($binpath status -format json | jq -r '.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")'); then - if [[ -n "$ip" ]]; then - echo "$ip" - return 0 - fi - fi - - return 1 -} - -count=0 -retries=5 -while :; do - case $IP_VERSION in - 4) - # Find the leader private IP address - if ip=$(findLeaderPrivateIP); then - echo "$ip" - exit 0 - fi - ;; - 6) - exit 0 - ;; - *) - fail "unknown IP_VERSION: $IP_VERSION" - ;; - esac - - wait=$((2 ** count)) - count=$((count + 1)) - if [ "$count" -lt "$retries" ]; then - sleep "$wait" - else - fail "Timed out trying to obtain the cluster leader" - fi -done diff --git a/enos/modules/vault_get_cluster_ips/scripts/get-leader-ipv6.sh b/enos/modules/vault_get_cluster_ips/scripts/get-leader-ipv6.sh deleted file mode 100644 index d5d5a45..0000000 --- a/enos/modules/vault_get_cluster_ips/scripts/get-leader-ipv6.sh +++ /dev/null @@ -1,66 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -e - -function fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$IP_VERSION" ]] && fail "IP_VERSION env variable has not been set" -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" - -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "Unable to locate vault binary at $binpath" - -findLeaderIPV6() { - # Find the leader private IP address - if ip=$($binpath read sys/leader -format=json | jq -r '.data.leader_address | scan("\\[(.+)\\]") | .[0]'); then - if [[ -n "$ip" ]]; then - echo "$ip" - return 0 - fi - fi - - # Some older versions of vault don't support reading sys/leader. Try falling back to the cli status. 
- if ip=$($binpath status -format json | jq -r '.leader_address | scan("\\[(.+)\\]") | .[0]'); then - if [[ -n "$ip" ]]; then - echo "$ip" - return 0 - fi - fi - - return 1 -} - -count=0 -retries=5 -while :; do - # Find the leader private IP address - case $IP_VERSION in - 4) - exit 0 - ;; - 6) - if ip=$(findLeaderIPV6); then - echo "$ip" - exit 0 - fi - ;; - *) - fail "unknown IP_VERSION: $IP_VERSION" - ;; - esac - - wait=$((2 ** count)) - count=$((count + 1)) - if [ "$count" -lt "$retries" ]; then - sleep "$wait" - else - fail "Timed out trying to obtain the cluster leader" - fi -done diff --git a/enos/modules/vault_proxy/main.tf b/enos/modules/vault_proxy/main.tf deleted file mode 100644 index b69b052..0000000 --- a/enos/modules/vault_proxy/main.tf +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - aws = { - source = "hashicorp/aws" - } - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "hosts" { - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) - description = "The Vault cluster instances that were created" -} - -variable "ip_version" { - type = number - description = "The IP version to use for the Vault TCP listeners" - - validation { - condition = contains([4, 6], var.ip_version) - error_message = "The ip_version must be either 4 or 6" - } -} - -variable "vault_addr" { - type = string - description = "The local vault API listen address" -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_proxy_pidfile" { - type = string - description = "The filepath where the Vault Proxy pid file is kept" - default = "/tmp/pidfile" -} - -variable "vault_proxy_port" { - type = number - description = "The Vault Proxy listener port" -} - -variable "vault_root_token" { - type = string - description = "The Vault root 
token" -} - -locals { - vault_proxy_address = "${var.ip_version == 4 ? "127.0.0.1" : "[::1]"}:${var.vault_proxy_port}" -} - -resource "enos_remote_exec" "set_up_approle_auth_and_proxy" { - environment = { - VAULT_ADDR = var.vault_addr - VAULT_INSTALL_DIR = var.vault_install_dir - VAULT_PROXY_ADDRESS = local.vault_proxy_address - VAULT_PROXY_PIDFILE = var.vault_proxy_pidfile - VAULT_TOKEN = var.vault_root_token - } - - scripts = [abspath("${path.module}/scripts/set-up-approle-and-proxy.sh")] - - transport = { - ssh = { - host = var.hosts[0].public_ip - } - } -} - -resource "enos_remote_exec" "use_proxy" { - environment = { - VAULT_INSTALL_DIR = var.vault_install_dir - VAULT_PROXY_PIDFILE = var.vault_proxy_pidfile - VAULT_PROXY_ADDRESS = local.vault_proxy_address - } - - scripts = [abspath("${path.module}/scripts/use-proxy.sh")] - - transport = { - ssh = { - host = var.hosts[0].public_ip - } - } - - depends_on = [ - enos_remote_exec.set_up_approle_auth_and_proxy - ] -} diff --git a/enos/modules/vault_proxy/scripts/set-up-approle-and-proxy.sh b/enos/modules/vault_proxy/scripts/set-up-approle-and-proxy.sh deleted file mode 100644 index a4be7e8..0000000 --- a/enos/modules/vault_proxy/scripts/set-up-approle-and-proxy.sh +++ /dev/null @@ -1,86 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -set -e - -binpath=${VAULT_INSTALL_DIR}/vault - -fail() { - echo "$1" 1>&2 - return 1 -} - -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" - -# If approle was already enabled, disable it as we're about to re-enable it (the || true is so we don't fail if it doesn't already exist) -$binpath auth disable approle || true - -$binpath auth enable approle - -$binpath write auth/approle/role/proxy-role secret_id_ttl=700h token_num_uses=1000 token_ttl=600h token_max_ttl=700h secret_id_num_uses=1000 - -ROLEID=$($binpath read --format=json auth/approle/role/proxy-role/role-id | jq -r '.data.role_id') - -if [[ "$ROLEID" == '' ]]; then - fail "expected ROLEID to be nonempty, but it is empty" -fi - -SECRETID=$($binpath write -f --format=json auth/approle/role/proxy-role/secret-id | jq -r '.data.secret_id') - -if [[ "$SECRETID" == '' ]]; then - fail "vault write -f --format=json auth/approle/role/proxy-role/secret-id did not return a .data.secret_id" -fi - -echo "$ROLEID" > /tmp/role-id -echo "$SECRETID" > /tmp/secret-id - -# Write the Vault Proxy's configuration to /tmp/vault-proxy.hcl -# The Proxy references the Vault server address passed in as $VAULT_ADDR -# The Proxy itself listens at the address passed in as $VAULT_PROXY_ADDRESS -cat > /tmp/vault-proxy.hcl <<- EOM -pid_file = "${VAULT_PROXY_PIDFILE}" - -vault { - address = "${VAULT_ADDR}" - tls_skip_verify = true - retry { - num_retries = 10 - } -} - -api_proxy { - enforce_consistency = "always" - use_auto_auth_token = true -} - -listener "tcp" { - address = "${VAULT_PROXY_ADDRESS}" - tls_disable = true -} - -auto_auth { - method { - type = "approle" - config = { - role_id_file_path = "/tmp/role-id" - secret_id_file_path = "/tmp/secret-id" - } - } - sink { - type = "file" - config = { - path = "/tmp/token" - } 
- } -} -EOM - -# If Proxy is still running from a previous run, kill it -pkill -F "${VAULT_PROXY_PIDFILE}" || true - -# Run proxy in the background -$binpath proxy -config=/tmp/vault-proxy.hcl > /tmp/proxy-logs.txt 2>&1 & diff --git a/enos/modules/vault_proxy/scripts/use-proxy.sh b/enos/modules/vault_proxy/scripts/use-proxy.sh deleted file mode 100644 index 23a62e0..0000000 --- a/enos/modules/vault_proxy/scripts/use-proxy.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -e - -fail() { - echo "$1" 1>&2 - return 1 -} - -[[ -z "$VAULT_PROXY_ADDRESS" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_PROXY_PIDFILE" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" - -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -# Will cause the Vault CLI to communicate with the Vault Proxy, since it -# is listening at port 8100. -export VAULT_ADDR="http://${VAULT_PROXY_ADDRESS}" - -# Explicitly unsetting VAULT_TOKEN to make sure that the Vault Proxy's token -# is used. -unset VAULT_TOKEN - -# Use the Vault CLI to communicate with the Vault Proxy (via the VAULT_ADDR env -# var) to lookup the details of the Proxy's token and make sure that the -# .data.path field contains 'auth/approle/login', thus confirming that the Proxy -# automatically authenticated itself. -if ! 
$binpath token lookup -format=json | jq -Mer --arg expected "auth/approle/login" '.data.path == $expected'; then - fail "expected proxy to automatically authenticate using 'auth/approle/login', got: '$($binpath token lookup -format=json | jq -r '.data.path')'" -fi - -# Now that we're done, kill the proxy -pkill -F "${VAULT_PROXY_PIDFILE}" || true diff --git a/enos/modules/vault_raft_remove_node_and_verify/main.tf b/enos/modules/vault_raft_remove_node_and_verify/main.tf deleted file mode 100644 index ed6842c..0000000 --- a/enos/modules/vault_raft_remove_node_and_verify/main.tf +++ /dev/null @@ -1,125 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - aws = { - source = "hashicorp/aws" - } - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "hosts" { - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) - description = "The vault cluster followers" -} - - -variable "retry_interval" { - type = number - description = "How many seconds to wait between each retry" - default = 2 -} - -variable "timeout" { - type = number - description = "The max number of seconds to wait before timing out" - default = 60 -} - -variable "listener_port" { - type = number - description = "The listener port for vault" -} -variable "vault_leader_host" { - type = object({ - ipv6 = string - private_ip = string - public_ip = string - }) - description = "The leader's host information" -} -variable "vault_addr" { - type = string - description = "The local address to use to query vault" -} -variable "cluster_port" { - type = number - description = "The cluster port for vault" -} -variable "ip_version" { - type = number - description = "The IP version to use for the Vault TCP listeners" - - validation { - condition = contains([4, 6], var.ip_version) - error_message = "The ip_version must be either 4 or 6" - } -} -variable "vault_root_token" { - type = string - 
description = "The vault root token" -} -variable "vault_seal_type" { - type = string - description = "The Vault seal type" -} - -variable "add_back_nodes" { - type = bool - description = "whether to add the nodes back" -} - -variable "vault_unseal_keys" {} - -variable "vault_install_dir" { - type = string - description = "The directory where the vault binary is installed" -} - - -module "choose_follower_to_remove" { - source = "../choose_follower_host" - followers = var.hosts -} - -module "remove_raft_node" { - source = "../vault_raft_remove_peer" - depends_on = [module.choose_follower_to_remove] - - - hosts = module.choose_follower_to_remove.chosen_follower - ip_version = var.ip_version - is_voter = true - operator_instance = var.vault_leader_host.public_ip - vault_addr = var.vault_addr - vault_cluster_addr_port = var.cluster_port - vault_install_dir = var.vault_install_dir - vault_root_token = var.vault_root_token -} - -module "verify_removed" { - source = "../vault_verify_removed_node" - depends_on = [ - module.remove_raft_node - ] - - add_back_nodes = true - cluster_port = var.cluster_port - hosts = module.choose_follower_to_remove.chosen_follower - ip_version = var.ip_version - listener_port = var.listener_port - vault_addr = var.vault_addr - vault_install_dir = var.vault_install_dir - vault_leader_host = var.vault_leader_host - vault_root_token = var.vault_root_token - vault_seal_type = var.vault_seal_type - vault_unseal_keys = var.vault_seal_type == "shamir" ? var.vault_unseal_keys : null -} diff --git a/enos/modules/vault_raft_remove_peer/main.tf b/enos/modules/vault_raft_remove_peer/main.tf deleted file mode 100644 index cbadd3f..0000000 --- a/enos/modules/vault_raft_remove_peer/main.tf +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "hosts" { - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) - description = "The old vault nodes to be removed" -} - -variable "ip_version" { - type = number - description = "The IP version used for the Vault TCP listener" - - validation { - condition = contains([4, 6], var.ip_version) - error_message = "The ip_version must be either 4 or 6" - } -} - -variable "operator_instance" { - type = string - description = "The ip address of the operator (Voter) node" -} - -variable "vault_addr" { - type = string - description = "The local vault API listen address" -} - -variable "vault_cluster_addr_port" { - description = "The Raft cluster address port" - type = string -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_root_token" { - type = string - description = "The vault root token" -} - -variable "is_voter" { - type = bool - default = false - description = "Whether the nodes that are going to be removed are voters" -} - -resource "enos_remote_exec" "vault_raft_remove_peer" { - for_each = var.hosts - - environment = { - REMOVE_VAULT_CLUSTER_ADDR = "${var.ip_version == 4 ? 
"${each.value.private_ip}" : "[${each.value.ipv6}]"}:${var.vault_cluster_addr_port}" - VAULT_TOKEN = var.vault_root_token - VAULT_ADDR = var.vault_addr - VAULT_INSTALL_DIR = var.vault_install_dir - REMOVE_NODE_IS_VOTER = var.is_voter - } - - scripts = [abspath("${path.module}/scripts/raft-remove-peer.sh")] - - transport = { - ssh = { - host = var.operator_instance - } - } -} diff --git a/enos/modules/vault_raft_remove_peer/scripts/raft-remove-peer.sh b/enos/modules/vault_raft_remove_peer/scripts/raft-remove-peer.sh deleted file mode 100644 index 9fdd40b..0000000 --- a/enos/modules/vault_raft_remove_peer/scripts/raft-remove-peer.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -e - -binpath=${VAULT_INSTALL_DIR}/vault -node_addr=${REMOVE_VAULT_CLUSTER_ADDR} - -fail() { - echo "$1" 2>&1 - return 1 -} - -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" - -retry() { - local retries=$1 - shift - local count=0 - - until "$@"; do - exit=$? - wait=$((2 ** count)) - count=$((count + 1)) - if [ "$count" -lt "$retries" ]; then - sleep "$wait" - echo "retry $count" - else - return "$exit" - fi - done - - return 0 -} - -remove_peer() { - if ! 
node_id=$("$binpath" operator raft list-peers -format json | jq -Mr --argjson expected "${REMOVE_NODE_IS_VOTER}" '.data.config.servers[] | select(.address=='\""$node_addr"\"') | select(.voter==$expected) | .node_id'); then - fail "failed to get node id of a node with voter status ${REMOVE_NODE_IS_VOTER}" - fi - - $binpath operator raft remove-peer "$node_id" -} - -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -# Retry a few times because it can take some time for things to settle after autopilot upgrade -retry 5 remove_peer diff --git a/enos/modules/vault_setup_dr_primary/main.tf b/enos/modules/vault_setup_dr_primary/main.tf deleted file mode 100644 index 69e29e6..0000000 --- a/enos/modules/vault_setup_dr_primary/main.tf +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "ip_version" { - type = number - description = "The IP version used for the Vault TCP listener" - - validation { - condition = contains([4, 6], var.ip_version) - error_message = "The ip_version must be either 4 or 6" - } -} - -variable "primary_leader_host" { - type = object({ - ipv6 = string - private_ip = string - public_ip = string - }) - description = "The primary cluster leader host" -} - -variable "vault_addr" { - type = string - description = "The local vault API listen address" -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_root_token" { - type = string - description = "The vault root token" -} - -// Enable DR replication on the primary. This will immediately clear all data in the secondary. 
-resource "enos_remote_exec" "enable_dr_replication" { - environment = { - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/scripts/enable.sh")] - - transport = { - ssh = { - host = var.primary_leader_host.public_ip - } - } -} diff --git a/enos/modules/vault_setup_dr_primary/scripts/enable.sh b/enos/modules/vault_setup_dr_primary/scripts/enable.sh deleted file mode 100644 index b8c987b..0000000 --- a/enos/modules/vault_setup_dr_primary/scripts/enable.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -e - -binpath=${VAULT_INSTALL_DIR}/vault - -fail() { - echo "$1" 1>&2 - return 1 -} - -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -# Activate the primary -$binpath write -f sys/replication/dr/primary/enable diff --git a/enos/modules/vault_setup_perf_primary/main.tf b/enos/modules/vault_setup_perf_primary/main.tf deleted file mode 100644 index 155ab20..0000000 --- a/enos/modules/vault_setup_perf_primary/main.tf +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "ip_version" { - type = number - description = "The IP version used for the Vault TCP listener" - - validation { - condition = contains([4, 6], var.ip_version) - error_message = "The ip_version must be either 4 or 6" - } -} - -variable "primary_leader_host" { - type = object({ - ipv6 = string - private_ip = string - public_ip = string - }) - description = "The primary cluster leader host" -} - -variable "vault_addr" { - type = string - description = "The local vault API listen address" -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_root_token" { - type = string - description = "The vault root token" -} - -resource "enos_remote_exec" "configure_pr_primary" { - environment = { - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/scripts/configure-vault-pr-primary.sh")] - - transport = { - ssh = { - host = var.primary_leader_host.public_ip - } - } -} diff --git a/enos/modules/vault_setup_perf_primary/scripts/configure-vault-pr-primary.sh b/enos/modules/vault_setup_perf_primary/scripts/configure-vault-pr-primary.sh deleted file mode 100644 index 10398b8..0000000 --- a/enos/modules/vault_setup_perf_primary/scripts/configure-vault-pr-primary.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -set -e - -binpath=${VAULT_INSTALL_DIR}/vault - -fail() { - echo "$1" 1>&2 - return 1 -} - -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -# Activate the primary -$binpath write -f sys/replication/performance/primary/enable diff --git a/enos/modules/vault_setup_replication_secondary/main.tf b/enos/modules/vault_setup_replication_secondary/main.tf deleted file mode 100644 index ec1ae64..0000000 --- a/enos/modules/vault_setup_replication_secondary/main.tf +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "ip_version" { - type = number - description = "The IP version used for the Vault TCP listener" - - validation { - condition = contains([4, 6], var.ip_version) - error_message = "The ip_version must be either 4 or 6" - } -} - -variable "secondary_leader_host" { - type = object({ - ipv6 = string - private_ip = string - public_ip = string - }) - description = "The secondary cluster leader host" -} - -variable "replication_type" { - type = string - description = "The type of replication to perform" - - validation { - condition = contains(["dr", "performance"], var.replication_type) - error_message = "The replication_type must be either dr or performance" - } -} - -variable "vault_addr" { - type = string - description = "The local vault API listen address" -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_root_token" { - type = string - description = "The vault root token" -} - -variable "wrapping_token" { - type = string - description = "The wrapping token created on primary cluster" -} - -resource "enos_remote_exec" "enable_replication" { - environment = { - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - } - - 
inline = ["${var.vault_install_dir}/vault write sys/replication/${var.replication_type}/secondary/enable token=${var.wrapping_token}"] - - transport = { - ssh = { - host = var.secondary_leader_host.public_ip - } - } -} - -// Wait for our primary host to be the "leader", which means it's running and all "setup" tasks -// have been completed. We'll have to unseal our follower nodes after this has occurred. -module "wait_for_leader" { - source = "../vault_wait_for_leader" - - depends_on = [ - enos_remote_exec.enable_replication - ] - - hosts = { "0" : var.secondary_leader_host } - ip_version = var.ip_version - vault_addr = var.vault_addr - vault_install_dir = var.vault_install_dir - vault_root_token = var.vault_root_token -} - -// Ensure that our leader is ready to for us to unseal follower nodes. -resource "enos_remote_exec" "wait_for_leader_ready" { - depends_on = [ - module.wait_for_leader, - ] - - environment = { - REPLICATION_TYPE = var.replication_type - RETRY_INTERVAL = 3 // seconds - TIMEOUT_SECONDS = 60 // seconds - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/scripts/wait-for-leader-ready.sh")] - - transport = { - ssh = { - host = var.secondary_leader_host.public_ip - } - } -} diff --git a/enos/modules/vault_setup_replication_secondary/scripts/wait-for-leader-ready.sh b/enos/modules/vault_setup_replication_secondary/scripts/wait-for-leader-ready.sh deleted file mode 100644 index 09837c6..0000000 --- a/enos/modules/vault_setup_replication_secondary/scripts/wait-for-leader-ready.sh +++ /dev/null @@ -1,65 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -set -e - -fail() { - echo "$1" 1>&2 - return 1 -} - -[[ -z "$REPLICATION_TYPE" ]] && fail "REPLICATION_TYPE env variable has not been set" -[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set" -[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set" -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" - -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -export VAULT_FORMAT=json - -replicationStatus() { - $binpath read "sys/replication/${REPLICATION_TYPE}/status" | jq .data -} - -isReady() { - # Find the leader private IP address - local status - if ! status=$(replicationStatus); then - return 1 - fi - - if ! jq -eMc '.state == "stream-wals"' &> /dev/null <<< "$status"; then - echo "DR replication state is not yet running" 1>&2 - echo "DR replication is not yet running, got: $(jq '.state' <<< "$status")" 1>&2 - return 1 - fi - - if ! jq -eMc '.mode == "secondary"' &> /dev/null <<< "$status"; then - echo "DR replication mode is not yet primary, got: $(jq '.mode' <<< "$status")" 1>&2 - return 1 - fi - - if ! jq -eMc '.corrupted_merkle_tree == false' &> /dev/null <<< "$status"; then - echo "DR replication merkle is corrupted" 1>&2 - return 1 - fi - - echo "${REPLICATION_TYPE} primary is ready for followers to be unsealed!" 
1>&2 - return 0 -} - -begin_time=$(date +%s) -end_time=$((begin_time + TIMEOUT_SECONDS)) -while [ "$(date +%s)" -lt "$end_time" ]; do - if isReady; then - exit 0 - fi - - sleep "$RETRY_INTERVAL" -done - -fail "Timed out waiting for ${REPLICATION_TYPE} primary to ready: $(replicationStatus)" diff --git a/enos/modules/vault_step_down/main.tf b/enos/modules/vault_step_down/main.tf deleted file mode 100644 index 4074969..0000000 --- a/enos/modules/vault_step_down/main.tf +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "leader_host" { - type = object({ - private_ip = string - public_ip = string - }) - - description = "The vault cluster host that can be expected as a leader" -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_addr" { - type = string - description = "The local vault API listen address" -} - -variable "vault_root_token" { - type = string - description = "The vault root token" -} - -resource "enos_remote_exec" "vault_operator_step_down" { - environment = { - VAULT_TOKEN = var.vault_root_token - VAULT_ADDR = var.vault_addr - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/scripts/operator-step-down.sh")] - - transport = { - ssh = { - host = var.leader_host.public_ip - } - } -} diff --git a/enos/modules/vault_step_down/scripts/operator-step-down.sh b/enos/modules/vault_step_down/scripts/operator-step-down.sh deleted file mode 100644 index 07f2c38..0000000 --- a/enos/modules/vault_step_down/scripts/operator-step-down.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -set -eou pipefail - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" - -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -eval "$binpath" operator step-down diff --git a/enos/modules/vault_test_ui/main.tf b/enos/modules/vault_test_ui/main.tf deleted file mode 100644 index 9fc16a7..0000000 --- a/enos/modules/vault_test_ui/main.tf +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -locals { - # base test environment excludes the filter argument - ui_test_environment_base = { - VAULT_ADDR = "http://${var.vault_addr}:8200" - VAULT_TOKEN = var.vault_root_token - VAULT_UNSEAL_KEYS = jsonencode(slice(var.vault_unseal_keys, 0, var.vault_recovery_threshold)) - } - ui_test_environment = var.ui_test_filter == null || try(length(trimspace(var.ui_test_filter)) == 0, true) ? local.ui_test_environment_base : merge(local.ui_test_environment_base, { - TEST_FILTER = var.ui_test_filter - }) - # The environment variables need to be double escaped since the process of rendering them to the - # outputs eats the escaping. Therefore double escaping ensures that the values are rendered as - # properly escaped json, i.e. "[\"value\"]" suitable to be parsed as json. - escaped_ui_test_environment = [ - for key, value in local.ui_test_environment : "export ${key}='${value}'" - ] -} - -resource "enos_local_exec" "test_ui" { - count = var.ui_run_tests ? 
1 : 0 - environment = local.ui_test_environment - scripts = ["${path.module}/scripts/test_ui.sh"] -} diff --git a/enos/modules/vault_test_ui/outputs.tf b/enos/modules/vault_test_ui/outputs.tf deleted file mode 100644 index ae4f926..0000000 --- a/enos/modules/vault_test_ui/outputs.tf +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -output "ui_test_stderr" { - value = var.ui_run_tests ? enos_local_exec.test_ui[0].stderr : "No std out tests where not run" -} - -output "ui_test_stdout" { - value = var.ui_run_tests ? enos_local_exec.test_ui[0].stdout : "No std out tests where not run" -} - -output "ui_test_environment" { - value = join(" \\ \n", local.escaped_ui_test_environment) - description = "The environment variables that are required in order to run the test:enos yarn target" -} diff --git a/enos/modules/vault_test_ui/scripts/test_ui.sh b/enos/modules/vault_test_ui/scripts/test_ui.sh deleted file mode 100755 index 9a98243..0000000 --- a/enos/modules/vault_test_ui/scripts/test_ui.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -eux -o pipefail - -project_root=$(git rev-parse --show-toplevel) -pushd "$project_root" > /dev/null - -echo "running test-ember-enos" -make test-ember-enos -popd > /dev/null diff --git a/enos/modules/vault_test_ui/variables.tf b/enos/modules/vault_test_ui/variables.tf deleted file mode 100644 index 99625b2..0000000 --- a/enos/modules/vault_test_ui/variables.tf +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -variable "vault_addr" { - description = "The local vault API listen address" - type = string -} - -variable "vault_root_token" { - description = "The vault root token" - type = string -} - -variable "ui_test_filter" { - type = string - description = "A test filter to limit the ui tests to execute. 
Will be appended to the ember test command as '-f='" - default = null -} - -variable "vault_unseal_keys" { - description = "Base64 encoded recovery keys to use for the seal/unseal test" - type = list(string) -} - -variable "vault_recovery_threshold" { - description = "The number of recovery keys to require when unsealing Vault" - type = string -} - -variable "ui_run_tests" { - type = bool - description = "Whether to run the UI tests or not. If set to false a cluster will be created but no tests will be run" - default = true -} diff --git a/enos/modules/vault_unseal_replication_followers/main.tf b/enos/modules/vault_unseal_replication_followers/main.tf deleted file mode 100644 index 59d34a7..0000000 --- a/enos/modules/vault_unseal_replication_followers/main.tf +++ /dev/null @@ -1,129 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -# This module unseals the replication secondary follower nodes -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "hosts" { - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) - description = "The Vault cluster hosts to unseal" -} - -variable "vault_addr" { - type = string - description = "The local vault API listen address" -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_seal_type" { - type = string - description = "The Vault seal type" -} - -variable "vault_unseal_keys" {} - -locals { - vault_bin_path = "${var.vault_install_dir}/vault" -} - -# After replication is enabled the secondary follower nodes are expected to be sealed, -# so we wait for the secondary follower nodes to update the seal status -resource "enos_remote_exec" "wait_until_sealed" { - for_each = var.hosts - environment = { - VAULT_ADDR = var.vault_addr - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = 
[abspath("${path.module}/scripts/wait-until-sealed.sh")] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} - -# The follower nodes on secondary replication cluster incorrectly report -# unseal progress 2/3 (Issue: https://hashicorp.atlassian.net/browse/VAULT-12309), -# so we restart the followers to allow them to auto-unseal -resource "enos_remote_exec" "restart_followers" { - depends_on = [enos_remote_exec.wait_until_sealed] - for_each = { - for idx, host in var.hosts : idx => host - if var.vault_seal_type != "shamir" - } - - inline = ["sudo systemctl restart vault"] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} - -# We cannot use the vault_unseal resouce due to the known issue -# (https://hashicorp.atlassian.net/browse/VAULT-12311). We use a custom -# script to allow retry for unsealing the secondary followers -resource "enos_remote_exec" "unseal_followers" { - depends_on = [enos_remote_exec.restart_followers] - # The unseal keys are required only for seal_type shamir - for_each = { - for idx, host in var.hosts : idx => host - if var.vault_seal_type == "shamir" - } - - environment = { - VAULT_ADDR = var.vault_addr - VAULT_INSTALL_DIR = var.vault_install_dir - UNSEAL_KEYS = join(",", var.vault_unseal_keys) - } - - scripts = [abspath("${path.module}/scripts/unseal-node.sh")] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} - -# This is a second attempt needed to unseal the secondary followers -# using a custom script due to get past the known issue -# (https://hashicorp.atlassian.net/browse/VAULT-12311) -resource "enos_remote_exec" "unseal_followers_again" { - depends_on = [enos_remote_exec.unseal_followers] - for_each = { - for idx, host in var.hosts : idx => host - if var.vault_seal_type == "shamir" - } - - environment = { - VAULT_ADDR = var.vault_addr - VAULT_INSTALL_DIR = var.vault_install_dir - UNSEAL_KEYS = join(",", var.vault_unseal_keys) - } - - scripts = 
[abspath("${path.module}/scripts/unseal-node.sh")] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} diff --git a/enos/modules/vault_unseal_replication_followers/scripts/unseal-node.sh b/enos/modules/vault_unseal_replication_followers/scripts/unseal-node.sh deleted file mode 100755 index c6dafb0..0000000 --- a/enos/modules/vault_unseal_replication_followers/scripts/unseal-node.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -binpath=${VAULT_INSTALL_DIR}/vault - -IFS="," read -r -a keys <<< "${UNSEAL_KEYS}" - -function fail() { - echo "$1" 1>&2 - exit 1 -} - -count=0 -retries=5 -while :; do - for key in "${keys[@]}"; do - - # Check the Vault seal status - seal_status=$($binpath status -format json | jq '.sealed') - - if [[ "$seal_status" == "true" ]]; then - echo "running unseal with $key count $count with retry $retries" >> /tmp/unseal_script.out - "$binpath" operator unseal "$key" > /dev/null 2>&1 - else - exit 0 - fi - done - - wait=$((1 ** count)) - count=$((count + 1)) - if [ "$count" -lt "$retries" ]; then - sleep "$wait" - else - fail "failed to unseal node" - fi -done diff --git a/enos/modules/vault_unseal_replication_followers/scripts/wait-until-sealed.sh b/enos/modules/vault_unseal_replication_followers/scripts/wait-until-sealed.sh deleted file mode 100644 index a507228..0000000 --- a/enos/modules/vault_unseal_replication_followers/scripts/wait-until-sealed.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -binpath=${VAULT_INSTALL_DIR}/vault - -function fail() { - echo "$1" 1>&2 - exit 1 -} - -count=0 -retries=5 -while :; do - # Check the Vault seal status - seal_status=$($binpath status -format json | jq '.sealed') - - if [[ "$seal_status" == "true" ]]; then - exit 0 - fi - - wait=$((3 ** count)) - count=$((count + 1)) - if [ "$count" -lt "$retries" ]; then - sleep "$wait" - else - fail "Expected node to be sealed" - fi -done diff --git a/enos/modules/vault_upgrade/main.tf b/enos/modules/vault_upgrade/main.tf deleted file mode 100644 index 7f9dec0..0000000 --- a/enos/modules/vault_upgrade/main.tf +++ /dev/null @@ -1,195 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - aws = { - source = "hashicorp/aws" - } - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - version = ">= 0.5.4" - } - } -} - -variable "hosts" { - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) - description = "The vault cluster instances that were created" -} - - -variable "ip_version" { - type = number - description = "The IP version used for the Vault TCP listener" - - validation { - condition = contains([4, 6], var.ip_version) - error_message = "The ip_version must be either 4 or 6" - } -} - -variable "vault_addr" { - type = string - description = "The local vault API listen address" -} - -variable "vault_artifactory_release" { - type = object({ - username = string - token = string - url = string - sha256 = string - }) - description = "Vault release version and edition to install from artifactory.hashicorp.engineering" - default = null -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_local_artifact_path" { - type = string - description = "The path to a locally built vault artifact to install" - default = null -} - -variable 
"vault_root_token" { - type = string - description = "The vault root token" -} - -variable "vault_seal_type" { - type = string - description = "The Vault seal type" -} - -variable "vault_unseal_keys" { - type = list(string) - description = "The keys to use to unseal Vault when not using auto-unseal" - default = null -} - -locals { - vault_bin_path = "${var.vault_install_dir}/vault" -} - -// Upgrade the Vault artifact in-place. With zip bundles we must use the same path of the original -// installation so that we can re-use the systemd unit that enos_vault_start created at -// /etc/systemd/system/vault.service. The path does not matter for package types as the systemd -// unit for the bianry is included and will be installed. -resource "enos_bundle_install" "upgrade_vault_binary" { - for_each = var.hosts - - destination = var.vault_install_dir - artifactory = var.vault_artifactory_release - path = var.vault_local_artifact_path - - transport = { - ssh = { - host = each.value.public_ip - } - } -} - -// We assume that our original Vault cluster used a zip bundle from releases.hashicorp.com and as -// such enos_vault_start will have created a systemd unit for it at /etc/systemd/systemd/vault.service. -// If we're upgrading to a package that contains its own systemd unit we'll need to remove the -// old unit file so that when we restart vault we pick up the new unit that points to the updated -// binary. 
-resource "enos_remote_exec" "maybe_remove_old_unit_file" { - for_each = var.hosts - depends_on = [enos_bundle_install.upgrade_vault_binary] - - environment = { - ARTIFACT_NAME = enos_bundle_install.upgrade_vault_binary[each.key].name - } - - scripts = [abspath("${path.module}/scripts/maybe-remove-old-unit-file.sh")] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} - -module "get_ip_addresses" { - source = "../vault_get_cluster_ips" - - depends_on = [enos_remote_exec.maybe_remove_old_unit_file] - - hosts = var.hosts - ip_version = var.ip_version - vault_addr = var.vault_addr - vault_install_dir = var.vault_install_dir - vault_root_token = var.vault_root_token -} - -module "restart_followers" { - source = "../restart_vault" - hosts = module.get_ip_addresses.follower_hosts - vault_addr = var.vault_addr - vault_install_dir = var.vault_install_dir -} - -resource "enos_vault_unseal" "followers" { - for_each = { - for idx, host in module.get_ip_addresses.follower_hosts : idx => host - if var.vault_seal_type == "shamir" - } - depends_on = [module.restart_followers] - - bin_path = local.vault_bin_path - vault_addr = var.vault_addr - seal_type = var.vault_seal_type - unseal_keys = var.vault_unseal_keys - - transport = { - ssh = { - host = each.value.public_ip - } - } -} - -module "wait_for_followers_unsealed" { - source = "../vault_wait_for_cluster_unsealed" - depends_on = [ - module.restart_followers, - enos_vault_unseal.followers, - ] - - hosts = module.get_ip_addresses.follower_hosts - vault_addr = var.vault_addr - vault_install_dir = var.vault_install_dir -} - -module "restart_leader" { - depends_on = [module.wait_for_followers_unsealed] - source = "../restart_vault" - hosts = module.get_ip_addresses.leader_hosts - vault_addr = var.vault_addr - vault_install_dir = var.vault_install_dir -} - -resource "enos_vault_unseal" "leader" { - count = var.vault_seal_type == "shamir" ? 
1 : 0 - depends_on = [module.restart_leader] - - bin_path = local.vault_bin_path - vault_addr = var.vault_addr - seal_type = var.vault_seal_type - unseal_keys = var.vault_unseal_keys - - transport = { - ssh = { - host = module.get_ip_addresses.leader_public_ip - } - } -} diff --git a/enos/modules/vault_upgrade/scripts/maybe-remove-old-unit-file.sh b/enos/modules/vault_upgrade/scripts/maybe-remove-old-unit-file.sh deleted file mode 100644 index e5c673a..0000000 --- a/enos/modules/vault_upgrade/scripts/maybe-remove-old-unit-file.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$ARTIFACT_NAME" ]] && fail "ARTIFACT_NAME env variable has not been set" - -if [ "${ARTIFACT_NAME##*.}" == "zip" ]; then - echo "Skipped removing unit file because new artifact is a zip bundle" - exit 0 -fi - -# Get the unit file for the vault.service that is running. If it's not in /etc/systemd then it -# should be a package provided unit file so we don't need to delete anything. -# -# Note that we use -p instead of -P so that we support ancient amzn 2 systemctl. -if ! unit_path=$(systemctl show -p FragmentPath vault | cut -d = -f2 2>&1); then - echo "Skipped removing unit file because and existing path could not be found: $unit_path" - exit 0 -fi - -if [[ "$unit_path" == *"/etc/systemd"* ]]; then - if [ -f "$unit_path" ]; then - echo "Removing old systemd unit file: $unit_path" - if ! 
out=$(sudo rm "$unit_path" 2>&1); then - fail "Failed to remove old unit file: $unit_path: $out" - fi - else - echo "Skipped removing old systemd unit file because it no longer exists: $unit_path" - fi -else - echo "Skipped removing old systemd unit file because it was not created in /etc/systemd/: $unit_path" -fi diff --git a/enos/modules/vault_verify_agent_output/main.tf b/enos/modules/vault_verify_agent_output/main.tf deleted file mode 100644 index 68e0484..0000000 --- a/enos/modules/vault_verify_agent_output/main.tf +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "hosts" { - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) - description = "The vault cluster instances that were created" -} - -variable "vault_agent_expected_output" { - type = string - description = "The output that's expected in the rendered template at vault_agent_template_destination" -} - -variable "vault_agent_template_destination" { - type = string - description = "The destination of the template rendered by Agent" -} - -resource "enos_remote_exec" "verify_vault_agent_output" { - environment = { - VAULT_AGENT_TEMPLATE_DESTINATION = var.vault_agent_template_destination - VAULT_AGENT_EXPECTED_OUTPUT = var.vault_agent_expected_output - } - - scripts = [abspath("${path.module}/scripts/verify-vault-agent-output.sh")] - - transport = { - ssh = { - host = var.hosts[0].public_ip - } - } -} diff --git a/enos/modules/vault_verify_agent_output/scripts/verify-vault-agent-output.sh b/enos/modules/vault_verify_agent_output/scripts/verify-vault-agent-output.sh deleted file mode 100644 index 7924e17..0000000 --- a/enos/modules/vault_verify_agent_output/scripts/verify-vault-agent-output.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -set -e - -fail() { - echo "$1" 1>&2 - return 1 -} - -actual_output=$(cat "${VAULT_AGENT_TEMPLATE_DESTINATION}") -if [[ "$actual_output" != "${VAULT_AGENT_EXPECTED_OUTPUT}" ]]; then - fail "expected '${VAULT_AGENT_EXPECTED_OUTPUT}' to be the Agent output, but got: '$actual_output'" -fi diff --git a/enos/modules/vault_verify_autopilot/main.tf b/enos/modules/vault_verify_autopilot/main.tf deleted file mode 100644 index 236acf7..0000000 --- a/enos/modules/vault_verify_autopilot/main.tf +++ /dev/null @@ -1,64 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "hosts" { - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) - description = "The vault cluster instances that were created" -} - -variable "vault_addr" { - type = string - description = "The local vault API listen address" -} - -variable "vault_autopilot_upgrade_status" { - type = string - description = "The autopilot upgrade expected status" -} - -variable "vault_autopilot_upgrade_version" { - type = string - description = "The Vault upgraded version" -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_root_token" { - type = string - description = "The vault root token" -} - -resource "enos_remote_exec" "smoke-verify-autopilot" { - for_each = var.hosts - - environment = { - VAULT_ADDR = var.vault_addr - VAULT_INSTALL_DIR = var.vault_install_dir, - VAULT_TOKEN = var.vault_root_token, - VAULT_AUTOPILOT_UPGRADE_STATUS = var.vault_autopilot_upgrade_status, - VAULT_AUTOPILOT_UPGRADE_VERSION = var.vault_autopilot_upgrade_version, - } - - scripts = [abspath("${path.module}/scripts/smoke-verify-autopilot.sh")] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} diff --git 
a/enos/modules/vault_verify_autopilot/scripts/smoke-verify-autopilot.sh b/enos/modules/vault_verify_autopilot/scripts/smoke-verify-autopilot.sh deleted file mode 100755 index 6408c76..0000000 --- a/enos/modules/vault_verify_autopilot/scripts/smoke-verify-autopilot.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_AUTOPILOT_UPGRADE_STATUS" ]] && fail "VAULT_AUTOPILOT_UPGRADE_STATUS env variable has not been set" -[[ -z "$VAULT_AUTOPILOT_UPGRADE_VERSION" ]] && fail "VAULT_AUTOPILOT_UPGRADE_VERSION env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" -[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" - -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -count=0 -retries=8 -while :; do - state=$($binpath read -format=json sys/storage/raft/autopilot/state) - status="$(jq -r '.data.upgrade_info.status' <<< "$state")" - target_version="$(jq -r '.data.upgrade_info.target_version' <<< "$state")" - - if [ "$status" = "$VAULT_AUTOPILOT_UPGRADE_STATUS" ] && [ "$target_version" = "$VAULT_AUTOPILOT_UPGRADE_VERSION" ]; then - exit 0 - fi - - wait=$((2 ** count)) - count=$((count + 1)) - if [ "$count" -lt "$retries" ]; then - echo "Expected autopilot status to be $VAULT_AUTOPILOT_UPGRADE_STATUS, got $status" - echo "Expected autopilot target_version to be $VAULT_AUTOPILOT_UPGRADE_VERSION, got $target_version" - sleep "$wait" - else - echo "$state" - echo "Expected autopilot status to be $VAULT_AUTOPILOT_UPGRADE_STATUS, got $status" - echo "Expected autopilot target_version to be $VAULT_AUTOPILOT_UPGRADE_VERSION, got $target_version" - fail "Autopilot did not get into the correct status" - fi -done diff --git 
a/enos/modules/vault_verify_billing_start_date/main.tf b/enos/modules/vault_verify_billing_start_date/main.tf deleted file mode 100644 index 0d72fa7..0000000 --- a/enos/modules/vault_verify_billing_start_date/main.tf +++ /dev/null @@ -1,64 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "vault_addr" { - type = string - description = "The local vault API listen address" -} - -variable "vault_cluster_addr_port" { - description = "The Raft cluster address port" - type = string - default = "8201" -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_instance_count" { - type = number - description = "How many vault instances are in the cluster" -} - -variable "hosts" { - type = map(object({ - private_ip = string - public_ip = string - })) - description = "The vault cluster instances that were created" -} - -variable "vault_root_token" { - type = string - description = "The vault root token" -} - -resource "enos_remote_exec" "vault_verify_billing_start_date" { - for_each = var.hosts - - environment = { - VAULT_ADDR = var.vault_addr - VAULT_CLUSTER_ADDR = "${each.value.private_ip}:${var.vault_cluster_addr_port}" - VAULT_INSTALL_DIR = var.vault_install_dir - VAULT_LOCAL_BINARY_PATH = "${var.vault_install_dir}/vault" - VAULT_TOKEN = var.vault_root_token - } - - scripts = [abspath("${path.module}/scripts/verify-billing-start.sh")] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} diff --git a/enos/modules/vault_verify_billing_start_date/scripts/verify-billing-start.sh b/enos/modules/vault_verify_billing_start_date/scripts/verify-billing-start.sh deleted file mode 100644 index c4334cc..0000000 --- a/enos/modules/vault_verify_billing_start_date/scripts/verify-billing-start.sh +++ /dev/null @@ -1,98 +0,0 @@ 
-#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -e - -retry() { - local retries=$1 - shift - local count=0 - - until "$@"; do - exit=$? - count=$((count + 1)) - if [ "$count" -lt "$retries" ]; then - sleep 30 - else - return "$exit" - fi - done - - return 0 -} - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" - -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -enable_debugging() { - echo "Turning debugging on.." - export PS4='+(${BASH_SOURCE}:${LINENO})> ${FUNCNAME[0]:+${FUNCNAME[0]}(): }' - set -x -} - -get_billing_start_date() { - "$binpath" read -format=json sys/internal/counters/config | jq -r ".data.billing_start_timestamp" -} - -get_target_platform() { - uname -s -} - -# Given the date as ARGV 1, return 1 year as a unix date -verify_date_is_in_current_year() { - local billing_start_unix - local one_year_ago_unix - - # Verify if the billing start date is in the latest billing year - case $(get_target_platform) in - Linux) - billing_start_unix=$(TZ=UTC date -d "$1" +'%s') # For "now", use $(date +'%s') - one_year_ago_unix=$(TZ=UTC date -d "1 year ago" +'%s') - ;; - Darwin) - one_year_ago_unix=$(TZ=UTC date -v -1y +'%s') - billing_start_unix=$(TZ=UTC date -j -f "%Y-%m-%dT%H:%M:%SZ" "${1}" +'%s') - ;; - *) - fail "Unsupported target host operating system: $(get_target_platform)" 1>&2 - ;; - esac - - if [ "$billing_start_unix" -gt "$one_year_ago_unix" ]; then - echo "Billing start date $1 has successfully rolled over to current year." 
- exit 0 - else - local vault_ps - vault_ps=$(pgrep vault | xargs) - echo "On version $version, pid $vault_ps, addr $VAULT_ADDR, Billing start date $1 did not roll over to current year" 1>&2 - fi -} - -verify_billing_start_date() { - local billing_start - billing_start=$(get_billing_start_date) - - if verify_date_is_in_current_year "$billing_start"; then - return 0 - fi - - local version - local vault_ps - version=$("$binpath" status -format=json | jq .version) - vault_ps=$(pgrep vault | xargs) - echo "On version $version, pid $vault_ps, addr $VAULT_ADDR, Billing start date $billing_start did not roll over to current year" 1>&2 - return 1 -} - -enable_debugging - -retry 10 verify_billing_start_date diff --git a/enos/modules/vault_verify_default_lcq/main.tf b/enos/modules/vault_verify_default_lcq/main.tf deleted file mode 100644 index bb05726..0000000 --- a/enos/modules/vault_verify_default_lcq/main.tf +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "hosts" { - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) - description = "The vault cluster instances that were created" -} - -variable "retry_interval" { - type = number - description = "How many seconds to wait between each retry" - default = 2 -} - -variable "timeout" { - type = number - description = "The max number of seconds to wait before timing out" - default = 60 -} - -variable "vault_addr" { - type = string - description = "The local vault API listen address" -} - -variable "vault_autopilot_default_max_leases" { - type = string - description = "The autopilot upgrade expected max_leases" -} - -variable "vault_root_token" { - type = string - description = "The vault root token" -} - -resource "enos_remote_exec" "smoke_verify_default_lcq" { - for_each = var.hosts - - environment = { - 
RETRY_INTERVAL = var.retry_interval - TIMEOUT_SECONDS = var.timeout - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - DEFAULT_LCQ = var.vault_autopilot_default_max_leases - } - - scripts = [abspath("${path.module}/scripts/smoke-verify-default-lcq.sh")] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} diff --git a/enos/modules/vault_verify_default_lcq/scripts/smoke-verify-default-lcq.sh b/enos/modules/vault_verify_default_lcq/scripts/smoke-verify-default-lcq.sh deleted file mode 100755 index 64e8e0f..0000000 --- a/enos/modules/vault_verify_default_lcq/scripts/smoke-verify-default-lcq.sh +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -function fail() { - echo "$1" 1>&2 - exit 1 -} - -# Exit early if we haven't been given an expected DEFAULT_LCQ -[[ -z "$DEFAULT_LCQ" ]] && exit 0 - -[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set" -[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set" -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" - -getMaxLeases() { - curl --request GET --header "X-Vault-Token: $VAULT_TOKEN" \ - "$VAULT_ADDR/v1/sys/quotas/lease-count/default" | jq '.data.max_leases // empty' -} - -waitForMaxLeases() { - local max_leases - if ! max_leases=$(getMaxLeases); then - echo "failed getting /v1/sys/quotas/lease-count/default data" 1>&2 - return 1 - fi - - if [[ "$max_leases" == "$DEFAULT_LCQ" ]]; then - echo "$max_leases" - return 0 - else - echo "Expected Default LCQ $DEFAULT_LCQ but got $max_leases" - return 1 - fi -} - -begin_time=$(date +%s) -end_time=$((begin_time + TIMEOUT_SECONDS)) -while [ "$(date +%s)" -lt "$end_time" ]; do - if waitForMaxLeases; then - exit 0 - fi - - sleep "$RETRY_INTERVAL" -done - -fail "Timed out waiting for Default LCQ verification to complete. 
Data:\n\t$(getMaxLeases)" diff --git a/enos/modules/vault_verify_dr_replication/main.tf b/enos/modules/vault_verify_dr_replication/main.tf deleted file mode 100644 index f7f99fd..0000000 --- a/enos/modules/vault_verify_dr_replication/main.tf +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "ip_version" { - type = number - description = "The IP version used for the Vault TCP listener" - - validation { - condition = contains([4, 6], var.ip_version) - error_message = "The ip_version must be either 4 or 6" - } -} - -variable "primary_leader_host" { - type = object({ - ipv6 = string - private_ip = string - public_ip = string - }) - description = "The primary cluster leader host" -} - -variable "secondary_leader_host" { - type = object({ - ipv6 = string - private_ip = string - public_ip = string - }) - description = "The secondary cluster leader host" -} - -variable "vault_addr" { - type = string - description = "The local vault API listen address" -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "wrapping_token" { - type = string - description = "The wrapping token created on primary cluster" - default = null -} - -locals { - primary_leader_addr = var.ip_version == 6 ? var.primary_leader_host.ipv6 : var.primary_leader_host.private_ip - secondary_leader_addr = var.ip_version == 6 ? 
var.secondary_leader_host.ipv6 : var.secondary_leader_host.private_ip - primary_replication_status = jsondecode(enos_remote_exec.verify_replication_status_on_primary.stdout) - secondary_replication_status = jsondecode(enos_remote_exec.verify_replication_status_on_secondary.stdout) -} - -resource "enos_remote_exec" "verify_replication_status_on_primary" { - environment = { - IP_VERSION = var.ip_version - PRIMARY_LEADER_ADDR = local.primary_leader_addr - SECONDARY_LEADER_ADDR = local.secondary_leader_addr - VAULT_ADDR = var.vault_addr - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/scripts/verify-replication-status.sh")] - - transport = { - ssh = { - host = var.primary_leader_host.public_ip - } - } -} - -resource "enos_remote_exec" "verify_replication_status_on_secondary" { - environment = { - IP_VERSION = var.ip_version - PRIMARY_LEADER_ADDR = local.primary_leader_addr - SECONDARY_LEADER_ADDR = local.secondary_leader_addr - VAULT_ADDR = var.vault_addr - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/scripts/verify-replication-status.sh")] - - transport = { - ssh = { - host = var.secondary_leader_host.public_ip - } - } -} - -output "primary_replication_status" { - value = local.primary_replication_status -} - -output "known_primary_cluster_addrs" { - value = local.secondary_replication_status.data.known_primary_cluster_addrs -} - -output "secondary_replication_status" { - value = local.secondary_replication_status -} - -output "primary_replication_data_secondaries" { - value = local.primary_replication_status.data.secondaries -} - -output "secondary_replication_data_primaries" { - value = local.secondary_replication_status.data.primaries -} diff --git a/enos/modules/vault_verify_dr_replication/scripts/verify-replication-status.sh b/enos/modules/vault_verify_dr_replication/scripts/verify-replication-status.sh deleted file mode 100644 index f01a9cd..0000000 --- 
a/enos/modules/vault_verify_dr_replication/scripts/verify-replication-status.sh +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -# This script waits for the replication status to be established -# then verifies the dr replication between primary and -# secondary clusters - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$IP_VERSION" ]] && fail "IP_VERSION env variable has not been set" -[[ -z "$PRIMARY_LEADER_ADDR" ]] && fail "PRIMARY_LEADER_ADDR env variable has not been set" -[[ -z "$SECONDARY_LEADER_ADDR" ]] && fail "SECONDARY_LEADER_ADDR env variable has not been set" -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" - -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -retry() { - local retries=$1 - shift - local count=0 - - until "$@"; do - wait=$((2 ** count)) - count=$((count + 1)) - if [ "$count" -lt "$retries" ]; then - sleep "$wait" - else - fail "$($binpath read -format=json sys/replication/dr/status)" - fi - done -} - -check_dr_status() { - dr_status=$($binpath read -format=json sys/replication/dr/status) - cluster_state=$(jq -r '.data.state' <<< "$dr_status") - connection_mode=$(jq -r '.data.mode' <<< "$dr_status") - - if [[ "$cluster_state" == 'idle' ]]; then - echo "replication cluster state is idle" 1>&2 - return 1 - fi - - if [[ "$connection_mode" == "primary" ]]; then - connection_status=$(jq -r '.data.secondaries[0].connection_status' <<< "$dr_status") - if [[ "$connection_status" == 'disconnected' ]]; then - echo ".data.secondaries[0].connection_status from primary node is 'disconnected'" 1>&2 - return 1 - fi - # Confirm we are in a "running" state for the primary - if [[ "$cluster_state" != "running" ]]; then - echo "replication cluster primary state is not running" 1>&2 - return 1 - fi - 
else - connection_status=$(jq -r '.data.primaries[0].connection_status' <<< "$dr_status") - if [[ "$connection_status" == 'disconnected' ]]; then - echo ".data.primaries[0].connection_status from secondary node is 'disconnected'" 1>&2 - return 1 - fi - # Confirm we are in a "stream-wals" state for the secondary - if [[ "$cluster_state" != "stream-wals" ]]; then - echo "replication cluster primary state is not stream-wals" 1>&2 - return 1 - fi - known_primary_cluster_addrs=$(jq -r '.data.known_primary_cluster_addrs' <<< "$dr_status") - if ! echo "$known_primary_cluster_addrs" | grep -q "$PRIMARY_LEADER_ADDR"; then - echo "$PRIMARY_LEADER_ADDR is not in .data.known_primary_cluster_addrs: $known_primary_cluster_addrs" 1>&2 - return 1 - fi - fi - - echo "$dr_status" - return 0 -} - -if [ "$IP_VERSION" != 4 ] && [ "$IP_VERSION" != 6 ]; then - fail "unsupported IP_VERSION: $IP_VERSION" -fi - -# Retry for a while because it can take some time for replication to sync -retry 10 check_dr_status diff --git a/enos/modules/vault_verify_performance_replication/main.tf b/enos/modules/vault_verify_performance_replication/main.tf deleted file mode 100644 index f7f99fd..0000000 --- a/enos/modules/vault_verify_performance_replication/main.tf +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "ip_version" { - type = number - description = "The IP version used for the Vault TCP listener" - - validation { - condition = contains([4, 6], var.ip_version) - error_message = "The ip_version must be either 4 or 6" - } -} - -variable "primary_leader_host" { - type = object({ - ipv6 = string - private_ip = string - public_ip = string - }) - description = "The primary cluster leader host" -} - -variable "secondary_leader_host" { - type = object({ - ipv6 = string - private_ip = string - public_ip = string - }) - description = "The secondary cluster leader host" -} - -variable "vault_addr" { - type = string - description = "The local vault API listen address" -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "wrapping_token" { - type = string - description = "The wrapping token created on primary cluster" - default = null -} - -locals { - primary_leader_addr = var.ip_version == 6 ? var.primary_leader_host.ipv6 : var.primary_leader_host.private_ip - secondary_leader_addr = var.ip_version == 6 ? 
var.secondary_leader_host.ipv6 : var.secondary_leader_host.private_ip - primary_replication_status = jsondecode(enos_remote_exec.verify_replication_status_on_primary.stdout) - secondary_replication_status = jsondecode(enos_remote_exec.verify_replication_status_on_secondary.stdout) -} - -resource "enos_remote_exec" "verify_replication_status_on_primary" { - environment = { - IP_VERSION = var.ip_version - PRIMARY_LEADER_ADDR = local.primary_leader_addr - SECONDARY_LEADER_ADDR = local.secondary_leader_addr - VAULT_ADDR = var.vault_addr - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/scripts/verify-replication-status.sh")] - - transport = { - ssh = { - host = var.primary_leader_host.public_ip - } - } -} - -resource "enos_remote_exec" "verify_replication_status_on_secondary" { - environment = { - IP_VERSION = var.ip_version - PRIMARY_LEADER_ADDR = local.primary_leader_addr - SECONDARY_LEADER_ADDR = local.secondary_leader_addr - VAULT_ADDR = var.vault_addr - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/scripts/verify-replication-status.sh")] - - transport = { - ssh = { - host = var.secondary_leader_host.public_ip - } - } -} - -output "primary_replication_status" { - value = local.primary_replication_status -} - -output "known_primary_cluster_addrs" { - value = local.secondary_replication_status.data.known_primary_cluster_addrs -} - -output "secondary_replication_status" { - value = local.secondary_replication_status -} - -output "primary_replication_data_secondaries" { - value = local.primary_replication_status.data.secondaries -} - -output "secondary_replication_data_primaries" { - value = local.secondary_replication_status.data.primaries -} diff --git a/enos/modules/vault_verify_performance_replication/scripts/verify-replication-status.sh b/enos/modules/vault_verify_performance_replication/scripts/verify-replication-status.sh deleted file mode 100644 index 57b1b43..0000000 --- 
a/enos/modules/vault_verify_performance_replication/scripts/verify-replication-status.sh +++ /dev/null @@ -1,97 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -# This script waits for the replication status to be established -# then verifies the performance replication between primary and -# secondary clusters - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$IP_VERSION" ]] && fail "IP_VERSION env variable has not been set" -[[ -z "$PRIMARY_LEADER_ADDR" ]] && fail "PRIMARY_LEADER_ADDR env variable has not been set" -[[ -z "$SECONDARY_LEADER_ADDR" ]] && fail "SECONDARY_LEADER_ADDR env variable has not been set" -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" - -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -retry() { - local retries=$1 - shift - local count=0 - - until "$@"; do - wait=$((2 ** count)) - count=$((count + 1)) - if [ "$count" -lt "$retries" ]; then - sleep "$wait" - else - fail "$($binpath read -format=json sys/replication/performance/status)" - fi - done -} - -check_pr_status() { - pr_status=$($binpath read -format=json sys/replication/performance/status) - cluster_state=$(jq -r '.data.state' <<< "$pr_status") - connection_mode=$(jq -r '.data.mode' <<< "$pr_status") - - if [[ "$cluster_state" == 'idle' ]]; then - echo "replication cluster state is idle" 1>&2 - return 1 - fi - - if [[ "$connection_mode" == "primary" ]]; then - connection_status=$(jq -r '.data.secondaries[0].connection_status' <<< "$pr_status") - if [[ "$connection_status" == 'disconnected' ]]; then - echo ".data.secondaries[0].connection_status from primary node is 'disconnected'" 1>&2 - return 1 - fi - if [ "$IP_VERSION" == 4 ]; then - secondary_cluster_addr=$(jq -r '.data.secondaries[0].cluster_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")' <<< 
"$pr_status") - else - secondary_cluster_addr=$(jq -r '.data.secondaries[0].cluster_address | scan("\\[(.+)\\]") | .[0]' <<< "$pr_status") - fi - if [[ "$secondary_cluster_addr" != "$SECONDARY_LEADER_ADDR" ]]; then - echo ".data.secondaries[0].cluster_address should have an IP address of $SECONDARY_LEADER_ADDR, got: $secondary_cluster_addr" 1>&2 - return 1 - fi - else - connection_status=$(jq -r '.data.primaries[0].connection_status' <<< "$pr_status") - if [[ "$connection_status" == 'disconnected' ]]; then - echo ".data.primaries[0].connection_status from secondary node is 'disconnected'" 1>&2 - return 1 - fi - if [ "$IP_VERSION" == 4 ]; then - primary_cluster_addr=$(jq -r '.data.primaries[0].cluster_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")' <<< "$pr_status") - else - primary_cluster_addr=$(jq -r '.data.primaries[0].cluster_address | scan("\\[(.+)\\]") | .[0]' <<< "$pr_status") - fi - if [[ "$primary_cluster_addr" != "$PRIMARY_LEADER_ADDR" ]]; then - echo ".data.primaries[0].cluster_address should have an IP address of $PRIMARY_LEADER_ADDR, got: $primary_cluster_addr" 1>&2 - return 1 - fi - known_primary_cluster_addrs=$(jq -r '.data.known_primary_cluster_addrs' <<< "$pr_status") - if ! echo "$known_primary_cluster_addrs" | grep -q "$PRIMARY_LEADER_ADDR"; then - echo "$PRIMARY_LEADER_ADDR is not in .data.known_primary_cluster_addrs: $known_primary_cluster_addrs" 1>&2 - return 1 - fi - fi - - echo "$pr_status" - return 0 -} - -if [ "$IP_VERSION" != 4 ] && [ "$IP_VERSION" != 6 ]; then - fail "unsupported IP_VERSION: $IP_VERSION" -fi - -# Retry for a while because it can take some time for replication to sync -retry 10 check_pr_status diff --git a/enos/modules/vault_verify_raft_auto_join_voter/main.tf b/enos/modules/vault_verify_raft_auto_join_voter/main.tf deleted file mode 100644 index 826b00b..0000000 --- a/enos/modules/vault_verify_raft_auto_join_voter/main.tf +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "hosts" { - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) - description = "The vault cluster instances that were created" -} - -variable "ip_version" { - type = number - description = "The IP version to use for the Vault TCP listeners" - - validation { - condition = contains([4, 6], var.ip_version) - error_message = "The ip_version must be either 4 or 6" - } -} - -variable "vault_addr" { - type = string - description = "The local vault API listen address" -} - -variable "vault_cluster_addr_port" { - description = "The Raft cluster address port" - type = string -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_root_token" { - type = string - description = "The vault root token" -} - -locals { - cluster_addrs = { - 4 : { for k, v in var.hosts : k => "${v.private_ip}:${var.vault_cluster_addr_port}" }, - 6 : { for k, v in var.hosts : k => "[${v.ipv6}]:${var.vault_cluster_addr_port}" }, - } -} - -resource "enos_remote_exec" "verify_raft_auto_join_voter" { - for_each = var.hosts - - environment = { - VAULT_ADDR = var.vault_addr - VAULT_CLUSTER_ADDR = local.cluster_addrs[var.ip_version][each.key] - VAULT_INSTALL_DIR = var.vault_install_dir - VAULT_LOCAL_BINARY_PATH = "${var.vault_install_dir}/vault" - VAULT_TOKEN = var.vault_root_token - } - - scripts = [abspath("${path.module}/scripts/verify-raft-auto-join-voter.sh")] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} diff --git a/enos/modules/vault_verify_raft_auto_join_voter/scripts/verify-raft-auto-join-voter.sh b/enos/modules/vault_verify_raft_auto_join_voter/scripts/verify-raft-auto-join-voter.sh deleted file mode 100644 index c20aade..0000000 --- 
a/enos/modules/vault_verify_raft_auto_join_voter/scripts/verify-raft-auto-join-voter.sh +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -e - -binpath=${VAULT_INSTALL_DIR}/vault - -fail() { - echo "$1" 2>&1 - return 1 -} - -retry() { - local retries=$1 - shift - local count=0 - - until "$@"; do - exit=$? - wait=$((2 ** count)) - count=$((count + 1)) - if [ "$count" -lt "$retries" ]; then - sleep "$wait" - echo "retry $count" - else - return "$exit" - fi - done - - return 0 -} - -check_voter_status() { - voter_status=$($binpath operator raft list-peers -format json | jq -Mr --argjson expected "true" --arg ADDR "$VAULT_CLUSTER_ADDR" '.data.config.servers[] | select(.address==$ADDR) | .voter == $expected') - - if [[ "$voter_status" != 'true' ]]; then - fail "expected $VAULT_CLUSTER_ADDR to be raft voter, got raft status for node: $($binpath operator raft list-peers -format json | jq -Mr --arg ADDR "$VAULT_CLUSTER_ADDR" '.data.config.servers[] | select(.address==$ADDR)')" - fi -} - -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" - -# Retry a few times because it can take some time for things to settle after -# all the nodes are unsealed -retry 10 check_voter_status diff --git a/enos/modules/vault_verify_removed_node/main.tf b/enos/modules/vault_verify_removed_node/main.tf deleted file mode 100644 index ecbb0ef..0000000 --- a/enos/modules/vault_verify_removed_node/main.tf +++ /dev/null @@ -1,246 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "hosts" { - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) - description = "The vault cluster instances that were removed" -} - -variable "retry_interval" { - type = number - description = "How many seconds to wait between each retry" - default = 2 -} - -variable "timeout" { - type = number - description = "The max number of seconds to wait before timing out" - default = 60 -} - -variable "listener_port" { - type = number - description = "The listener port for vault" -} -variable "vault_leader_host" { - type = object({ - ipv6 = string - private_ip = string - public_ip = string - }) - description = "The leader's host information" -} -variable "vault_addr" { - type = string - description = "The local address to use to query vault" -} -variable "cluster_port" { - type = number - description = "The cluster port for vault" -} - - -variable "ip_version" { - type = number - description = "The IP version to use for the Vault TCP listeners" - - validation { - condition = contains([4, 6], var.ip_version) - error_message = "The ip_version must be either 4 or 6" - } -} -variable "vault_root_token" { - type = string - description = "The vault root token" -} -variable "vault_seal_type" { - type = string - description = "The Vault seal type" -} - -variable "add_back_nodes" { - type = bool - description = "whether to add the nodes back" -} - -variable "vault_unseal_keys" {} - -variable "vault_install_dir" { - type = string - description = "The directory where the vault binary is installed" -} - -resource "enos_remote_exec" "verify_raft_peer_removed" { - for_each = var.hosts - - environment = { - RETRY_INTERVAL = var.retry_interval - TIMEOUT_SECONDS = var.timeout - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - VAULT_INSTALL_DIR = var.vault_install_dir - } - - 
scripts = [abspath("${path.module}/scripts/verify_raft_remove_peer.sh")] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} - -resource "enos_remote_exec" "verify_unseal_fails" { - for_each = { - for idx, host in var.hosts : idx => host - if var.vault_seal_type == "shamir" - } - - environment = { - VAULT_INSTALL_DIR = var.vault_install_dir - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - UNSEAL_KEYS = join(",", var.vault_unseal_keys) - } - - scripts = [abspath("${path.module}/scripts/verify_unseal_fails.sh")] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} - -resource "enos_remote_exec" "verify_rejoin_fails" { - for_each = var.hosts - - environment = { - VAULT_INSTALL_DIR = var.vault_install_dir - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - RETRY_INTERVAL = var.retry_interval - TIMEOUT_SECONDS = var.timeout - VAULT_LEADER_ADDR = "${var.ip_version == 4 ? "${var.vault_leader_host.private_ip}" : "[${var.vault_leader_host.ipv6}]"}:${var.listener_port}" - } - - scripts = [abspath("${path.module}/scripts/verify_manual_rejoin_fails.sh")] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} -module "restart" { - depends_on = [enos_remote_exec.verify_rejoin_fails, enos_remote_exec.verify_raft_peer_removed] - source = "../restart_vault" - hosts = var.hosts - vault_addr = var.vault_addr - vault_install_dir = var.vault_install_dir -} - -resource "enos_remote_exec" "verify_removed_after_restart" { - depends_on = [module.restart] - for_each = var.hosts - - environment = { - RETRY_INTERVAL = var.retry_interval - TIMEOUT_SECONDS = var.timeout - VAULT_INSTALL_DIR = var.vault_install_dir - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - } - - scripts = [abspath("${path.module}/scripts/verify_raft_remove_peer.sh")] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} - -module "stop" { - depends_on = [enos_remote_exec.verify_removed_after_restart] - 
source = "../stop_vault" - count = var.add_back_nodes ? 1 : 0 - - hosts = var.hosts -} - -resource "enos_remote_exec" "delete_data" { - depends_on = [module.stop] - for_each = { - for idx, host in var.hosts : idx => host - if var.add_back_nodes - } - - inline = ["sudo rm -rf /opt/raft/data/*"] - - transport = { - ssh = { - host = each.value.public_ip - } - } - -} - -resource "enos_remote_exec" "start" { - depends_on = [enos_remote_exec.delete_data] - for_each = { - for idx, host in var.hosts : idx => host - if var.add_back_nodes - } - inline = ["sudo systemctl start vault; sleep 5"] - transport = { - ssh = { - host = each.value.public_ip - } - } -} - -resource "enos_vault_unseal" "unseal" { - depends_on = [ - enos_remote_exec.start - ] - for_each = { - for idx, host in var.hosts : idx => host - if var.vault_seal_type == "shamir" && var.add_back_nodes - } - - bin_path = "${var.vault_install_dir}/vault" - vault_addr = var.vault_addr - seal_type = var.vault_seal_type - unseal_keys = var.vault_seal_type != "shamir" ? null : var.vault_unseal_keys - - transport = { - ssh = { - host = each.value.public_ip - } - } -} - -module "verify_rejoin_succeeds" { - source = "../vault_verify_raft_auto_join_voter" - depends_on = [enos_vault_unseal.unseal] - count = var.add_back_nodes ? 1 : 0 - hosts = var.hosts - ip_version = var.ip_version - vault_root_token = var.vault_root_token - vault_install_dir = var.vault_install_dir - vault_addr = var.vault_addr - vault_cluster_addr_port = var.cluster_port -} diff --git a/enos/modules/vault_verify_removed_node/scripts/verify_manual_rejoin_fails.sh b/enos/modules/vault_verify_removed_node/scripts/verify_manual_rejoin_fails.sh deleted file mode 100644 index ed3c359..0000000 --- a/enos/modules/vault_verify_removed_node/scripts/verify_manual_rejoin_fails.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -function fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" -[[ -z "$VAULT_LEADER_ADDR" ]] && fail "VAULT_LEADER_ADDR env variable has not been set" - -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -result=$($binpath operator raft join "$VAULT_LEADER_ADDR") -output=$? -if [ $output -ne 2 ]; then - fail "Joining did not return code 2, instead $output: $result" -fi diff --git a/enos/modules/vault_verify_removed_node/scripts/verify_raft_remove_peer.sh b/enos/modules/vault_verify_removed_node/scripts/verify_raft_remove_peer.sh deleted file mode 100755 index b853512..0000000 --- a/enos/modules/vault_verify_removed_node/scripts/verify_raft_remove_peer.sh +++ /dev/null @@ -1,60 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -function fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set" -[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set" -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" - -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -getSysHealth() { - $binpath read -format=json sys/health sealedcode=299 haunhealthycode=299 removedcode=299 | jq -eMc '.data.removed_from_cluster' -} - -getStatus() { - $binpath status --format=json | jq -eMc '.removed_from_cluster' -} - -expectRemoved() { - local status - if ! 
status=$(getStatus); then - echo "failed to get vault status: $status" - return 1 - fi - if [[ "$status" != "true" ]]; then - echo "unexpected status $status" - return 1 - fi - - local health - health=$(getSysHealth) - if ! health=$(getSysHealth); then - echo "failed to get health: $health" - return 1 - fi - if [[ "$health" != "true" ]]; then - echo "unexpected health $health" - fi - - return 0 -} - -begin_time=$(date +%s) -end_time=$((begin_time + TIMEOUT_SECONDS)) -while [ "$(date +%s)" -lt "$end_time" ]; do - if expectRemoved; then - exit 0 - fi - - sleep "$RETRY_INTERVAL" -done - -fail "Timed out waiting for raft removed status" diff --git a/enos/modules/vault_verify_removed_node/scripts/verify_unseal_fails.sh b/enos/modules/vault_verify_removed_node/scripts/verify_unseal_fails.sh deleted file mode 100644 index c2eded9..0000000 --- a/enos/modules/vault_verify_removed_node/scripts/verify_unseal_fails.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -function fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" - -IFS="," read -r -a keys <<< "${UNSEAL_KEYS}" - -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -result=$($binpath operator unseal "${keys[0]}") -code=$? -if [ $code -eq 0 ]; then - fail "expected unseal to fail but got exit code $code: $result" -fi diff --git a/enos/modules/vault_verify_removed_node_shim/main.tf b/enos/modules/vault_verify_removed_node_shim/main.tf deleted file mode 100644 index fae7944..0000000 --- a/enos/modules/vault_verify_removed_node_shim/main.tf +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - aws = { - source = "hashicorp/aws" - } - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "hosts" { - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) - description = "The vault cluster followers" -} - - -variable "retry_interval" { - type = number - description = "How many seconds to wait between each retry" - default = 2 -} - -variable "timeout" { - type = number - description = "The max number of seconds to wait before timing out" - default = 60 -} - -variable "listener_port" { - type = number - description = "The listener port for vault" -} - -variable "vault_leader_host" { - type = object({ - ipv6 = string - private_ip = string - public_ip = string - }) - description = "The leader's host information" -} - -variable "vault_addr" { - type = string - description = "The local address to use to query vault" -} - -variable "cluster_port" { - type = number - description = "The cluster port for vault" -} - -variable "ip_version" { - type = number - description = "The IP version to use for the Vault TCP listeners" - - validation { - condition = contains([4, 6], var.ip_version) - error_message = "The ip_version must be either 4 or 6" - } -} -variable "vault_root_token" { - type = string - description = "The vault root token" -} -variable "vault_seal_type" { - type = string - description = "The Vault seal type" -} - -variable "add_back_nodes" { - type = bool - description = "whether to add the nodes back" -} - -variable "vault_unseal_keys" {} - -variable "vault_install_dir" { - type = string - description = "The directory where the vault binary is installed" -} \ No newline at end of file diff --git a/enos/modules/vault_verify_replication/main.tf b/enos/modules/vault_verify_replication/main.tf deleted file mode 100644 index f9377d8..0000000 --- a/enos/modules/vault_verify_replication/main.tf +++ /dev/null @@ -1,48 +0,0 @@ 
-# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "hosts" { - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) - description = "The vault cluster instances that were created" -} - -variable "vault_addr" { - type = string - description = "The local vault API listen address" -} - -variable "vault_edition" { - type = string - description = "The vault product edition" - default = null -} - -resource "enos_remote_exec" "smoke-verify-replication" { - for_each = var.hosts - - environment = { - VAULT_ADDR = var.vault_addr - VAULT_EDITION = var.vault_edition - } - - scripts = [abspath("${path.module}/scripts/smoke-verify-replication.sh")] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} diff --git a/enos/modules/vault_verify_replication/scripts/smoke-verify-replication.sh b/enos/modules/vault_verify_replication/scripts/smoke-verify-replication.sh deleted file mode 100644 index 72ecbd2..0000000 --- a/enos/modules/vault_verify_replication/scripts/smoke-verify-replication.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -e - -function fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_EDITION" ]] && fail "VAULT_EDITION env variable has not been set" - -# Replication status endpoint should have data.mode disabled for CE release -status=$(curl "${VAULT_ADDR}/v1/sys/replication/status") -if [ "$VAULT_EDITION" == "ce" ]; then - if [ "$(jq -r '.data.mode' <<< "$status")" != "disabled" ]; then - fail "replication data mode is not disabled for CE release!" - fi -else - if [ "$(jq -r '.data.dr' <<< "$status")" == "" ]; then - fail "DR replication should be available for an ENT release!" 
- fi - if [ "$(jq -r '.data.performance' <<< "$status")" == "" ]; then - fail "Performance replication should be available for an ENT release!" - fi -fi diff --git a/enos/modules/vault_verify_ui/main.tf b/enos/modules/vault_verify_ui/main.tf deleted file mode 100644 index 61d7361..0000000 --- a/enos/modules/vault_verify_ui/main.tf +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "hosts" { - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) - description = "The vault cluster instances that were created" -} - -variable "vault_addr" { - type = string - description = "The local vault API listen address" -} - -resource "enos_remote_exec" "smoke-verify-ui" { - for_each = var.hosts - - environment = { - VAULT_ADDR = var.vault_addr, - } - - scripts = [abspath("${path.module}/scripts/smoke-verify-ui.sh")] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} diff --git a/enos/modules/vault_verify_ui/scripts/smoke-verify-ui.sh b/enos/modules/vault_verify_ui/scripts/smoke-verify-ui.sh deleted file mode 100644 index 7500788..0000000 --- a/enos/modules/vault_verify_ui/scripts/smoke-verify-ui.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -url_effective=$(curl -w "%{url_effective}\n" -I -L -s -S "${VAULT_ADDR}" -o /dev/null) -expected="${VAULT_ADDR}/ui/" -if [ "${url_effective}" != "${expected}" ]; then - fail "Expecting Vault to redirect to UI.\nExpected: ${expected}\nGot: ${url_effective}" -fi - -if curl -s "${VAULT_ADDR}/ui/" | grep -q 'Vault UI is not available'; then - fail "Vault UI is not available" -fi diff --git a/enos/modules/vault_verify_undo_logs/main.tf b/enos/modules/vault_verify_undo_logs/main.tf deleted file mode 100644 index 5547321..0000000 --- a/enos/modules/vault_verify_undo_logs/main.tf +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "expected_state" { - type = number - description = "The expected state to have in vault.core.replication.write_undo_logs telemetry. Must be either 1 for enabled or 0 for disabled." 
- - validation { - condition = contains([0, 1], var.expected_state) - error_message = "The expected_state must be either 0 or 1" - } -} - -variable "hosts" { - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) - description = "The vault cluster target hosts to check" -} - -variable "retry_interval" { - type = number - description = "How many seconds to wait between each retry" - default = 2 -} - -variable "timeout" { - type = number - description = "The max number of seconds to wait before timing out" - default = 60 -} - -variable "vault_addr" { - type = string - description = "The local vault API listen address" -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_root_token" { - type = string - description = "The vault root token" -} - -resource "enos_remote_exec" "smoke-verify-undo-logs" { - for_each = var.hosts - - environment = { - EXPECTED_STATE = var.expected_state - RETRY_INTERVAL = var.retry_interval - TIMEOUT_SECONDS = var.timeout - VAULT_ADDR = var.vault_addr - VAULT_INSTALL_DIR = var.vault_install_dir - VAULT_TOKEN = var.vault_root_token - } - - scripts = [abspath("${path.module}/scripts/smoke-verify-undo-logs.sh")] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} diff --git a/enos/modules/vault_verify_undo_logs/scripts/smoke-verify-undo-logs.sh b/enos/modules/vault_verify_undo_logs/scripts/smoke-verify-undo-logs.sh deleted file mode 100644 index 7736331..0000000 --- a/enos/modules/vault_verify_undo_logs/scripts/smoke-verify-undo-logs.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -function fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$EXPECTED_STATE" ]] && fail "EXPECTED_STAE env variable has not been set" -[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set" -[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set" -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" - -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -begin_time=$(date +%s) -end_time=$((begin_time + TIMEOUT_SECONDS)) -while [ "$(date +%s)" -lt "$end_time" ]; do - state=$($binpath read sys/metrics -format=json | jq -r '.data.Gauges[] | select(.Name == "vault.core.replication.write_undo_logs")') - target_undo_logs_status="$(jq -r '.Value' <<< "$state")" - - if [ "$target_undo_logs_status" == "$EXPECTED_STATE" ]; then - echo "vault.core.replication.write_undo_logs has expected Value: \"${EXPECTED_STATE}\"" - exit 0 - fi - - echo "Waiting for vault.core.replication.write_undo_logs to have Value: \"${EXPECTED_STATE}\"" - sleep "$RETRY_INTERVAL" -done - -fail "Timed out waiting for vault.core.replication.write_undo_logs to have Value: \"${EXPECTED_STATE}\"" diff --git a/enos/modules/vault_verify_version/main.tf b/enos/modules/vault_verify_version/main.tf deleted file mode 100644 index 9c992bb..0000000 --- a/enos/modules/vault_verify_version/main.tf +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "hosts" { - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) - description = "The Vault cluster instances that were created" -} - -variable "vault_addr" { - type = string - description = "The local vault API listen address" -} - -variable "vault_build_date" { - type = string - description = "The Vault artifact build date" - default = null -} - -variable "vault_edition" { - type = string - description = "The Vault product edition" - default = null -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_product_version" { - type = string - description = "The Vault product version" - default = null -} - -variable "vault_revision" { - type = string - description = "The Vault product revision" - default = null -} - -variable "vault_root_token" { - type = string - description = "The Vault root token" - default = null -} - -resource "enos_remote_exec" "verify_cli_version" { - for_each = var.hosts - - environment = { - VAULT_ADDR = var.vault_addr, - VAULT_BUILD_DATE = var.vault_build_date, - VAULT_EDITION = var.vault_edition, - VAULT_INSTALL_DIR = var.vault_install_dir, - VAULT_REVISION = var.vault_revision, - VAULT_TOKEN = var.vault_root_token, - VAULT_VERSION = var.vault_product_version, - } - - scripts = [abspath("${path.module}/scripts/verify-cli-version.sh")] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} - -resource "enos_remote_exec" "verify_cluster_version" { - for_each = var.hosts - - environment = { - VAULT_ADDR = var.vault_addr, - VAULT_BUILD_DATE = var.vault_build_date, - VAULT_TOKEN = var.vault_root_token, - VAULT_VERSION = var.vault_product_version, - } - - scripts = [abspath("${path.module}/scripts/verify-cluster-version.sh")] - - transport = { - 
ssh = { - host = each.value.public_ip - } - } -} diff --git a/enos/modules/vault_verify_version/scripts/verify-cli-version.sh b/enos/modules/vault_verify_version/scripts/verify-cli-version.sh deleted file mode 100644 index ee8be4a..0000000 --- a/enos/modules/vault_verify_version/scripts/verify-cli-version.sh +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -# Verify the Vault "version" includes the correct base version, build date, -# revision SHA, and edition metadata. -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_BUILD_DATE" ]] && fail "VAULT_BUILD_DATE env variable has not been set" -[[ -z "$VAULT_EDITION" ]] && fail "VAULT_EDITION env variable has not been set" -[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" -[[ -z "$VAULT_REVISION" ]] && fail "VAULT_REVISION env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" -[[ -z "$VAULT_VERSION" ]] && fail "VAULT_VERSION env variable has not been set" - -binpath=${VAULT_INSTALL_DIR}/vault -edition=${VAULT_EDITION} -version=${VAULT_VERSION} -sha=${VAULT_REVISION} -build_date=${VAULT_BUILD_DATE} - -test -x "$binpath" || fail "unable to locate vault binary at $binpath" -version_expected="Vault v$version ($sha), built $build_date" - -case "$edition" in - *ce) ;; - *ent) ;; - *ent.hsm) version_expected="$version_expected (cgo)" ;; - *ent.fips1403) version_expected="$version_expected (cgo)" ;; - *ent.hsm.fips1403) version_expected="$version_expected (cgo)" ;; - *) fail "Unknown Vault edition: ($edition)" ;; -esac - -version_expected_nosha=$(echo "$version_expected" | awk '!($3="")' | sed 's/ / /' | sed -e 's/[[:space:]]*$//') -version_output=$("$binpath" version) - -if [[ "$version_output" == "$version_expected_nosha" ]] || [[ "$version_output" == "$version_expected" 
]]; then - echo "Version verification succeeded!" -else - msg="$(printf "\nThe Vault cluster did not match the expected version, expected:\n%s\nor\n%s\ngot:\n%s" "$version_expected" "$version_expected_nosha" "$version_output")" - if type diff &> /dev/null; then - # Diff exits non-zero if we have a diff, which we want, so we'll guard against failing early. - if ! version_diff=$(diff <(echo "$version_expected") <(echo "$version_output") -u -L expected -L got); then - msg="$(printf "\nThe Vault cluster did not match the expected version:\n%s" "$version_diff")" - fi - fi - - fail "$msg" -fi diff --git a/enos/modules/vault_verify_version/scripts/verify-cluster-version.sh b/enos/modules/vault_verify_version/scripts/verify-cluster-version.sh deleted file mode 100644 index f0afee6..0000000 --- a/enos/modules/vault_verify_version/scripts/verify-cluster-version.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -# Verify the Vault "version" includes the correct base version, build date, -# revision SHA, and edition metadata. -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_BUILD_DATE" ]] && fail "VAULT_BUILD_DATE env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" -[[ -z "$VAULT_VERSION" ]] && fail "VAULT_VERSION env variable has not been set" - -# The sys/version-history endpoint only includes major.minor.patch, any other semver fields need to -# be stripped out. -if ! version=$(cut -d + -f1 <<< "$VAULT_VERSION" | cut -d - -f1); then - fail "failed to parse the expected version: $version" -fi - -if ! vh=$(curl -s -X LIST -H "X-Vault-Token: $VAULT_TOKEN" http://127.0.0.1:8200/v1/sys/version-history | jq -eMc '.data'); then - fail "failed to Vault cluster version history: $vh" -fi - -if ! 
out=$(jq -eMc --arg version "$version" '.keys | contains([$version])' <<< "$vh"); then - fail "cluster version history does not include our expected version: expected: $version, versions: $(jq -eMc '.keys' <<< "$vh"): output: $out" -fi - -if ! out=$(jq -eMc --arg version "$version" --arg bd "$VAULT_BUILD_DATE" '.key_info[$version].build_date == $bd' <<< "$vh"); then - fail "cluster version history build date is not the expected date: expected: true, expected date: $VAULT_BUILD_DATE, key_info: $(jq -eMc '.key_info' <<< "$vh"), output: $out" -fi - -printf "Cluster version information is valid!: %s\n" "$vh" diff --git a/enos/modules/vault_wait_for_cluster_unsealed/main.tf b/enos/modules/vault_wait_for_cluster_unsealed/main.tf deleted file mode 100644 index ce9ee25..0000000 --- a/enos/modules/vault_wait_for_cluster_unsealed/main.tf +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "hosts" { - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) - description = "The vault cluster instances that were created" -} - -variable "retry_interval" { - type = number - description = "How many seconds to wait between each retry" - default = 2 -} - -variable "timeout" { - type = number - description = "The max number of seconds to wait before timing out" - default = 60 -} - -variable "vault_addr" { - type = string - description = "The local vault API listen address" -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -resource "enos_remote_exec" "verify_node_unsealed" { - for_each = var.hosts - - scripts = [abspath("${path.module}/scripts/verify-vault-node-unsealed.sh")] - - environment = { - HOST_IPV4 = each.value.public_ip - HOST_IPV6 = each.value.ipv6 - RETRY_INTERVAL = var.retry_interval - 
TIMEOUT_SECONDS = var.timeout - VAULT_ADDR = var.vault_addr - VAULT_INSTALL_DIR = var.vault_install_dir - } - - transport = { - ssh = { - host = each.value.public_ip - } - } -} diff --git a/enos/modules/vault_wait_for_cluster_unsealed/scripts/verify-vault-node-unsealed.sh b/enos/modules/vault_wait_for_cluster_unsealed/scripts/verify-vault-node-unsealed.sh deleted file mode 100644 index 1bce520..0000000 --- a/enos/modules/vault_wait_for_cluster_unsealed/scripts/verify-vault-node-unsealed.sh +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set" -[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set" -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" - -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -getStatus() { - $binpath status -format json -} - -isUnsealed() { - local status - if ! status=$(getStatus); then - echo "failed to get vault status" 1>&2 - return 1 - fi - - if status=$(jq -Mre --argjson expected "false" '.sealed == $expected' <<< "$status"); then - echo "vault is unsealed: $status" - return 0 - fi - - echo "vault is sealed" 1>&2 - return 1 -} - -begin_time=$(date +%s) -end_time=$((begin_time + TIMEOUT_SECONDS)) -while [ "$(date +%s)" -lt "$end_time" ]; do - echo "waiting for vault to be unsealed..." 
- - if isUnsealed; then - exit 0 - fi - - sleep "$RETRY_INTERVAL" -done - -if [ -n "$HOST_IPV6" ]; then - fail "timed out waiting for Vault cluster on ${HOST_IPV6} to be unsealed" -fi -if [ -n "$HOST_IPV4" ]; then - fail "timed out waiting for Vault cluster on ${HOST_IPV4} to be unsealed" -fi -fail "timed out waiting for Vault cluster to be unsealed" diff --git a/enos/modules/vault_wait_for_leader/main.tf b/enos/modules/vault_wait_for_leader/main.tf deleted file mode 100644 index 7c29280..0000000 --- a/enos/modules/vault_wait_for_leader/main.tf +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "hosts" { - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) - description = "The vault cluster hosts that can be expected as a leader" -} - -variable "ip_version" { - type = number - description = "The IP version used for the Vault TCP listener" - - validation { - condition = contains([4, 6], var.ip_version) - error_message = "The ip_version must be either 4 or 6" - } -} - -variable "vault_addr" { - type = string - description = "The local vault API listen address" -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_root_token" { - type = string - description = "The vault root token" -} - -variable "timeout" { - type = number - description = "The max number of seconds to wait before timing out" - default = 60 -} - -variable "retry_interval" { - type = number - description = "How many seconds to wait between each retry" - default = 2 -} - -locals { - ipv6s = [for k, v in values(tomap(var.hosts)) : tostring(v["ipv6"])] - private_ips = [for k, v in values(tomap(var.hosts)) : tostring(v["private_ip"])] -} - -resource "enos_remote_exec" "wait_for_leader_in_hosts" { - 
environment = { - IP_VERSION = var.ip_version - TIMEOUT_SECONDS = var.timeout - RETRY_INTERVAL = var.retry_interval - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - VAULT_INSTANCE_IPV6S = jsonencode(local.ipv6s) - VAULT_INSTANCE_PRIVATE_IPS = jsonencode(local.private_ips) - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/scripts/wait-for-leader.sh")] - - transport = { - ssh = { - host = var.hosts[0].public_ip - } - } -} diff --git a/enos/modules/vault_wait_for_leader/scripts/wait-for-leader.sh b/enos/modules/vault_wait_for_leader/scripts/wait-for-leader.sh deleted file mode 100644 index dc97cb6..0000000 --- a/enos/modules/vault_wait_for_leader/scripts/wait-for-leader.sh +++ /dev/null @@ -1,96 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set" -[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set" -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" - -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -findLeaderInPrivateIPs() { - # Find the leader private IP address - local leader_private_ip - if ! leader_private_ip=$($binpath read sys/leader -format=json | jq -er '.data.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")'); then - # Some older versions of vault don't support reading sys/leader. Fallback to the cli status. - if ! leader_private_ip=$($binpath status -format json | jq -er '.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")'); then - return 1 - fi - fi - - if isIn=$(jq -er --arg ip "$leader_private_ip" 'map(select(. 
== $ip)) | length == 1' <<< "$VAULT_INSTANCE_PRIVATE_IPS"); then - if [[ "$isIn" == "true" ]]; then - echo "$leader_private_ip" - return 0 - fi - fi - - return 1 -} - -findLeaderInIPV6s() { - # Find the leader private IP address - local leader_ipv6 - if ! leader_ipv6=$($binpath read sys/leader -format=json | jq -er '.data.leader_address | scan("\\[(.+)\\]") | .[0]'); then - # Some older versions of vault don't support reading sys/leader. Fallback to the cli status. - if ! leader_ipv6=$($binpath status -format json | jq -er '.leader_address | scan("\\[(.+)\\]") | .[0]'); then - return 1 - fi - fi - - if isIn=$(jq -er --arg ip "$leader_ipv6" 'map(select(. == $ip)) | length == 1' <<< "$VAULT_INSTANCE_IPV6S"); then - if [[ "$isIn" == "true" ]]; then - echo "$leader_ipv6" - return 0 - fi - fi - - return 1 -} - -begin_time=$(date +%s) -end_time=$((begin_time + TIMEOUT_SECONDS)) -while [ "$(date +%s)" -lt "$end_time" ]; do - # Use the default package manager of the current Linux distro to install packages - case $IP_VERSION in - 4) - [[ -z "$VAULT_INSTANCE_PRIVATE_IPS" ]] && fail "VAULT_INSTANCE_PRIVATE_IPS env variable has not been set" - if findLeaderInPrivateIPs; then - exit 0 - fi - ;; - 6) - [[ -z "$VAULT_INSTANCE_IPV6S" ]] && fail "VAULT_INSTANCE_IPV6S env variable has not been set" - if findLeaderInIPV6s; then - exit 0 - fi - ;; - *) - fail "No matching package manager provided." - ;; - esac - - sleep "$RETRY_INTERVAL" -done - -case $IP_VERSION in - 4) - fail "Timed out waiting for one of $VAULT_INSTANCE_PRIVATE_IPS to be leader." - ;; - 6) - fail "Timed out waiting for one of $VAULT_INSTANCE_IPV6S to be leader." - ;; - *) - fail "Timed out waiting for leader" - ;; -esac diff --git a/enos/modules/vault_wait_for_seal_rewrap/main.tf b/enos/modules/vault_wait_for_seal_rewrap/main.tf deleted file mode 100644 index 920672a..0000000 --- a/enos/modules/vault_wait_for_seal_rewrap/main.tf +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "hosts" { - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) - description = "The vault cluster hosts that can be expected as a leader" -} - -variable "retry_interval" { - type = number - description = "How many seconds to wait between each retry" - default = 2 -} - -variable "timeout" { - type = number - description = "The max number of seconds to wait before timing out" - default = 60 -} - -variable "vault_addr" { - type = string - description = "The local vault API listen address" -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_root_token" { - type = string - description = "The vault root token" -} - -locals { - private_ips = [for k, v in values(tomap(var.hosts)) : tostring(v["private_ip"])] - first_key = element(keys(enos_remote_exec.wait_for_seal_rewrap_to_be_completed), 0) -} - -resource "enos_remote_exec" "wait_for_seal_rewrap_to_be_completed" { - for_each = var.hosts - environment = { - RETRY_INTERVAL = var.retry_interval - TIMEOUT_SECONDS = var.timeout - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/scripts/wait-for-seal-rewrap.sh")] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} - -output "stdout" { - value = enos_remote_exec.wait_for_seal_rewrap_to_be_completed[local.first_key].stdout -} - -output "stderr" { - value = enos_remote_exec.wait_for_seal_rewrap_to_be_completed[local.first_key].stdout -} diff --git a/enos/modules/vault_wait_for_seal_rewrap/scripts/wait-for-seal-rewrap.sh b/enos/modules/vault_wait_for_seal_rewrap/scripts/wait-for-seal-rewrap.sh deleted file mode 100644 index 67bc144..0000000 --- 
a/enos/modules/vault_wait_for_seal_rewrap/scripts/wait-for-seal-rewrap.sh +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set" -[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set" -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" - -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -getRewrapData() { - $binpath read sys/sealwrap/rewrap -format=json | jq -eMc '.data' -} - -waitForRewrap() { - local data - if ! data=$(getRewrapData); then - echo "failed getting /v1/sys/sealwrap/rewrap data" 1>&2 - return 1 - fi - - if ! jq -e '.is_running == false' <<< "$data" &> /dev/null; then - echo "rewrap is running" 1>&2 - return 1 - fi - - if ! jq -e '.entries.failed == 0' <<< "$data" &> /dev/null; then - local entries - entries=$(jq -Mc '.entries.failed' <<< "$data") - echo "rewrap has $entries failed entries" 1>&2 - return 1 - fi - - if ! jq -e '.entries.processed == .entries.succeeded' <<< "$data" &> /dev/null; then - local processed - local succeeded - processed=$(jq -Mc '.entries.processed' <<< "$data") - succeeded=$(jq -Mc '.entries.succeeded' <<< "$data") - echo "the number of processed entries ($processed) does not equal then number of succeeded ($succeeded)" 1>&2 - return 1 - fi - - if jq -e '.entries.processed == 0' <<< "$data" &> /dev/null; then - echo "A seal rewrap has not been started yet. Number of processed entries is zero and a rewrap is not yet running." 
- return 1 - fi - - echo "$data" - return 0 -} - -begin_time=$(date +%s) -end_time=$((begin_time + TIMEOUT_SECONDS)) -while [ "$(date +%s)" -lt "$end_time" ]; do - if waitForRewrap; then - exit 0 - fi - - sleep "$RETRY_INTERVAL" -done - -fail "Timed out waiting for seal rewrap to be completed. Data:\n\t$(getRewrapData)" diff --git a/enos/modules/verify_log_secrets/main.tf b/enos/modules/verify_log_secrets/main.tf deleted file mode 100644 index ef53bf0..0000000 --- a/enos/modules/verify_log_secrets/main.tf +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "audit_log_file_path" { - type = string -} - -variable "leader_host" { - type = object({ - ipv6 = string - private_ip = string - public_ip = string - }) - description = "The cluster leader host. Only the leader write to the audit log" -} - -variable "radar_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" - default = "/opt/vault-radar/bin" -} - -variable "radar_license_path" { - description = "The path to a vault-radar license file" -} - -variable "radar_version" { - description = "The version of Vault Radar to install" - default = "0.24.0" # must be >= 0.17.0 - // NOTE: A `semverconstraint` validation condition would be very useful here - // when we get around to exporting our custom enos funcs in the provider. 
-} - -variable "vault_addr" { - type = string - description = "The local vault API listen address" -} - -variable "vault_root_token" { - type = string - description = "The vault root token" -} - -variable "vault_unit_name" { - type = string - description = "The vault unit name" - default = "vault" -} - -resource "enos_bundle_install" "radar" { - destination = var.radar_install_dir - - release = { - product = "vault-radar" - version = var.radar_version - // Radar doesn't have CE/Ent editions. CE is equivalent to no edition metadata. - edition = "ce" - } - - transport = { - ssh = { - host = var.leader_host.public_ip - } - } -} - -resource "enos_remote_exec" "scan_logs_for_secrets" { - depends_on = [ - enos_bundle_install.radar, - ] - - environment = { - AUDIT_LOG_FILE_PATH = var.audit_log_file_path - VAULT_ADDR = var.vault_addr - VAULT_RADAR_INSTALL_DIR = var.radar_install_dir - VAULT_RADAR_LICENSE = file(var.radar_license_path) - VAULT_TOKEN = var.vault_root_token - VAULT_UNIT_NAME = var.vault_unit_name - } - - scripts = [abspath("${path.module}/scripts/scan_logs_for_secrets.sh")] - - transport = { - ssh = { - host = var.leader_host.public_ip - } - } -} diff --git a/enos/modules/verify_log_secrets/scripts/scan_logs_for_secrets.sh b/enos/modules/verify_log_secrets/scripts/scan_logs_for_secrets.sh deleted file mode 100644 index f1b3b83..0000000 --- a/enos/modules/verify_log_secrets/scripts/scan_logs_for_secrets.sh +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -fail() { - echo "$1" 1>&2 - exit 1 -} - -verify_radar_scan_output_file() { - # Given a file with a radar scan output, filter out tagged false positives and verify that no - # other secrets remain. - if ! 
jq -eMcn '[inputs] | [.[] | select(.type != "aws_access_key_id") | select((.tags == null) or (.tags | contains(["ignore_rule"]) | not ))] | length == 0' < "$2"; then - found=$(jq -eMn '[inputs] | [.[] | select(.type != "aws_access_key_id") | select((.tags == null) or (.tags | contains(["ignore_rule"]) | not ))]' < "$2") - fail "failed to radar secrets output: vault radar detected secrets in $1!: $found" - fi -} - -set -e - -[[ -z "$AUDIT_LOG_FILE_PATH" ]] && fail "AUDIT_LOG_FILE_PATH env variable has not been set" -[[ -z "$VAULT_RADAR_INSTALL_DIR" ]] && fail "VAULT_RADAR_INSTALL_DIR env variable has not been set" -# Radar implicitly requires the following for creating the index and running radar itself -[[ -z "$VAULT_RADAR_LICENSE" ]] && fail "VAULT_RADAR_LICENSE env variable has not been set" -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" -[[ -z "$VAULT_UNIT_NAME" ]] && fail "VAULT_UNIT_NAME env variable has not been set" - -radar_bin_path=${VAULT_RADAR_INSTALL_DIR}/vault-radar -test -x "$radar_bin_path" || fail "failed to scan vault audit log: unable to locate radar binary at $radar_bin_path" - -# Make sure our audit log file exists. -if [ ! -f "$AUDIT_LOG_FILE_PATH" ]; then - fail "failed to scan vault audit log: no audit logifile found at $AUDIT_LOG_FILE_PATH" -fi - -# Create a readable copy of the audit log. -if ! sudo cp "$AUDIT_LOG_FILE_PATH" audit.log; then - fail "failed to scan vault audit log: could not copy audit log for scanning" -fi - -if ! sudo chmod +r audit.log; then - fail "failed to scan vault audit log: could not make audit log copy readable" -fi - -# Create a radar index file of our KVv2 secret values. -if ! out=$($radar_bin_path index vault --offline --disable-ui --outfile index.jsonl 2>&1); then - fail "failed to generate vault-radar index of vault cluster: $out" -fi - -# Write our ignore rules to avoid known false positives. 
-mkdir -p "$HOME/.hashicorp/vault-radar" -cat >> "$HOME/.hashicorp/vault-radar/ignore.yaml" << EOF -- secret_values: - - "hmac-sha256:*" -EOF - -# Scan the audit log for known secrets via the audit log and other secrets using radars built-in -# secret types. -if ! out=$("$radar_bin_path" scan file --offline --disable-ui -p audit.log --index-file index.jsonl -f json -o audit-secrets.json 2>&1); then - fail "failed to scan vault audit log: vault-radar scan file failed: $out" -fi - -verify_radar_scan_output_file vault-audit-log audit-secrets.json - -# Scan the vault journal for known secrets via the audit log and other secrets using radars built-in -# secret types. -if ! out=$(sudo journalctl --no-pager -u "$VAULT_UNIT_NAME" -a | "$radar_bin_path" scan file --offline --disable-ui --index-file index.jsonl -f json -o journal-secrets.json 2>&1); then - fail "failed to scan vault journal: vault-radar scan file failed: $out" -fi - -verify_radar_scan_output_file vault-journal journal-secrets.json diff --git a/enos/modules/verify_seal_type/main.tf b/enos/modules/verify_seal_type/main.tf deleted file mode 100644 index e8d8189..0000000 --- a/enos/modules/verify_seal_type/main.tf +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "hosts" { - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) - description = "The vault cluster instances that were created" -} - -variable "seal_type" { - type = string - description = "The expected seal type" - default = "shamir" -} - - -variable "vault_addr" { - type = string - description = "The local vault API listen address" -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -resource "enos_remote_exec" "verify_seal_type" { - for_each = var.hosts - - scripts = [abspath("${path.module}/scripts/verify-seal-type.sh")] - - environment = { - VAULT_ADDR = var.vault_addr - VAULT_INSTALL_DIR = var.vault_install_dir - EXPECTED_SEAL_TYPE = var.seal_type - } - - transport = { - ssh = { - host = each.value.public_ip - } - } -} diff --git a/enos/modules/verify_seal_type/scripts/verify-seal-type.sh b/enos/modules/verify_seal_type/scripts/verify-seal-type.sh deleted file mode 100644 index 82a7985..0000000 --- a/enos/modules/verify_seal_type/scripts/verify-seal-type.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$EXPECTED_SEAL_TYPE" ]] && fail "EXPECTED_SEAL_TYPE env variable has not been set" -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" - -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -count=0 -retries=2 -while :; do - if seal_status=$($binpath read sys/seal-status -format=json); then - if jq -Mer --arg expected "$EXPECTED_SEAL_TYPE" '.data.type == $expected' <<< "$seal_status" &> /dev/null; then - exit 0 - fi - fi - - wait=$((2 ** count)) - count=$((count + 1)) - if [ "$count" -lt "$retries" ]; then - sleep "$wait" - else - printf "Seal Status: %s\n" "$seal_status" - got=$(jq -Mer '.data.type' <<< "$seal_status") - fail "Expected seal type to be $EXPECTED_SEAL_TYPE, got: $got" - fi -done diff --git a/enos/modules/verify_secrets_engines/modules/create/auth.tf b/enos/modules/verify_secrets_engines/modules/create/auth.tf deleted file mode 100644 index f81f389..0000000 --- a/enos/modules/verify_secrets_engines/modules/create/auth.tf +++ /dev/null @@ -1,233 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -locals { - // Variables - auth_userpass_path = "userpass" # auth/userpass - user_name = "testuser" # auth/userpass/users/testuser - user_password = "passtestuser1" # auth/userpass/login/passtestuser1 - user_policy_name = "reguser" # sys/policy/reguser - - auth_ldap_path = "ldap" # auth/ldap - - // Response data - user_login_data = jsondecode(enos_remote_exec.auth_login_testuser.stdout) - sys_auth_data = jsondecode(enos_remote_exec.read_sys_auth.stdout).data - - // Output - auth_output = { - sys = local.sys_auth_data - userpass = { - path = local.auth_userpass_path - user = { - name = local.user_name - password = local.user_password - policy_name = local.user_policy_name - login = local.user_login_data - } - } - } -} - -output "auth" { - value = local.auth_output -} - -# Enable userpass auth -resource "enos_remote_exec" "auth_enable_userpass" { - environment = { - AUTH_METHOD = "userpass" - AUTH_PATH = local.auth_userpass_path - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/../../scripts/auth-enable.sh")] - - transport = { - ssh = { - host = var.leader_host.public_ip - } - } -} - -# Get the sys/auth data after enabling our auth method -resource "enos_remote_exec" "read_sys_auth" { - depends_on = [ - enos_remote_exec.auth_enable_userpass, - ] - environment = { - REQPATH = "sys/auth" - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/../../scripts/read.sh")] - - transport = { - ssh = { - host = var.leader_host.public_ip - } - } -} - -# Create a default policy for our users that allows them to read and list. 
-resource "enos_remote_exec" "policy_read_reguser" { - environment = { - POLICY_NAME = local.user_policy_name - POLICY_CONFIG = <<-EOF - path "*" { - capabilities = ["read", "list"] - } - EOF - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/../../scripts/policy-write.sh")] - - transport = { - ssh = { - host = var.leader_host.public_ip - } - } -} - -# Create our user -resource "enos_remote_exec" "auth_create_testuser" { - depends_on = [ - enos_remote_exec.auth_enable_userpass, - enos_remote_exec.policy_read_reguser, - ] - - environment = { - AUTH_PATH = local.auth_userpass_path - PASSWORD = local.user_password - POLICIES = local.user_policy_name - USERNAME = local.user_name - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/../../scripts/auth-userpass-write.sh")] - - transport = { - ssh = { - host = var.leader_host.public_ip - } - } -} - -resource "enos_remote_exec" "auth_login_testuser" { - depends_on = [ - // Don't try to login until created our user and added it to the kv_writers group - enos_remote_exec.auth_create_testuser, - enos_remote_exec.identity_group_kv_writers, - ] - - environment = { - AUTH_PATH = local.auth_userpass_path - PASSWORD = local.user_password - USERNAME = local.user_name - VAULT_ADDR = var.vault_addr - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/../../scripts/auth-userpass-login.sh")] - - transport = { - ssh = { - host = var.leader_host.public_ip - } - } -} - -# Enable ldap auth -resource "enos_remote_exec" "auth_enable_ldap" { - environment = { - AUTH_METHOD = "ldap" - AUTH_PATH = local.auth_ldap_path - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/../../scripts/auth-enable.sh")] - - 
transport = { - ssh = { - host = var.leader_host.public_ip - } - } -} - -# Write the initial ldap config -# This is a one time write to the leader node. -resource "enos_remote_exec" "auth_write_ldap_config" { - depends_on = [ - enos_remote_exec.auth_enable_ldap - ] - - environment = { - AUTH_PATH = local.auth_ldap_path - GROUPATTR = "memberOf" - GROUPDN = "CN=Users,DC=corp,DC=example,DC=net" - INSECURE_TLS = "true" - POLICIES = local.auth_ldap_path - UPNDOMAIN = "corp.example.net" - URL = "ldaps://ldap.example.com" - USERATTR = "sAMAccountName" - USERDN = "CN=Users,DC=corp,DC=example,DC=net" - VAULT_ADDR = var.vault_addr - VAULT_INSTALL_DIR = var.vault_install_dir - VAULT_TOKEN = var.vault_root_token - } - - scripts = [abspath("${path.module}/../../scripts/auth-ldap-write.sh")] - - transport = { - ssh = { - host = var.leader_host.public_ip - } - } -} - -# Update the ldap config. Choose a random node each time to ensure that writes -# to all nodes are forwarded correctly and behave as we expect. 
-resource "random_integer" "auth_update_ldap_config_idx" { - min = 0 - max = length(var.hosts) - 1 -} - -resource "enos_remote_exec" "auth_update_ldap_config" { - depends_on = [ - enos_remote_exec.auth_write_ldap_config - ] - - environment = { - AUTH_PATH = local.auth_ldap_path - GROUPATTR = "memberOf" - GROUPDN = "CN=Users,DC=corp,DC=example,DC=net" - INSECURE_TLS = "true" - POLICIES = local.auth_ldap_path - UPNDOMAIN = "corp.example.net" - URL = "ldaps://ldap2.example.com" - USERATTR = "sAMAccountName" - USERDN = "CN=Users,DC=corp,DC=example,DC=net" - VAULT_ADDR = var.vault_addr - VAULT_INSTALL_DIR = var.vault_install_dir - VAULT_TOKEN = var.vault_root_token - } - - scripts = [abspath("${path.module}/../../scripts/auth-ldap-write.sh")] - - transport = { - ssh = { - host = var.hosts[random_integer.auth_update_ldap_config_idx.result].public_ip - } - } -} diff --git a/enos/modules/verify_secrets_engines/modules/create/aws.tf b/enos/modules/verify_secrets_engines/modules/create/aws.tf deleted file mode 100644 index a96902e..0000000 --- a/enos/modules/verify_secrets_engines/modules/create/aws.tf +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -module "create_aws_secrets_engine" { - count = var.create_aws_secrets_engine ? 1 : 0 - source = "./aws" - - hosts = var.hosts - leader_host = var.leader_host - vault_addr = var.vault_addr - vault_root_token = var.vault_root_token - vault_install_dir = var.vault_install_dir -} - -locals { - aws_state = var.create_aws_secrets_engine ? module.create_aws_secrets_engine[0].state : null -} - -output "aws" { - value = local.aws_state -} diff --git a/enos/modules/verify_secrets_engines/modules/create/aws/aws.tf b/enos/modules/verify_secrets_engines/modules/create/aws/aws.tf deleted file mode 100644 index afd9db9..0000000 --- a/enos/modules/verify_secrets_engines/modules/create/aws/aws.tf +++ /dev/null @@ -1,158 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "hosts" { - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) - description = "The Vault cluster instances that were created" -} - -variable "leader_host" { - type = object({ - ipv6 = string - private_ip = string - public_ip = string - }) - - description = "Vault cluster leader host" -} - -variable "vault_addr" { - type = string - description = "The local vault API listen address" -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_root_token" { - type = string - description = "The Vault root token" - default = null -} - -locals { - // Variables - aws_mount = "aws" - vault_aws_role = "enos_test_role" - my_email = split("/", data.aws_caller_identity.current.arn)[2] - - // State output - state = { - aws_role = data.aws_iam_role.premade_demo_assumed_role.name - aws_role_arn = data.aws_iam_role.premade_demo_assumed_role.arn - aws_policy_arn = data.aws_iam_policy.premade_demo_user_policy.arn - aws_user_name = aws_iam_user.aws_enos_test_user.name - aws_access_key = aws_iam_access_key.aws_enos_test_user.id - aws_secret_key = aws_iam_access_key.aws_enos_test_user.secret - mount = local.aws_mount - region = data.aws_region.current.name - vault_aws_role = local.vault_aws_role - } -} - -output "state" { - value = local.state -} - -resource "random_id" "unique_suffix" { - byte_length = 4 -} - -data "aws_caller_identity" "current" {} - -data "aws_region" "current" {} - -# The "DemoUser" policy is a predefined policy created by the security team. -# This policy grants the necessary AWS permissions required for role generation via Vault. 
-# Reference: https://github.com/hashicorp/honeybee-templates/blob/main/templates/iam_policy/DemoUser.yaml -data "aws_iam_policy" "premade_demo_user_policy" { - name = "DemoUser" -} - -# This role was provisioned by the security team using the repository referenced below. -# This role includes the necessary policies to enable AWS credential generation and rotation via Vault. -# Reference: https://github.com/hashicorp/honeybee-templates/blob/main/templates/iam_role/vault-assumed-role-credentials-demo.yaml -data "aws_iam_role" "premade_demo_assumed_role" { - name = "vault-assumed-role-credentials-demo" -} - -# Creating new test user -resource "aws_iam_user" "aws_enos_test_user" { - name = "demo-${local.my_email}-${random_id.unique_suffix.hex}" - permissions_boundary = data.aws_iam_policy.premade_demo_user_policy.arn - force_destroy = true -} - -resource "aws_iam_user_policy_attachment" "aws_enos_test_user" { - user = aws_iam_user.aws_enos_test_user.name - policy_arn = data.aws_iam_policy.premade_demo_user_policy.arn -} - -resource "aws_iam_access_key" "aws_enos_test_user" { - user = aws_iam_user.aws_enos_test_user.name - lifecycle { - prevent_destroy = false - } -} - -# Enable AWS secrets engine -resource "enos_remote_exec" "secrets_enable_aws_secret" { - environment = { - ENGINE = local.aws_mount - MOUNT = local.aws_mount - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/../../../scripts/secrets-enable.sh")] - - transport = { - ssh = { - host = var.leader_host.public_ip - } - } -} - -# Generate AWS Credentials -resource "enos_remote_exec" "aws_generate_roles" { - depends_on = [enos_remote_exec.secrets_enable_aws_secret] - for_each = var.hosts - - environment = { - AWS_REGION = local.state.region - ENGINE = local.aws_mount - MOUNT = local.aws_mount - AWS_USER_NAME = local.state.aws_user_name - AWS_POLICY_ARN = local.state.aws_policy_arn - AWS_ROLE_ARN = 
local.state.aws_role_arn - AWS_ACCESS_KEY_ID = local.state.aws_access_key - AWS_SECRET_ACCESS_KEY = local.state.aws_secret_key - VAULT_AWS_ROLE = local.vault_aws_role - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/../../../scripts/aws-generate-roles.sh")] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} diff --git a/enos/modules/verify_secrets_engines/modules/create/identity.tf b/enos/modules/verify_secrets_engines/modules/create/identity.tf deleted file mode 100644 index 6ee8810..0000000 --- a/enos/modules/verify_secrets_engines/modules/create/identity.tf +++ /dev/null @@ -1,380 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -locals { - // Variables - identity_entity_metadata = { - "organization" = "vault", - "team" = "qt", - } - group_name_oidc_readers = "oidc_token_readers" // identity/group/name/oidc_token_readers - oidc_config_issuer_url = "https://enos.example.com:1234" // identity/oidc/config - oidc_key_algorithms = ["RS256", "RS384", "RS512", "ES256", "ES384", "ES512", "EdDSA"] - oidc_key_algorithm = local.oidc_key_algorithms[random_integer.oidc_key_algorithm_idx.result] - oidc_key_name = "reguser" // identity/oidc/key/reguser - oidc_key_rotation_period = 86400 // 24h - oidc_key_verification_ttl = 21600 // 6h - oidc_role_name = "reguser" // identity/oidc/role/reguser - oidc_role_ttl = 3600 // 1h - oidc_client_id = "reguser" // optional client ID but required if we want to scope a key and role together without a * - oidc_token_read_policy_name = "oidc_token_reader" - - // Response data - oidc_token_data = jsondecode(enos_remote_exec.oidc_token.stdout).data - group_oidc_token_readers_data = jsondecode(enos_remote_exec.identity_group_oidc_token_readers.stdout).data - initial_oidc_token_data = jsondecode(enos_remote_exec.initial_oidc_token.stdout).data - user_entity_data = 
jsondecode(enos_remote_exec.identity_entity_testuser.stdout).data - user_entity_alias_data = jsondecode(enos_remote_exec.identity_entity_alias_testuser.stdout).data - - // Output - identity_output = { - oidc = { - reader_group_name = local.group_name_oidc_readers - reader_policy_name = local.oidc_token_read_policy_name - issuer_url = local.oidc_config_issuer_url - key_algorithm = local.oidc_key_algorithm - key_name = local.oidc_key_name - key_rotation_period = local.oidc_key_rotation_period - key_verification_ttl = local.oidc_key_verification_ttl - role_name = local.oidc_role_name - role_ttl = local.oidc_role_ttl - client_id = local.oidc_client_id - } - identity_entity_metadata = local.identity_entity_metadata - data = { - entity = local.user_entity_data - entity_alias = local.user_entity_alias_data - oidc_token = local.oidc_token_data - group_oidc_token_readers = local.group_oidc_token_readers_data - } - } -} - -output "identity" { - value = local.identity_output -} - -// Get a random index for our algorithms so that we can randomly rotate through the various algorithms -resource "random_integer" "oidc_key_algorithm_idx" { - min = 0 - max = length(local.oidc_key_algorithms) - 1 -} - -// Create identity entity for our user -resource "enos_remote_exec" "identity_entity_testuser" { - depends_on = [ - enos_remote_exec.auth_create_testuser, - ] - - environment = { - REQPATH = "identity/entity" - PAYLOAD = jsonencode({ - name = local.user_name, - metadata = local.identity_entity_metadata, - policies = [local.user_policy_name], - }) - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/../../scripts/write-payload.sh")] - - transport = { - ssh = { - host = var.leader_host.public_ip - } - } -} - -// Create identity entity alias for our user -resource "enos_remote_exec" "identity_entity_alias_testuser" { - environment = { - REQPATH = "identity/entity-alias" - PAYLOAD = 
jsonencode({ - name = local.user_name, - canonical_id = local.user_entity_data.id - mount_accessor = local.sys_auth_data["${local.auth_userpass_path}/"].accessor - policies = [local.user_policy_name], - }) - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/../../scripts/write-payload.sh")] - - transport = { - ssh = { - host = var.leader_host.public_ip - } - } -} - -// Configure our the oidc token backend -resource "enos_remote_exec" "oidc_config" { - environment = { - REQPATH = "identity/oidc/config" - PAYLOAD = jsonencode({ - issuer = local.oidc_config_issuer_url, - }) - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/../../scripts/write-payload.sh")] - - transport = { - ssh = { - host = var.leader_host.public_ip - } - } -} - -// Create a named key that can sign OIDC identity token -resource "enos_remote_exec" "oidc_key" { - environment = { - REQPATH = "identity/oidc/key/${local.oidc_key_name}" - PAYLOAD = jsonencode({ - allowed_client_ids = [local.oidc_client_id], - algorithm = local.oidc_key_algorithm, - rotation_period = local.oidc_key_rotation_period, - verification_ttl = local.oidc_key_verification_ttl, - }) - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/../../scripts/write-payload.sh")] - - transport = { - ssh = { - host = var.leader_host.public_ip - } - } -} - -// Create a role with custom template and that uses the named key -resource "enos_remote_exec" "oidc_role" { - depends_on = [ - enos_remote_exec.oidc_key, - ] - - environment = { - REQPATH = "identity/oidc/role/${local.oidc_role_name}" - PAYLOAD = jsonencode({ - client_id = local.oidc_client_id, - key = local.oidc_key_name, - ttl = local.oidc_role_ttl - template = base64encode(<<-EOF - { - 
"team": {{identity.entity.metadata.team}}, - "organization": {{identity.entity.metadata.organization}}, - "groups": {{identity.entity.groups.names}} - } - EOF - ), - }) - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/../../scripts/write-payload.sh")] - - transport = { - ssh = { - host = var.leader_host.public_ip - } - } -} - -// Create a group policy that allows "reading" a new signed OIDC token -resource "enos_remote_exec" "policy_write_oidc_token" { - depends_on = [ - enos_remote_exec.secrets_enable_kv_secret, - ] - environment = { - POLICY_NAME = local.oidc_token_read_policy_name - POLICY_CONFIG = <<-EOF - path "identity/oidc/token/*" { - capabilities = ["read"] - } - EOF - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/../../scripts/policy-write.sh")] - - transport = { - ssh = { - host = var.leader_host.public_ip - } - } -} - -// Create oidc_token_readers group and add our testuser to it -resource "enos_remote_exec" "identity_group_oidc_token_readers" { - environment = { - REQPATH = "identity/group" - PAYLOAD = jsonencode({ - member_entity_ids = [local.user_entity_data.id], - name = local.group_name_oidc_readers, - policies = [local.oidc_token_read_policy_name], - }) - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/../../scripts/write-payload.sh")] - - transport = { - ssh = { - host = var.leader_host.public_ip - } - } -} - -// Generate a signed ID token with our test user -resource "enos_remote_exec" "initial_oidc_token" { - depends_on = [ - enos_remote_exec.oidc_role, - ] - - environment = { - REQPATH = "identity/oidc/token/${local.oidc_role_name}" - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = local.user_login_data.auth.client_token - 
VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/../../scripts/read.sh")] - - transport = { - ssh = { - host = var.leader_host.public_ip - } - } -} - -// Introspect the signed ID and verify it -resource "enos_remote_exec" "oidc_introspect_initial_token" { - environment = { - ASSERT_ACTIVE = true // Our token should be "active" - PAYLOAD = jsonencode({ - token = local.initial_oidc_token_data.token, - client_id = local.initial_oidc_token_data.client_id - }) - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/../../scripts/identity-oidc-introspect-token.sh")] - - transport = { - ssh = { - host = var.leader_host.public_ip - } - } -} - -// Rotate the key with a zero TTL to force expiration -resource "enos_remote_exec" "oidc_key_rotate" { - depends_on = [ - enos_remote_exec.oidc_introspect_initial_token, - ] - - environment = { - REQPATH = "identity/oidc/key/${local.oidc_key_name}/rotate" - PAYLOAD = jsonencode({ - verification_ttl = 0, - }) - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/../../scripts/write-payload.sh")] - - transport = { - ssh = { - host = var.leader_host.public_ip - } - } -} - -// Introspect it again to make sure it's no longer active -resource "enos_remote_exec" "oidc_introspect_initial_token_post_rotate" { - depends_on = [ - enos_remote_exec.oidc_key_rotate, - ] - - environment = { - ASSERT_ACTIVE = false // Our token should not be "active" - PAYLOAD = jsonencode({ - token = local.initial_oidc_token_data.token, - client_id = local.initial_oidc_token_data.client_id - }) - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/../../scripts/identity-oidc-introspect-token.sh")] - - transport = { - ssh = { - host = 
var.leader_host.public_ip - } - } -} - -// Generate a new token that we can use later -resource "enos_remote_exec" "oidc_token" { - depends_on = [ - enos_remote_exec.oidc_introspect_initial_token_post_rotate, - ] - - environment = { - REQPATH = "identity/oidc/token/${local.oidc_role_name}" - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = local.user_login_data.auth.client_token - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/../../scripts/read.sh")] - - transport = { - ssh = { - host = var.leader_host.public_ip - } - } -} - -// Introspect the new token to ensure it's active before we export it for user later via outputs -resource "enos_remote_exec" "oidc_introspect_token" { - environment = { - ASSERT_ACTIVE = true // Our token should be "active" - PAYLOAD = jsonencode({ - token = local.oidc_token_data.token, - client_id = local.oidc_token_data.client_id - }) - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/../../scripts/identity-oidc-introspect-token.sh")] - - transport = { - ssh = { - host = var.leader_host.public_ip - } - } -} diff --git a/enos/modules/verify_secrets_engines/modules/create/kv.tf b/enos/modules/verify_secrets_engines/modules/create/kv.tf deleted file mode 100644 index e2174c6..0000000 --- a/enos/modules/verify_secrets_engines/modules/create/kv.tf +++ /dev/null @@ -1,131 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -locals { - // Variables - group_name_kv_writers = "kv_writers" # identity/group/name/kv_writers - kv_mount = "secret" # secret - kv_write_policy_name = "kv_writer" # sys/policy/kv_writer - kv_test_data_path_prefix = "smoke" - kv_test_data_value_prefix = "fire" - kv_version = 2 - - // Response data - identity_group_kv_writers_data = jsondecode(enos_remote_exec.identity_group_kv_writers.stdout).data - - // Output - kv_output = { - reader_group_name = local.group_name_kv_writers - writer_policy_name = local.kv_write_policy_name - mount = local.kv_mount - version = local.kv_version - test = { - path_prefix = local.kv_test_data_path_prefix - value_prefix = local.kv_test_data_value_prefix - } - data = { - identity_group_kv_writers = local.identity_group_kv_writers_data - } - } -} - -output "kv" { - value = local.kv_output -} - -# Enable kv secrets engine -resource "enos_remote_exec" "secrets_enable_kv_secret" { - environment = { - ENGINE = "kv" - MOUNT = local.kv_mount - SECRETS_META = "-version=${local.kv_version}" - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/../../scripts/secrets-enable.sh")] - - transport = { - ssh = { - host = var.leader_host.public_ip - } - } -} - -# Create a group policy that allows writing to our kv store -resource "enos_remote_exec" "policy_write_kv_writer" { - depends_on = [ - enos_remote_exec.secrets_enable_kv_secret, - ] - environment = { - POLICY_NAME = local.kv_write_policy_name - POLICY_CONFIG = <<-EOF - path "${local.kv_mount}/*" { - capabilities = ["create", "update", "read", "delete", "list"] - } - EOF - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/../../scripts/policy-write.sh")] - - transport = { - ssh = { - host = var.leader_host.public_ip - } - } -} - -# Create kv_writers group and 
add our testuser to it -resource "enos_remote_exec" "identity_group_kv_writers" { - environment = { - REQPATH = "identity/group" - PAYLOAD = jsonencode({ - member_entity_ids = [local.user_entity_data.id], // Created in identity.tf - name = local.group_name_kv_writers, - policies = [local.kv_write_policy_name], - }) - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/../../scripts/write-payload.sh")] - - transport = { - ssh = { - host = var.leader_host.public_ip - } - } -} - -// Write test data as our user. -resource "enos_remote_exec" "kv_put_secret_test" { - depends_on = [ - enos_remote_exec.secrets_enable_kv_secret, - enos_remote_exec.policy_write_kv_writer, - enos_remote_exec.identity_group_kv_writers - ] - for_each = var.hosts - - environment = { - MOUNT = local.kv_mount - SECRET_PATH = "${local.kv_test_data_path_prefix}-${each.key}" - KEY = "${local.kv_test_data_path_prefix}-${each.key}" - VALUE = "${local.kv_test_data_value_prefix}-${each.key}" - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = local.user_login_data.auth.client_token - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/../../scripts/kv-put.sh")] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} diff --git a/enos/modules/verify_secrets_engines/modules/create/main.tf b/enos/modules/verify_secrets_engines/modules/create/main.tf deleted file mode 100644 index 265d738..0000000 --- a/enos/modules/verify_secrets_engines/modules/create/main.tf +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "create_aws_secrets_engine" { - type = bool - description = <<-EOF - Whether or not we'll verify the AWS secrets engine. 
Due to the various security requirements in - Doormat managed AWS accounts, our implementation of the verification requires us to use a - an external 'DemoUser' role and associated policy in order to create additional users. This is - configured in vault_ci and vault_enterprise_ci but does not exist in all AWS accounts. As such, - it's disabled by default. - See: https://github.com/hashicorp/honeybee-templates/blob/main/templates/iam_policy/DemoUser.yaml - EOF - default = false -} - -variable "hosts" { - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) - description = "The Vault cluster instances that were created" -} - -variable "leader_host" { - type = object({ - ipv6 = string - private_ip = string - public_ip = string - }) - - description = "Vault cluster leader host" -} - -variable "vault_addr" { - type = string - description = "The local vault API listen address" -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_root_token" { - type = string - description = "The Vault root token" - default = null -} - -output "state" { - value = { - auth = local.auth_output - identity = local.identity_output - kv = local.kv_output - pki = local.pki_output - aws = local.aws_state - } -} diff --git a/enos/modules/verify_secrets_engines/modules/create/pki.tf b/enos/modules/verify_secrets_engines/modules/create/pki.tf deleted file mode 100644 index 1a69ca4..0000000 --- a/enos/modules/verify_secrets_engines/modules/create/pki.tf +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -locals { - // Variables - pki_mount = "pki" # secret - pki_issuer_name = "issuer" - pki_common_name = "common" - pki_default_ttl = "72h" - pki_test_dir = "tmp-test-results" - - // Output - pki_output = { - common_name = local.pki_common_name - issuer_name = local.pki_issuer_name - mount = local.pki_mount - ttl = local.pki_default_ttl - test_dir = local.pki_test_dir - } - -} - -output "pki" { - value = local.pki_output -} - -# Enable pki secrets engine -resource "enos_remote_exec" "secrets_enable_pki_secret" { - environment = { - ENGINE = local.pki_mount - MOUNT = local.pki_mount - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/../../scripts/secrets-enable.sh")] - - transport = { - ssh = { - host = var.leader_host.public_ip - } - } -} - -# Issue RSA Certificate -resource "enos_remote_exec" "pki_issue_certificates" { - depends_on = [enos_remote_exec.secrets_enable_pki_secret] - for_each = var.hosts - - environment = { - MOUNT = local.pki_mount - VAULT_ADDR = var.vault_addr - VAULT_INSTALL_DIR = var.vault_install_dir - VAULT_TOKEN = var.vault_root_token - COMMON_NAME = local.pki_common_name - ISSUER_NAME = local.pki_issuer_name - TTL = local.pki_default_ttl - TEST_DIR = local.pki_test_dir - } - - scripts = [abspath("${path.module}/../../scripts/pki-issue-certificates.sh")] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} diff --git a/enos/modules/verify_secrets_engines/modules/read/auth.tf b/enos/modules/verify_secrets_engines/modules/read/auth.tf deleted file mode 100644 index 2ea06de..0000000 --- a/enos/modules/verify_secrets_engines/modules/read/auth.tf +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -locals { - user_login_data = jsondecode(enos_remote_exec.auth_login_testuser.stdout) -} - -resource "enos_remote_exec" "auth_login_testuser" { - environment = { - AUTH_PATH = var.create_state.auth.userpass.path - PASSWORD = var.create_state.auth.userpass.user.password - USERNAME = var.create_state.auth.userpass.user.name - VAULT_ADDR = var.vault_addr - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/../../scripts/auth-userpass-login.sh")] - - transport = { - ssh = { - host = var.hosts[0].public_ip - } - } -} diff --git a/enos/modules/verify_secrets_engines/modules/read/aws.tf b/enos/modules/verify_secrets_engines/modules/read/aws.tf deleted file mode 100644 index e2e9a8a..0000000 --- a/enos/modules/verify_secrets_engines/modules/read/aws.tf +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -module "verify_aws_secrets_engine" { - count = var.verify_aws_secrets_engine ? 1 : 0 - source = "./aws" - - create_state = var.create_state - vault_addr = var.vault_addr - vault_root_token = var.vault_root_token - vault_install_dir = var.vault_install_dir - verify_aws_engine_creds = var.verify_aws_engine_creds - - hosts = var.hosts -} diff --git a/enos/modules/verify_secrets_engines/modules/read/aws/aws.tf b/enos/modules/verify_secrets_engines/modules/read/aws/aws.tf deleted file mode 100644 index 5a3dbe4..0000000 --- a/enos/modules/verify_secrets_engines/modules/read/aws/aws.tf +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "hosts" { - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) - description = "The Vault cluster instances that were created" -} - -variable "create_state" { - description = "The state of the secrets engines from the 'create' module" -} - -variable "vault_addr" { - type = string - description = "The local vault API listen address" -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_root_token" { - type = string - description = "The Vault root token" - default = null -} - -variable "verify_aws_engine_creds" { - type = bool -} - -# Verify AWS Engine -resource "enos_remote_exec" "aws_verify_new_creds" { - for_each = var.hosts - - environment = { - AWS_REGION = "${var.create_state.aws.region}" - MOUNT = "${var.create_state.aws.mount}" - AWS_USER_NAME = "${var.create_state.aws.aws_user_name}" - AWS_ACCESS_KEY_ID = "${var.create_state.aws.aws_access_key}" - AWS_SECRET_ACCESS_KEY = "${var.create_state.aws.aws_secret_key}" - VAULT_AWS_ROLE = "${var.create_state.aws.vault_aws_role}" - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = var.vault_root_token - VAULT_INSTALL_DIR = var.vault_install_dir - VERIFY_AWS_ENGINE_CERTS = var.verify_aws_engine_creds - } - - scripts = [abspath("${path.module}/../../../scripts/aws-verify-new-creds.sh")] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} diff --git a/enos/modules/verify_secrets_engines/modules/read/identity.tf b/enos/modules/verify_secrets_engines/modules/read/identity.tf deleted file mode 100644 index 0f34796..0000000 --- a/enos/modules/verify_secrets_engines/modules/read/identity.tf +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -// Read our testuser identity entity and verify that it matches our expected alias, groups, policy, -// and metadata. -resource "enos_remote_exec" "identity_verify_entity" { - for_each = var.hosts - - environment = { - ENTITY_ALIAS_ID = var.create_state.identity.data.entity_alias.id - ENTITY_GROUP_IDS = jsonencode([ - var.create_state.kv.data.identity_group_kv_writers.id, - var.create_state.identity.data.group_oidc_token_readers.id, - ]) - ENTITY_METADATA = jsonencode(var.create_state.identity.identity_entity_metadata) - ENTITY_NAME = var.create_state.identity.data.entity.name - ENTITY_POLICIES = jsonencode([var.create_state.auth.userpass.user.policy_name]) - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = local.user_login_data.auth.client_token - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/../../scripts/identity-verify-entity.sh")] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} - -// Read our OIDC key and role and verify that they have the correct configuration, TTLs, and algorithms. 
-resource "enos_remote_exec" "identity_verify_oidc" { - for_each = var.hosts - - environment = { - OIDC_ISSUER_URL = var.create_state.identity.oidc.issuer_url - OIDC_KEY_NAME = var.create_state.identity.oidc.key_name - OIDC_KEY_ROTATION_PERIOD = var.create_state.identity.oidc.key_rotation_period - OIDC_KEY_VERIFICATION_TTL = var.create_state.identity.oidc.key_verification_ttl - OIDC_KEY_ALGORITHM = var.create_state.identity.oidc.key_algorithm - OIDC_ROLE_NAME = var.create_state.identity.oidc.role_name - OIDC_ROLE_TTL = var.create_state.identity.oidc.role_ttl - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = local.user_login_data.auth.client_token - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/../../scripts/identity-verify-oidc.sh")] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} diff --git a/enos/modules/verify_secrets_engines/modules/read/kv.tf b/enos/modules/verify_secrets_engines/modules/read/kv.tf deleted file mode 100644 index 6983e74..0000000 --- a/enos/modules/verify_secrets_engines/modules/read/kv.tf +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -resource "enos_remote_exec" "kv_get_verify_test_data" { - for_each = var.hosts - - environment = { - MOUNT = var.create_state.kv.mount - SECRET_PATH = "${var.create_state.kv.test.path_prefix}-${each.key}" - KEY = "${var.create_state.kv.test.path_prefix}-${each.key}" - KV_VERSION = var.create_state.kv.version - VALUE = "${var.create_state.kv.test.value_prefix}-${each.key}" - VAULT_ADDR = var.vault_addr - VAULT_TOKEN = local.user_login_data.auth.client_token - VAULT_INSTALL_DIR = var.vault_install_dir - } - - scripts = [abspath("${path.module}/../../scripts/kv-verify-value.sh")] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} diff --git a/enos/modules/verify_secrets_engines/modules/read/main.tf b/enos/modules/verify_secrets_engines/modules/read/main.tf deleted file mode 100644 index 66a3c29..0000000 --- a/enos/modules/verify_secrets_engines/modules/read/main.tf +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - } - } -} - -variable "hosts" { - type = map(object({ - ipv6 = string - private_ip = string - public_ip = string - })) - description = "The Vault cluster instances that were created" -} - -variable "create_state" { - description = "The state of the secrets engines from the 'create' module" -} - -variable "vault_addr" { - type = string - description = "The local vault API listen address" -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_root_token" { - type = string - description = "The Vault root token" - default = null -} - -variable "verify_aws_secrets_engine" { - type = bool - description = <<-EOF - Whether or not we'll verify the AWS secrets engine. 
Due to the various security requirements in - Doormat managed AWS accounts, our implementation of the verification requires us to use a - an external 'DemoUser' role and associated policy in order to create additional users. This is - configured in vault_ci and vault_enterprise_ci but does not exist in all AWS accounts. As such, - it's disabled by default. - See: https://github.com/hashicorp/honeybee-templates/blob/main/templates/iam_policy/DemoUser.yaml - EOF - default = false -} - -variable "verify_aws_engine_creds" { - type = bool - default = true -} - -variable "verify_pki_certs" { - type = bool - description = "Flag to verify pki certificates" - default = true -} - -locals { - vault_bin_path = "${var.vault_install_dir}/vault" -} diff --git a/enos/modules/verify_secrets_engines/modules/read/pki.tf b/enos/modules/verify_secrets_engines/modules/read/pki.tf deleted file mode 100644 index cde0cc9..0000000 --- a/enos/modules/verify_secrets_engines/modules/read/pki.tf +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -# Verify PKI Certificate -resource "enos_remote_exec" "pki_verify_certificates" { - for_each = var.hosts - - environment = { - MOUNT = var.create_state.pki.mount - AUTH_PATH = "${var.create_state.auth.userpass.path}" - USERNAME = "${var.create_state.auth.userpass.user.name}" - PASSWORD = "${var.create_state.auth.userpass.user.password}" - VAULT_ADDR = var.vault_addr - VAULT_INSTALL_DIR = var.vault_install_dir - VAULT_TOKEN = var.vault_root_token - COMMON_NAME = var.create_state.pki.common_name - ISSUER_NAME = var.create_state.pki.issuer_name - TTL = var.create_state.pki.ttl - TEST_DIR = var.create_state.pki.test_dir - VERIFY_PKI_CERTS = var.verify_pki_certs - } - - scripts = [abspath("${path.module}/../../scripts/pki-verify-certificates.sh")] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} - diff --git a/enos/modules/verify_secrets_engines/scripts/auth-enable.sh b/enos/modules/verify_secrets_engines/scripts/auth-enable.sh deleted file mode 100644 index 5601715..0000000 --- a/enos/modules/verify_secrets_engines/scripts/auth-enable.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$AUTH_METHOD" ]] && fail "AUTH_METHOD env variable has not been set" -[[ -z "$AUTH_PATH" ]] && fail "AUTH_PATH env variable has not been set" -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" - -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -export VAULT_FORMAT=json -"$binpath" auth enable -path="$AUTH_PATH" "$AUTH_METHOD" diff --git a/enos/modules/verify_secrets_engines/scripts/auth-ldap-write.sh b/enos/modules/verify_secrets_engines/scripts/auth-ldap-write.sh deleted file mode 100644 index dac712b..0000000 --- a/enos/modules/verify_secrets_engines/scripts/auth-ldap-write.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$AUTH_PATH" ]] && fail "AUTH_PATH env variable has not been set" -[[ -z "$GROUPATTR" ]] && fail "GROUPATTR env variable has not been set" -[[ -z "$GROUPDN" ]] && fail "GROUPDN env variable has not been set" -[[ -z "$INSECURE_TLS" ]] && fail "INSECURE_TLS env variable has not been set" -[[ -z "$UPNDOMAIN" ]] && fail "UPNDOMAIN env variable has not been set" -[[ -z "$URL" ]] && fail "URL env variable has not been set" -[[ -z "$USERATTR" ]] && fail "USERATTR env variable has not been set" -[[ -z "$USERDN" ]] && fail "USERDN env variable has not been set" - -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" - -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -export VAULT_FORMAT=json -"$binpath" write "auth/$AUTH_PATH/config" \ - url="$URL" \ - userdn="$USERDN" \ - userattr="$USERATTR" \ - groupdn="$GROUPDN" \ - groupattr="$GROUPATTR" \ - upndomain="$UPNDOMAIN" \ - insecure_tls="$INSECURE_TLS" diff --git a/enos/modules/verify_secrets_engines/scripts/auth-userpass-login.sh b/enos/modules/verify_secrets_engines/scripts/auth-userpass-login.sh deleted file mode 100644 index 31b756f..0000000 --- a/enos/modules/verify_secrets_engines/scripts/auth-userpass-login.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$AUTH_PATH" ]] && fail "AUTH_PATH env variable has not been set" -[[ -z "$PASSWORD" ]] && fail "PASSWORD env variable has not been set" -[[ -z "$USERNAME" ]] && fail "USERNAME env variable has not been set" -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" - -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -export VAULT_FORMAT=json -"$binpath" write "auth/$AUTH_PATH/login/$USERNAME" password="$PASSWORD" diff --git a/enos/modules/verify_secrets_engines/scripts/auth-userpass-write.sh b/enos/modules/verify_secrets_engines/scripts/auth-userpass-write.sh deleted file mode 100644 index b8cca8b..0000000 --- a/enos/modules/verify_secrets_engines/scripts/auth-userpass-write.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$AUTH_PATH" ]] && fail "AUTH_PATH env variable has not been set" -[[ -z "$PASSWORD" ]] && fail "PASSWORD env variable has not been set" -[[ -z "$POLICIES" ]] && fail "POLICIES env variable has not been set" -[[ -z "$USERNAME" ]] && fail "USERNAME env variable has not been set" -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" - -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -export VAULT_FORMAT=json -"$binpath" write "auth/$AUTH_PATH/users/$USERNAME" password="$PASSWORD" policies="$POLICIES" diff --git a/enos/modules/verify_secrets_engines/scripts/aws-generate-roles.sh b/enos/modules/verify_secrets_engines/scripts/aws-generate-roles.sh deleted file mode 100755 index 806d45c..0000000 --- a/enos/modules/verify_secrets_engines/scripts/aws-generate-roles.sh +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$MOUNT" ]] && fail "MOUNT env variable has not been set" -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" -[[ -z "$VAULT_AWS_ROLE" ]] && fail "VAULT_AWS_ROLE env variable has not been set" -[[ -z "$AWS_REGION" ]] && fail "AWS_REGION env variable has not been set" -[[ -z "$AWS_POLICY_ARN" ]] && fail "AWS_POLICY_ARN env variable has not been set" -[[ -z "$AWS_ROLE_ARN" ]] && fail "AWS_ROLE_ARN env variable has not been set" -[[ -z "$AWS_USER_NAME" ]] && fail "AWS_USER_NAME env variable has not been set" -[[ -z "$AWS_ACCESS_KEY_ID" ]] && fail "AWS_ACCESS_KEY_ID env variable has not been set" -[[ -z "$AWS_SECRET_ACCESS_KEY" ]] && fail "AWS_SECRET_ACCESS_KEY env variable has not been set" - -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -export VAULT_FORMAT=json - -while true; do - echo -e "Waiting for IAM user to be done setting up...\n" - # Fetch the IAM user creation date and convert it to a Unix timestamp - create_timestamp=$(aws iam get-user --user-name "${AWS_USER_NAME}" --query 'User.CreateDate' --output text | sed 's/\([+-][0-9]\{2\}:[0-9]\{2\}\)$//' | date -f - "+%s") - if (($(date +%s) - create_timestamp > 75)); then - break - fi - sleep 2 -done - -echo -e "Configuring Vault AWS \n" -USERNAME_TEMPLATE="{{ if (eq .Type \"STS\") }}{{ printf \"${AWS_USER_NAME}-%s-%s\" (random 20) (unix_time) | truncate 32 }}{{ else }}{{ printf \"${AWS_USER_NAME}-%s-%s\" (unix_time) (random 20) | truncate 60 }}{{ end }}" -"$binpath" write "${MOUNT}/config/root" access_key="${AWS_ACCESS_KEY_ID}" secret_key="${AWS_SECRET_ACCESS_KEY}" region="${AWS_REGION}" username_template="${USERNAME_TEMPLATE}" - -echo -e "Creating Role to create user 
\n" -"$binpath" write "aws/roles/${VAULT_AWS_ROLE}" \ - credential_type=iam_user \ - permissions_boundary_arn="${AWS_POLICY_ARN}" \ - policy_document=- << EOF -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": ["ec2:DescribeRegions"], - "Resource": ["*"] - } - ] -} -EOF - -echo -e "Verifying root config \n" -"$binpath" read "${MOUNT}/config/root" -ROOT_USERNAME_TEMPLATE=$("$binpath" read "${MOUNT}/config/root" | jq -r '.data.username_template') -[[ "$ROOT_USERNAME_TEMPLATE" == *"$AWS_USER_NAME"* ]] || fail "Uername Template does not include the current role" diff --git a/enos/modules/verify_secrets_engines/scripts/aws-verify-new-creds.sh b/enos/modules/verify_secrets_engines/scripts/aws-verify-new-creds.sh deleted file mode 100755 index 6484704..0000000 --- a/enos/modules/verify_secrets_engines/scripts/aws-verify-new-creds.sh +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$MOUNT" ]] && fail "MOUNT env variable has not been set" -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" -[[ -z "$VAULT_AWS_ROLE" ]] && fail "VAULT_AWS_ROLE env variable has not been set" -[[ -z "$VERIFY_AWS_ENGINE_CERTS" ]] && fail "VERIFY_AWS_ENGINE_CERTS env variable has not been set" -[[ -z "$AWS_REGION" ]] && fail "AWS_REGION env variable has not been set" -[[ -z "$AWS_USER_NAME" ]] && fail "AWS_USER_NAME env variable has not been set" -[[ -z "$AWS_ACCESS_KEY_ID" ]] && fail "AWS_ACCESS_KEY_ID env variable has not been set" -[[ -z "$AWS_SECRET_ACCESS_KEY" ]] && fail "AWS_SECRET_ACCESS_KEY env variable has not been set" - -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -export 
VAULT_FORMAT=json - -if [ "${VERIFY_AWS_ENGINE_CERTS}" = false ]; then - echo "AWS Engine certificate verification is disabled. Skipping verification." - exit 0 -fi - -echo -e "Configuring Vault AWS \n" -USERNAME_TEMPLATE="{{ if (eq .Type \"STS\") }}{{ printf \"${AWS_USER_NAME}-%s-%s\" (random 20) (unix_time) | truncate 32 }}{{ else }}{{ printf \"${AWS_USER_NAME}-%s-%s\" (unix_time) (random 20) | truncate 60 }}{{ end }}" -"$binpath" write "${MOUNT}/config/root" access_key="${AWS_ACCESS_KEY_ID}" secret_key="${AWS_SECRET_ACCESS_KEY}" username_template="${USERNAME_TEMPLATE}" - -echo -e "Verifying root config \n" -"$binpath" read "${MOUNT}/config/root" -ROOT_USERNAME_TEMPLATE=$("$binpath" read "${MOUNT}/config/root" | jq -r '.data.username_template') -[[ "$ROOT_USERNAME_TEMPLATE" == *"$AWS_USER_NAME"* ]] || fail "Uername Template does not include the current role" - -echo -e "Verifying roles list \n" -"$binpath" list "${MOUNT}/roles" -ROLE=$("$binpath" list "${MOUNT}/roles" | jq -r '.[]') -[[ -z "$ROLE" ]] && fail "No AWS roles created!" - -echo -e "Generate New Credentials \n" -TEMP_IAM_USER=$("$binpath" read "${MOUNT}/creds/${VAULT_AWS_ROLE}") || fail "Failed to generate new credentials for iam user: ${VAULT_AWS_ROLE}" -TEMP_ACCESS_KEY=$(echo "${TEMP_IAM_USER}" | jq -r '.data.access_key') || fail "Failed to get access key from: ${VAULT_AWS_ROLE}" -if [[ -z "$TEMP_ACCESS_KEY" && "$TEMP_ACCESS_KEY" != "$AWS_USER_NAME" ]]; then - failed "The new access key is empty or is matching the old one: ${TEMP_ACCESS_KEY}" -fi diff --git a/enos/modules/verify_secrets_engines/scripts/identity-oidc-introspect-token.sh b/enos/modules/verify_secrets_engines/scripts/identity-oidc-introspect-token.sh deleted file mode 100644 index 0e6e1ea..0000000 --- a/enos/modules/verify_secrets_engines/scripts/identity-oidc-introspect-token.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$PAYLOAD" ]] && fail "PAYLOAD env variable has not been set" -[[ -z "$ASSERT_ACTIVE" ]] && fail "ASSERT_ACTIVE env variable has not been set" -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" - -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -export VAULT_FORMAT=json -if ! output=$("$binpath" write identity/oidc/introspect - <<< "$PAYLOAD" 2>&1); then - # Attempt to write our error on stdout as JSON as our consumers of the script expect it to be JSON - printf '{"data":{"error":"%s"}}' "$output" - # Fail on stderr with a human readable message - fail "failed to write payload to identity/oidc/introspect: payload=$PAYLOAD output=$output" -fi - -printf "%s\n" "$output" # Write our response output JSON to stdout -if ! jq -Me --argjson ACTIVE "$ASSERT_ACTIVE" '.data.active == $ACTIVE' <<< "$output" &> /dev/null; then - # Write a failure message on STDERR - fail "token active state is invalid, expected .data.active='$ASSERT_ACTIVE'" -fi diff --git a/enos/modules/verify_secrets_engines/scripts/identity-verify-entity.sh b/enos/modules/verify_secrets_engines/scripts/identity-verify-entity.sh deleted file mode 100644 index 2ee9503..0000000 --- a/enos/modules/verify_secrets_engines/scripts/identity-verify-entity.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$ENTITY_ALIAS_ID" ]] && fail "ENTITY_ALIAS_ID env variable has not been set" -[[ -z "$ENTITY_GROUP_IDS" ]] && fail "ENTITY_GROUP_IDS env variable has not been set" -[[ -z "$ENTITY_METADATA" ]] && fail "ENTITY_METADATA env variable has not been set" -[[ -z "$ENTITY_NAME" ]] && fail "ENTITY_NAME env variable has not been set" -[[ -z "$ENTITY_POLICIES" ]] && fail "ENTITY_POLICIES env variable has not been set" -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" - -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -export VAULT_FORMAT=json -if ! output=$("$binpath" read "identity/entity/name/$ENTITY_NAME" 2>&1); then - fail "failed to read identity/entity/name/$ENTITY_NAME: $output" -fi - -if ! jq -Mec --arg ALIAS "$ENTITY_ALIAS_ID" '.data.aliases[0].id == $ALIAS' <<< "$output"; then - fail "entity alias ID does not match, expected: $ENTITY_ALIAS_ID, got: $(jq -Mrc '.data.aliases' <<< "$output")" -fi - -if ! jq -Mec --argjson GROUPS "$ENTITY_GROUP_IDS" '.data.group_ids | sort as $have | $GROUPS | sort as $want | $have == $want' <<< "$output"; then - fail "entity group ID's do not match, expected: $ENTITY_GROUP_IDS, got: $(jq -Mrc '.data.group_ids' <<< "$output")" -fi - -if ! jq -Mec --argjson METADATA "$ENTITY_METADATA" '.data.metadata == $METADATA' <<< "$output"; then - fail "entity metadata does not match, expected: $ENTITY_METADATA, got: $(jq -Mrc '.data.metadata' <<< "$output")" -fi - -if ! 
jq -Mec --argjson POLICIES "$ENTITY_POLICIES" '.data.policies == $POLICIES' <<< "$output"; then - fail "entity policies do not match, expected: $ENTITY_POLICIES, got: $(jq -Mrc '.data.policies' <<< "$output")" -fi diff --git a/enos/modules/verify_secrets_engines/scripts/identity-verify-oidc.sh b/enos/modules/verify_secrets_engines/scripts/identity-verify-oidc.sh deleted file mode 100644 index 3b09557..0000000 --- a/enos/modules/verify_secrets_engines/scripts/identity-verify-oidc.sh +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$OIDC_ISSUER_URL" ]] && fail "OIDC_ISSUER_URL env variable has not been set" -[[ -z "$OIDC_KEY_NAME" ]] && fail "OIDC_KEY_NAME env variable has not been set" -[[ -z "$OIDC_KEY_ROTATION_PERIOD" ]] && fail "OIDC_KEY_ROTATION_PERIOD env variable has not been set" -[[ -z "$OIDC_KEY_VERIFICATION_TTL" ]] && fail "OIDC_KEY_VERIFICATION_TTL env variable has not been set" -[[ -z "$OIDC_KEY_ALGORITHM" ]] && fail "OIDC_KEY_ALGORITHM env variable has not been set" -[[ -z "$OIDC_ROLE_NAME" ]] && fail "OIDC_ROLE_NAME env variable has not been set" -[[ -z "$OIDC_ROLE_TTL" ]] && fail "OIDC_ROLE_TTL env variable has not been set" -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" - -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -export VAULT_FORMAT=json - -# Verify that we have the correct issuer URL -if ! cfg=$("$binpath" read identity/oidc/config); then - fail "failed to read identity/oidc/config: $cfg" -elif ! 
jq -Merc --arg URL "$OIDC_ISSUER_URL" '.data.issuer == $URL' <<< "$cfg"; then - fail "oidc issuer URL is incorrect, expected: $OIDC_ISSUER_URL, got $(jq -Mrc '.data.issuer' <<< "$cfg")" -fi - -# Verify that our token algorithm, rotation period and verification TTL are correct -if ! key_res=$("$binpath" read "identity/oidc/key/$OIDC_KEY_NAME"); then - fail "failed to read identity/oidc/key/$OIDC_KEY_NAME: $key_res" -fi - -if ! jq -Merc --arg ALG "$OIDC_KEY_ALGORITHM" '.data.algorithm == $ALG' <<< "$key_res"; then - fail "oidc token algorithm is incorrect, expected: $OIDC_KEY_ALGORITHM, got $(jq -Mrc '.data.algorithm' <<< "$key_res")" -fi - -if ! jq -Merc --argjson RP "$OIDC_KEY_ROTATION_PERIOD" '.data.rotation_period == $RP' <<< "$key_res"; then - fail "oidc token rotation_period is incorrect, expected: $OIDC_KEY_ROTATION_PERIOD, got $(jq -Mrc '.data.rotation_period' <<< "$key_res")" -fi - -if ! jq -Merc --argjson TTL "$OIDC_KEY_VERIFICATION_TTL" '.data.verification_ttl == $TTL' <<< "$key_res"; then - fail "oidc token verification_ttl is incorrect, expected: $OIDC_KEY_VERIFICATION_TTL, got $(jq -Mrc '.data.verification_ttl' <<< "$key_res")" -fi - -# Verify that our role key and TTL are correct. -if ! role_res=$("$binpath" read "identity/oidc/role/$OIDC_ROLE_NAME"); then - fail "failed to read identity/oidc/role/$OIDC_ROLE_NAME: $role_res" -fi - -if ! jq -Merc --arg KEY "$OIDC_KEY_NAME" '.data.key == $KEY' <<< "$role_res"; then - fail "oidc role key is incorrect, expected: $OIDC_KEY_NAME, got $(jq -Mrc '.data.key' <<< "$role_res")" -fi - -if ! 
jq -Merc --argjson TTL "$OIDC_ROLE_TTL" '.data.ttl == $TTL' <<< "$role_res"; then - fail "oidc role ttl is incorrect, expected: $OIDC_ROLE_TTL, got $(jq -Mrc '.data.ttl' <<< "$role_res")" -fi diff --git a/enos/modules/verify_secrets_engines/scripts/kv-put.sh b/enos/modules/verify_secrets_engines/scripts/kv-put.sh deleted file mode 100644 index 46e858f..0000000 --- a/enos/modules/verify_secrets_engines/scripts/kv-put.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$KEY" ]] && fail "KEY env variable has not been set" -[[ -z "$MOUNT" ]] && fail "MOUNT env variable has not been set" -[[ -z "$SECRET_PATH" ]] && fail "SECRET_PATH env variable has not been set" -[[ -z "$VALUE" ]] && fail "VALUE env variable has not been set" -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" - -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -export VAULT_FORMAT=json - -"$binpath" kv put -mount="$MOUNT" "$SECRET_PATH" "$KEY=$VALUE" diff --git a/enos/modules/verify_secrets_engines/scripts/kv-verify-value.sh b/enos/modules/verify_secrets_engines/scripts/kv-verify-value.sh deleted file mode 100644 index 64d6f29..0000000 --- a/enos/modules/verify_secrets_engines/scripts/kv-verify-value.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$MOUNT" ]] && fail "MOUNT env variable has not been set" -[[ -z "$SECRET_PATH" ]] && fail "SECRET_PATH env variable has not been set" -[[ -z "$KEY" ]] && fail "KEY env variable has not been set" -[[ -z "$VALUE" ]] && fail "VALUE env variable has not been set" -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" - -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -export VAULT_FORMAT=json -if res=$("$binpath" kv get -mount="$MOUNT" "$SECRET_PATH"); then - # Note that this expects KVv2 response payloads. KVv1 does not include doubly nested .data - if jq -Merc --arg VALUE "$VALUE" --arg KEY "$KEY" '.data.data[$KEY] == $VALUE' <<< "$res"; then - printf "kv %s/%s %s=%s is valid\n" "$MOUNT" "$SECRET_PATH" "$KEY" "$VALUE" - exit 0 - fi - fail "kv $MOUNT/$SECRET_PATH $KEY=$VALUE invalid! Got: $(jq -Mrc --arg KEY "$KEY" '.data[$KEY]' <<< "$res")" -else - fail "failed to read kv data for $MOUNT/$SECRET_PATH: $res" -fi diff --git a/enos/modules/verify_secrets_engines/scripts/pki-issue-certificates.sh b/enos/modules/verify_secrets_engines/scripts/pki-issue-certificates.sh deleted file mode 100755 index f4592eb..0000000 --- a/enos/modules/verify_secrets_engines/scripts/pki-issue-certificates.sh +++ /dev/null @@ -1,61 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$MOUNT" ]] && fail "MOUNT env variable has not been set" -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" -[[ -z "$COMMON_NAME" ]] && fail "COMMON_NAME env variable has not been set" -[[ -z "$ISSUER_NAME" ]] && fail "ISSUER_NAME env variable has not been set" -[[ -z "$TTL" ]] && fail "TTL env variable has not been set" -[[ -z "$TEST_DIR" ]] && fail "TEST_DIR env variable has not been set" - -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "unable to locate vault binary at $binpath" -export VAULT_FORMAT=json - -# ------ Generate and sign certificate ------ -CA_NAME="${MOUNT}-ca.pem" -ISSUED_CERT_NAME="${MOUNT}-issued.pem" -ROLE_NAME="${COMMON_NAME}-role" -SUBJECT="test.${COMMON_NAME}" -TMP_TTL="1h" -rm -rf "${TEST_DIR}" -mkdir "${TEST_DIR}" - -## Setting AIA fields for Certificate -"$binpath" write "${MOUNT}/config/urls" issuing_certificates="${VAULT_ADDR}/v1/pki/ca" crl_distribution_points="${VAULT_ADDR}/v1/pki/crl" - -# Generating CA Certificate -"$binpath" write "${MOUNT}/root/generate/internal" common_name="${COMMON_NAME}.com" issuer_name="${ISSUER_NAME}" ttl="${TTL}" | jq -r '.data.issuing_ca' > "${TEST_DIR}/${CA_NAME}" -# Creating a role -"$binpath" write "${MOUNT}/roles/${ROLE_NAME}" allowed_domains="${COMMON_NAME}.com" allow_subdomains=true max_ttl="${TMP_TTL}" -# Issuing Signed Certificate -"$binpath" write "${MOUNT}/issue/${ROLE_NAME}" common_name="${SUBJECT}.com" ttl="${TMP_TTL}" | jq -r '.data.certificate' > "${TEST_DIR}/${ISSUED_CERT_NAME}" - -# ------ Generate and sign intermediate ------ -INTERMEDIATE_COMMON_NAME="intermediate-${COMMON_NAME}" -INTERMEDIATE_ISSUER_NAME="intermediate-${ISSUER_NAME}" 
-INTERMEDIATE_ROLE_NAME="intermediate-${COMMON_NAME}-role" -INTERMEDIATE_CA_NAME="${MOUNT}-${INTERMEDIATE_COMMON_NAME}.pem" -INTERMEDIATE_SIGNED_NAME="${MOUNT}-${INTERMEDIATE_COMMON_NAME}-ca.pem" -INTERMEDIATE_ISSUED_NAME="${MOUNT}-${INTERMEDIATE_COMMON_NAME}-issued.pem" - -# Generate Intermediate CSR -"$binpath" write "${MOUNT}/intermediate/generate/internal" common_name="${INTERMEDIATE_COMMON_NAME}.com" issuer_name="${INTERMEDIATE_ISSUER_NAME}" ttl="${TTL}" | jq -r '.data.csr' > "${TEST_DIR}/${INTERMEDIATE_CA_NAME}" -# Creating a intermediate role -"$binpath" write "${MOUNT}/roles/${INTERMEDIATE_ROLE_NAME}" allowed_domains="${INTERMEDIATE_COMMON_NAME}.com" allow_subdomains=true max_ttl="${TMP_TTL}" -# Sign Intermediate Certificate -"$binpath" write "${MOUNT}/root/sign-intermediate" csr="@${TEST_DIR}/${INTERMEDIATE_CA_NAME}" format=pem_bundle ttl="${TMP_TTL}" | jq -r '.data.certificate' > "${TEST_DIR}/${INTERMEDIATE_SIGNED_NAME}" -# Import Signed Intermediate Certificate into Vault -"$binpath" write "${MOUNT}/intermediate/set-signed" certificate="@${TEST_DIR}/${INTERMEDIATE_SIGNED_NAME}" -# Issuing Signed Certificate with the intermediate role -"$binpath" write "${MOUNT}/issue/${INTERMEDIATE_ROLE_NAME}" common_name="www.${INTERMEDIATE_COMMON_NAME}.com" ttl="${TMP_TTL}" | jq -r '.data.certificate' > "${TEST_DIR}/${INTERMEDIATE_ISSUED_NAME}" diff --git a/enos/modules/verify_secrets_engines/scripts/pki-verify-certificates.sh b/enos/modules/verify_secrets_engines/scripts/pki-verify-certificates.sh deleted file mode 100755 index e738bd7..0000000 --- a/enos/modules/verify_secrets_engines/scripts/pki-verify-certificates.sh +++ /dev/null @@ -1,109 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$AUTH_PATH" ]] && fail "AUTH_PATH env variable has not been set" -[[ -z "$USERNAME" ]] && fail "USERNAME env variable has not been set" -[[ -z "$PASSWORD" ]] && fail "PASSWORD env variable has not been set" -[[ -z "$VERIFY_PKI_CERTS" ]] && fail "VERIFY_CERT_DETAILS env variable has not been set" -[[ -z "$MOUNT" ]] && fail "MOUNT env variable has not been set" -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" -[[ -z "$COMMON_NAME" ]] && fail "COMMON_NAME env variable has not been set" -[[ -z "$ISSUER_NAME" ]] && fail "ISSUER_NAME env variable has not been set" -[[ -z "$TTL" ]] && fail "TTL env variable has not been set" -[[ -z "$TEST_DIR" ]] && fail "TEST_DIR env variable has not been set" - -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "unable to locate vault binary at $binpath" || fail "The certificate appears to be improperly configured or contains errors" -export VAULT_FORMAT=json - -# Log in so this vault instance have access to the primary pki roles, issuers, and etc -if [ "${VERIFY_PKI_CERTS}" = false ]; then - echo "Logging in Vault with username and password: ${USERNAME}" - VAULT_TOKEN=$("$binpath" write "auth/$AUTH_PATH/login/$USERNAME" password="$PASSWORD" | jq -r '.auth.client_token') -fi - -# Verifying List Roles -ROLE=$("$binpath" list "${MOUNT}/roles" | jq -r '.[]') -[[ -z "$ROLE" ]] && fail "No roles created!" - -# Verifying List Issuer -ISSUER=$("$binpath" list "${MOUNT}/issuers" | jq -r '.[]') -[[ -z "$ISSUER" ]] && fail "No issuers created!" 
- -# Verifying Root CA Certificate -ROOT_CA_CERT=$("$binpath" read pki/cert/ca | jq -r '.data.certificate') -[[ -z "$ROOT_CA_CERT" ]] && fail "No root ca certificate generated" - -# Verifying Certificates -if [ "${VERIFY_PKI_CERTS}" = true ]; then - if [ ! -d "${TEST_DIR}" ]; then - echo "Directory does not exist. Creating it now." - mkdir -p "${TEST_DIR}" # Need to create this directory for Enterprise test - fi - TMP_FILE="tmp-vault-cert.pem" - - # Verify List Certificate - VAULT_CERTS=$("$binpath" list "${MOUNT}/certs" | jq -r '.[]') - [[ -z "$VAULT_CERTS" ]] && fail "VAULT_CERTS should include vault certificates" - for CERT in $VAULT_CERTS; do - echo "Getting certificate from Vault PKI: ${CERT}" - "$binpath" read "${MOUNT}/cert/${CERT}" | jq -r '.data.certificate' > "${TEST_DIR}/${TMP_FILE}" - echo "Verifying certificate contents..." - openssl x509 -in "${TEST_DIR}/${TMP_FILE}" -text -noout || fail "The certificate appears to be improperly configured or contains errors" - CURR_CERT_SERIAL=$(echo "${CERT}" | tr -d ':' | tr '[:lower:]' '[:upper:]') - if ! TMP_CERT_SUBJECT=$(openssl x509 -in "${TEST_DIR}/${TMP_FILE}" -noout -subject | cut -d '=' -f2-); then - fail "failed to read certificate subject: $TMP_CERT_SUBJECT" - fi - TMP_CERT_ISSUER=$(openssl x509 -in "${TEST_DIR}/${TMP_FILE}" -noout -issuer | cut -d '=' -f2-) - TMP_CERT_SERIAL=$(openssl x509 -in "${TEST_DIR}/${TMP_FILE}" -noout -serial | cut -d '=' -f2-) - [[ "${TMP_CERT_SUBJECT}" == *"${COMMON_NAME}.com"* ]] || fail "Subject is incorrect. Actual Subject: ${TMP_CERT_SUBJECT}" - [[ "${TMP_CERT_ISSUER}" == *"${COMMON_NAME}.com"* ]] || fail "Issuer is incorrect. Actual Issuer: ${TMP_CERT_ISSUER}" - [[ "${TMP_CERT_SERIAL}" == *"${CURR_CERT_SERIAL}"* ]] || fail "Certificate Serial is incorrect. Actual certificate Serial: ${CURR_CERT_SERIAL},${TMP_CERT_SERIAL}" - echo "Successfully verified certificate contents." 
- - # Setting up variables for types of certificates - IS_CA=$(openssl x509 -in "${TEST_DIR}/${TMP_FILE}" -text -noout | grep -q "CA:TRUE" && echo "TRUE" || echo "FALSE") - if [[ "${IS_CA}" == "TRUE" ]]; then - if [[ "${COMMON_NAME}.com" == "${TMP_CERT_SUBJECT}" ]]; then - CA_CERT=${CERT} - elif [[ "intermediate-${COMMON_NAME}.com" == "${TMP_CERT_SUBJECT}" ]]; then - INTERMEDIATE_CA_CERT=${CERT} - fi - elif [[ "${IS_CA}" == "FALSE" ]]; then - INTERMEDIATE_ISSUED_CERT=${CERT} - fi - - done - - echo "Verifying that Vault PKI has successfully generated valid certificates for the CA, Intermediate CA, and issued certificates..." - if [[ -n "${CA_CERT}" ]] && [[ -n "${INTERMEDIATE_CA_CERT}" ]] && [[ -n "${INTERMEDIATE_ISSUED_CERT}" ]]; then - CA_NAME="ca.pem" - INTERMEDIATE_CA_NAME="intermediate-ca.pem" - ISSUED_NAME="issued.pem" - "$binpath" read "${MOUNT}/cert/${CA_CERT}" | jq -r '.data.certificate' > "${TEST_DIR}/${CA_NAME}" - "$binpath" read "${MOUNT}/cert/${INTERMEDIATE_CA_CERT}" | jq -r '.data.certificate' > "${TEST_DIR}/${INTERMEDIATE_CA_NAME}" - "$binpath" read "${MOUNT}/cert/${INTERMEDIATE_ISSUED_CERT}" | jq -r '.data.certificate' > "${TEST_DIR}/${ISSUED_NAME}" - openssl verify --CAfile "${TEST_DIR}/${CA_NAME}" -untrusted "${TEST_DIR}/${INTERMEDIATE_CA_NAME}" "${TEST_DIR}/${ISSUED_NAME}" || fail "One or more Certificate is not valid." 
- else - echo "CA Cert: ${CA_CERT}, Intermedidate Cert: ${INTERMEDIATE_CA_CERT}, Issued Cert: ${INTERMEDIATE_ISSUED_CERT}" - fi - - echo "Revoking certificate: ${INTERMEDIATE_ISSUED_CERT}" - "$binpath" write "${MOUNT}/revoke" serial_number="${INTERMEDIATE_ISSUED_CERT}" || fail "Could not revoke certificate ${INTERMEDIATE_ISSUED_CERT}" - echo "Verifying Revoked Certificate" - REVOKED_CERT_FROM_LIST=$("$binpath" list "${MOUNT}/certs/revoked" | jq -r '.[0]') - [[ "${INTERMEDIATE_ISSUED_CERT}" == "${REVOKED_CERT_FROM_LIST}" ]] || fail "Expected: ${INTERMEDIATE_ISSUED_CERT}, actual: ${REVOKED_CERT_FROM_LIST}" - echo "Successfully verified revoked certificate" -else - echo "Skipping verify certificates!" -fi diff --git a/enos/modules/verify_secrets_engines/scripts/policy-write.sh b/enos/modules/verify_secrets_engines/scripts/policy-write.sh deleted file mode 100644 index 18e011c..0000000 --- a/enos/modules/verify_secrets_engines/scripts/policy-write.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$POLICY_NAME" ]] && fail "POLICY_NAME env variable has not been set" -[[ -z "$POLICY_CONFIG" ]] && fail "POLICY_CONFIG env variable has not been set" -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" - -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -export VAULT_FORMAT=json -"$binpath" policy write "$POLICY_NAME" - <<< "$POLICY_CONFIG" diff --git a/enos/modules/verify_secrets_engines/scripts/read.sh b/enos/modules/verify_secrets_engines/scripts/read.sh deleted file mode 100644 index b522c6f..0000000 --- a/enos/modules/verify_secrets_engines/scripts/read.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$REQPATH" ]] && fail "REQPATH env variable has not been set" -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" - -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -export VAULT_FORMAT=json -"$binpath" read "$REQPATH" diff --git a/enos/modules/verify_secrets_engines/scripts/secrets-enable.sh b/enos/modules/verify_secrets_engines/scripts/secrets-enable.sh deleted file mode 100644 index 0e8174a..0000000 --- a/enos/modules/verify_secrets_engines/scripts/secrets-enable.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$MOUNT" ]] && fail "MOUNT env variable has not been set" -[[ -z "$ENGINE" ]] && fail "MOUNT env variable has not been set" -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" - -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -export VAULT_FORMAT=json -eval "$binpath" secrets enable -path="$MOUNT" "$SECRETS_META" "$ENGINE" diff --git a/enos/modules/verify_secrets_engines/scripts/write-payload.sh b/enos/modules/verify_secrets_engines/scripts/write-payload.sh deleted file mode 100644 index 922fb2e..0000000 --- a/enos/modules/verify_secrets_engines/scripts/write-payload.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} - -[[ -z "$REQPATH" ]] && fail "REQPATH env variable has not been set" -[[ -z "$PAYLOAD" ]] && fail "PAYLOAD env variable has not been set" -[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" -[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" -[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" - -binpath=${VAULT_INSTALL_DIR}/vault -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -export VAULT_FORMAT=json -if output=$("$binpath" write "$REQPATH" - <<< "$PAYLOAD" 2>&1); then - printf "%s\n" "$output" -else - fail "failed to write payload: path=$REQPATH payload=$PAYLOAD out=$output" -fi From 7875f6bd7ccee3b7cb9e85fa124d56cef0882528 Mon Sep 17 00:00:00 2001 From: HamzaShili65 Date: Thu, 31 Jul 2025 12:57:19 -0700 Subject: [PATCH 05/26] add enos vars template for developer --- enos/ci/aws-nuke.yml | 409 --------------------- enos/ci/bootstrap/main.tf | 69 ---- enos/ci/bootstrap/outputs.tf | 23 -- enos/ci/bootstrap/variables.tf | 16 - enos/ci/service-user-iam/main.tf | 242 ------------ enos/ci/service-user-iam/outputs.tf | 16 - enos/ci/service-user-iam/providers.tf | 22 -- enos/ci/service-user-iam/service-quotas.tf | 65 ---- enos/ci/service-user-iam/variables.tf | 11 - enos/enos.vars.hcl | 118 ++++++ 10 files changed, 118 insertions(+), 873 deletions(-) delete mode 100644 enos/ci/aws-nuke.yml delete mode 100644 enos/ci/bootstrap/main.tf delete mode 100644 enos/ci/bootstrap/outputs.tf delete mode 100644 enos/ci/bootstrap/variables.tf delete mode 100644 enos/ci/service-user-iam/main.tf delete mode 100644 enos/ci/service-user-iam/outputs.tf delete mode 100644 enos/ci/service-user-iam/providers.tf delete mode 100644 enos/ci/service-user-iam/service-quotas.tf delete mode 100644 enos/ci/service-user-iam/variables.tf create mode 100644 enos/enos.vars.hcl diff --git 
a/enos/ci/aws-nuke.yml b/enos/ci/aws-nuke.yml deleted file mode 100644 index fd7dd54..0000000 --- a/enos/ci/aws-nuke.yml +++ /dev/null @@ -1,409 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -regions: -- eu-north-1 -- ap-south-1 -- eu-west-3 -- eu-west-2 -- eu-west-1 -- ap-northeast-3 -- ap-northeast-2 -- ap-northeast-1 -- sa-east-1 -- ca-central-1 -- ap-southeast-1 -- ap-southeast-2 -- eu-central-1 -- us-east-1 -- us-east-2 -- us-west-1 -- us-west-2 -- global - -blocklist: - - 1234567890 - -accounts: - # replaced in CI - ACCOUNT_NUM: - presets: - - default - - olderthan - - honeybee - - enos - -presets: - default: - # Ignores default VPC resources - filters: - EC2VPC: - - property: IsDefault - value: "true" - EC2RouteTable: - - property: DefaultVPC - value: "true" - EC2DHCPOption: - - property: DefaultVPC - value: "true" - EC2InternetGateway: - - property: DefaultVPC - value: "true" - EC2Subnet: - - property: DefaultVPC - value: "true" - EC2InternetGatewayAttachment: - - property: DefaultVPC - value: "true" - - olderthan: - # Filters resources by age (when available) - # TIME_LIMIT replaced in CI - filters: - EC2Instance: - - property: LaunchTime - type: dateOlderThan - value: "TIME_LIMIT" - EC2NetworkACL: - EC2RouteTable: - EC2SecurityGroup: - EC2Subnet: - EC2Volume: - EC2VPC: - - property: tag:cloud-nuke-first-seen - type: dateOlderThan - value: "TIME_LIMIT" - ELBv2: - - property: tag:cloud-nuke-first-seen - type: dateOlderThan - value: "TIME_LIMIT" - ELBv2TargetGroup: - EC2NetworkInterface: - EC2InternetGateway: - EC2InternetGatewayAttachment: - RDSInstance: - - property: InstanceCreateTime - type: dateOlderThan - value: "TIME_LIMIT" - - honeybee: - # Cloudsec - filters: - IAMRole: - - property: tag:hc-config-as-code - value: "honeybee" - - property: Name - type: glob - value: "vault-assumed-role-credentials-demo" - IAMRolePolicy: - - property: tag:role:hc-config-as-code - value: "honeybee" - - property: role:RoleName - type: 
glob - value: "vault-assumed-role-credentials-demo" - IAMRolePolicyAttachment: - - property: tag:role:hc-config-as-code - value: "honeybee" - - property: Name - type: glob - value: "vault-assumed-role-credentials-demo" - - enos: - # Existing CI to be cleaned up later - filters: - LambdaFunction: - - property: Name - value: "enos_cleanup" - IAMRole: - - property: Name - type: glob - value: "github_actions-*" - - property: Name - value: "rds-monitoring-role" - IAMRolePolicy: - - property: role:RoleName - type: glob - value: "github_actions*" - - property: role:RoleName - type: glob - value: "rds-*" - IAMRolePolicyAttachment: - - "rds-monitoring-role -> AmazonRDSEnhancedMonitoringRole" - IAMUserPolicy: - - "github_actions-vault_ci -> AssumeServiceUserRole" - - "github_actions-vault_enterprise_ci -> AssumeServiceUserRole" - -resource-types: - # Run against everything, excluding these: - excludes: - # Avoid cloudsec things - - IAMUser - - IAMPolicy - - IAMUserAccessKey - - S3Object - - S3Bucket - - EC2KeyPair - - CloudWatchEventsTarget - - CloudWatchEventsRule - - CloudWatchLogsLogGroup - - ConfigServiceConfigurationRecorder - - ConfigServiceConfigRule - - ConfigServiceDeliveryChannel - - CloudTrailTrail - - RDSSnapshot - - RDSClusterSnapshot - - WAFWebACL - - WAFv2WebACL - - WAFRegionalWebACL - - GuardDutyDetector - - # Unused services, filtering these speeds up runs and - # removes errors about things we don't have enabled - - ACMCertificate - - ACMPCACertificateAuthority - - ACMPCACertificateAuthorityState - - AMGWorkspace - - AMPWorkspace - - APIGatewayAPIKey - - APIGatewayClientCertificate - - APIGatewayDomainName - - APIGatewayRestAPI - - APIGatewayUsagePlan - - APIGatewayV2API - - APIGatewayV2VpcLink - - APIGatewayVpcLink - - AWS::AppFlow::ConnectorProfile - - AWS::AppFlow::Flow - - AWS::AppRunner::Service - - AWS::ApplicationInsights::Application - - AWS::Backup::Framework - - AWS::MWAA::Environment - - AWS::NetworkFirewall::Firewall - - 
AWS::NetworkFirewall::FirewallPolicy - - AWS::NetworkFirewall::RuleGroup - - AWS::Synthetics::Canary - - AWS::Timestream::Database - - AWS::Timestream::ScheduledQuery - - AWS::Timestream::Table - - AWS::Transfer::Workflow - - AWSBackupPlan - - AWSBackupRecoveryPoint - - AWSBackupSelection - - AWSBackupVault - - AWSBackupVaultAccessPolicy - - AccessAnalyzer - - AppMeshMesh - - AppMeshRoute - - AppMeshVirtualGateway - - AppMeshVirtualNode - - AppMeshVirtualRouter - - AppMeshVirtualService - - AppStreamDirectoryConfig - - AppStreamFleet - - AppStreamFleetState - - AppStreamImage - - AppStreamImageBuilder - - AppStreamImageBuilderWaiter - - AppStreamStack - - AppStreamStackFleetAttachment - - AppSyncGraphqlAPI - - ApplicationAutoScalingScalableTarget - - ArchiveRule - - AthenaNamedQuery - - AthenaWorkGroup - - BatchComputeEnvironment - - BatchComputeEnvironmentState - - BatchJobQueue - - BatchJobQueueState - - BillingCostandUsageReport - - Budget - - Cloud9Environment - - CloudDirectoryDirectory - - CloudDirectorySchema - - CodeArtifactDomain - - CodeArtifactRepository - - CodeBuildProject - - CodeCommitRepository - - CodeDeployApplication - - CodePipelinePipeline - - CodeStarConnection - - CodeStarNotificationRule - - CodeStarProject - - CognitoIdentityPool - - CognitoIdentityProvider - - CognitoUserPool - - CognitoUserPoolClient - - CognitoUserPoolDomain - - ComprehendDocumentClassifier - - ComprehendDominantLanguageDetectionJob - - ComprehendEndpoint - - ComprehendEntitiesDetectionJob - - ComprehendEntityRecognizer - - ComprehendKeyPhrasesDetectionJob - - ComprehendSentimentDetectionJob - - ConfigServiceConfigRule - - ConfigServiceConfigurationRecorder - - ConfigServiceDeliveryChannel - - DAXCluster - - DAXParameterGroup - - DAXSubnetGroup - - DataPipelinePipeline - - DatabaseMigrationServiceCertificate - - DatabaseMigrationServiceEndpoint - - DatabaseMigrationServiceEventSubscription - - DatabaseMigrationServiceReplicationInstance - - 
DatabaseMigrationServiceReplicationTask - - DatabaseMigrationServiceSubnetGroup - - DeviceFarmProject - - DirectoryServiceDirectory - - EC2ClientVpnEndpointAttachment - - EC2ClientVpnEndpoint - - EC2DefaultSecurityGroupRule - - FMSNotificationChannel - - FMSPolicy - - FSxBackup - - FSxFileSystem - - FirehoseDeliveryStream - - GlobalAccelerator - - GlobalAcceleratorEndpointGroup - - GlobalAcceleratorListener - - GlueClassifier - - GlueConnection - - GlueCrawler - - GlueDatabase - - GlueDevEndpoint - - GlueJob - - GlueTrigger - - Inspector2 - - InspectorAssessmentRun - - InspectorAssessmentTarget - - InspectorAssessmentTemplate - - IoTAuthorizer - - IoTCACertificate - - IoTCertificate - - IoTJob - - IoTOTAUpdate - - IoTPolicy - - IoTRoleAlias - - IoTStream - - IoTThing - - IoTThingGroup - - IoTThingType - - IoTThingTypeState - - IoTTopicRule - - KendraIndex - - KinesisAnalyticsApplication - - KinesisStream - - KinesisVideoProject - - LexBot - - LexIntent - - LexModelBuildingServiceBotAlias - - LexSlotType - - LifecycleHook - - LightsailDisk - - LightsailDomain - - LightsailInstance - - LightsailKeyPair - - LightsailLoadBalancer - - LightsailStaticIP - - MQBroker - - MSKCluster - - MSKConfiguration - - MachineLearningBranchPrediction - - MachineLearningDataSource - - MachineLearningEvaluation - - MachineLearningMLModel - - Macie - - MediaConvertJobTemplate - - MediaConvertPreset - - MediaConvertQueue - - MediaLiveChannel - - MediaLiveInput - - MediaLiveInputSecurityGroup - - MediaPackageChannel - - MediaPackageOriginEndpoint - - MediaStoreContainer - - MediaStoreDataItems - - MediaTailorConfiguration - - MobileProject - - NeptuneCluster - - NeptuneInstance - - NetpuneSnapshot - - OpsWorksApp - - OpsWorksCMBackup - - OpsWorksCMServer - - OpsWorksCMServerState - - OpsWorksInstance - - OpsWorksLayer - - OpsWorksUserProfile - - QLDBLedger - - RoboMakerRobotApplication - - RoboMakerSimulationApplication - - RoboMakerSimulationJob - - SESConfigurationSet - - SESIdentity - - 
SESReceiptFilter - - SESReceiptRuleSet - - SESTemplate - - SSMActivation - - SSMAssociation - - SSMDocument - - SSMMaintenanceWindow - - SSMParameter - - SSMPatchBaseline - - SSMResourceDataSync - - SageMakerApp - - SageMakerDomain - - SageMakerEndpoint - - SageMakerEndpointConfig - - SageMakerModel - - SageMakerNotebookInstance - - SageMakerNotebookInstanceLifecycleConfig - - SageMakerNotebookInstanceState - - SageMakerUserProfiles - - ServiceCatalogConstraintPortfolioAttachment - - ServiceCatalogPortfolio - - ServiceCatalogPortfolioProductAttachment - - ServiceCatalogPortfolioShareAttachment - - ServiceCatalogPrincipalPortfolioAttachment - - ServiceCatalogProduct - - ServiceCatalogProvisionedProduct - - ServiceCatalogTagOption - - ServiceCatalogTagOptionPortfolioAttachment - - ServiceDiscoveryInstance - - ServiceDiscoveryNamespace - - ServiceDiscoveryService - - SimpleDBDomain - - StorageGatewayFileShare - - StorageGatewayGateway - - StorageGatewayTape - - StorageGatewayVolume - - TransferServer - - TransferServerUser - - WAFRegionalByteMatchSet - - WAFRegionalByteMatchSetIP - - WAFRegionalIPSet - - WAFRegionalIPSetIP - - WAFRegionalRateBasedRule - - WAFRegionalRateBasedRulePredicate - - WAFRegionalRegexMatchSet - - WAFRegionalRegexMatchTuple - - WAFRegionalRegexPatternSet - - WAFRegionalRegexPatternString - - WAFRegionalRule - - WAFRegionalRuleGroup - - WAFRegionalRulePredicate - - WAFRegionalWebACL - - WAFRegionalWebACLRuleAttachment - - WAFRule - - WAFWebACL - - WAFWebACLRuleAttachment - - WAFv2IPSet - - WAFv2RegexPatternSet - - WAFv2RuleGroup - - WAFv2WebACL - - WorkLinkFleet - - WorkSpacesWorkspace - - XRayGroup - - XRaySamplingRule - diff --git a/enos/ci/bootstrap/main.tf b/enos/ci/bootstrap/main.tf deleted file mode 100644 index db89663..0000000 --- a/enos/ci/bootstrap/main.tf +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - aws = { - source = "hashicorp/aws" - } - } - - cloud { - hostname = "app.terraform.io" - organization = "hashicorp-qti" - // workspace must be exported in the environment as: TF_WORKSPACE=-ci-enos-boostrap - } -} - -provider "aws" { - region = "us-east-1" - alias = "us_east_1" -} - -provider "aws" { - region = "us-east-2" - alias = "us_east_2" -} - -provider "aws" { - region = "us-west-1" - alias = "us_west_1" -} - -provider "aws" { - region = "us-west-2" - alias = "us_west_2" -} - - -locals { - key_name = "${var.repository}-ci-ssh-key" -} - -resource "aws_key_pair" "enos_ci_key_us_east_1" { - key_name = local.key_name - public_key = var.aws_ssh_public_key - - provider = aws.us_east_1 -} - -resource "aws_key_pair" "enos_ci_key_us_east_2" { - key_name = local.key_name - public_key = var.aws_ssh_public_key - - provider = aws.us_east_2 -} - -resource "aws_key_pair" "enos_ci_key_us_west_1" { - key_name = local.key_name - public_key = var.aws_ssh_public_key - - provider = aws.us_west_1 -} - -resource "aws_key_pair" "enos_ci_key_us_west_2" { - key_name = local.key_name - public_key = var.aws_ssh_public_key - - provider = aws.us_west_2 -} diff --git a/enos/ci/bootstrap/outputs.tf b/enos/ci/bootstrap/outputs.tf deleted file mode 100644 index a83ef9e..0000000 --- a/enos/ci/bootstrap/outputs.tf +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -output "keys" { - value = { - "us-east-1" = { - name = aws_key_pair.enos_ci_key_us_east_1.key_name - arn = aws_key_pair.enos_ci_key_us_east_1.arn - } - "us-east-2" = { - name = aws_key_pair.enos_ci_key_us_east_2.key_name - arn = aws_key_pair.enos_ci_key_us_east_2.arn - } - "us-west-1" = { - name = aws_key_pair.enos_ci_key_us_west_1.key_name - arn = aws_key_pair.enos_ci_key_us_west_1.arn - } - "us-west-2" = { - name = aws_key_pair.enos_ci_key_us_west_2.key_name - arn = aws_key_pair.enos_ci_key_us_west_2.arn - } - } -} diff --git a/enos/ci/bootstrap/variables.tf b/enos/ci/bootstrap/variables.tf deleted file mode 100644 index 7e80d5c..0000000 --- a/enos/ci/bootstrap/variables.tf +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -variable "aws_ssh_public_key" { - description = "The public key to use for the ssh key" - type = string -} - -variable "repository" { - description = "The repository to bootstrap the ci for, either 'vault' or 'vault-enterprise'" - type = string - validation { - condition = contains(["vault", "vault-enterprise"], var.repository) - error_message = "Repository must be one of either 'vault' or 'vault-enterprise'" - } -} diff --git a/enos/ci/service-user-iam/main.tf b/enos/ci/service-user-iam/main.tf deleted file mode 100644 index da5f20b..0000000 --- a/enos/ci/service-user-iam/main.tf +++ /dev/null @@ -1,242 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - aws = { - source = "hashicorp/aws" - } - } - - cloud { - hostname = "app.terraform.io" - organization = "hashicorp-qti" - // workspace must be exported in the environment as: TF_WORKSPACE=-ci-enos-service-user-iam - } -} - -locals { - enterprise_repositories = ["vault-enterprise"] - is_ent = contains(local.enterprise_repositories, var.repository) - ci_account_prefix = local.is_ent ? 
"vault_enterprise" : "vault" - service_user = "github_actions-${local.ci_account_prefix}_ci" - aws_account_id = local.is_ent ? "505811019928" : "040730498200" -} - -resource "aws_iam_role" "role" { - provider = aws.us_east_1 - name = local.service_user - assume_role_policy = data.aws_iam_policy_document.assume_role_policy_document.json -} - -data "aws_iam_policy_document" "assume_role_policy_document" { - provider = aws.us_east_1 - - statement { - effect = "Allow" - actions = ["sts:AssumeRole"] - - principals { - type = "AWS" - identifiers = ["arn:aws:iam::${local.aws_account_id}:user/${local.service_user}"] - } - } -} - -resource "aws_iam_role_policy" "role_policy" { - provider = aws.us_east_1 - role = aws_iam_role.role.name - name = "${local.service_user}_policy" - policy = data.aws_iam_policy_document.role_policy.json -} - -data "aws_iam_policy_document" "role_policy" { - source_policy_documents = [ - data.aws_iam_policy_document.enos_scenario.json, - data.aws_iam_policy_document.aws_nuke.json, - ] -} - -data "aws_iam_policy_document" "aws_nuke" { - provider = aws.us_east_1 - - statement { - effect = "Allow" - actions = [ - "ec2:DescribeInternetGateways", - "ec2:DescribeNatGateways", - "ec2:DescribeRegions", - "ec2:DescribeVpnGateways", - "iam:DeleteAccessKey", - "iam:DeleteUser", - "iam:DeleteUserPolicy", - "iam:DetachUserPolicy", - "iam:GetUser", - "iam:ListAccessKeys", - "iam:ListAccountAliases", - "iam:ListGroupsForUser", - "iam:ListSSHPublicKeys", - "iam:ListUserPolicies", - "iam:ListUserTags", - "iam:ListUsers", - "iam:ListVirtualMFADevices", - "iam:UntagUser", - "servicequotas:ListServiceQuotas" - ] - - resources = ["*"] - } -} - -data "aws_iam_policy_document" "enos_scenario" { - provider = aws.us_east_1 - - statement { - effect = "Allow" - actions = [ - "ec2:AssociateRouteTable", - "ec2:AttachInternetGateway", - "ec2:AuthorizeSecurityGroupEgress", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CancelSpotFleetRequests", - "ec2:CancelSpotInstanceRequests", 
- "iam:CreateAccessKey", - "ec2:CreateEgressOnlyInternetGateway", - "ec2:CreateInternetGateway", - "ec2:CreateKeyPair", - "ec2:CreateFleet", - "ec2:CreateLaunchTemplate", - "ec2:CreateLaunchTemplateVersion", - "ec2:CreateRoute", - "ec2:CreateRouteTable", - "ec2:CreateSecurityGroup", - "ec2:CreateSpotDatafeedSubscription", - "ec2:CreateSubnet", - "ec2:CreateTags", - "ec2:CreateVolume", - "ec2:CreateVPC", - "ec2:DeleteEgressOnlyInternetGateway", - "ec2:DeleteFleets", - "ec2:DeleteInternetGateway", - "ec2:DeleteLaunchTemplate", - "ec2:DeleteLaunchTemplateVersions", - "ec2:DeleteKeyPair", - "ec2:DeleteRoute", - "ec2:DeleteRouteTable", - "ec2:DeleteSecurityGroup", - "ec2:DeleteSpotDatafeedSubscription", - "ec2:DeleteSubnet", - "ec2:DeleteTags", - "ec2:DeleteVolume", - "ec2:DeleteVPC", - "ec2:DescribeAccountAttributes", - "ec2:DescribeAvailabilityZones", - "ec2:DescribeEgressOnlyInternetGateways", - "ec2:DescribeFleets", - "ec2:DescribeFleetHistory", - "ec2:DescribeFleetInstances", - "ec2:DescribeImages", - "ec2:DescribeInstanceAttribute", - "ec2:DescribeInstanceCreditSpecifications", - "ec2:DescribeInstances", - "ec2:DescribeInstanceTypeOfferings", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInternetGateways", - "ec2:DescribeKeyPairs", - "ec2:DescribeLaunchTemplates", - "ec2:DescribeLaunchTemplateVersions", - "ec2:DescribeNatGateways", - "ec2:DescribeNetworkAcls", - "ec2:DescribeNetworkInterfaces", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSpotDatafeedSubscription", - "ec2:DescribeSpotFleetInstances", - "ec2:DescribeSpotFleetInstanceRequests", - "ec2:DescribeSpotFleetRequests", - "ec2:DescribeSpotFleetRequestHistory", - "ec2:DescribeSpotInstanceRequests", - "ec2:DescribeSpotPriceHistory", - "ec2:DescribeSubnets", - "ec2:DescribeTags", - "ec2:DescribeVolumes", - "ec2:DescribeVpcAttribute", - "ec2:DescribeVpcClassicLink", - "ec2:DescribeVpcClassicLinkDnsSupport", - "ec2:DescribeVpcs", - 
"ec2:DescribeVpnGateways", - "ec2:DetachInternetGateway", - "ec2:DisassociateRouteTable", - "ec2:GetLaunchTemplateData", - "ec2:GetSpotPlacementScores", - "ec2:ImportKeyPair", - "ec2:ModifyFleet", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyLaunchTemplate", - "ec2:ModifySpotFleetRequest", - "ec2:ModifySubnetAttribute", - "ec2:ModifyVPCAttribute", - "ec2:RequestSpotInstances", - "ec2:RequestSpotFleet", - "ec2:ResetInstanceAttribute", - "ec2:RevokeSecurityGroupEgress", - "ec2:RevokeSecurityGroupIngress", - "ec2:RunInstances", - "ec2:SendSpotInstanceInterruptions", - "ec2:TerminateInstances", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeTargetGroups", - "iam:AddRoleToInstanceProfile", - "iam:AttachRolePolicy", - "iam:AttachUserPolicy", - "iam:CreateInstanceProfile", - "iam:CreatePolicy", - "iam:CreateRole", - "iam:CreateServiceLinkedRole", - "iam:CreateUser", - "iam:DeleteInstanceProfile", - "iam:DeleteLoginProfile", - "iam:DeletePolicy", - "iam:DeleteRole", - "iam:DeleteRolePolicy", - "iam:DetachRolePolicy", - "iam:GetInstanceProfile", - "iam:GetPolicy", - "iam:GetPolicyVersion", - "iam:GetRole", - "iam:GetRolePolicy", - "iam:ListAccountAliases", - "iam:ListAttachedRolePolicies", - "iam:ListAttachedUserPolicies", - "iam:ListInstanceProfiles", - "iam:ListInstanceProfilesForRole", - "iam:ListMFADevices", - "iam:ListPolicies", - "iam:ListRolePolicies", - "iam:ListRoles", - "iam:ListServiceSpecificCredentials", - "iam:ListSigningCertificates", - "iam:PassRole", - "iam:PutRolePolicy", - "iam:RemoveRoleFromInstanceProfile", - "iam:UpdateUser", - "kms:CreateAlias", - "kms:CreateKey", - "kms:Decrypt", - "kms:DeleteAlias", - "kms:DescribeKey", - "kms:Encrypt", - "kms:GetKeyPolicy", - "kms:GetKeyRotationStatus", - "kms:ListAliases", - "kms:ListKeys", - "kms:ListResourceTags", - "kms:ScheduleKeyDeletion", - "kms:TagResource", - "servicequotas:ListServiceQuotas" - ] - - resources = ["*"] - } -} diff --git a/enos/ci/service-user-iam/outputs.tf 
b/enos/ci/service-user-iam/outputs.tf deleted file mode 100644 index 348696b..0000000 --- a/enos/ci/service-user-iam/outputs.tf +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -output "ci_role" { - value = { - name = aws_iam_role.role.name - arn = aws_iam_role.role.arn - } -} - -output "ci_role_policy" { - value = { - name = aws_iam_role_policy.role_policy.name - policy = aws_iam_role_policy.role_policy.policy - } -} diff --git a/enos/ci/service-user-iam/providers.tf b/enos/ci/service-user-iam/providers.tf deleted file mode 100644 index cf2d21e..0000000 --- a/enos/ci/service-user-iam/providers.tf +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -provider "aws" { - region = "us-east-1" - alias = "us_east_1" -} - -provider "aws" { - region = "us-east-2" - alias = "us_east_2" -} - -provider "aws" { - region = "us-west-1" - alias = "us_west_1" -} - -provider "aws" { - region = "us-west-2" - alias = "us_west_2" -} diff --git a/enos/ci/service-user-iam/service-quotas.tf b/enos/ci/service-user-iam/service-quotas.tf deleted file mode 100644 index 676bbb0..0000000 --- a/enos/ci/service-user-iam/service-quotas.tf +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -locals { - // This is the code of the service quota to request a change for. Each adjustable limit has a - // unique code. 
See, https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/servicequotas_service_quota#quota_code - subnets_per_vpcs_quota = "L-F678F1CE" - standard_spot_instance_requests_quota = "L-34B43A08" -} - -resource "aws_servicequotas_service_quota" "vpcs_per_region_us_east_1" { - provider = aws.us_east_1 - quota_code = local.subnets_per_vpcs_quota - service_code = "vpc" - value = 100 -} - -resource "aws_servicequotas_service_quota" "vpcs_per_region_us_east_2" { - provider = aws.us_east_2 - quota_code = local.subnets_per_vpcs_quota - service_code = "vpc" - value = 100 -} - -resource "aws_servicequotas_service_quota" "vpcs_per_region_us_west_1" { - provider = aws.us_west_1 - quota_code = local.subnets_per_vpcs_quota - service_code = "vpc" - value = 100 -} - -resource "aws_servicequotas_service_quota" "vpcs_per_region_us_west_2" { - provider = aws.us_west_2 - quota_code = local.subnets_per_vpcs_quota - service_code = "vpc" - value = 100 -} - -resource "aws_servicequotas_service_quota" "spot_requests_per_region_us_east_1" { - provider = aws.us_east_1 - quota_code = local.standard_spot_instance_requests_quota - service_code = "ec2" - value = 640 -} - -resource "aws_servicequotas_service_quota" "spot_requests_per_region_us_east_2" { - provider = aws.us_east_2 - quota_code = local.standard_spot_instance_requests_quota - service_code = "ec2" - value = 640 -} - -resource "aws_servicequotas_service_quota" "spot_requests_per_region_us_west_1" { - provider = aws.us_west_1 - quota_code = local.standard_spot_instance_requests_quota - service_code = "ec2" - value = 640 -} - -resource "aws_servicequotas_service_quota" "spot_requests_per_region_us_west_2" { - provider = aws.us_west_2 - quota_code = local.standard_spot_instance_requests_quota - service_code = "ec2" - value = 640 -} diff --git a/enos/ci/service-user-iam/variables.tf b/enos/ci/service-user-iam/variables.tf deleted file mode 100644 index b69c07b..0000000 --- a/enos/ci/service-user-iam/variables.tf +++ 
/dev/null @@ -1,11 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -variable "repository" { - description = "The GitHub repository, either vault or vault-enterprise" - type = string - validation { - condition = contains(["vault", "vault-enterprise"], var.repository) - error_message = "Invalid repository, only vault or vault-enterprise are supported" - } -} diff --git a/enos/enos.vars.hcl b/enos/enos.vars.hcl new file mode 100644 index 0000000..b749d90 --- /dev/null +++ b/enos/enos.vars.hcl @@ -0,0 +1,118 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// artifactory_token is the token to use when authenticating to artifactory. +// artifactory_token = "yourtoken" + +// artifactory_host is the artifactory host to search for vault artifacts. +// artifactory_host = "https://artifactory.hashicorp.engineering/artifactory" + +// artifactory_repo is the artifactory repo to search for vault artifacts. +// artifactory_repo = "hashicorp-crt-stable-local*" + +// aws_region is the AWS region where we'll create infrastructure +// for the smoke scenario +// aws_region = "us-east-1" + +// aws_ssh_keypair_name is the AWS keypair to use for SSH +// aws_ssh_keypair_name = "enos-ci-ssh-key" + +// aws_ssh_private_key_path is the path to the AWS keypair private key +// aws_ssh_private_key_path = "./support/private_key.pem" + +// backend_license_path is the license for the backend if applicable (Consul Enterprise)". +// backend_license_path = "./support/consul.hclic" + +// backend_log_level is the server log level for the backend. Supported values include 'trace', +// 'debug', 'info', 'warn', 'error'" +// backend_log_level = "trace" + +// backend_instance_type is the instance type to use for the Vault backend. Must support arm64 +// backend_instance_type = "t4g.small" + +// project_name is the description of the project. It will often be used to tag infrastructure +// resources. 
+// project_name = "vault-enos-integration" + +// distro_version_amzn is the version of Amazon Linux 2 to use for "distro:amzn" variants +// distro_version_amzn = "2" + +// distro_version_leap is the version of openSUSE Leap to use for "distro:leap" variants +// distro_version_leap = "15.5" + +// distro_version_rhel is the version of RHEL to use for "distro:rhel" variants. +// distro_version_rhel = "9.3" // or "8.9" + +// distro_version_sles is the version of SUSE SLES to use for "distro:sles" variants. +// distro_version_sles = "v15_sp5_standard" + +// distro_version_ubuntu is the version of ubuntu to use for "distro:ubuntu" variants +// distro_version_ubuntu = "22.04" // or "20.04" + +// tags are a map of tags that will be applied to infrastructure resources that +// support tagging. +// tags = { "Project Name" : "Vault", "Something Cool" : "Value" } + +// terraform_plugin_cache_dir is the directory to cache Terraform modules and providers. +// It must exist. +// terraform_plugin_cache_dir = "/Users//.terraform/plugin-cache-dir + +// ui_test_filter is the test filter to limit the ui tests to execute for the ui scenario. It will +// be appended to the ember test command as '-f=\"\"'. +// ui_test_filter = "sometest" + +// ui_run_tests sets whether to run the UI tests or not for the ui scenario. If set to false a +// cluster will be created but no tests will be run. +// ui_run_tests = true + +// vault_artifact_path is the path to CRT generated or local vault.zip bundle. When +// using the "builder:local" variant a bundle will be built from the current branch. +// In CI it will use the output of the build workflow. +// vault_artifact_path = "./dist/vault.zip" + +// vault_artifact_type is the type of Vault artifact to use when installing Vault from artifactory. +// It should be 'package' for .deb or # .rpm package and 'bundle' for .zip bundles" +// vault_artifact_type = "bundle" + +// vault_build_date is the build date for Vault artifact. 
Some validations will require the binary build +// date to match" +// vault_build_date = "2023-07-07T14:06:37Z" // make ci-get-date for example + +// vault_enable_audit_devices sets whether or not to enable every audit device. It true +// a file audit device will be enabled at the path /var/log/vault_audit.log, the syslog +// audit device will be enabled, and a socket audit device connecting to 127.0.0.1:9090 +// will be enabled. The netcat program is run in listening mode to provide an endpoint +// that the socket audit device can connect to. +// vault_enable_audit_devices = true + +// vault_install_dir is the directory where the vault binary will be installed on +// the remote machines. +// vault_install_dir = "/opt/vault/bin" + +// vault_local_binary_path is the path of the local binary that we're upgrading to. +// vault_local_binary_path = "./support/vault" + +// vault_instance_type is the instance type to use for the Vault backend +// vault_instance_type = "t3.small" + +// vault_instance_count is how many instances to create for the Vault cluster. +// vault_instance_count = 3 + +// vault_license_path is the path to a valid Vault enterprise edition license. +// This is only required for non-ce editions" +// vault_license_path = "./support/vault.hclic" + +// vault_local_build_tags override the build tags we pass to the Go compiler for builder:local variants. +// vault_local_build_tags = ["ui", "ent"] + +// vault_log_level is the server log level for Vault logs. Supported values (in order of detail) are +// trace, debug, info, warn, and err." +// vault_log_level = "trace" + +// vault_product_version is the version of Vault we are testing. Some validations will expect the vault +// binary and cluster to report this version. +// vault_product_version = "1.15.0" + +// vault_revision is the git sha of Vault artifact we are testing. Some validations will expect the vault +// binary and cluster to report this revision. 
+// vault_revision = "df733361af26f8bb29b63704168bbc5ab8d083de" From fd1fe0b71521a5cd1c1e0604f74fd77a3f2d26e5 Mon Sep 17 00:00:00 2001 From: HamzaShili65 Date: Thu, 31 Jul 2025 15:07:29 -0700 Subject: [PATCH 06/26] refrence target_ec2_instances from vault repo --- enos/enos-modules.hcl | 2 +- enos/modules/target_ec2_instances/locals.tf | 11 - enos/modules/target_ec2_instances/main.tf | 223 ------------------ enos/modules/target_ec2_instances/outputs.tf | 11 - .../modules/target_ec2_instances/variables.tf | 85 ------- 5 files changed, 1 insertion(+), 331 deletions(-) delete mode 100644 enos/modules/target_ec2_instances/locals.tf delete mode 100644 enos/modules/target_ec2_instances/main.tf delete mode 100644 enos/modules/target_ec2_instances/outputs.tf delete mode 100644 enos/modules/target_ec2_instances/variables.tf diff --git a/enos/enos-modules.hcl b/enos/enos-modules.hcl index 9ea9f12..76a0077 100644 --- a/enos/enos-modules.hcl +++ b/enos/enos-modules.hcl @@ -152,7 +152,7 @@ module "target_ec2_fleet" { // create target instances using ec2:RunInstances module "target_ec2_instances" { - source = "./modules/target_ec2_instances" + source = "git::https://github.com/hashicorp/vault.git//enos/target_ec2_instances?ref=main" common_tags = var.tags ports_ingress = values(global.ports) diff --git a/enos/modules/target_ec2_instances/locals.tf b/enos/modules/target_ec2_instances/locals.tf deleted file mode 100644 index 8831b7e..0000000 --- a/enos/modules/target_ec2_instances/locals.tf +++ /dev/null @@ -1,11 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -locals { - hosts = { for idx in range(var.instance_count) : idx => { - ipv6 = try(aws_instance.targets[idx].ipv6_addresses[0], "") - public_ip = aws_instance.targets[idx].public_ip - private_ip = aws_instance.targets[idx].private_ip - } - } -} diff --git a/enos/modules/target_ec2_instances/main.tf b/enos/modules/target_ec2_instances/main.tf deleted file mode 100644 index 649b871..0000000 --- a/enos/modules/target_ec2_instances/main.tf +++ /dev/null @@ -1,223 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -terraform { - required_providers { - # We need to specify the provider source in each module until we publish it - # to the public registry - enos = { - source = "registry.terraform.io/hashicorp-forge/enos" - version = ">= 0.3.24" - } - } -} - -data "aws_vpc" "vpc" { - id = var.vpc_id -} - -data "aws_ami" "ami" { - filter { - name = "image-id" - values = [var.ami_id] - } -} - -data "aws_ec2_instance_type_offerings" "instance" { - filter { - name = "instance-type" - values = [local.instance_type] - } - - location_type = "availability-zone" -} - -data "aws_availability_zones" "available" { - state = "available" - - filter { - name = "zone-name" - values = data.aws_ec2_instance_type_offerings.instance.locations - } -} - -data "aws_subnets" "vpc" { - filter { - name = "availability-zone" - values = data.aws_availability_zones.available.names - } - - filter { - name = "vpc-id" - values = [var.vpc_id] - } -} - -data "aws_iam_policy_document" "target" { - statement { - resources = ["*"] - - actions = [ - "ec2:DescribeInstances", - "secretsmanager:*" - ] - } - - dynamic "statement" { - for_each = var.seal_key_names - - content { - resources = [statement.value] - - actions = [ - "kms:DescribeKey", - "kms:ListKeys", - "kms:Encrypt", - "kms:Decrypt", - "kms:GenerateDataKey" - ] - } - } -} - -data "aws_iam_policy_document" "target_instance_role" { - statement { - actions = ["sts:AssumeRole"] - - 
principals { - type = "Service" - identifiers = ["ec2.amazonaws.com"] - } - } -} - -data "enos_environment" "localhost" {} - -locals { - cluster_name = coalesce(var.cluster_name, random_string.cluster_name.result) - instance_type = local.instance_types[data.aws_ami.ami.architecture] - instance_types = { - "arm64" = var.instance_types["arm64"] - "x86_64" = var.instance_types["amd64"] - } - instances = toset([for idx in range(var.instance_count) : tostring(idx)]) - name_prefix = "${var.project_name}-${local.cluster_name}-${random_string.unique_id.result}" -} - -resource "random_string" "cluster_name" { - length = 8 - lower = true - upper = false - numeric = false - special = false -} - -resource "random_string" "unique_id" { - length = 4 - lower = true - upper = false - numeric = false - special = false -} - -resource "aws_iam_role" "target_instance_role" { - name = "${local.name_prefix}-instance-role" - assume_role_policy = data.aws_iam_policy_document.target_instance_role.json -} - -resource "aws_iam_instance_profile" "target" { - name = "${local.name_prefix}-instance-profile" - role = aws_iam_role.target_instance_role.name -} - -resource "aws_iam_role_policy" "target" { - name = "${local.name_prefix}-role-policy" - role = aws_iam_role.target_instance_role.id - policy = data.aws_iam_policy_document.target.json -} - -resource "aws_security_group" "target" { - name = "${local.name_prefix}-sg" - description = "Target instance security group" - vpc_id = var.vpc_id - - # External ingress - dynamic "ingress" { - for_each = var.ports_ingress - - content { - from_port = ingress.value.port - to_port = ingress.value.port - protocol = ingress.value.protocol - cidr_blocks = flatten([ - formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), - join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), - formatlist("%s/32", var.ssh_allow_ips) - ]) - ipv6_cidr_blocks = data.aws_vpc.vpc.ipv6_cidr_block != "" ? 
[data.aws_vpc.vpc.ipv6_cidr_block] : null - } - } - - # Internal traffic - ingress { - from_port = 0 - to_port = 0 - protocol = "-1" - self = true - } - - # External traffic - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - ipv6_cidr_blocks = ["::/0"] - } - - tags = merge( - var.common_tags, - { - Name = "${local.name_prefix}-sg" - }, - ) -} - -resource "aws_instance" "targets" { - for_each = local.instances - - ami = var.ami_id - iam_instance_profile = aws_iam_instance_profile.target.name - // Some scenarios (autopilot, pr_replication) shutdown instances to simulate failure. In those - // cases we should terminate the instance entirely rather than get stuck in stopped limbo. - instance_initiated_shutdown_behavior = "terminate" - instance_type = local.instance_type - key_name = var.ssh_keypair - subnet_id = data.aws_subnets.vpc.ids[tonumber(each.key) % length(data.aws_subnets.vpc.ids)] - vpc_security_group_ids = [aws_security_group.target.id] - - root_block_device { - encrypted = true - } - - metadata_options { - http_tokens = "required" - http_endpoint = "enabled" - } - - tags = merge( - var.common_tags, - { - Name = "${local.name_prefix}-${var.cluster_tag_key}-instance-target" - "${var.cluster_tag_key}" = local.cluster_name - }, - ) -} - -module "disable_selinux" { - depends_on = [aws_instance.targets] - source = "../disable_selinux" - count = var.disable_selinux == true ? 1 : 0 - - hosts = local.hosts -} diff --git a/enos/modules/target_ec2_instances/outputs.tf b/enos/modules/target_ec2_instances/outputs.tf deleted file mode 100644 index 674c5cf..0000000 --- a/enos/modules/target_ec2_instances/outputs.tf +++ /dev/null @@ -1,11 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -output "cluster_name" { - value = local.cluster_name -} - -output "hosts" { - description = "The ec2 instance target hosts" - value = local.hosts -} diff --git a/enos/modules/target_ec2_instances/variables.tf b/enos/modules/target_ec2_instances/variables.tf deleted file mode 100644 index 9718f2f..0000000 --- a/enos/modules/target_ec2_instances/variables.tf +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -variable "ami_id" { - description = "The machine image identifier" - type = string -} - -variable "cluster_name" { - type = string - description = "A unique cluster identifier" - default = null -} - -variable "cluster_tag_key" { - type = string - description = "The key name for the cluster tag" - default = "TargetCluster" -} - -variable "common_tags" { - description = "Common tags for cloud resources" - type = map(string) - default = { "Project" : "vault-ci" } -} - -variable "ports_ingress" { - description = "Ports mappings to allow for ingress" - type = list(object({ - description = string - port = number - protocol = string - })) -} - -variable "disable_selinux" { - description = "Optionally disable SELinux for certain distros/versions" - type = bool - default = true -} - -variable "instance_count" { - description = "The number of target instances to create" - type = number - default = 3 -} - -variable "instance_types" { - description = "The instance types to use depending on architecture" - type = object({ - amd64 = string - arm64 = string - }) - default = { - amd64 = "t3a.medium" - arm64 = "t4g.medium" - } -} - -variable "project_name" { - description = "A unique project name" - type = string -} - -variable "seal_key_names" { - type = list(string) - description = "The key management seal key names" - default = [] -} - -variable "ssh_allow_ips" { - description = "Allowlisted IP addresses for SSH access to target nodes. 
The IP address of the machine running Enos will automatically allowlisted" - type = list(string) - default = [] -} - -variable "ssh_keypair" { - description = "SSH keypair used to connect to EC2 instances" - type = string -} - -variable "vpc_id" { - description = "The identifier of the VPC where the target instances will be created" - type = string -} From 142963a68a203bb1ca969b156d31dfa551f2e66d Mon Sep 17 00:00:00 2001 From: Hamza Shili <98858609+HamzaShili65@users.noreply.github.com> Date: Fri, 1 Aug 2025 15:38:44 -0700 Subject: [PATCH 07/26] Vault 37081 plugin quality add scenario for Vault cluster setup, OpenLDAP server setup, and plugin configuration (#174) * add environment setup and teardown srcipts and make targets for ldap server * add terraform module for ldap server setup * add terraform module for building, registering, enabling, and configuring the plugin * add terraform module for bootstrapping vault cluster * add enos modules * add enos descriptions * add ingress for ldap server and machine os and arch outputs * add smoke scenario for openldap * ignore the .enos dir * fix formatting error * removed copied modules from vault repo * add remote references to tf modules borrowed from vault * add variables for ldap ports and reference to remote module * clean configure plugin module * replace hardcoded variables with tf vars * change name for LDAP_VERSION to IMAGE_TAG and remove PLUGIN_DEST_DIR * remove unnecessary sudos * remove PLUGIN_DEST_DIR * renmae LDAP_VERSION to IMAGE TAG * refactor out plugin setup module from plugin configure * fmt * remove consul storage backend related setup * add cluster tag for ldap server target * add module that builds plugin binary and bundles it from local branch * wip: build_ldap step currently only supports local builds * replace file copying logic with enos_bundle_install resource as it supports local build, releases, and artifactory * wip: introduce build_ldap step * change license to MPL-2.0 on scripts * add 
build_ldap description * fmt * change license to MPL-2.0 on scripts * make the ref configurable for all external tf modules * remove references to unused modules * remove unused qualities * remove consul variable * remove unused descriptions * add suport for building ldap from artifactory and releases * reference target_ec2_instances module from vault * change artifact path in setup_plugin to be nonull only for local builds * use same ldap image tag for mkae targets and enos * update go.sum * remove unused variables and update enos.vars.hcl with template enos vars setup for developer * result of mod tidy * change lease to MPL-2.0 --------- Co-authored-by: Hamza ElMokhtar Shili --- .gitignore | 1 + Makefile | 73 ++- bootstrap/ldif/seed.ldif | 41 ++ bootstrap/setup-docker.sh | 55 ++ bootstrap/setup-openldap.sh | 55 ++ bootstrap/teardown-env.sh | 33 + enos/enos-descriptions.hcl | 119 ++++ enos/enos-dev-variables.hcl | 9 +- enos/enos-dynamic-config.hcl | 2 +- enos/enos-globals.hcl | 81 +-- enos/enos-modules.hcl | 339 ++-------- enos/enos-providers.hcl | 2 +- enos/enos-qualities.hcl | 459 +------------- enos/enos-scenario-openldap.hcl | 581 ++++++++++++++++++ enos/enos-terraform.hcl | 2 +- enos/enos-variables.hcl | 145 +++-- enos/enos.vars.hcl | 48 +- enos/modules/backend_servers_setup/main.tf | 69 +++ enos/modules/backend_servers_setup/outputs.tf | 12 + .../backend_servers_setup/variables.tf | 23 + .../build_artifactory_artifact/main.tf | 109 ++++ enos/modules/build_local/main.tf | 58 ++ .../build_local/scripts/plugin-build.sh | 55 ++ enos/modules/build_releases/main.tf | 31 + enos/modules/configure_plugin/ldap/main.tf | 30 + .../ldap/scripts/plugin-configure.sh | 39 ++ .../configure_plugin/ldap/variables.tf | 48 ++ enos/modules/ec2_bootstrap_tools/main.tf | 90 +++ .../scripts/create-plugin-dir.sh | 10 + .../scripts/install-shasum.sh | 49 ++ .../scripts/vault-unseal.sh | 22 + enos/modules/ec2_bootstrap_tools/variables.tf | 32 + enos/modules/setup_plugin/main.tf | 61 ++ 
.../setup_plugin/scripts/plugin-enable.sh | 29 + .../setup_plugin/scripts/plugin-register.sh | 60 ++ enos/modules/setup_plugin/variables.tf | 77 +++ 36 files changed, 2080 insertions(+), 869 deletions(-) create mode 100644 bootstrap/ldif/seed.ldif create mode 100755 bootstrap/setup-docker.sh create mode 100755 bootstrap/setup-openldap.sh create mode 100755 bootstrap/teardown-env.sh create mode 100644 enos/enos-descriptions.hcl create mode 100644 enos/enos-scenario-openldap.hcl create mode 100644 enos/modules/backend_servers_setup/main.tf create mode 100644 enos/modules/backend_servers_setup/outputs.tf create mode 100644 enos/modules/backend_servers_setup/variables.tf create mode 100644 enos/modules/build_artifactory_artifact/main.tf create mode 100644 enos/modules/build_local/main.tf create mode 100755 enos/modules/build_local/scripts/plugin-build.sh create mode 100644 enos/modules/build_releases/main.tf create mode 100644 enos/modules/configure_plugin/ldap/main.tf create mode 100755 enos/modules/configure_plugin/ldap/scripts/plugin-configure.sh create mode 100644 enos/modules/configure_plugin/ldap/variables.tf create mode 100644 enos/modules/ec2_bootstrap_tools/main.tf create mode 100644 enos/modules/ec2_bootstrap_tools/scripts/create-plugin-dir.sh create mode 100644 enos/modules/ec2_bootstrap_tools/scripts/install-shasum.sh create mode 100644 enos/modules/ec2_bootstrap_tools/scripts/vault-unseal.sh create mode 100644 enos/modules/ec2_bootstrap_tools/variables.tf create mode 100644 enos/modules/setup_plugin/main.tf create mode 100755 enos/modules/setup_plugin/scripts/plugin-enable.sh create mode 100755 enos/modules/setup_plugin/scripts/plugin-register.sh create mode 100644 enos/modules/setup_plugin/variables.tf diff --git a/.gitignore b/.gitignore index 40ea26c..91331c3 100644 --- a/.gitignore +++ b/.gitignore @@ -83,4 +83,5 @@ scripts/custom.sh # enos /enos/.enos/* +/enos/enos.vars.hcl diff --git a/Makefile b/Makefile index 0a2d43d..e8989c7 100644 --- a/Makefile 
+++ b/Makefile @@ -7,9 +7,47 @@ ifndef $(GOPATH) GOPATH=$(shell go env GOPATH) export GOPATH endif -PLUGIN_DIR ?= $$GOPATH/vault-plugins +PLUGIN_DIR ?= $(GOPATH)/vault-plugins PLUGIN_PATH ?= local-secrets-ldap +# env vars + +#setup ldap server: +LDAP_DOMAIN ?= example.com +LDAP_ORG ?= example +LDAP_ADMIN_PW ?= adminpassword +IMAGE_TAG ?= 1.5.0 +LDAP_PORT ?= 389 +LDIF_PATH ?= $(PWD)/bootstrap/ldif/seed.ldif + +#configure ldap plugin +MAKEFILE_DIR ?= $(PWD) +PLUGIN_SOURCE_TYPE ?= local_build +PLUGIN_DIR_VAULT ?= /etc/vault/plugins +LDAP_URL ?= ldap://127.0.0.1:389 +LDAP_BIND_DN ?= cn=admin,dc=example,dc=com +LDAP_BIND_PASS ?= adminpassword +LDAP_USER_DN ?= ou=users,dc=example,dc=com +LDAP_SCHEMA ?= openldap + +export LDAP_DOMAIN +export LDAP_ORG +export LDAP_ADMIN_PW +export IMAGE_TAG +export LDAP_PORT +export PLUGIN_DIR +export PLUGIN_NAME +export PLUGIN_PATH +export PLUGIN_SOURCE_TYPE +export MAKEFILE_DIR +export PLUGIN_DIR_VAULT +export LDAP_URL +export LDAP_BIND_DN +export LDAP_BIND_PASS +export LDAP_USER_DN +export LDAP_SCHEMA +export LDIF_PATH + .PHONY: default default: dev @@ -48,8 +86,31 @@ fmtcheck: fmt: gofumpt -l -w . 
-configure: dev - ./bootstrap/configure.sh \ - $(PLUGIN_DIR) \ - $(PLUGIN_NAME) \ - $(PLUGIN_PATH) +.PHONY: setup-env +setup-env: + cd bootstrap && ./setup-docker.sh + cd bootstrap && ./setup-openldap.sh + +.PHONY: plugin-build +plugin-build: + cd enos/modules/build_local && ./scripts/plugin-build.sh + +.PHONY: plugin-register +plugin-register: + cd enos/modules/setup_plugin && \ + PLUGIN_BINARY_SRC="$(PLUGIN_DIR)/$(PLUGIN_NAME)" ./scripts/plugin-register.sh + +.PHONY: plugin-enable +plugin-enable: + cd enos/modules/setup_plugin && ./scripts/plugin-enable.sh + +.PHONY: plugin-configure +plugin-configure: + cd enos/modules/configure_plugin/ldap && ./scripts/plugin-configure.sh + +.PHONY: configure +configure: plugin-build plugin-register plugin-enable plugin-configure + +.PHONY: teardown-env +teardown-env: + cd bootstrap && ./teardown-env.sh diff --git a/bootstrap/ldif/seed.ldif b/bootstrap/ldif/seed.ldif new file mode 100644 index 0000000..ec7371f --- /dev/null +++ b/bootstrap/ldif/seed.ldif @@ -0,0 +1,41 @@ +# Define Organizational Units +dn: ou=groups,dc=example,dc=com +objectClass: organizationalUnit +ou: groups + +dn: ou=users,dc=example,dc=com +objectClass: organizationalUnit +ou: users + +dn: cn=dev,ou=groups,dc=example,dc=com +objectClass: groupOfUniqueNames +cn: dev +uniqueMember: cn=staticuser,ou=users,dc=example,dc=com +uniqueMember: cn=bob.johnson,ou=users,dc=example,dc=com +uniqueMember: cn=mary.smith,ou=users,dc=example,dc=com +description: Development group + +# Add users for static role rotation +dn: uid=staticuser,ou=users,dc=example,dc=com +objectClass: inetOrgPerson +cn: staticuser +sn: staticuser +uid: staticuser +memberOf: cn=dev,ou=groups,dc=example,dc=com +userPassword: defaultpassword + +dn: uid=bob.johnson,ou=users,dc=example,dc=com +objectClass: inetOrgPerson +cn: bob.johnson +sn: bob.johnson +uid: bob.johnson +memberOf: cn=dev,ou=groups,dc=example,dc=com +userPassword: defaultpassword + +dn: uid=mary.smith,ou=users,dc=example,dc=com 
+objectClass: inetOrgPerson +cn: mary.smith +sn: mary.smith +uid: mary.smith +memberOf: cn=dev,ou=groups,dc=example,dc=com +userPassword: defaultpassword \ No newline at end of file diff --git a/bootstrap/setup-docker.sh b/bootstrap/setup-docker.sh new file mode 100755 index 0000000..90e1954 --- /dev/null +++ b/bootstrap/setup-docker.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +set -e + +# Function to check if Docker is already installed +check_docker_installed() { + if command -v docker &> /dev/null; then + echo "Docker is already installed: $(docker --version)" + exit 0 + fi +} + +# Function to detect the OS +detect_os() { + if [ -f /etc/os-release ]; then + # shellcheck disable=SC1091 + . /etc/os-release + echo "$ID" + else + echo "Unknown OS: /etc/os-release not found" + fi +} + +# Main logic +check_docker_installed + +os_id=$(detect_os) +echo "Installing Docker for: ${os_id}" +case "$os_id" in + amzn) + sudo dnf upgrade --refresh -y + sudo dnf install -y docker + ;; + ubuntu) + sudo apt update -y + sudo apt install apt-transport-https ca-certificates curl software-properties-common -y + curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg + echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null + sudo apt update + sudo apt install docker-ce docker-ce-cli containerd.io -y + ;; + rhel | centos) + sudo yum update -y + sudo yum install -y docker + ;; + *) + echo "Unsupported or unknown OS: $os_id" + exit 1 + ;; +esac + +echo "Successfully installed Docker." 
+sudo docker --version \ No newline at end of file diff --git a/bootstrap/setup-openldap.sh b/bootstrap/setup-openldap.sh new file mode 100755 index 0000000..35e16ce --- /dev/null +++ b/bootstrap/setup-openldap.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$LDAP_DOMAIN" ]] && fail "LDAP_DOMAIN env variable has not been set" +[[ -z "$LDAP_ORG" ]] && fail "LDAP_ORG env variable has not been set" +[[ -z "$LDAP_ADMIN_PW" ]] && fail "LDAP_ADMIN_PW env variable has not been set" +[[ -z "$IMAGE_TAG" ]] && fail "IMAGE_TAG env variable has not been set" +[[ -z "$LDAP_PORT" ]] && fail "LDAP_PORT env variable has not been set" +[[ -z "$LDIF_PATH" ]] && fail "LDIF_PATH env variable has not been set" + +LDAP_HOSTNAME="${LDAP_HOSTNAME:-openldap}" + +# Pulling image +echo "Pulling image: ${LDAP_DOCKER_NAME}" +LDAP_DOCKER_NAME="docker.io/osixia/openldap:${IMAGE_TAG}" +docker pull "${LDAP_DOCKER_NAME}" + +# Run OpenLDAP container +echo "Starting OpenLDAP container..." +docker run -d \ + --name openldap \ + --hostname "${LDAP_HOSTNAME}" \ + -p "${LDAP_PORT}:${LDAP_PORT}" \ + -p 1636:636 \ + -e LDAP_ORGANISATION="${LDAP_ORG}" \ + -e LDAP_DOMAIN="${LDAP_DOMAIN}" \ + -e LDAP_ADMIN_PASSWORD="${LDAP_ADMIN_PW}" \ + "${LDAP_DOCKER_NAME}" + +echo "OpenLDAP server is now running in Docker!" + +# Wait for the container to be up and running +echo "Waiting for OpenLDAP to start..." +sleep 5 + +# Check container status +status=$(docker ps --filter name=openldap --format "{{.Status}}") +if [[ -n "$status" ]]; then + echo "OpenLDAP container is running. Status: $status" +else + echo "OpenLDAP container is NOT running!" 
+ echo "Check logs with: docker logs openldap" + exit 1 +fi + +# Run ldapadd inside the container +docker exec -i openldap ldapadd -x -w "${LDAP_ADMIN_PW}" -D "cn=admin,dc=${LDAP_DOMAIN//./,dc=}" -f /dev/stdin < "${LDIF_PATH}" \ No newline at end of file diff --git a/bootstrap/teardown-env.sh b/bootstrap/teardown-env.sh new file mode 100755 index 0000000..ac73f86 --- /dev/null +++ b/bootstrap/teardown-env.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$PLUGIN_NAME" ]] && fail "PLUGIN_NAME env variable has not been set" +[[ -z "$PLUGIN_DIR" ]] && fail "PLUGIN_DIR env variable has not been set" + +MAKEFILE_DIR="${MAKEFILE_DIR:-$(pwd)}" +PROJECT_BIN_DIR="${MAKEFILE_DIR}/bin" + +echo "[teardown] Stopping and removing openldap docker container if it exists..." +docker rm -f openldap 2>/dev/null || echo "[teardown] No openldap container found." + +# Remove from bin directory +if [ -f "${PROJECT_BIN_DIR}/${PLUGIN_NAME}" ]; then + echo "[teardown] Removing existing plugin at ${PROJECT_BIN_DIR}/${PLUGIN_NAME}" + rm -f "${PROJECT_BIN_DIR}/${PLUGIN_NAME}" +fi + +# Remove from destination directory +if [ -f "${PLUGIN_DIR}/${PLUGIN_NAME}" ]; then + echo "[teardown] Removing existing plugin at ${PLUGIN_DIR}/${PLUGIN_NAME}" + rm -f "${PLUGIN_DIR}/${PLUGIN_NAME}" +fi + +echo "[teardown] Teardown complete." \ No newline at end of file diff --git a/enos/enos-descriptions.hcl b/enos/enos-descriptions.hcl new file mode 100644 index 0000000..a3cbd07 --- /dev/null +++ b/enos/enos-descriptions.hcl @@ -0,0 +1,119 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +globals { + description = { + bootstrap_vault_cluster_targets = <<-EOF + Installs bootstrap tools (e.g. shasum) on the Vault cluster targets. + EOF + + build_vault = <<-EOF + Determine which Vault artifact we want to use for the scenario. 
Depending on the + 'artifact_source' variant we'll either build Vault from the local branch, fetch a candidate + build from Artifactory, or use a local artifact that was built in CI via CRT. + EOF + + build_ldap = <<-EOF + Determine which openldap plugin artifact we want to use for the scenario. Depending on the + 'artifact_source' variant we'll either build openldap secrets engine plugin from the local branch or + fetch a candidate build from Artifactory. + EOF + + configure_plugin = <<-EOF + Configure the Vault plugin. + EOF + + create_ldap_server = <<-EOF + Sets up the docker container and ldap server. + EOF + + create_ldap_server_target = <<-EOF + Create the target machines that we'll setup the LDAP server onto. + EOF + + create_seal_key = <<-EOF + Create the necessary seal key infrastructure for Vaults auto-unseal functionality. Depending + on the 'seal' variant this step will perform different actions. When using 'shamir' the step + is a no-op as we won't require an external seal mechanism. When using 'pkcs11' this step will + create a SoftHSM slot and associated token which can be distributed to all target nodes. When + using 'awskms' a new AWSKMS key will be created. The necessary security groups and policies + for Vault target nodes to access it the AWSKMS key are handled in the target modules. + EOF + + create_vault_cluster = <<-EOF + Create the the Vault cluster. In this module we'll install, configure, start, initialize and + unseal all the nodes in the Vault. After initialization it also enables various audit engines. + EOF + + create_vault_cluster_targets = <<-EOF + Create the target machines that we'll install Vault onto. We also handle creating AWS instance + profiles and security groups that allow for auto-discovery via the retry_join functionality in + Consul. 
The security group firewall rules will automatically allow SSH access from the host + external IP address of the machine executing Enos, in addition to all of the required ports + for Vault to function and be accessible in the VPC. + Note: Consul is not supported for plugin testing with enos. + EOF + + create_vpc = <<-EOF + Create an AWS VPC, internet gateway, default security group, and default subnet that allows + egress traffic via the internet gateway. + EOF + + ec2_info = <<-EOF + Query various endpoints in AWS Ec2 to gather metadata we'll use later in our run when creating + infrastructure for the Vault cluster. This metadata includes: + - AMI IDs for different Linux distributions and platform architectures + - Available Ec2 Regions + - Availability Zones for our desired machine instance types + EOF + + get_local_metadata = <<-EOF + Performs several Vault quality verification that are dynamically modified based on the Vault + binary version, commit SHA, build-date (commit SHA date), and edition metadata. When we're + testing existing artifacts this expected metadata is passed in via Enos variables. When we're + building a local by using the 'artifact_source:local' variant, this step executes and + populates the expected metadata with that of our branch so that we don't have to update the + Enos variables on each commit. + EOF + + get_vault_cluster_ip_addresses = <<-EOF + Map the public and private IP addresses of the Vault cluster nodes and segregate them by + their leader status. This allows us to easily determine the public IP addresses of the leader + and follower nodes. + EOF + + read_vault_license = <<-EOF + When deploying Vault Enterprise, ensure a Vault Enterprise license is present on disk and + read its contents so that we can utilize it when configuring the Vault Enterprise cluster. + Must have the 'edition' variant to be set to any Enterprise edition. + EOF + + setup_plugin = <<-EOF + Build, register, and enable the Vault plugin. 
+ EOF + + verify_log_secrets = <<-EOF + Verify that the vault audit log and systemd journal do not leak secret values. + EOF + + verify_raft_cluster_all_nodes_are_voters = <<-EOF + When configured with a 'backend:raft' variant, verify that all nodes in the cluster are + healthy and are voters. + EOF + + verify_vault_unsealed = <<-EOF + Verify that the Vault cluster has successfully unsealed. + EOF + + verify_vault_version = <<-EOF + Verify that the Vault CLI has the correct embedded version metadata and that the Vault Cluster + verision history includes our expected version. The CLI metadata that is validated includes + the Vault version, edition, build date, and any special prerelease metadata. + EOF + + wait_for_cluster_to_have_leader = <<-EOF + Wait for a leader election to occur before we proceed with any further quality verification. + EOF + + } +} diff --git a/enos/enos-dev-variables.hcl b/enos/enos-dev-variables.hcl index ed7ab24..f50a39b 100644 --- a/enos/enos-dev-variables.hcl +++ b/enos/enos-dev-variables.hcl @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 variable "dev_build_local_ui" { type = bool @@ -12,10 +12,3 @@ variable "dev_config_mode" { description = "The method to use when configuring Vault. When set to 'env' we will configure Vault using VAULT_ style environment variables if possible. When 'file' we'll use the HCL configuration file for all configuration options." default = "file" // or "env" } - -variable "dev_consul_version" { - type = string - description = "The version of Consul to use when using Consul for storage!" - default = "1.18.1" - // NOTE: You can also set the "backend_edition" if you want to use Consul Enterprise -} diff --git a/enos/enos-dynamic-config.hcl b/enos/enos-dynamic-config.hcl index 15f7de6..0aabdf1 100644 --- a/enos/enos-dynamic-config.hcl +++ b/enos/enos-dynamic-config.hcl @@ -1,5 +1,5 @@ # Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 # Code generated by pipeline generate enos-dynamic-config DO NOT EDIT. diff --git a/enos/enos-globals.hcl b/enos/enos-globals.hcl index 59ec11a..0504052 100644 --- a/enos/enos-globals.hcl +++ b/enos/enos-globals.hcl @@ -1,13 +1,13 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 globals { - archs = ["amd64", "arm64"] - artifact_sources = ["local", "crt", "artifactory"] - artifact_types = ["bundle", "package"] - backends = ["consul", "raft"] - backend_license_path = abspath(var.backend_license_path != null ? var.backend_license_path : joinpath(path.root, "./support/consul.hclic")) - backend_tag_key = "VaultStorage" + archs = ["amd64", "arm64"] + artifact_sources = ["local", "crt", "artifactory"] + ldap_artifact_sources = ["local", "releases", "artifactory"] + artifact_types = ["bundle", "package"] + backends = ["raft"] + backend_tag_key = "VaultStorage" build_tags = { "ce" = ["ui"] "ent" = ["ui", "enterprise", "ent"] @@ -15,10 +15,8 @@ globals { "ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"] "ent.hsm.fips1403" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_3", "ent.hsm.fips1403"] } - config_modes = ["env", "file"] - consul_editions = ["ce", "ent"] - consul_versions = ["1.14.11", "1.15.7", "1.16.3", "1.17.0"] - distros = ["amzn", "leap", "rhel", "sles", "ubuntu"] + config_modes = ["env", "file"] + distros = ["amzn", "leap", "rhel", "sles", "ubuntu"] // Different distros may require different packages, or use different aliases for the same package distro_packages = { amzn = { @@ -70,6 +68,11 @@ globals { port = 22 protocol = "tcp" }, + ldap : { + description = "LDAP" + port = 389 + protocol = "tcp" + }, vault_agent : { description = "Vault Agent" port = 8100 @@ -90,61 +93,6 @@ globals { port = 8201 protocol = "tcp" }, - consul_rpc : { - description = "Consul internal communication" - port = 8300 - protocol = "tcp" - 
}, - consul_serf_lan_tcp : { - description = "Consul Serf LAN TCP" - port = 8301 - protocol = "tcp" - }, - consul_serf_lan_udp : { - description = "Consul Serf LAN UDP" - port = 8301 - protocol = "udp" - }, - consul_serf_wan_tcp : { - description = "Consul Serf WAN TCP" - port = 8302 - protocol = "tcp" - }, - consul_serf_wan_udp : { - description = "Consul Serf WAN UDP" - port = 8302 - protocol = "udp" - }, - consul_http : { - description = "Consul HTTP API" - port = 8500 - protocol = "tcp" - }, - consul_https : { - description = "Consul HTTPS API" - port = 8501 - protocol = "tcp" - }, - consul_grpc : { - description = "Consul gRPC API" - port = 8502 - protocol = "tcp" - }, - consul_grpc_tls : { - description = "Consul gRPC TLS API" - port = 8503 - protocol = "tcp" - }, - consul_dns_tcp : { - description = "Consul TCP DNS Server" - port = 8600 - protocol = "tcp" - }, - consul_dns_udp : { - description = "Consul UDP DNS Server" - port = 8600 - protocol = "udp" - }, } seals = ["awskms", "pkcs11", "shamir"] tags = merge({ @@ -158,5 +106,6 @@ globals { } vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic")) vault_tag_key = "vault-cluster" + ldap_tag_key = "ldap-server-cluster" vault_disable_mlock = false } diff --git a/enos/enos-modules.hcl b/enos/enos-modules.hcl index 76a0077..35a8915 100644 --- a/enos/enos-modules.hcl +++ b/enos/enos-modules.hcl @@ -1,158 +1,121 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 -module "autopilot_upgrade_storageconfig" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/autopilot_upgrade_storageconfig?ref=main" +module "backend_raft" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/backend_raft?ref=${var.vault_repo_ref}" } -module "backend_consul" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/backend_consul?ref=main" - - license = var.backend_license_path == null ? null : file(abspath(var.backend_license_path)) - log_level = var.backend_log_level +// Bootstrap Vault cluster targets +module "bootstrap_vault_cluster_targets" { + source = "./modules/ec2_bootstrap_tools" } -module "backend_raft" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/backend_raft?ref=main" +// Find any artifact in Artifactory. Requires the version, revision, and edition. +module "build_vault_artifactory" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/build_artifactory_artifact?ref=${var.vault_repo_ref}" } // Find any artifact in Artifactory. Requires the version, revision, and edition. -module "build_artifactory" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/build_artifactory_artifact?ref=main" +module "build_ldap_artifactory" { + source = "./modules/build_artifactory_artifact" } // Find any released RPM or Deb in Artifactory. Requires the version, edition, distro, and distro // version. -module "build_artifactory_package" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/build_artifactory_package?ref=main" +module "build_vault_artifactory_package" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/build_artifactory_package?ref=${var.vault_repo_ref}" +} + +// A shim "build module" suitable for use when using locally pre-built artifacts or a zip bundle +// from releases.hashicorp.com. 
When using a local pre-built artifact it requires the local +// artifact path. When using a release zip it does nothing as you'll need to configure the +// vault_cluster module with release info instead. +module "build_vault_crt" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/build_crt?ref=${var.vault_repo_ref}" } // A shim "build module" suitable for use when using locally pre-built artifacts or a zip bundle // from releases.hashicorp.com. When using a local pre-built artifact it requires the local // artifact path. When using a release zip it does nothing as you'll need to configure the // vault_cluster module with release info instead. -module "build_crt" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/build_crt?ref=main" +module "build_ldap_releases" { + source = "./modules/build_releases" } // Build the local branch and package it into a zip artifact. Requires the goarch, goos, build tags, // and bundle path. -module "build_local" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/build_local?ref=main" +module "build_ldap_local" { + source = "./modules/build_local" +} + +// Configure the Vault plugin +module "configure_plugin" { + source = "./modules/configure_plugin/ldap" +} + +// Setup Docker and OpenLDAP on backend server with seed data +module "create_backend_server" { + source = "./modules/backend_servers_setup" } module "create_vpc" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/create_vpc?ref=main" + source = "git::https://github.com/hashicorp/vault.git//enos/modules/create_vpc?ref=${var.vault_repo_ref}" environment = "ci" common_tags = var.tags } -module "choose_follower_host" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/choose_follower_host?ref=main" -} - module "ec2_info" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/ec2_info?ref=main" + source = 
"git::https://github.com/hashicorp/vault.git//enos/modules/ec2_info?ref=${var.vault_repo_ref}" } module "get_local_metadata" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/get_local_metadata?ref=main" -} - -module "generate_dr_operation_token" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/generate_dr_operation_token?ref=main" - - vault_install_dir = var.vault_install_dir -} - -module "generate_failover_secondary_token" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/generate_failover_secondary_token?ref=main" - - vault_install_dir = var.vault_install_dir -} - -module "generate_secondary_public_key" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/generate_secondary_public_key?ref=main" - - vault_install_dir = var.vault_install_dir -} - -module "generate_secondary_token" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/generate_secondary_token?ref=main" - - vault_install_dir = var.vault_install_dir -} - -module "install_packages" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/install_packages?ref=main" + source = "git::https://github.com/hashicorp/vault.git//enos/modules/get_local_metadata?ref=${var.vault_repo_ref}" } module "read_license" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/read_license?ref=main" + source = "git::https://github.com/hashicorp/vault.git//enos/modules/read_license?ref=${var.vault_repo_ref}" } module "replication_data" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/replication_data?ref=main" + source = "git::https://github.com/hashicorp/vault.git//enos/modules/replication_data?ref=${var.vault_repo_ref}" } module "restart_vault" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/restart_vault?ref=main" + source = "git::https://github.com/hashicorp/vault.git//enos/modules/restart_vault?ref=${var.vault_repo_ref}" vault_install_dir 
= var.vault_install_dir } module "seal_awskms" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/seal_awskms?ref=main" + source = "git::https://github.com/hashicorp/vault.git//enos/modules/seal_awskms?ref=${var.vault_repo_ref}" cluster_ssh_keypair = var.aws_ssh_keypair_name common_tags = var.tags } module "seal_shamir" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/seal_shamir?ref=main" + source = "git::https://github.com/hashicorp/vault.git//enos/modules/seal_shamir?ref=${var.vault_repo_ref}" cluster_ssh_keypair = var.aws_ssh_keypair_name common_tags = var.tags } module "seal_pkcs11" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/seal_pkcs11?ref=main" + source = "git::https://github.com/hashicorp/vault.git//enos/modules/seal_pkcs11?ref=${var.vault_repo_ref}" cluster_ssh_keypair = var.aws_ssh_keypair_name common_tags = var.tags } -module "shutdown_node" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/shutdown_node?ref=main" -} - -module "shutdown_multiple_nodes" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/shutdown_multiple_nodes?ref=main" -} - -module "start_vault" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/start_vault?ref=main" - - install_dir = var.vault_install_dir - log_level = var.vault_log_level -} - -module "stop_vault" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/stop_vault?ref=main" +// Register, and enable the Vault plugin +module "setup_plugin" { + source = "./modules/setup_plugin" } -// create target instances using ec2:CreateFleet -module "target_ec2_fleet" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/target_ec2_fleet?ref=main" - - common_tags = var.tags - project_name = var.project_name - ssh_keypair = var.aws_ssh_keypair_name -} // create target instances using ec2:RunInstances module "target_ec2_instances" { - source = 
"git::https://github.com/hashicorp/vault.git//enos/target_ec2_instances?ref=main" + source = "git::https://github.com/hashicorp/vault.git//enos/modules/target_ec2_instances?ref=${var.vault_repo_ref}" common_tags = var.tags ports_ingress = values(global.ports) @@ -162,7 +125,7 @@ module "target_ec2_instances" { // don't create instances but satisfy the module interface module "target_ec2_shim" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/target_ec2_shim?ref=main" + source = "git::https://github.com/hashicorp/vault.git//enos/modules/target_ec2_shim?ref=${var.vault_repo_ref}" common_tags = var.tags ports_ingress = values(global.ports) @@ -170,230 +133,42 @@ module "target_ec2_shim" { ssh_keypair = var.aws_ssh_keypair_name } -// create target instances using ec2:RequestSpotFleet -module "target_ec2_spot_fleet" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/target_ec2_spot_fleet?ref=main" - - common_tags = var.tags - project_name = var.project_name - ssh_keypair = var.aws_ssh_keypair_name -} - -module "vault_agent" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_agent?ref=main" - - vault_install_dir = var.vault_install_dir - vault_agent_port = global.ports["vault_agent"]["port"] -} - -module "vault_proxy" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_proxy?ref=main" - - vault_install_dir = var.vault_install_dir - vault_proxy_port = global.ports["vault_proxy"]["port"] -} - -module "vault_verify_agent_output" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_verify_agent_output?ref=main" -} - module "vault_cluster" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_cluster?ref=main" + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_cluster?ref=${var.vault_repo_ref}" install_dir = var.vault_install_dir - consul_license = var.backend_license_path == null ? 
null : file(abspath(var.backend_license_path)) + consul_license = null cluster_tag_key = global.vault_tag_key log_level = var.vault_log_level } module "vault_get_cluster_ips" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_get_cluster_ips?ref=main" - - vault_install_dir = var.vault_install_dir -} - -module "vault_failover_demote_dr_primary" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_failover_demote_dr_primary?ref=main" - - vault_install_dir = var.vault_install_dir -} - -module "vault_failover_promote_dr_secondary" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_failover_promote_dr_secondary?ref=main" - - vault_install_dir = var.vault_install_dir -} - -module "vault_failover_update_dr_primary" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_failover_update_dr_primary?ref=main" - - vault_install_dir = var.vault_install_dir -} - -module "vault_raft_remove_node_and_verify" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_raft_remove_node_and_verify?ref=main" - vault_install_dir = var.vault_install_dir -} - -module "vault_raft_remove_peer" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_raft_remove_peer?ref=main" - vault_install_dir = var.vault_install_dir -} - -module "vault_setup_dr_primary" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_setup_dr_primary?ref=main" + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_get_cluster_ips?ref=${var.vault_repo_ref}" vault_install_dir = var.vault_install_dir } -module "vault_setup_perf_primary" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_setup_perf_primary?ref=main" - - vault_install_dir = var.vault_install_dir -} - -module "vault_setup_replication_secondary" { - source = 
"git::https://github.com/hashicorp/vault.git//enos/modules/vault_setup_replication_secondary?ref=main" - - vault_install_dir = var.vault_install_dir -} - -module "vault_step_down" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_step_down?ref=main" - - vault_install_dir = var.vault_install_dir -} - -module "vault_test_ui" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_test_ui?ref=main" - - ui_run_tests = var.ui_run_tests -} - -module "vault_unseal_replication_followers" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_unseal_replication_followers?ref=main" - - vault_install_dir = var.vault_install_dir -} - -module "vault_upgrade" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_upgrade?ref=main" - - vault_install_dir = var.vault_install_dir -} - -module "vault_verify_autopilot" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_verify_autopilot?ref=main" - - vault_autopilot_upgrade_status = "await-server-removal" - vault_install_dir = var.vault_install_dir -} - -module "vault_verify_dr_replication" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_verify_dr_replication?ref=main" - - vault_install_dir = var.vault_install_dir -} - -module "vault_verify_removed_node" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_verify_removed_node?ref=main" - - vault_install_dir = var.vault_install_dir -} - -module "vault_verify_removed_node_shim" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_verify_removed_node_shim?ref=main" - vault_install_dir = var.vault_install_dir -} - -module "vault_verify_secrets_engines_create" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/verify_secrets_engines/modules/create?ref=main" - - create_aws_secrets_engine = var.verify_aws_secrets_engine - vault_install_dir = var.vault_install_dir -} - 
-module "vault_verify_secrets_engines_read" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/verify_secrets_engines/modules/read?ref=main" - - verify_aws_secrets_engine = var.verify_aws_secrets_engine - vault_install_dir = var.vault_install_dir -} - -module "vault_verify_default_lcq" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_verify_default_lcq?ref=main" - - vault_autopilot_default_max_leases = "300000" -} - -module "vault_verify_performance_replication" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_verify_performance_replication?ref=main" +module "vault_wait_for_cluster_unsealed" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_wait_for_cluster_unsealed?ref=${var.vault_repo_ref}" vault_install_dir = var.vault_install_dir } module "vault_verify_raft_auto_join_voter" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_verify_raft_auto_join_voter?ref=main" + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_verify_raft_auto_join_voter?ref=${var.vault_repo_ref}" vault_install_dir = var.vault_install_dir vault_cluster_addr_port = global.ports["vault_cluster"]["port"] } -module "vault_verify_replication" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_verify_replication?ref=main" -} - -module "vault_verify_ui" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_verify_ui?ref=main" -} - -module "vault_verify_undo_logs" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_verify_undo_logs?ref=main" - - vault_install_dir = var.vault_install_dir -} - -module "vault_wait_for_cluster_unsealed" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_wait_for_cluster_unsealed?ref=main" - - vault_install_dir = var.vault_install_dir -} - module "vault_verify_version" { - source = 
"git::https://github.com/hashicorp/vault.git//enos/modules/vault_verify_version?ref=main" + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_verify_version?ref=${var.vault_repo_ref}" vault_install_dir = var.vault_install_dir } module "vault_wait_for_leader" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_wait_for_leader?ref=main" - - vault_install_dir = var.vault_install_dir -} - -module "vault_wait_for_seal_rewrap" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_wait_for_seal_rewrap?ref=main" + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_wait_for_leader?ref=${var.vault_repo_ref}" vault_install_dir = var.vault_install_dir } - -module "verify_log_secrets" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/verify_log_secrets?ref=main" - - radar_license_path = var.vault_radar_license_path != null ? abspath(var.vault_radar_license_path) : null -} - -module "verify_seal_type" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/verify_seal_type?ref=main" - - vault_install_dir = var.vault_install_dir -} - -module "vault_verify_billing_start_date" { - source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_verify_billing_start_date" - - vault_install_dir = var.vault_install_dir - vault_instance_count = var.vault_instance_count - vault_cluster_addr_port = global.ports["vault_cluster"]["port"] -} - diff --git a/enos/enos-providers.hcl b/enos/enos-providers.hcl index 89c79bd..ab745b4 100644 --- a/enos/enos-providers.hcl +++ b/enos/enos-providers.hcl @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 provider "aws" "default" { region = var.aws_region diff --git a/enos/enos-qualities.hcl b/enos/enos-qualities.hcl index 59fcdd9..6731a6b 100644 --- a/enos/enos-qualities.hcl +++ b/enos/enos-qualities.hcl @@ -1,151 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -quality "consul_api_agent_host_read" { - description = "The /v1/agent/host Consul API returns host info for each node in the cluster" -} - -quality "consul_api_health_node_read" { - description = <<-EOF - The /v1/health/node/ Consul API returns health info for each node in the cluster - EOF -} - -quality "consul_api_operator_raft_config_read" { - description = "The /v1/operator/raft/configuration Consul API returns raft info for the cluster" -} - -quality "consul_autojoin_aws" { - description = "The Consul cluster auto-joins with AWS tag discovery" -} - -quality "consul_cli_validate" { - description = "The 'consul validate' command validates the Consul configuration" -} - -quality "consul_config_file" { - description = "Consul starts when configured with a configuration file" -} - -quality "consul_ha_leader_election" { - description = "The Consul cluster elects a leader node on start up" -} - -quality "consul_health_state_passing_read_nodes_minimum" { - description = <<-EOF - The Consul cluster meets the minimum of number of healthy nodes according to the - /v1/health/state/passing Consul API - EOF -} - -quality "consul_operator_raft_configuration_read_voters_minimum" { - description = <<-EOF - The Consul cluster meets the minimum number of raft voters according to the - /v1/operator/raft/configuration Consul API - EOF -} - -quality "consul_service_start_client" { - description = "The Consul service starts in client mode" -} - -quality "consul_service_start_server" { - description = "The Consul service starts in server mode" -} - -quality "consul_service_systemd_notified" { - description = "The Consul 
binary notifies systemd when the service is active" -} - -quality "consul_service_systemd_unit" { - description = "The 'consul.service' systemd unit starts the service" -} - -quality "vault_agent_auto_auth_approle" { - description = <<-EOF - Vault running in Agent mode utilizes the approle auth method to do auto-auth via a role and - read secrets from a file source - EOF -} - -quality "vault_agent_log_template" { - description = global.description.verify_agent_output -} - -quality "vault_api_auth_userpass_login_write" { - description = "The v1/auth/userpass/login/ Vault API creates a token for a user" -} - -quality "vault_api_auth_userpass_user_write" { - description = "The v1/auth/userpass/users/ Vault API associates a policy with a user" -} - -quality "vault_api_identity_entity_read" { - description = <<-EOF - The v1/identity/entity Vault API returns an identity entity, has the correct metadata, and is - associated with the expected entity-alias, groups, and policies - EOF -} - -quality "vault_api_identity_entity_write" { - description = "The v1/identity/entity Vault API creates an identity entity" -} - -quality "vault_api_identity_entity_alias_write" { - description = "The v1/identity/entity-alias Vault API creates an identity entity alias" -} - -quality "vault_api_identity_group_write" { - description = "The v1/identity/group/ Vault API creates an identity group" -} - -quality "vault_api_identity_oidc_config_read" { - description = <<-EOF - The v1/identity/oidc/config Vault API returns the built-in identity secrets engine configuration - EOF -} - -quality "vault_api_identity_oidc_config_write" { - description = "The v1/identity/oidc/config Vault API configures the built-in identity secrets engine" -} - -quality "vault_api_identity_oidc_introspect_write" { - description = "The v1/identity/oidc/introspect Vault API creates introspect verifies the active state of a signed OIDC token" -} - -quality "vault_api_identity_oidc_key_read" { - description = <<-EOF - The 
v1/identity/oidc/key Vault API returns the OIDC signing key and verifies the key's algorithm, - rotation_period, and verification_ttl are correct - EOF -} - -quality "vault_api_identity_oidc_key_write" { - description = "The v1/identity/oidc/key Vault API creates an OIDC signing key" -} - -quality "vault_api_identity_oidc_key_rotate_write" { - description = "The v1/identity/oidc/key//rotate Vault API rotates an OIDC signing key and applies a new verification TTL" -} - -quality "vault_api_identity_oidc_role_read" { - description = <<-EOF - The v1/identity/oidc/role Vault API returns the OIDC role and verifies that the roles key and - ttl are corect. - EOF -} - -quality "vault_api_identity_oidc_role_write" { - description = "The v1/identity/oidc/role Vault API creates an OIDC role associated with a key and clients" -} - -quality "vault_api_identity_oidc_token_read" { - description = "The v1/identity/oidc/token Vault API creates an OIDC token associated with a role" -} - -quality "vault_api_sys_auth_userpass_user_write" { - description = "The v1/sys/auth/userpass/users/ Vault API associates a superuser policy with a user" -} +// SPDX-License-Identifier: MPL-2.0 quality "vault_api_sys_config_read" { description = <<-EOF @@ -173,130 +27,6 @@ quality "vault_api_sys_leader_read" { description = "The v1/sys/leader Vault API returns the cluster leader info" } -quality "vault_api_sys_metrics_vault_core_replication_write_undo_logs_enabled" { - description = <<-EOF - The v1/sys/metrics Vault API returns metrics and verifies that - 'Gauges[vault.core.replication.write_undo_logs]' is enabled - EOF -} - -quality "vault_api_sys_policy_write" { - description = "The v1/sys/policy Vault API writes a policy" -} - -quality "vault_api_sys_quotas_lease_count_read_max_leases_default" { - description = <<-EOF - The v1/sys/quotas/lease-count/default Vault API returns the lease 'count' and 'max_leases' is - set to 300,000 - EOF -} - -quality 
"vault_api_sys_replication_dr_primary_enable_write" { - description = <<-EOF - The v1/sys/replication/dr/primary/enable Vault API enables DR replication - EOF -} - -quality "vault_api_sys_replication_dr_primary_secondary_token_write" { - description = <<-EOF - The v1/sys/replication/dr/primary/secondary-token Vault API configures the DR replication - secondary token - EOF -} - -quality "vault_api_sys_replication_dr_secondary_enable_write" { - description = <<-EOF - The v1/sys/replication/dr/secondary/enable Vault API enables DR replication - EOF -} - -quality "vault_api_sys_replication_dr_read_connection_status_connected" { - description = <<-EOF - The v1/sys/replication/dr/status Vault API returns status info and the - 'connection_status' is correct for the given node - EOF -} - -quality "vault_api_sys_replication_dr_status_known_primary_cluster_addrs" { - description = <<-EOF - The v1/sys/replication/dr/status Vault API returns the DR replication status and - 'known_primary_cluster_address' is the expected primary cluster leader - EOF -} - -quality "vault_api_sys_replication_dr_status_read" { - description = <<-EOF - The v1/sys/replication/dr/status Vault API returns the DR replication status - EOF -} - -quality "vault_api_sys_replication_dr_status_read_cluster_address" { - description = <<-EOF - The v1/sys/replication/dr/status Vault API returns the DR replication status - and the '{primaries,secondaries}[*].cluster_address' is correct for the given node - EOF -} - -quality "vault_api_sys_replication_dr_status_read_state_not_idle" { - description = <<-EOF - The v1/sys/replication/dr/status Vault API returns the DR replication status - and the state is not idle - EOF -} - -quality "vault_api_sys_replication_performance_primary_enable_write" { - description = <<-EOF - The v1/sys/replication/performance/primary/enable Vault API enables performance replication - EOF -} - -quality "vault_api_sys_replication_performance_primary_secondary_token_write" { - description = 
<<-EOF - The v1/sys/replication/performance/primary/secondary-token Vault API configures the replication - token - EOF -} - -quality "vault_api_sys_replication_performance_secondary_enable_write" { - description = <<-EOF - The v1/sys/replication/performance/secondary/enable Vault API enables performance replication - EOF -} - -quality "vault_api_sys_replication_performance_read_connection_status_connected" { - description = <<-EOF - The v1/sys/replication/performance/status Vault API returns status info and the - 'connection_status' is correct for the given node - EOF -} - -quality "vault_api_sys_replication_performance_status_known_primary_cluster_addrs" { - description = <<-EOF - The v1/sys/replication/performance/status Vault API returns the replication status and - 'known_primary_cluster_address' is the expected primary cluster leader - EOF -} - -quality "vault_api_sys_replication_performance_status_read" { - description = <<-EOF - The v1/sys/replication/performance/status Vault API returns the performance replication status - EOF -} - -quality "vault_api_sys_replication_performance_status_read_cluster_address" { - description = <<-EOF - The v1/sys/replication/performance/status Vault API returns the performance replication status - and the '{primaries,secondaries}[*].cluster_address' is correct for the given node - EOF -} - -quality "vault_api_sys_replication_performance_status_read_state_not_idle" { - description = <<-EOF - The v1/sys/replication/performance/status Vault API returns the performance replication status - and the state is not idle - EOF -} - quality "vault_api_sys_replication_status_read" { description = <<-EOF The v1/sys/replication/status Vault API returns the performance replication status of the @@ -311,29 +41,6 @@ quality "vault_api_sys_seal_status_api_read_matches_sys_health" { EOF } -quality "vault_api_sys_sealwrap_rewrap_read_entries_processed_eq_entries_succeeded_post_rewrap" { - description = 
global.description.verify_seal_rewrap_entries_processed_eq_entries_succeeded_post_rewrap -} - -quality "vault_api_sys_sealwrap_rewrap_read_entries_processed_gt_zero_post_rewrap" { - description = global.description.verify_seal_rewrap_entries_processed_is_gt_zero_post_rewrap -} - -quality "vault_api_sys_sealwrap_rewrap_read_is_running_false_post_rewrap" { - description = global.description.verify_seal_rewrap_is_running_false_post_rewrap -} - -quality "vault_api_sys_sealwrap_rewrap_read_no_entries_fail_during_rewrap" { - description = global.description.verify_seal_rewrap_no_entries_fail_during_rewrap -} - -quality "vault_api_sys_step_down_steps_down" { - description = <<-EOF - The v1/sys/step-down Vault API forces the cluster leader to step down and intiates a new leader - election - EOF -} - quality "vault_api_sys_storage_raft_autopilot_configuration_read" { description = <<-EOF The /sys/storage/raft/autopilot/configuration Vault API returns the autopilot configuration of @@ -348,32 +55,12 @@ quality "vault_api_sys_storage_raft_autopilot_state_read" { EOF } -quality "vault_api_sys_storage_raft_autopilot_upgrade_info_read_status_matches" { - description = <<-EOF - The v1/sys/storage/raft/autopilot/state Vault API returns the raft autopilot state and the - 'upgrade_info.status' matches our expected state - EOF -} - -quality "vault_api_sys_storage_raft_autopilot_upgrade_info_target_version_read_matches_candidate" { - description = <<-EOF - The v1/sys/storage/raft/autopilot/state Vault API returns the raft autopilot state and the - 'upgrade_info.target_version' matches the the candidate version - EOF -} - quality "vault_api_sys_storage_raft_configuration_read" { description = <<-EOF The v1/sys/storage/raft/configuration Vault API returns the raft configuration of the cluster EOF } -quality "vault_api_sys_storage_raft_remove_peer_write_removes_peer" { - description = <<-EOF - The v1/sys/storage/raft/remove-peer Vault API removes the desired node from the raft sub-system 
- EOF -} - quality "vault_api_sys_version_history_keys" { description = <<-EOF The v1/sys/version-history Vault API returns the cluster version history and the 'keys' data @@ -417,60 +104,20 @@ quality "vault_audit_syslog" { description = "The Vault audit sub-system is enabled with the syslog and writes to syslog" } -quality "vault_auto_unseals_after_autopilot_upgrade" { - description = "Vault auto-unseals after upgrading the cluster with autopilot" -} - -quality "vault_autojoins_new_nodes_into_initialized_cluster" { - description = "Vault sucessfully auto-joins new nodes into an existing cluster" -} - quality "vault_autojoin_aws" { description = "Vault auto-joins nodes using AWS tag discovery" } -quality "vault_autopilot_upgrade_leader_election" { - description = <<-EOF - Vault elects a new leader after upgrading the cluster with autopilot - EOF -} - -quality "vault_cli_audit_enable" { - description = "The 'vault audit enable' command enables audit devices" -} - -quality "vault_cli_auth_enable_approle" { - description = "The 'vault auth enable approle' command enables the approle auth method" -} - quality "vault_cli_operator_members" { description = "The 'vault operator members' command returns the expected list of members" } -quality "vault_cli_operator_raft_remove_peer" { - description = "The 'vault operator remove-peer' command removes the desired node" -} - -quality "vault_cli_operator_step_down" { - description = "The 'vault operator step-down' command forces the cluster leader to step down" -} - -quality "vault_cli_policy_write" { - description = "The 'vault policy write' command writes a policy" -} - quality "vault_cli_status_exit_code" { description = <<-EOF The 'vault status' command exits with the correct code depending on expected seal status EOF } -quality "vault_cluster_upgrade_in_place" { - description = <<-EOF - Vault starts with existing data and configuration in-place migrates the data - EOF -} - quality "vault_config_env_variables" { description 
= "Vault starts when configured primarily with environment variables" } @@ -483,13 +130,6 @@ quality "vault_config_log_level" { description = "The 'log_level' config stanza modifies its log level" } -quality "vault_config_multiseal_is_toggleable" { - description = <<-EOF - The Vault Cluster can be configured with a single unseal method regardless of the - 'enable_multiseal' config value - EOF -} - quality "vault_init" { description = "Vault initializes the cluster with the given seal parameters" } @@ -510,36 +150,6 @@ quality "vault_listener_ipv6" { description = "Vault operates on ipv6 TCP listeners" } -quality "vault_mount_auth" { - description = "Vault mounts the auth engine" -} - -quality "vault_mount_identity" { - description = "Vault mounts the identity engine" -} - -quality "vault_mount_kv" { - description = "Vault mounts the kv engine" -} - -quality "vault_multiseal_enable" { - description = <<-EOF - The Vault Cluster starts with 'enable_multiseal' and multiple auto-unseal methods. - EOF -} - -quality "vault_proxy_auto_auth_approle" { - description = <<-EOF - Vault Proxy utilizes the approle auth method to to auto auth via a roles and secrets from file. 
- EOF -} - -quality "vault_proxy_cli_access" { - description = <<-EOF - The Vault CLI accesses tokens through the Vault proxy without a VAULT_TOKEN available - EOF -} - quality "vault_radar_index_create" { description = "Vault radar is able to create an index from KVv2 mounts" } @@ -552,34 +162,6 @@ quality "vault_raft_voters" { description = global.description.verify_raft_cluster_all_nodes_are_voters } -quality "vault_raft_removed_after_restart" { - description = "A removed raft node will continue reporting as removed after the process is restarted" -} - -quality "vault_raft_removed_statuses" { - description = "A removed raft node reports itself as removed in the status endpoints" -} - -quality "vault_raft_removed_cant_rejoin" { - description = "A removed raft node cannot rejoin a cluster while it still has old vault/raft data" -} - -quality "vault_raft_removed_rejoin_after_deletion" { - description = "A removed raft node can rejoin a cluster if it has deleted its old vault/raft data" -} - -quality "vault_replication_ce_disabled" { - description = "Replication is not enabled for CE editions" -} - -quality "vault_replication_ent_dr_available" { - description = "DR replication is available on Enterprise" -} - -quality "vault_replication_ent_pr_available" { - description = "PR replication is available on Enterprise" -} - quality "vault_seal_awskms" { description = "Vault auto-unseals with the awskms seal" } @@ -594,23 +176,6 @@ quality "vault_seal_pkcs11" { description = "Vault auto-unseals with the pkcs11 seal" } -quality "vault_secrets_kv_read" { - description = "Vault kv secrets engine data is readable" -} - -quality "vault_secrets_kv_write" { - description = "Vault kv secrets engine data is writable" -} - - -quality "vault_secrets_ldap_write_config" { - description = "The Vault LDAP secrets engine is configured with the correct settings" -} - -quality "vault_service_restart" { - description = "Vault restarts with existing configuration" -} - quality 
"vault_service_start" { description = "Vault starts with the configuration" } @@ -623,28 +188,10 @@ quality "vault_service_systemd_unit" { description = "The 'vault.service' systemd unit starts the service" } -quality "vault_status_seal_type" { - description = global.description.verify_seal_type -} - -quality "vault_storage_backend_consul" { - description = "Vault operates using Consul for storage" -} - quality "vault_storage_backend_raft" { description = "Vault operates using integrated Raft storage" } -quality "vault_ui_assets" { - description = global.description.verify_ui -} - -quality "vault_ui_test" { - description = <<-EOF - The Vault Web UI test suite runs against a live Vault server with the embedded static assets - EOF -} - quality "vault_unseal_ha_leader_election" { description = "Vault performs a leader election after it is unsealed" } @@ -660,7 +207,3 @@ quality "vault_version_edition" { quality "vault_version_release" { description = "Vault's reported release version matches our expectations" } - -quality "vault_billing_start_date" { - description = "Vault's billing start date has adjusted to the latest billing year" -} diff --git a/enos/enos-scenario-openldap.hcl b/enos/enos-scenario-openldap.hcl new file mode 100644 index 0000000..69873d4 --- /dev/null +++ b/enos/enos-scenario-openldap.hcl @@ -0,0 +1,581 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +scenario "openldap" { + description = <<-EOF + The scenario deploys a Vault cluster and a test OpenLDAP server to act as the LDAP backend for integration. + It enables and configures the OpenLDAP secrets engine plugin in Vault, connecting it to the deployed LDAP server, then + performs plugin configuration and usage tests to verify correct integration and expected functionality of the secrets engine. 
+ + This scenario validates that the Vault OpenLDAP secrets engine plugin works as expected after a fresh installation, + covering plugin setup, configuration, and end-to-end workflow testing. + //TODO: add testing for static and dynamic roles + + # How to run this scenario + + For general instructions on running a scenario, refer to the Enos docs: https://eng-handbook.hashicorp.services/internal-tools/enos/running-a-scenario/ + For troubleshooting tips and common errors, see https://eng-handbook.hashicorp.services/internal-tools/enos/troubleshooting/. + + Variables required for all scenario variants: + - aws_ssh_private_key_path (more info about AWS SSH keypairs: https://eng-handbook.hashicorp.services/internal-tools/enos/getting-started/#set-your-aws-key-pair-name-and-private-key) + - aws_ssh_keypair_name + - vault_build_date* + - vault_product_version + - vault_revision* + + * If you don't already know what build date and revision you should be using, see + https://eng-handbook.hashicorp.services/internal-tools/enos/troubleshooting/#execution-error-expected-vs-got-for-vault-versioneditionrevisionbuild-date. + + Variables required for some scenario variants: + - artifactory_token (if using `artifact_source:artifactory` in your filter) + - aws_region (if different from the default value in enos-variables.hcl) + - distro_version_ (if different from the default version for your target + distro. 
See supported distros and default versions in the distro_version_ + definitions in enos-variables.hcl) + - vault_artifact_path (the path to where you have a Vault artifact already downloaded, + if using `artifact_source:crt` in your filter) + - vault_license_path (if using an ENT edition of Vault) + EOF + + matrix { + arch = global.archs + artifact_source = global.artifact_sources + ldap_artifact_source = global.ldap_artifact_sources + artifact_type = global.artifact_types + backend = global.backends + config_mode = global.config_modes + distro = global.distros + edition = global.editions + ip_version = global.ip_versions + seal = global.seals + + // Our local builder always creates bundles + exclude { + artifact_source = ["local"] + ldap_artifact_source = ["local"] + artifact_type = ["package"] + } + + // PKCS#11 can only be used on ent.hsm and ent.hsm.fips1403. + exclude { + seal = ["pkcs11"] + edition = [for e in matrix.edition : e if !strcontains(e, "hsm")] + } + + // softhsm packages not available for leap/sles. + exclude { + seal = ["pkcs11"] + distro = ["leap", "sles"] + } + } + + terraform_cli = terraform_cli.default + terraform = terraform.default + providers = [ + provider.aws.default, + provider.enos.ec2_user, + provider.enos.ubuntu + ] + + locals { + artifact_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null + ldap_artifact_path = matrix.ldap_artifact_source != "artifactory" ? abspath(var.ldap_artifact_path) : null + enos_provider = { + amzn = provider.enos.ec2_user + leap = provider.enos.ec2_user + rhel = provider.enos.ec2_user + sles = provider.enos.ec2_user + ubuntu = provider.enos.ubuntu + } + manage_service = matrix.artifact_type == "bundle" + } + + step "build_vault" { + description = global.description.build_vault + module = "build_vault_${matrix.artifact_source}" + + variables { + build_tags = var.vault_local_build_tags != null ? 
var.vault_local_build_tags : global.build_tags[matrix.edition] + artifact_path = local.artifact_path + goarch = matrix.arch + goos = "linux" + artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null + artifactory_repo = matrix.artifact_source == "artifactory" ? var.artifactory_repo : null + artifactory_token = matrix.artifact_source == "artifactory" ? var.artifactory_token : null + arch = matrix.artifact_source == "artifactory" ? matrix.arch : null + product_version = var.vault_product_version + artifact_type = matrix.artifact_type + distro = matrix.artifact_source == "artifactory" ? matrix.distro : null + edition = matrix.artifact_source == "artifactory" ? matrix.edition : null + revision = var.vault_revision + } + } + + step "ec2_info" { + description = global.description.ec2_info + module = module.ec2_info + } + + step "create_vpc" { + description = global.description.create_vpc + module = module.create_vpc + + variables { + common_tags = global.tags + ip_version = matrix.ip_version + } + } + + step "read_vault_license" { + description = global.description.read_vault_license + skip_step = matrix.edition == "ce" + module = module.read_license + + variables { + file_name = global.vault_license_path + } + } + + step "create_seal_key" { + description = global.description.create_seal_key + module = "seal_${matrix.seal}" + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_id = step.create_vpc.id + common_tags = global.tags + } + } + + step "create_vault_cluster_targets" { + description = global.description.create_vault_cluster_targets + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_tag_key = global.vault_tag_key + common_tags = global.tags + seal_key_names = 
step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_vault_cluster" { + description = global.description.create_vault_cluster + module = module.vault_cluster + depends_on = [ + step.build_vault, + step.create_vault_cluster_targets, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + // verified in modules + quality.vault_artifact_bundle, + quality.vault_artifact_deb, + quality.vault_artifact_rpm, + quality.vault_audit_log, + quality.vault_audit_socket, + quality.vault_audit_syslog, + quality.vault_autojoin_aws, + quality.vault_config_env_variables, + quality.vault_config_file, + quality.vault_config_log_level, + quality.vault_init, + quality.vault_license_required_ent, + quality.vault_listener_ipv4, + quality.vault_listener_ipv6, + quality.vault_service_start, + quality.vault_storage_backend_raft, + // verified in enos_vault_start resource + quality.vault_api_sys_config_read, + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_health_read, + quality.vault_api_sys_host_info_read, + quality.vault_api_sys_replication_status_read, + quality.vault_api_sys_seal_status_api_read_matches_sys_health, + quality.vault_api_sys_storage_raft_autopilot_configuration_read, + quality.vault_api_sys_storage_raft_autopilot_state_read, + quality.vault_api_sys_storage_raft_configuration_read, + quality.vault_cli_status_exit_code, + quality.vault_service_systemd_notified, + quality.vault_service_systemd_unit, + ] + + variables { + artifactory_release = matrix.artifact_source == "artifactory" ? 
step.build_vault.vault_artifactory_release : null + backend_cluster_name = null + backend_cluster_tag_key = global.backend_tag_key + cluster_name = step.create_vault_cluster_targets.cluster_name + config_mode = matrix.config_mode + enable_audit_devices = var.vault_enable_audit_devices + hosts = step.create_vault_cluster_targets.hosts + install_dir = global.vault_install_dir[matrix.artifact_type] + ip_version = matrix.ip_version + license = matrix.edition != "ce" ? step.read_vault_license.license : null + local_artifact_path = local.artifact_path + manage_service = local.manage_service + packages = concat(global.packages, global.distro_packages[matrix.distro][global.distro_version[matrix.distro]]) + seal_attributes = step.create_seal_key.attributes + seal_type = matrix.seal + storage_backend = matrix.backend + } + } + + step "bootstrap_vault_cluster_targets" { + description = global.description.bootstrap_vault_cluster_targets + module = module.bootstrap_vault_cluster_targets + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + unseal_keys = step.create_vault_cluster.unseal_keys_b64 + threshold = step.create_vault_cluster.unseal_threshold + } + } + + step "get_local_metadata" { + description = global.description.get_local_metadata + skip_step = matrix.artifact_source != "local" + module = module.get_local_metadata + } + + // Wait for our cluster to elect a leader + step "wait_for_new_leader" { + description = global.description.wait_for_cluster_to_have_leader + module = module.vault_wait_for_leader + depends_on = [step.create_vault_cluster, + step.bootstrap_vault_cluster_targets] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_leader_read, + quality.vault_unseal_ha_leader_election, + ] + + variables { + timeout = 120 // seconds 
+ ip_version = matrix.ip_version + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "get_leader_ip" { + description = global.description.get_vault_cluster_ip_addresses + module = module.vault_get_cluster_ips + depends_on = [step.wait_for_new_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_leader_read, + quality.vault_cli_operator_members, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + + step "verify_vault_unsealed" { + description = global.description.verify_vault_unsealed + module = module.vault_wait_for_cluster_unsealed + depends_on = [step.wait_for_new_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_seal_awskms, + quality.vault_seal_pkcs11, + quality.vault_seal_shamir, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + } + } + + step "verify_vault_version" { + description = global.description.verify_vault_version + module = module.vault_verify_version + depends_on = [step.verify_vault_unsealed] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_version_history_keys, + quality.vault_api_sys_version_history_key_info, + quality.vault_version_build_date, + quality.vault_version_edition, + quality.vault_version_release, + ] + + variables { + hosts = 
step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_edition = matrix.edition + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version + vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision + vault_build_date = matrix.artifact_source == "local" ? step.get_local_metadata.build_date : var.vault_build_date + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "build_ldap" { + description = global.description.build_ldap + module = "build_ldap_${matrix.ldap_artifact_source}" + + variables { + goarch = matrix.arch + goos = "linux" + artifactory_host = matrix.ldap_artifact_source == "artifactory" ? var.artifactory_host : null + artifactory_repo = matrix.ldap_artifact_source == "artifactory" ? var.plugin_artifactory_repo : null + artifactory_token = matrix.ldap_artifact_source == "artifactory" ? var.artifactory_token : null + arch = matrix.ldap_artifact_source == "artifactory" ? matrix.arch : null + artifact_type = matrix.ldap_artifact_source == "artifactory" ? "bundle" : null + product_version = var.ldap_plugin_version + revision = var.ldap_revision + plugin_name = var.plugin_name + makefile_dir = matrix.ldap_artifact_source == "local" ? var.makefile_dir : null + plugin_dest_dir = matrix.ldap_artifact_source == "local" ? 
var.plugin_dest_dir : null + } + } + + step "create_ldap_server_target" { + description = global.description.create_ldap_server_target + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_tag_key = global.ldap_tag_key + common_tags = global.tags + vpc_id = step.create_vpc.id + instance_count = 1 + } + } + + step "create_ldap_server" { + description = global.description.create_ldap_server + module = module.create_backend_server + depends_on = [step.create_ldap_server_target] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + hosts = step.create_ldap_server_target.hosts + ldap_tag = var.ldap_tag + ldap_port = global.ports.ldap.port + } + } + + step "setup_plugin" { + description = global.description.setup_plugin + module = module.setup_plugin + depends_on = [ + step.get_leader_ip, + step.create_ldap_server, + step.verify_vault_unsealed, + step.build_ldap + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + artifactory_release = matrix.ldap_artifact_source == "artifactory" ? step.build_ldap.ldap_artifactory_release : null + release = matrix.ldap_artifact_source == "releases" ? { version = var.ldap_plugin_version, edition = "ce" } : null + hosts = step.create_vault_cluster_targets.hosts + local_artifact_path = matrix.ldap_artifact_source == "local" ? 
local.ldap_artifact_path : null + + + vault_leader_ip = step.get_leader_ip.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + + plugin_name = var.plugin_name + plugin_dir_vault = var.plugin_dir_vault + plugin_mount_path = var.plugin_mount_path + } + } + + step "configure_plugin" { + description = global.description.configure_plugin + module = module.configure_plugin + depends_on = [step.setup_plugin] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_leader_ip = step.get_leader_ip.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + + plugin_mount_path = var.plugin_mount_path + ldap_url = step.create_ldap_server.ldap_url + ldap_bind_dn = var.ldap_bind_dn + ldap_bind_pass = var.ldap_bind_pass + ldap_user_dn = var.ldap_user_dn + ldap_schema = var.ldap_schema + } + } + + + step "verify_raft_auto_join_voter" { + description = global.description.verify_raft_cluster_all_nodes_are_voters + skip_step = matrix.backend != "raft" + module = module.vault_verify_raft_auto_join_voter + depends_on = [step.verify_vault_unsealed] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = quality.vault_raft_voters + + variables { + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + + step "verify_log_secrets" { + skip_step = !var.vault_enable_audit_devices || !var.verify_log_secrets + + description = global.description.verify_log_secrets + module = module.verify_log_secrets + depends_on = [ + step.verify_secrets_engines_read, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + 
quality.vault_audit_log_secrets, + quality.vault_journal_secrets, + quality.vault_radar_index_create, + quality.vault_radar_scan_file, + ] + + variables { + audit_log_file_path = step.create_vault_cluster.audit_device_file_path + leader_host = step.get_vault_cluster_ips.leader_host + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + } + } + + + output "audit_device_file_path" { + description = "The file path for the file audit device, if enabled" + value = step.create_vault_cluster.audit_device_file_path + } + + output "cluster_name" { + description = "The Vault cluster name" + value = step.create_vault_cluster.cluster_name + } + + output "hosts" { + description = "The Vault cluster target hosts" + value = step.create_vault_cluster.hosts + } + + output "private_ips" { + description = "The Vault cluster private IPs" + value = step.create_vault_cluster.private_ips + } + + output "public_ips" { + description = "The Vault cluster public IPs" + value = step.create_vault_cluster.public_ips + } + + output "root_token" { + description = "The Vault cluster root token" + value = step.create_vault_cluster.root_token + } + + output "recovery_key_shares" { + description = "The Vault cluster recovery key shares" + value = step.create_vault_cluster.recovery_key_shares + } + + output "recovery_keys_b64" { + description = "The Vault cluster recovery keys b64" + value = step.create_vault_cluster.recovery_keys_b64 + } + + output "recovery_keys_hex" { + description = "The Vault cluster recovery keys hex" + value = step.create_vault_cluster.recovery_keys_hex + } + + output "seal_key_attributes" { + description = "The Vault cluster seal attributes" + value = step.create_seal_key.attributes + } + + output "unseal_keys_b64" { + description = "The Vault cluster unseal keys" + value = step.create_vault_cluster.unseal_keys_b64 + } + + output "unseal_keys_hex" { + description = "The Vault cluster unseal keys hex" + value = 
step.create_vault_cluster.unseal_keys_hex + } +} diff --git a/enos/enos-terraform.hcl b/enos/enos-terraform.hcl index a8f82f9..b1f1fc0 100644 --- a/enos/enos-terraform.hcl +++ b/enos/enos-terraform.hcl @@ -1,5 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 terraform_cli "default" { plugin_cache_dir = var.terraform_plugin_cache_dir != null ? abspath(var.terraform_plugin_cache_dir) : null diff --git a/enos/enos-variables.hcl b/enos/enos-variables.hcl index 169e9f1..985f792 100644 --- a/enos/enos-variables.hcl +++ b/enos/enos-variables.hcl @@ -1,12 +1,5 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -variable "artifactory_username" { - type = string - description = "The username to use when testing an artifact from artifactory" - default = null - sensitive = true -} +// SPDX-License-Identifier: MPL-2.0 variable "artifactory_token" { type = string @@ -45,64 +38,130 @@ variable "aws_ssh_private_key_path" { default = "./support/private_key.pem" } -variable "backend_edition" { - description = "The backend release edition if applicable" +variable "distro_version_amzn" { + description = "The version of Amazon Linux 2 to use" type = string - default = "ce" // or "ent" + default = "2023" // or "2", though pkcs11 has not been tested with 2 } -variable "backend_instance_type" { - description = "The instance type to use for the Vault backend. 
Must be arm64/nitro compatible" +variable "distro_version_leap" { + description = "The version of openSUSE leap to use" type = string - default = "t4g.small" + default = "15.6" } -variable "backend_license_path" { - description = "The license for the backend if applicable (Consul Enterprise)" +variable "distro_version_rhel" { + description = "The version of RHEL to use" + type = string + default = "9.5" // or "8.10" +} + +variable "distro_version_sles" { + description = "The version of SUSE SLES to use" + type = string + default = "15.6" +} + +variable "distro_version_ubuntu" { + description = "The version of ubuntu to use" + type = string + default = "24.04" // or "20.04", "22.04" +} + +variable "ldap_artifact_path" { + description = "Path to CRT generated or local vault.zip bundle" + type = string + default = "/tmp/vault-plugin-secrets-openldap.zip" +} + +variable "ldap_bind_dn" { + description = "LDAP bind DN" type = string default = null } -variable "backend_log_level" { - description = "The server log level for the backend. 
Supported values include 'trace', 'debug', 'info', 'warn', 'error'" +variable "ldap_bind_pass" { + description = "LDAP bind password" type = string - default = "trace" + default = null } -variable "project_name" { - description = "The description of the project" +variable "ldap_plugin_version" { + description = "LDAP plugin version to use" type = string - default = "vault-enos-integration" + default = null } -variable "distro_version_amzn" { - description = "The version of Amazon Linux 2 to use" +variable "ldap_revision" { + description = "The git sha of LDAP plugin artifact we are testing" type = string - default = "2023" // or "2", though pkcs11 has not been tested with 2 + default = null } -variable "distro_version_leap" { - description = "The version of openSUSE leap to use" +variable "ldap_schema" { + description = "LDAP schema type" type = string - default = "15.6" + default = "openldap" } -variable "distro_version_rhel" { - description = "The version of RHEL to use" +variable "ldap_tag" { + description = "LDAP image tag version" type = string - default = "9.5" // or "8.10" + default = null } -variable "distro_version_sles" { - description = "The version of SUSE SLES to use" +variable "ldap_url" { + description = "LDAP server URL" type = string - default = "15.6" + default = null } -variable "distro_version_ubuntu" { - description = "The version of ubuntu to use" +variable "ldap_user_dn" { + description = "LDAP user DN" type = string - default = "24.04" // or "20.04", "22.04" + default = null +} + +variable "makefile_dir" { + description = "Directory containing the Makefile for plugin build" + type = string + default = null +} + +variable "plugin_artifactory_repo" { + type = string + description = "The artifactory repo to search for vault plugin artifacts" + default = "hashicorp-vault-ecosystem-staging-local" +} + +variable "plugin_dest_dir" { + description = "Destination directory for the plugin binary" + type = string + default = null +} + +variable 
"plugin_dir_vault" { + description = "Vault server plugin directory" + type = string + default = "/etc/vault/plugins" +} + +variable "plugin_mount_path" { + description = "Mount path for the plugin in Vault" + type = string + default = null +} + +variable "plugin_name" { + description = "Name of the Vault plugin to use" + type = string + default = null +} + +variable "project_name" { + description = "The description of the project" + type = string + default = "vault-plugin-secrets-openldap-enos-integration" } variable "tags" { @@ -194,16 +253,16 @@ variable "vault_radar_license_path" { default = null } -variable "vault_revision" { - description = "The git sha of Vault artifact we are testing" +variable "vault_repo_ref" { + description = "The Git ref to use for external modules; can be pinned to a specific SHA" type = string - default = null + default = "main" } -variable "vault_upgrade_initial_version" { - description = "The Vault release to deploy before upgrading" +variable "vault_revision" { + description = "The git sha of Vault artifact we are testing" type = string - default = "1.13.13" + default = null } variable "verify_aws_secrets_engine" { diff --git a/enos/enos.vars.hcl b/enos/enos.vars.hcl index b749d90..8016ce3 100644 --- a/enos/enos.vars.hcl +++ b/enos/enos.vars.hcl @@ -20,9 +20,6 @@ // aws_ssh_private_key_path is the path to the AWS keypair private key // aws_ssh_private_key_path = "./support/private_key.pem" -// backend_license_path is the license for the backend if applicable (Consul Enterprise)". -// backend_license_path = "./support/consul.hclic" - // backend_log_level is the server log level for the backend. 
Supported values include 'trace', // 'debug', 'info', 'warn', 'error'" // backend_log_level = "trace" @@ -49,6 +46,51 @@ // distro_version_ubuntu is the version of ubuntu to use for "distro:ubuntu" variants // distro_version_ubuntu = "22.04" // or "20.04" +// ldap_artifact_path is the path to the LDAP plugin artifact (zip file) to be installed. +// ldap_artifact_path = "~/go/vault-plugins/vault-plugin-secrets-openldap.zip" + +// ldap_artifactory_repo is the Artifactory repository where the LDAP plugin artifact is stored. +// ldap_artifactory_repo = "hashicorp-vault-ecosystem-staging-local" + +// ldap_bind_dn is the distinguished name used to bind to the LDAP server. +// ldap_bind_dn = "cn=admin,dc=example,dc=com" + +// ldap_bind_pass is the password for the LDAP bind distinguished name. +// ldap_bind_pass = "adminpassword" + +// ldap_plugin_version is the version of the LDAP plugin being used. +// ldap_plugin_version = "0.15.0" + +// ldap_revision is the git SHA of the LDAP plugin artifact being tested. +// ldap_revision = "2ee1253cb5ff67196d0e4747e8aedd1c4903625f" + +// ldap_schema specifies the LDAP schema to use (e.g., openldap). +// ldap_schema = "openldap" + +// ldap_tag is the tag or version identifier for the LDAP plugin build. +// ldap_tag = "1.5.0" + +// ldap_user_dn is the base distinguished name under which user entries are located in LDAP. +// ldap_user_dn = "ou=users,dc=example,dc=com" + +// makefile_dir is the directory containing the Makefile for building the plugin. +// makefile_dir = "~/hashicorp/plugins/vault-plugin-secrets-openldap/" + +// plugin_dest_dir is the local directory where the plugin artifact will be stored. +// plugin_dest_dir = "~/go/vault-plugins" + +// plugin_dir_vault is the directory on the Vault server where plugins are installed. +// plugin_dir_vault = "/etc/vault/plugins" + +// plugin_mount_path is the mount path in Vault where the plugin will be enabled. 
+// plugin_mount_path = "local-secrets-ldap" + +// plugin_name is the name of the Vault plugin to be used for LDAP secrets. +// plugin_name = "vault-plugin-secrets-openldap" + +// plugin_source_type specifies the source type for the plugin (e.g., local_build, artifactory). +// plugin_source_type = "local_build" + // tags are a map of tags that will be applied to infrastructure resources that // support tagging. // tags = { "Project Name" : "Vault", "Something Cool" : "Value" } diff --git a/enos/modules/backend_servers_setup/main.tf b/enos/modules/backend_servers_setup/main.tf new file mode 100644 index 0000000..d7958b7 --- /dev/null +++ b/enos/modules/backend_servers_setup/main.tf @@ -0,0 +1,69 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +locals { + ldap_server = { + domain = "example.com" + org = "example" + admin_pw = "adminpassword" + tag = var.ldap_tag + port = tostring(var.ldap_port) + ip_address = var.hosts[0].public_ip + } + ldif_path = "/tmp/seed.ldif" +} + +# Step 1: Install Docker +resource "enos_remote_exec" "setup_docker" { + scripts = [abspath("${path.module}/../../../bootstrap/setup-docker.sh")] + + transport = { + ssh = { + host = local.ldap_server.ip_address + } + } +} + +# Step 2: Copy LDIF file for seeding LDAP +resource "enos_file" "seed_ldif" { + depends_on = [enos_remote_exec.setup_docker] + + source = abspath("${path.module}/../../../bootstrap/ldif/seed.ldif") + destination = local.ldif_path + + transport = { + ssh = { + host = local.ldap_server.ip_address + } + } +} + +# Step 3: Start OpenLDAP Docker container and seed data +resource "enos_remote_exec" "setup_openldap" { + depends_on = [enos_file.seed_ldif] + + environment = { + LDAP_DOMAIN = local.ldap_server.domain + LDAP_ORG = local.ldap_server.org + LDAP_ADMIN_PW = local.ldap_server.admin_pw + IMAGE_TAG = local.ldap_server.tag + LDAP_PORT = 
local.ldap_server.port + LDIF_PATH = local.ldif_path + } + + scripts = [abspath("${path.module}/../../../bootstrap/setup-openldap.sh")] + + transport = { + ssh = { + host = local.ldap_server.ip_address + } + } +} \ No newline at end of file diff --git a/enos/modules/backend_servers_setup/outputs.tf b/enos/modules/backend_servers_setup/outputs.tf new file mode 100644 index 0000000..981db0c --- /dev/null +++ b/enos/modules/backend_servers_setup/outputs.tf @@ -0,0 +1,12 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +output "state" { + value = { + ldap = local.ldap_server + } +} + +output "ldap_url" { + value = "ldap://${local.ldap_server.ip_address}:${local.ldap_server.port}" +} \ No newline at end of file diff --git a/enos/modules/backend_servers_setup/variables.tf b/enos/modules/backend_servers_setup/variables.tf new file mode 100644 index 0000000..4fe4194 --- /dev/null +++ b/enos/modules/backend_servers_setup/variables.tf @@ -0,0 +1,23 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +variable "hosts" { + description = "The target machines host addresses to use for the Vault cluster" + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) +} + +variable "ldap_tag" { + type = string + description = "OpenLDAP Server Version to use" + default = "1.5.0" +} + +variable "ldap_port" { + type = number + description = "OpenLDAP Server Port" + default = 389 +} \ No newline at end of file diff --git a/enos/modules/build_artifactory_artifact/main.tf b/enos/modules/build_artifactory_artifact/main.tf new file mode 100644 index 0000000..2631123 --- /dev/null +++ b/enos/modules/build_artifactory_artifact/main.tf @@ -0,0 +1,109 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.6.1" + } + } +} + +variable "artifactory_token" { + type = string + description = "The token to use when connecting to artifactory" + default = null + sensitive = true +} + +variable "artifactory_host" { + type = string + description = "The artifactory host to search for vault artifacts" + default = "https://artifactory.hashicorp.engineering/artifactory" +} + +variable "artifactory_repo" { + type = string + description = "The artifactory repo to search for vault artifacts" + default = "hashicorp-vault-ecosystem-staging-local" +} + +variable "product_name" { + type = string + description = "The name of the product for which the plugin is built" + default = "vault-plugin-secrets-openldap" +} + +variable "plugin_name" { + type = string + description = "Name of the plugin" +} + +variable "goarch" { + type = string + description = "The Go architecture target" + default = "amd64" +} + +variable "goos" { + type = string + description = "The Go OS target" + default = "linux" +} + +variable "arch" {} +variable "artifact_type" {} +variable "artifact_path" { default = null } +variable "revision" {} +variable "product_version" {} +variable "bundle_path" { default = null } +variable "plugin_dest_dir" { default = null } +variable "makefile_dir" { default = null } + +locals { + // Compose zip filename: plugin_name_version_goos_goarch.zip + artifact_name = "${var.plugin_name}_${var.product_version}_${var.goos}_${var.goarch}.zip" +} + +data "enos_artifactory_item" "ldap" { + token = var.artifactory_token + name = local.artifact_name + host = var.artifactory_host + repo = var.artifactory_repo + path = "${var.product_name}/*" + properties = tomap({ + "commit" = var.revision, + "product-name" = var.product_name, + "product-version" = var.product_version, + }) +} + +output "url" { + value = 
data.enos_artifactory_item.ldap.results[0].url + description = "Artifactory download URL for the LDAP plugin zip" +} + +output "sha256" { + value = data.enos_artifactory_item.ldap.results[0].sha256 + description = "SHA256 checksum of the LDAP plugin zip" +} + +output "size" { + value = data.enos_artifactory_item.ldap.results[0].size + description = "Size in bytes of the LDAP plugin zip" +} + +output "name" { + value = data.enos_artifactory_item.ldap.results[0].name + description = "Name of the LDAP plugin artifact" +} + +output "ldap_artifactory_release" { + value = { + url = data.enos_artifactory_item.ldap.results[0].url + sha256 = data.enos_artifactory_item.ldap.results[0].sha256 + token = var.artifactory_token + username = null + } +} \ No newline at end of file diff --git a/enos/modules/build_local/main.tf b/enos/modules/build_local/main.tf new file mode 100644 index 0000000..f7b983c --- /dev/null +++ b/enos/modules/build_local/main.tf @@ -0,0 +1,58 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "plugin_dest_dir" { + description = "Where to create the zip bundle of the Plugin build" +} + +variable "goarch" { + type = string + description = "The Go architecture target" + default = "amd64" +} + +variable "goos" { + type = string + description = "The Go OS target" + default = "linux" +} + +variable "plugin_name" { + type = string + description = "Name of the plugin" +} + +variable "makefile_dir" { + type = string + description = "Plugin Project Makefile directory" + default = "$(PWD)" +} + +variable "artifactory_host" { default = null } +variable "artifactory_repo" { default = null } +variable "artifactory_token" { default = null } +variable "arch" { default = null } +variable "artifact_type" { default = null } +variable "revision" { default = null } +variable "product_version" { default = null } + +resource "enos_local_exec" "build" { + scripts = ["${path.module}/scripts/plugin-build.sh"] + + environment = { + PLUGIN_NAME = var.plugin_name + PLUGIN_DIR = var.plugin_dest_dir + MAKEFILE_DIR = var.makefile_dir + GOARCH = var.goarch + GOOS = var.goos + } + +} diff --git a/enos/modules/build_local/scripts/plugin-build.sh b/enos/modules/build_local/scripts/plugin-build.sh new file mode 100755 index 0000000..5d7a027 --- /dev/null +++ b/enos/modules/build_local/scripts/plugin-build.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 +set -e + +# Expect these environment variables: +# PLUGIN_NAME +# PLUGIN_DIR + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$PLUGIN_NAME" ]] && fail "PLUGIN_NAME env variable has not been set" +[[ -z "$PLUGIN_DIR" ]] && fail "PLUGIN_DIR env variable has not been set" + +echo "[build] PLUGIN_NAME=${PLUGIN_NAME:-}" +echo "[build] PLUGIN_DIR=${PLUGIN_DIR:-}" + +# Remove from project .bin directory if it exists +PROJECT_BIN_DIR="${MAKEFILE_DIR}/bin" +if [ -f "${PROJECT_BIN_DIR}/${PLUGIN_NAME}" ]; then + echo "[build] Removing existing plugin at ${PROJECT_BIN_DIR}/${PLUGIN_NAME}" + rm -f "${PROJECT_BIN_DIR}/${PLUGIN_NAME}" +fi + +# Ensure destination directory exists +mkdir -p "${PLUGIN_DIR}" + +# Remove existing plugin (if present) before copying new one +if [ -f "${PLUGIN_DIR}/${PLUGIN_NAME}" ]; then + echo "[build] Removing existing plugin at ${PLUGIN_DIR}/${PLUGIN_NAME}" + rm -f "${PLUGIN_DIR}/${PLUGIN_NAME}" +fi + +# Build plugin +pushd "${MAKEFILE_DIR}" >/dev/null + GOOS="${GOOS:-$(go env GOOS)}" + GOARCH="${GOARCH:-$(go env GOARCH)}" + echo "[build] GOOS=${GOOS} GOARCH=${GOARCH}" + GOOS="${GOOS}" GOARCH="${GOARCH}" make dev +popd >/dev/null + +# Copy and set executable bit +cp "${PROJECT_BIN_DIR}/${PLUGIN_NAME}" "${PLUGIN_DIR}/${PLUGIN_NAME}" +chmod +x "${PLUGIN_DIR}/${PLUGIN_NAME}" + +# Zip up the plugin binary into a bundle +ZIP_FILE="${PLUGIN_DIR}/${PLUGIN_NAME}.zip" +pushd "${PLUGIN_DIR}" >/dev/null + zip -j "${ZIP_FILE}" "${PLUGIN_NAME}" +popd >/dev/null + +echo "[build] Plugin built and zipped at ${ZIP_FILE}" \ No newline at end of file diff --git a/enos/modules/build_releases/main.tf b/enos/modules/build_releases/main.tf new file mode 100644 index 0000000..bb51ef6 --- /dev/null +++ b/enos/modules/build_releases/main.tf @@ -0,0 +1,31 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +# Shim module since Releases provided things will use the ldap_release variable +variable "bundle_path" { + default = "/tmp/vault.zip" +} + +variable "goarch" { + type = string + description = "The Go architecture target" + default = "amd64" +} + +variable "goos" { + type = string + description = "The Go OS target" + default = "linux" +} + +variable "artifactory_host" { default = null } +variable "artifactory_repo" { default = null } +variable "artifactory_token" { default = null } +variable "arch" { default = null } +variable "artifact_path" { default = null } +variable "artifact_type" { default = null } +variable "revision" { default = null } +variable "makefile_dir" { default = null } +variable "plugin_name" { default = null } +variable "product_version" { default = null } +variable "plugin_dest_dir" { default = null } diff --git a/enos/modules/configure_plugin/ldap/main.tf b/enos/modules/configure_plugin/ldap/main.tf new file mode 100644 index 0000000..6a8a550 --- /dev/null +++ b/enos/modules/configure_plugin/ldap/main.tf @@ -0,0 +1,30 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +# Configure the plugin +resource "enos_remote_exec" "plugin_configure" { + scripts = [abspath("${path.module}/scripts/plugin-configure.sh")] + environment = { + PLUGIN_PATH = var.plugin_mount_path + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + LDAP_URL = var.ldap_url + LDAP_BIND_DN = var.ldap_bind_dn + LDAP_BIND_PASS = var.ldap_bind_pass + LDAP_USER_DN = var.ldap_user_dn + LDAP_SCHEMA = var.ldap_schema + } + transport = { + ssh = { + host = var.vault_leader_ip + } + } +} \ No newline at end of file diff --git a/enos/modules/configure_plugin/ldap/scripts/plugin-configure.sh b/enos/modules/configure_plugin/ldap/scripts/plugin-configure.sh new file mode 100755 index 0000000..3275f64 --- /dev/null +++ b/enos/modules/configure_plugin/ldap/scripts/plugin-configure.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 +set -e + +# Required ENV vars: +# PLUGIN_PATH - Mount path for plugin (e.g., 'local-secrets-ldap') +# LDAP_URL - LDAP server URL (e.g., ldap://127.0.0.1:389) +# LDAP_BIND_DN - LDAP bind DN (e.g., cn=admin,dc=example,dc=com) +# LDAP_BIND_PASS - LDAP bind password +# LDAP_USER_DN - LDAP user DN base (e.g., ou=users,dc=example,dc=com) +# LDAP_SCHEMA - LDAP schema type (e.g., openldap) + +export VAULT_ADDR +export VAULT_TOKEN + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$PLUGIN_PATH" ]] && fail "PLUGIN_PATH env variable has not been set" +[[ -z "$LDAP_URL" ]] && fail "LDAP_URL env variable has not been set" +[[ -z "$LDAP_BIND_DN" ]] && fail "LDAP_BIND_DN env variable has not been set" +[[ -z "$LDAP_BIND_PASS" ]] && fail "LDAP_BIND_PASS env variable has not been set" +[[ -z "$LDAP_USER_DN" ]] && fail "LDAP_USER_DN env variable has not been set" +[[ -z "$LDAP_SCHEMA" ]] && fail "LDAP_SCHEMA env variable has not been set" + +echo "[configure] Configuring plugin at $PLUGIN_PATH" + +vault write "${PLUGIN_PATH}/config" \ + url="${LDAP_URL}" \ + binddn="${LDAP_BIND_DN}" \ + bindpass="${LDAP_BIND_PASS}" \ + userdn="${LDAP_USER_DN}" \ + schema="${LDAP_SCHEMA}" + +echo "[configure] Current plugin config:" +vault read "${PLUGIN_PATH}/config" \ No newline at end of file diff --git a/enos/modules/configure_plugin/ldap/variables.tf b/enos/modules/configure_plugin/ldap/variables.tf new file mode 100644 index 0000000..8833a71 --- /dev/null +++ b/enos/modules/configure_plugin/ldap/variables.tf @@ -0,0 +1,48 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +variable "vault_addr" { + type = string + description = "The Vault API address" +} + +variable "vault_root_token" { + type = string + description = "The Vault cluster root token" +} + +variable "vault_leader_ip" { + type = string + description = "Public IP of the Vault leader node" +} + +variable "plugin_mount_path" { + type = string + description = "Mount path for the plugin" +} + +# LDAP variables for configuration +variable "ldap_url" { + type = string + description = "LDAP URL, e.g., ldap://:389" +} + +variable "ldap_bind_dn" { + type = string + description = "LDAP Bind DN" +} + +variable "ldap_bind_pass" { + type = string + description = "LDAP Bind password" +} + +variable "ldap_user_dn" { + type = string + description = "LDAP User DN" +} + +variable "ldap_schema" { + type = string + description = "LDAP schema type, e.g., openldap" +} diff --git a/enos/modules/ec2_bootstrap_tools/main.tf b/enos/modules/ec2_bootstrap_tools/main.tf new file mode 100644 index 0000000..441e0f6 --- /dev/null +++ b/enos/modules/ec2_bootstrap_tools/main.tf @@ -0,0 +1,90 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +# Install Shasum on EC2 targets +resource "enos_remote_exec" "install-shasum" { + for_each = var.hosts + scripts = [abspath("${path.module}/scripts/install-shasum.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +# Ensure the Vault plugin directory exists +resource "enos_remote_exec" "create_plugin_directory" { + for_each = var.hosts + + environment = { + PLUGIN_DIR = var.plugin_dir_vault + } + + scripts = [abspath("${path.module}/scripts/create-plugin-dir.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +# Add plugin directory to the config file +resource "enos_remote_exec" "add_plugin_directory_to_config" { + for_each = var.hosts + + inline = [ + "echo 'plugin_directory = \"/etc/vault/plugins\"' | sudo tee -a /etc/vault.d/vault.hcl" + ] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +# Restart Vault service on all hosts +resource "enos_remote_exec" "restart_vault" { + for_each = var.hosts + + inline = [ + "sudo systemctl restart vault" + ] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +# Unseal Vault +resource "enos_remote_exec" "unseal_vault" { + for_each = var.hosts + + depends_on = [enos_remote_exec.restart_vault] + + scripts = [abspath("${path.module}/scripts/vault-unseal.sh")] + + environment = { + VAULT_ADDR = var.vault_addr + UNSEAL_KEYS = join(",", var.unseal_keys) + THRESHOLD = tostring(var.threshold) + } + + transport = { + ssh = { + host = each.value.public_ip + } + } +} \ No newline at end of file diff --git a/enos/modules/ec2_bootstrap_tools/scripts/create-plugin-dir.sh b/enos/modules/ec2_bootstrap_tools/scripts/create-plugin-dir.sh new file mode 100644 index 0000000..9c591cd --- /dev/null +++ b/enos/modules/ec2_bootstrap_tools/scripts/create-plugin-dir.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash 
+# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +set -e + +PLUGIN_DIR="${PLUGIN_DIR:-/etc/vault/plugins}" + +sudo mkdir -p "$PLUGIN_DIR" +sudo chown vault:vault "$PLUGIN_DIR" \ No newline at end of file diff --git a/enos/modules/ec2_bootstrap_tools/scripts/install-shasum.sh b/enos/modules/ec2_bootstrap_tools/scripts/install-shasum.sh new file mode 100644 index 0000000..19b7396 --- /dev/null +++ b/enos/modules/ec2_bootstrap_tools/scripts/install-shasum.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +set -e + +# Function to detect the OS +detect_os() { + if [ -f /etc/os-release ]; then + # shellcheck disable=SC1091 + . /etc/os-release + echo "$ID" + else + echo "unknown" + fi +} + +# Function to install shasum or sha1sum +install_shasum() { + OS_ID=$(detect_os) + + case "$OS_ID" in + ubuntu|debian) + sudo apt-get update + sudo apt-get install -y perl + ;; + amzn|amazon) + sudo yum install -y perl-Digest-SHA + ;; + rhel|centos|fedora) + sudo yum install -y perl-Digest-SHA + ;; + alpine) + sudo apk add --no-cache perl + ;; + *) + echo "Unsupported OS: $OS_ID" + exit 1 + ;; + esac + + # Verify installation + if ! command -v shasum >/dev/null 2>&1 && ! command -v sha1sum >/dev/null 2>&1; then + echo "Failed to install shasum or sha1sum" + exit 1 + fi +} + +install_shasum \ No newline at end of file diff --git a/enos/modules/ec2_bootstrap_tools/scripts/vault-unseal.sh b/enos/modules/ec2_bootstrap_tools/scripts/vault-unseal.sh new file mode 100644 index 0000000..54ba3cd --- /dev/null +++ b/enos/modules/ec2_bootstrap_tools/scripts/vault-unseal.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +set -e + +if [[ -z "$VAULT_ADDR" || -z "$UNSEAL_KEYS" || -z "$THRESHOLD" ]]; then + echo "Usage: $0 " + exit 1 +fi + +IFS=',' read -ra KEYS <<< "$UNSEAL_KEYS" + +export VAULT_ADDR + +for ((i=0; i&2 + exit 1 +} + +[[ -z "$PLUGIN_NAME" ]] && fail "PLUGIN_NAME env variable has not been set" +[[ -z "$PLUGIN_PATH" ]] && fail "PLUGIN_PATH env variable has not been set" + +echo "[enable] Enabling plugin $PLUGIN_NAME at path $PLUGIN_PATH" + +# Disable previous mount if exists +vault secrets disable "${PLUGIN_PATH}" || true + +# Enable plugin at specified path +vault secrets enable -path="${PLUGIN_PATH}" "${PLUGIN_NAME}" + +echo "[enable] Plugin $PLUGIN_NAME enabled at $PLUGIN_PATH." \ No newline at end of file diff --git a/enos/modules/setup_plugin/scripts/plugin-register.sh b/enos/modules/setup_plugin/scripts/plugin-register.sh new file mode 100755 index 0000000..cb8910e --- /dev/null +++ b/enos/modules/setup_plugin/scripts/plugin-register.sh @@ -0,0 +1,60 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 +set -e + +# Required ENV vars: +# PLUGIN_BINARY_SRC - Where the plugin binary is (built artifact) +# PLUGIN_DIR_VAULT - Vault's plugin directory +# PLUGIN_NAME - Name to register in Vault + +export VAULT_ADDR +export VAULT_TOKEN + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$PLUGIN_BINARY_SRC" ]] && fail "PLUGIN_BINARY_SRC env variable has not been set" +[[ -z "$PLUGIN_DIR_VAULT" ]] && fail "PLUGIN_DIR_VAULT env variable has not been set" +[[ -z "$PLUGIN_NAME" ]] && fail "PLUGIN_NAME env variable has not been set" + +echo "[register] Registering plugin: $PLUGIN_NAME" + +# Determine plugin binary source path (handle directories) +if [[ -d "$PLUGIN_BINARY_SRC" ]]; then + BINARY_PATH="$PLUGIN_BINARY_SRC/$PLUGIN_NAME" +else + BINARY_PATH="$PLUGIN_BINARY_SRC" +fi + +# Ensure the Vault plugin directory exists +mkdir -p "${PLUGIN_DIR_VAULT}" + +# Clean up any previous plugin binary +sudo rm -f "${PLUGIN_DIR_VAULT}/${PLUGIN_NAME}" || true + +# Copy the binary to Vault's plugin directory +sudo cp "${BINARY_PATH}" "${PLUGIN_DIR_VAULT}/${PLUGIN_NAME}" + +# Set permissions to ensure Vault can execute the plugin binary +sudo chmod 755 "${PLUGIN_DIR_VAULT}/${PLUGIN_NAME}" + +# Calculate shasum +SHASUM="$(shasum -a 256 "${PLUGIN_DIR_VAULT}/${PLUGIN_NAME}" | awk '{print $1}')" +if [[ -z "$SHASUM" ]]; then + echo "[register] error: shasum not set" + exit 1 +fi +echo "[register] Plugin SHA256: $SHASUM" + +# Deregister any previous registration of this plugin +vault plugin deregister secret "${PLUGIN_NAME}" || true + +# Register plugin with Vault +vault plugin register \ + -sha256="${SHASUM}" \ + secret "${PLUGIN_NAME}" + +echo "[register] Plugin $PLUGIN_NAME registered successfully." 
\ No newline at end of file diff --git a/enos/modules/setup_plugin/variables.tf b/enos/modules/setup_plugin/variables.tf new file mode 100644 index 0000000..1c89f24 --- /dev/null +++ b/enos/modules/setup_plugin/variables.tf @@ -0,0 +1,77 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +variable "vault_addr" { + type = string + description = "The Vault API address" +} + +variable "vault_root_token" { + type = string + description = "The Vault cluster root token" +} + +variable "vault_leader_ip" { + type = string + description = "Public IP of the Vault leader node" +} + +variable "plugin_name" { + type = string + description = "Name of the plugin" +} + +variable "plugin_source_type" { + type = string + description = "Plugin Source" + default = "local_build" + validation { + condition = contains(["local_build", "registry", "local_path"], var.plugin_source_type) + error_message = "plugin_source_type must be one of: 'local_build', 'registry', 'local_path'." + } +} + +variable "plugin_dir_vault" { + type = string + description = "Plugin directory on Vault side" +} + +variable "plugin_mount_path" { + type = string + description = "Mount path for the plugin" +} + +variable "artifactory_release" { + type = object({ + token = string + url = string + sha256 = string + username = string + }) + description = "The Artifactory release information to install Vault artifacts from Artifactory" + default = null +} + +variable "hosts" { + description = "The target machines host addresses to use for the Vault cluster" + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) +} + +variable "release" { + type = object({ + version = string + edition = string + }) + description = "LDAP release version and edition to install from releases.hashicorp.com" + default = null +} + +variable "local_artifact_path" { + type = string + description = "The path to a locally built vault artifact to install. 
It can be a zip archive, RPM, or Debian package" + default = null +} \ No newline at end of file From 906d6594612615ebbf23fba1940ebcd4c3fe4102 Mon Sep 17 00:00:00 2001 From: Hamza Shili <98858609+HamzaShili65@users.noreply.github.com> Date: Mon, 4 Aug 2025 12:55:56 -0700 Subject: [PATCH 08/26] Vault 37085/plugin quality configure plugin and test static role api crud with enos (#179) * add environment setup and teardown srcipts and make targets for ldap server * add terraform module for ldap server setup * add terraform module for building, registering, enabling, and configuring the plugin * add terraform module for bootstrapping vault cluster * add enos modules * add enos descriptions * add ingress for ldap server and machine os and arch outputs * add smoke scenario for openldap * ignore the .enos dir * fix formatting error * install openldap clients to vault targets * add ldap ip and port as outputs * add module for testing static role crud api * update static role crud api module and decription * add step to test static role crud api * fmt and add env vars checks * add make target for static role api test * fmt * fmt * fmt and remove unused modules * use ldap server private ip for plugin configuration and static role api testing * refactor ldap configuration variables * change license to MPL-2.0 * add support for manual static-role rotation and root-rotation * change ldap image tag to 1.3.0 * remove rotate-root check of initial root credential validity * change ldap_tag to 1.3.0 in makefile * move root credentials rotation to the begining of the script --------- Co-authored-by: Hamza ElMokhtar Shili --- Makefile | 10 +- enos/enos-descriptions.hcl | 4 + enos/enos-modules.hcl | 3 + enos/enos-scenario-openldap.hcl | 31 ++++- enos/enos-variables.hcl | 28 +++-- enos/modules/backend_servers_setup/main.tf | 1 + enos/modules/backend_servers_setup/outputs.tf | 8 +- .../build_artifactory_artifact/main.tf | 10 +- enos/modules/configure_plugin/ldap/main.tf | 13 +- 
.../configure_plugin/ldap/variables.tf | 19 ++- enos/modules/ec2_bootstrap_tools/main.tf | 15 +++ enos/modules/static_role_crud_api/main.tf | 41 +++++++ .../scripts/static-role.sh | 114 ++++++++++++++++++ .../modules/static_role_crud_api/variables.tf | 55 +++++++++ 14 files changed, 316 insertions(+), 36 deletions(-) create mode 100644 enos/modules/static_role_crud_api/main.tf create mode 100644 enos/modules/static_role_crud_api/scripts/static-role.sh create mode 100644 enos/modules/static_role_crud_api/variables.tf diff --git a/Makefile b/Makefile index e8989c7..6942f83 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,7 @@ PLUGIN_PATH ?= local-secrets-ldap LDAP_DOMAIN ?= example.com LDAP_ORG ?= example LDAP_ADMIN_PW ?= adminpassword -IMAGE_TAG ?= 1.5.0 +IMAGE_TAG ?= 1.3.0 LDAP_PORT ?= 389 LDIF_PATH ?= $(PWD)/bootstrap/ldif/seed.ldif @@ -114,3 +114,11 @@ configure: plugin-build plugin-register plugin-enable plugin-configure .PHONY: teardown-env teardown-env: cd bootstrap && ./teardown-env.sh + +.PHONY: static-role-test +static-role-test: + cd enos/modules/static_role_crud_api && ./scripts/static-role.sh + +.PHONY: teardown-env +teardown-env: + cd bootstrap && ./teardown-env.sh diff --git a/enos/enos-descriptions.hcl b/enos/enos-descriptions.hcl index a3cbd07..61011c2 100644 --- a/enos/enos-descriptions.hcl +++ b/enos/enos-descriptions.hcl @@ -88,6 +88,10 @@ globals { Must have the 'edition' variant to be set to any Enterprise edition. EOF + static_role_crud_api = <<-EOF + Tests the lifecycle of a static role via the Vault CRUD API. + EOF + setup_plugin = <<-EOF Build, register, and enable the Vault plugin. 
EOF diff --git a/enos/enos-modules.hcl b/enos/enos-modules.hcl index 35a8915..9dd3565 100644 --- a/enos/enos-modules.hcl +++ b/enos/enos-modules.hcl @@ -112,6 +112,9 @@ module "setup_plugin" { source = "./modules/setup_plugin" } +module "static_role_crud_api" { + source = "./modules/static_role_crud_api" +} // create target instances using ec2:RunInstances module "target_ec2_instances" { diff --git a/enos/enos-scenario-openldap.hcl b/enos/enos-scenario-openldap.hcl index 69873d4..242228e 100644 --- a/enos/enos-scenario-openldap.hcl +++ b/enos/enos-scenario-openldap.hcl @@ -166,6 +166,7 @@ scenario "openldap" { common_tags = global.tags seal_key_names = step.create_seal_key.resource_names vpc_id = step.create_vpc.id + instance_count = 1 } } @@ -459,14 +460,38 @@ scenario "openldap" { vault_root_token = step.create_vault_cluster.root_token plugin_mount_path = var.plugin_mount_path - ldap_url = step.create_ldap_server.ldap_url - ldap_bind_dn = var.ldap_bind_dn + ldap_host = step.create_ldap_server.ldap_ip_address + ldap_port = step.create_ldap_server.ldap_port + ldap_base_dn = var.ldap_base_dn ldap_bind_pass = var.ldap_bind_pass - ldap_user_dn = var.ldap_user_dn ldap_schema = var.ldap_schema } } + step "test_static_role_crud_api" { + description = global.description.static_role_crud_api + module = module.static_role_crud_api + depends_on = [step.configure_plugin] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_leader_ip = step.get_leader_ip.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + plugin_mount_path = var.plugin_mount_path + ldap_host = step.create_ldap_server.ldap_ip_address + ldap_port = step.create_ldap_server.ldap_port + ldap_base_dn = var.ldap_base_dn + ldap_bind_pass = var.ldap_bind_pass + ldap_user_role_name = var.ldap_user_role_name + ldap_username = var.ldap_username + ldap_user_old_password = var.ldap_user_old_password 
+ } + } + step "verify_raft_auto_join_voter" { description = global.description.verify_raft_cluster_all_nodes_are_voters diff --git a/enos/enos-variables.hcl b/enos/enos-variables.hcl index 985f792..c40b5e3 100644 --- a/enos/enos-variables.hcl +++ b/enos/enos-variables.hcl @@ -74,10 +74,10 @@ variable "ldap_artifact_path" { default = "/tmp/vault-plugin-secrets-openldap.zip" } -variable "ldap_bind_dn" { - description = "LDAP bind DN" +variable "ldap_base_dn" { type = string - default = null + description = "The common DN suffix" + default = "dc=example,dc=com" } variable "ldap_bind_pass" { @@ -107,19 +107,25 @@ variable "ldap_schema" { variable "ldap_tag" { description = "LDAP image tag version" type = string - default = null + default = "1.3.0" } -variable "ldap_url" { - description = "LDAP server URL" +variable "ldap_username" { + description = "The username of the LDAP user to create" type = string - default = null + default = "mary.smith" } -variable "ldap_user_dn" { - description = "LDAP user DN" +variable "ldap_user_old_password" { + description = "The old password of the LDAP user to create" type = string - default = null + default = "defaultpassword" +} + +variable "ldap_user_role_name" { + description = "The name of the LDAP user role to create" + type = string + default = "mary" } variable "makefile_dir" { @@ -275,4 +281,4 @@ variable "verify_log_secrets" { description = "If true and var.vault_enable_audit_devices is true we'll verify that the audit log does not contain unencrypted secrets. Requires var.vault_radar_license_path to be set to a valid license file." 
type = bool default = false -} +} \ No newline at end of file diff --git a/enos/modules/backend_servers_setup/main.tf b/enos/modules/backend_servers_setup/main.tf index d7958b7..29f5c5f 100644 --- a/enos/modules/backend_servers_setup/main.tf +++ b/enos/modules/backend_servers_setup/main.tf @@ -17,6 +17,7 @@ locals { tag = var.ldap_tag port = tostring(var.ldap_port) ip_address = var.hosts[0].public_ip + private_ip = var.hosts[0].private_ip } ldif_path = "/tmp/seed.ldif" } diff --git a/enos/modules/backend_servers_setup/outputs.tf b/enos/modules/backend_servers_setup/outputs.tf index 981db0c..7b9c4ef 100644 --- a/enos/modules/backend_servers_setup/outputs.tf +++ b/enos/modules/backend_servers_setup/outputs.tf @@ -7,6 +7,10 @@ output "state" { } } -output "ldap_url" { - value = "ldap://${local.ldap_server.ip_address}:${local.ldap_server.port}" +output "ldap_ip_address" { + value = local.ldap_server.private_ip +} + +output "ldap_port" { + value = local.ldap_server.port } \ No newline at end of file diff --git a/enos/modules/build_artifactory_artifact/main.tf b/enos/modules/build_artifactory_artifact/main.tf index 2631123..9c73d44 100644 --- a/enos/modules/build_artifactory_artifact/main.tf +++ b/enos/modules/build_artifactory_artifact/main.tf @@ -67,11 +67,11 @@ locals { } data "enos_artifactory_item" "ldap" { - token = var.artifactory_token - name = local.artifact_name - host = var.artifactory_host - repo = var.artifactory_repo - path = "${var.product_name}/*" + token = var.artifactory_token + name = local.artifact_name + host = var.artifactory_host + repo = var.artifactory_repo + path = "${var.product_name}/*" properties = tomap({ "commit" = var.revision, "product-name" = var.product_name, diff --git a/enos/modules/configure_plugin/ldap/main.tf b/enos/modules/configure_plugin/ldap/main.tf index 6a8a550..0cf87d9 100644 --- a/enos/modules/configure_plugin/ldap/main.tf +++ b/enos/modules/configure_plugin/ldap/main.tf @@ -1,6 +1,5 @@ # Copyright (c) HashiCorp, Inc. 
# SPDX-License-Identifier: MPL-2.0 - terraform { required_providers { enos = { @@ -9,6 +8,12 @@ terraform { } } +locals { + admin_dn = "cn=admin,${var.ldap_base_dn}" + ldap_url = "ldap://${var.ldap_host}:${var.ldap_port}" + users_dn = "ou=users,${var.ldap_base_dn}" +} + # Configure the plugin resource "enos_remote_exec" "plugin_configure" { scripts = [abspath("${path.module}/scripts/plugin-configure.sh")] @@ -16,10 +21,10 @@ resource "enos_remote_exec" "plugin_configure" { PLUGIN_PATH = var.plugin_mount_path VAULT_ADDR = var.vault_addr VAULT_TOKEN = var.vault_root_token - LDAP_URL = var.ldap_url - LDAP_BIND_DN = var.ldap_bind_dn + LDAP_URL = local.ldap_url + LDAP_BIND_DN = local.admin_dn LDAP_BIND_PASS = var.ldap_bind_pass - LDAP_USER_DN = var.ldap_user_dn + LDAP_USER_DN = local.users_dn LDAP_SCHEMA = var.ldap_schema } transport = { diff --git a/enos/modules/configure_plugin/ldap/variables.tf b/enos/modules/configure_plugin/ldap/variables.tf index 8833a71..a041113 100644 --- a/enos/modules/configure_plugin/ldap/variables.tf +++ b/enos/modules/configure_plugin/ldap/variables.tf @@ -21,28 +21,27 @@ variable "plugin_mount_path" { description = "Mount path for the plugin" } -# LDAP variables for configuration -variable "ldap_url" { +variable "ldap_host" { type = string - description = "LDAP URL, e.g., ldap://:389" + description = "The LDAP server host" } -variable "ldap_bind_dn" { +variable "ldap_port" { type = string - description = "LDAP Bind DN" + description = "The LDAP server port" } -variable "ldap_bind_pass" { +variable "ldap_base_dn" { type = string - description = "LDAP Bind password" + description = "The common DN suffix" } -variable "ldap_user_dn" { +variable "ldap_bind_pass" { type = string - description = "LDAP User DN" + description = "LDAP bind password" } variable "ldap_schema" { type = string - description = "LDAP schema type, e.g., openldap" + description = "LDAP schema type" } diff --git a/enos/modules/ec2_bootstrap_tools/main.tf 
b/enos/modules/ec2_bootstrap_tools/main.tf index 441e0f6..e6c08fd 100644 --- a/enos/modules/ec2_bootstrap_tools/main.tf +++ b/enos/modules/ec2_bootstrap_tools/main.tf @@ -21,6 +21,21 @@ resource "enos_remote_exec" "install-shasum" { } } +# Install OpenLDAP clients on EC2 targets +resource "enos_remote_exec" "install-openldap-clients" { + for_each = var.hosts + + inline = [ + "sudo yum install -y openldap-clients" + ] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + # Ensure the Vault plugin directory exists resource "enos_remote_exec" "create_plugin_directory" { for_each = var.hosts diff --git a/enos/modules/static_role_crud_api/main.tf b/enos/modules/static_role_crud_api/main.tf new file mode 100644 index 0000000..9bf8c79 --- /dev/null +++ b/enos/modules/static_role_crud_api/main.tf @@ -0,0 +1,41 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +locals { + admin_dn = "cn=admin,${var.ldap_base_dn}" + users_dn = "ou=users,${var.ldap_base_dn}" + user_dn = "uid=${var.ldap_username},${local.users_dn}" +} + +resource "enos_remote_exec" "static_role_crud_api_test" { + scripts = ["${path.module}/scripts/static-role.sh"] + + environment = { + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + PLUGIN_PATH = var.plugin_mount_path + LDAP_HOST = var.ldap_host + LDAP_PORT = var.ldap_port + LDAP_DN = local.user_dn + LDAP_USERNAME = var.ldap_username + LDAP_OLD_PASSWORD = var.ldap_user_old_password + ROLE_NAME = var.ldap_user_role_name + LDAP_BIND_DN = local.admin_dn + LDAP_BIND_PASS = var.ldap_bind_pass + } + + transport = { + ssh = { + host = var.vault_leader_ip + } + } + +} \ No newline at end of file diff --git a/enos/modules/static_role_crud_api/scripts/static-role.sh b/enos/modules/static_role_crud_api/scripts/static-role.sh new file mode 100644 index 0000000..0e15567 --- /dev/null +++ 
b/enos/modules/static_role_crud_api/scripts/static-role.sh @@ -0,0 +1,114 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 +set -e + +# Test Vault LDAP Static Role CRUD and credential lifecycle using provided LDIFs. +# Assumptions: +# - Vault CLI is authenticated and VAULT_ADDR and VAULT_TOKEN are set. +# - Required ENV vars: +# PLUGIN_PATH - Path to the mounted plugin secrets engine (e.g., ldap-secrets/) +# LDAP_HOST - LDAP server hostname or IP (e.g., 127.0.0.1) +# LDAP_PORT - LDAP server port (e.g., 389) +# LDAP_DN - User DN (e.g., uid=mary.smith,ou=users,dc=example,dc=com) +# LDAP_USERNAME - LDAP username (e.g., mary.smith) +# LDAP_OLD_PASSWORD - The original LDAP password for testing (before Vault rotation) +# ROLE_NAME - Name of the static role to create (e.g., mary) + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$PLUGIN_PATH" ]] && fail "PLUGIN_PATH env variable has not been set" +[[ -z "$LDAP_HOST" ]] && fail "LDAP_HOST env variable has not been set" +[[ -z "$LDAP_PORT" ]] && fail "LDAP_PORT env variable has not been set" +[[ -z "$LDAP_DN" ]] && fail "LDAP_DN env variable has not been set" +[[ -z "$LDAP_USERNAME" ]] && fail "LDAP_USERNAME env variable has not been set" +[[ -z "$LDAP_OLD_PASSWORD" ]] && fail "LDAP_OLD_PASSWORD env variable has not been set" +[[ -z "$ROLE_NAME" ]] && fail "ROLE_NAME env variable has not been set" +[[ -z "$LDAP_BIND_DN" ]] && fail "LDAP_BIND_DN env variable has not been set" +[[ -z "$LDAP_BIND_PASS" ]] && fail "LDAP_BIND_PASS env variable has not been set" + +export VAULT_ADDR +export VAULT_TOKEN + +ROLE_PATH="${PLUGIN_PATH}/static-role/${ROLE_NAME}" +CRED_PATH="${PLUGIN_PATH}/static-cred/${ROLE_NAME}" + +echo "==> LDAP_HOST: ${LDAP_HOST}" +echo "==> LDAP_PORT: ${LDAP_PORT}" + +echo "==> Rotating root credentials" +vault write -f "${PLUGIN_PATH}/rotate-root" + +echo "==> Creating static role ${ROLE_NAME}" +vault write "${ROLE_PATH}" \ + dn="${LDAP_DN}" \ + 
username="${LDAP_USERNAME}" \ + rotation_period="5m" + +echo "==> Reading static role" +vault read "${ROLE_PATH}" + +echo "==> Reading credentials" +vault read "${CRED_PATH}" + +echo "==> Listing all static roles" +vault list "${PLUGIN_PATH}/static-role" + +echo "==> LDAP check: old password should fail after rotation" +if ldapwhoami -h "${LDAP_HOST}:${LDAP_PORT}" -x -w "${LDAP_OLD_PASSWORD}" -D "${LDAP_DN}"; then + echo "[ERROR] Old password still works! Rotation failed." + exit 1 +else + echo "[OK] Old password rejected as expected." +fi + +echo "==> LDAP check: new password should succeed" +NEW_PASSWORD=$(vault read -field=password "${CRED_PATH}") +if ldapwhoami -h "${LDAP_HOST}:${LDAP_PORT}" -x -w "${NEW_PASSWORD}" -D "${LDAP_DN}"; then + echo "[OK] New password accepted as expected." +else + echo "[ERROR] New password did not work!" + exit 1 +fi + +echo "==> Forcing manual rotation for static role" +vault write -force "${PLUGIN_PATH}/rotate-role/${ROLE_NAME}" +echo "==> Reading credentials after manual rotation" +ROTATED_PASSWORD=$(vault read -field=password "${CRED_PATH}") +echo "==> LDAP check: old generated password should be rejected" +if ldapwhoami -h "${LDAP_HOST}:${LDAP_PORT}" -x -w "${NEW_PASSWORD}" -D "${LDAP_DN}"; then + echo "[ERROR] Previously generated password still works after manual rotation!" + exit 1 +else + echo "[OK] Old generated password rejected as expected." +fi +echo "==> LDAP check: new rotated password should succeed" +if ldapwhoami -h "${LDAP_HOST}:${LDAP_PORT}" -x -w "${ROTATED_PASSWORD}" -D "${LDAP_DN}"; then + echo "[OK] Rotated password accepted as expected." +else + echo "[ERROR] Rotated password did not work!" 
+ exit 1 +fi + +echo "==> Updating static role (change rotation_period)" +vault write "${ROLE_PATH}" \ + dn="${LDAP_DN}" \ + username="${LDAP_USERNAME}" \ + rotation_period="10m" + +echo "==> Reading updated static role" +vault read "${ROLE_PATH}" + +echo "==> Deleting static role" +vault delete "${ROLE_PATH}" + +echo "==> Confirming deletion" +if vault read "${ROLE_PATH}"; then + echo "[ERROR] Static role still exists after deletion!" + exit 1 +else + echo "[OK] Static role deleted successfully." +fi \ No newline at end of file diff --git a/enos/modules/static_role_crud_api/variables.tf b/enos/modules/static_role_crud_api/variables.tf new file mode 100644 index 0000000..862ae7a --- /dev/null +++ b/enos/modules/static_role_crud_api/variables.tf @@ -0,0 +1,55 @@ +variable "vault_leader_ip" { + type = string + description = "Public IP of the Vault leader node" +} + +variable "plugin_mount_path" { + type = string + description = "Mount path for the plugin" +} + + +variable "ldap_host" { + type = string + description = "The LDAP server host" +} + +variable "ldap_port" { + type = string + description = "The LDAP server port" +} + +variable "ldap_base_dn" { + type = string + description = "The common DN suffix" +} + +variable "ldap_bind_pass" { + type = string + description = "LDAP bind password" +} + +variable "ldap_username" { + description = "The username of the LDAP user to create" + type = string +} + +variable "ldap_user_old_password" { + description = "The old password of the LDAP user to create" + type = string +} + +variable "ldap_user_role_name" { + description = "The name of the LDAP user role to create" + type = string +} + +variable "vault_addr" { + type = string + description = "The Vault API address" +} + +variable "vault_root_token" { + type = string + description = "The Vault cluster root token" +} From 70ce7cbc3ab95b378495c42d5dc5466ed9b8ec97 Mon Sep 17 00:00:00 2001 From: Hamza Shili <98858609+HamzaShili65@users.noreply.github.com> Date: Tue, 5 Aug 
2025 09:25:06 -0700 Subject: [PATCH 09/26] Vault 37086 plugin quality configure plugin and test dynamic role api crud with enos (#181) * add environment setup and teardown scripts and make targets for ldap server * add terraform module for ldap server setup * add terraform module for building, registering, enabling, and configuring the plugin * add terraform module for bootstrapping vault cluster * add enos modules * add enos descriptions * add ingress for ldap server and machine os and arch outputs * add smoke scenario for openldap * ignore the .enos dir * fix formatting error * install openldap clients to vault targets * add ldap ip and port as outputs * add module for testing static role crud api * update static role crud api module and description * add step to test static role crud api * add module for testing dynamic role api * add dynamic role api test step * add dynamic role api test module and description * add make target for dynamic role api test * change license to MPL-2.0 * removed unused tf modules and scripts * added error handling for requesting dynamic credentials * make input variables to the test_dynamic_role_crud_api step dynamic --------- Co-authored-by: Hamza ElMokhtar Shili --- Makefile | 4 + enos/enos-descriptions.hcl | 4 + enos/enos-modules.hcl | 4 + enos/enos-scenario-openldap.hcl | 25 ++++- enos/enos-variables.hcl | 11 ++ .../dynamic_role_crud_api/ldif/creation.ldif | 7 ++ .../dynamic_role_crud_api/ldif/deletion.ldif | 2 + .../dynamic_role_crud_api/ldif/rollback.ldif | 2 + enos/modules/dynamic_role_crud_api/main.tf | 66 +++++++++++ .../scripts/dynamic-role.sh | 103 ++++++++++++++++++ .../dynamic_role_crud_api/variables.tf | 53 +++++++++ 11 files changed, 280 insertions(+), 1 deletion(-) create mode 100644 enos/modules/dynamic_role_crud_api/ldif/creation.ldif create mode 100644 enos/modules/dynamic_role_crud_api/ldif/deletion.ldif create mode 100644 enos/modules/dynamic_role_crud_api/ldif/rollback.ldif create mode 100644 
enos/modules/dynamic_role_crud_api/main.tf create mode 100755 enos/modules/dynamic_role_crud_api/scripts/dynamic-role.sh create mode 100644 enos/modules/dynamic_role_crud_api/variables.tf diff --git a/Makefile b/Makefile index 6942f83..b480a49 100644 --- a/Makefile +++ b/Makefile @@ -119,6 +119,10 @@ teardown-env: static-role-test: cd enos/modules/static_role_crud_api && ./scripts/static-role.sh +.PHONY: dynamic-role-test +dynamic-role-test: + cd enos/modules/dynamic_role_crud_api && ./scripts/dynamic-role.sh + .PHONY: teardown-env teardown-env: cd bootstrap && ./teardown-env.sh diff --git a/enos/enos-descriptions.hcl b/enos/enos-descriptions.hcl index 61011c2..60e7150 100644 --- a/enos/enos-descriptions.hcl +++ b/enos/enos-descriptions.hcl @@ -59,6 +59,10 @@ globals { egress traffic via the internet gateway. EOF + dynamic_role_crud_api = <<-EOF + Tests the lifecycle of a dynamic role via the Vault CRUD API. + EOF + ec2_info = <<-EOF Query various endpoints in AWS Ec2 to gather metadata we'll use later in our run when creating infrastructure for the Vault cluster. 
This metadata includes: diff --git a/enos/enos-modules.hcl b/enos/enos-modules.hcl index 9dd3565..b1df9aa 100644 --- a/enos/enos-modules.hcl +++ b/enos/enos-modules.hcl @@ -65,6 +65,10 @@ module "create_vpc" { common_tags = var.tags } +module "dynamic_role_crud_api" { + source = "./modules/dynamic_role_crud_api" +} + module "ec2_info" { source = "git::https://github.com/hashicorp/vault.git//enos/modules/ec2_info?ref=${var.vault_repo_ref}" } diff --git a/enos/enos-scenario-openldap.hcl b/enos/enos-scenario-openldap.hcl index 242228e..1c38336 100644 --- a/enos/enos-scenario-openldap.hcl +++ b/enos/enos-scenario-openldap.hcl @@ -166,7 +166,6 @@ scenario "openldap" { common_tags = global.tags seal_key_names = step.create_seal_key.resource_names vpc_id = step.create_vpc.id - instance_count = 1 } } @@ -492,6 +491,30 @@ scenario "openldap" { } } + step "test_dynamic_role_crud_api" { + description = global.description.dynamic_role_crud_api + module = module.dynamic_role_crud_api + depends_on = [step.setup_plugin] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_leader_ip = step.get_leader_ip.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + hosts = step.create_vault_cluster_targets.hosts + + plugin_mount_path = var.plugin_mount_path + ldap_host = step.create_ldap_server.ldap_ip_address + ldap_port = step.create_ldap_server.ldap_port + ldap_base_dn = var.ldap_base_dn + dynamic_role_ldif_templates_path = var.dynamic_role_ldif_templates_path + ldap_dynamic_user_role_name = var.ldap_dynamic_user_role_name + } + } + step "verify_raft_auto_join_voter" { description = global.description.verify_raft_cluster_all_nodes_are_voters diff --git a/enos/enos-variables.hcl b/enos/enos-variables.hcl index c40b5e3..e88f74f 100644 --- a/enos/enos-variables.hcl +++ b/enos/enos-variables.hcl @@ -68,6 +68,11 @@ variable "distro_version_ubuntu" { default = "24.04" 
// or "20.04", "22.04" } +variable "dynamic_role_ldif_templates_path" { + description = "LDIF templates path for dynamic role CRUD API tests" + default = "/tmp" +} + variable "ldap_artifact_path" { description = "Path to CRT generated or local vault.zip bundle" type = string @@ -86,6 +91,12 @@ variable "ldap_bind_pass" { default = null } +variable "ldap_dynamic_user_role_name" { + description = "The name of the LDAP dynamic user role to create" + type = string + default = "adam" +} + variable "ldap_plugin_version" { description = "LDAP plugin version to use" type = string diff --git a/enos/modules/dynamic_role_crud_api/ldif/creation.ldif b/enos/modules/dynamic_role_crud_api/ldif/creation.ldif new file mode 100644 index 0000000..981e479 --- /dev/null +++ b/enos/modules/dynamic_role_crud_api/ldif/creation.ldif @@ -0,0 +1,7 @@ +dn: uid={{.Username}},ou=users,dc=example,dc=com +objectClass: inetOrgPerson +uid: {{.Username}} +cn: {{.Username}} +sn: {{.Password | utf16le | base64}} +memberOf: cn=dev,ou=groups,dc=example,dc=com +userPassword: {{.Password}} \ No newline at end of file diff --git a/enos/modules/dynamic_role_crud_api/ldif/deletion.ldif b/enos/modules/dynamic_role_crud_api/ldif/deletion.ldif new file mode 100644 index 0000000..3b1551e --- /dev/null +++ b/enos/modules/dynamic_role_crud_api/ldif/deletion.ldif @@ -0,0 +1,2 @@ +dn: uid={{.Username}},ou=users,dc=example,dc=com +changetype: delete \ No newline at end of file diff --git a/enos/modules/dynamic_role_crud_api/ldif/rollback.ldif b/enos/modules/dynamic_role_crud_api/ldif/rollback.ldif new file mode 100644 index 0000000..3b1551e --- /dev/null +++ b/enos/modules/dynamic_role_crud_api/ldif/rollback.ldif @@ -0,0 +1,2 @@ +dn: uid={{.Username}},ou=users,dc=example,dc=com +changetype: delete \ No newline at end of file diff --git a/enos/modules/dynamic_role_crud_api/main.tf b/enos/modules/dynamic_role_crud_api/main.tf new file mode 100644 index 0000000..06ea329 --- /dev/null +++ 
b/enos/modules/dynamic_role_crud_api/main.tf @@ -0,0 +1,66 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +locals { + ldif_files = fileset("${path.module}/ldif", "*") + file_host_pairs = flatten([ + for i in range(length(var.hosts)) : [ + for file in local.ldif_files : { + host_index = i + public_ip = var.hosts[i].public_ip + file = file + } + ] + ]) + file_host_map = { + for item in local.file_host_pairs : + "${item["host_index"]}_${item["file"]}" => item + } + users_dn = "ou=users,${var.ldap_base_dn}" + ldap_user_dn_tpl = "uid={{username}},${local.users_dn}" +} + +# Copy LDIF files to the hosts +resource "enos_file" "ldif_files" { + for_each = local.file_host_map + source = abspath("${path.module}/ldif/${each.value["file"]}") + destination = "${var.dynamic_role_ldif_templates_path}/${each.value["file"]}" + transport = { + ssh = { + host = each.value["public_ip"] + } + } +} + +# Execute the dynamic role CRUD API test script on the Vault leader +resource "enos_remote_exec" "dynamic_role_crud_api_test" { + depends_on = [enos_file.ldif_files] + scripts = ["${path.module}/scripts/dynamic-role.sh"] + + environment = { + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + PLUGIN_PATH = var.plugin_mount_path + LDAP_HOST = var.ldap_host + LDAP_PORT = var.ldap_port + + ROLE_NAME = var.ldap_dynamic_user_role_name + LDAP_USER_DN_TPL = local.ldap_user_dn_tpl + LDIF_PATH = var.dynamic_role_ldif_templates_path + } + + transport = { + ssh = { + host = var.vault_leader_ip + } + } + +} \ No newline at end of file diff --git a/enos/modules/dynamic_role_crud_api/scripts/dynamic-role.sh b/enos/modules/dynamic_role_crud_api/scripts/dynamic-role.sh new file mode 100755 index 0000000..b2c326a --- /dev/null +++ b/enos/modules/dynamic_role_crud_api/scripts/dynamic-role.sh @@ -0,0 +1,103 @@ +#!/usr/bin/env bash +# Copyright (c) 
HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 +set -e + +# Test Vault LDAP Dynamic Role CRUD and credential lifecycle using provided LDIFs. +# Assumptions: +# - You have uploaded creation.ldif, deletion.ldif, and rollback.ldif to the server. +# - Vault CLI is authenticated and VAULT_ADDR and VAULT_TOKEN are set. +# - Required ENV vars: +# PLUGIN_PATH (e.g., local-secrets-ldap) +# ROLE_NAME (e.g., adam) +# LDAP_HOST +# LDAP_PORT +# LDAP_USER_DN_TPL (e.g., uid={{username}},ou=users,dc=example,dc=com) +# LDIF_PATH (path to directory containing creation.ldif, deletion.ldif, rollback.ldif) + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$ROLE_NAME" ]] && fail "ROLE_NAME env variable has not been set" +[[ -z "$LDAP_HOST" ]] && fail "LDAP_HOST env variable has not been set" +[[ -z "$LDAP_PORT" ]] && fail "LDAP_PORT env variable has not been set" +[[ -z "$LDAP_USER_DN_TPL" ]] && fail "LDAP_USER_DN_TPL env variable has not been set" +[[ -z "$LDIF_PATH" ]] && fail "LDIF_PATH env variable has not been set" + +export VAULT_ADDR +export VAULT_TOKEN + +echo "==> Rotating root credentials" +vault write -f "${PLUGIN_PATH}/rotate-root" + +ROLE_PATH="${PLUGIN_PATH}/role/${ROLE_NAME}" + +echo "==> Creating dynamic role: ${ROLE_NAME}" +vault write "${ROLE_PATH}" \ + creation_ldif=@"${LDIF_PATH}/creation.ldif" \ + deletion_ldif=@"${LDIF_PATH}/deletion.ldif" \ + rollback_ldif=@"${LDIF_PATH}/rollback.ldif" \ + default_ttl="2m" \ + max_ttl="10m" + +echo "==> Reading dynamic role" +vault read "${ROLE_PATH}" + +echo "==> Listing dynamic roles" +vault list "${PLUGIN_PATH}/role" + +echo "==> Requesting dynamic credentials" +CRED_PATH="${PLUGIN_PATH}/creds/${ROLE_NAME}" +if ! 
DYNAMIC_CREDS=$(vault read -format=json "${CRED_PATH}"); then + fail "Vault read failed when requesting dynamic credentials from ${CRED_PATH}" +fi +DYN_USERNAME=$(echo "${DYNAMIC_CREDS}" | jq -r .data.username) +DYN_PASSWORD=$(echo "${DYNAMIC_CREDS}" | jq -r .data.password) +LEASE_ID=$(echo "${DYNAMIC_CREDS}" | jq -r .lease_id) +if [[ -z "${DYN_USERNAME}" || -z "${DYN_PASSWORD}" || \ +-z "${LEASE_ID}" || "${DYN_USERNAME}" == "null" || \ +"${DYN_PASSWORD}" == "null" || "${LEASE_ID}" == "null" ]]; then + fail "Invalid dynamic credentials returned: ${DYNAMIC_CREDS}" +fi +echo "==> Got dynamic username: ${DYN_USERNAME}" +echo "==> Got dynamic password: ${DYN_PASSWORD}" +echo "==> Lease ID: ${LEASE_ID}" + +# Build the DN for the dynamic user +DYN_DN=${LDAP_USER_DN_TPL/\{\{username\}\}/$DYN_USERNAME} + +echo "==> Verifying login with dynamic credentials" +if ldapwhoami -h "${LDAP_HOST}:${LDAP_PORT}" -x -w "${DYN_PASSWORD}" -D "${DYN_DN}"; then + echo "[OK] Dynamic user login succeeded." +else + echo "[ERROR] Dynamic user login failed!" + exit 1 +fi + +echo "==> Revoking dynamic credentials (deletes LDAP user)" +vault lease revoke "${LEASE_ID}" + +sleep 2 + +echo "==> Verifying dynamic user is deleted" +if ldapwhoami -h "${LDAP_HOST}:${LDAP_PORT}" -x -w "${DYN_PASSWORD}" -D "${DYN_DN}"; then + echo "[ERROR] Dynamic user still exists after lease revoke!" + exit 1 +else + echo "[OK] Dynamic user deleted as expected." +fi + +echo "==> Deleting dynamic role" +vault delete "${ROLE_PATH}" + +echo "==> Confirming dynamic role deletion" +if vault read "${ROLE_PATH}"; then + echo "[ERROR] Dynamic role still exists after deletion!" + exit 1 +else + echo "[OK] Dynamic role deleted successfully." 
+fi + +echo "==> Dynamic role CRUD and credential lifecycle test: SUCCESS" \ No newline at end of file diff --git a/enos/modules/dynamic_role_crud_api/variables.tf b/enos/modules/dynamic_role_crud_api/variables.tf new file mode 100644 index 0000000..a9a0456 --- /dev/null +++ b/enos/modules/dynamic_role_crud_api/variables.tf @@ -0,0 +1,53 @@ +variable "dynamic_role_ldif_templates_path" { + type = string + description = "LDIF files path" +} + +variable "hosts" { + description = "The target machines host addresses to use for the Vault cluster" + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) +} + +variable "vault_leader_ip" { + type = string + description = "Public IP of the Vault leader node" +} + +variable "plugin_mount_path" { + type = string + description = "Mount path for the plugin" +} + +variable "ldap_base_dn" { + type = string + description = "The common DN suffix" +} + +variable "ldap_host" { + type = string + description = "LDAP IP or hostname" +} + +variable "ldap_port" { + type = string + description = "LDAP port" +} + +variable "ldap_dynamic_user_role_name" { + type = string + description = "LDAP role name to be created" +} + +variable "vault_addr" { + type = string + description = "The Vault API address" +} + +variable "vault_root_token" { + type = string + description = "The Vault cluster root token" +} \ No newline at end of file From 0afdd1c57833649bac53e5aef81f029e234e5dea Mon Sep 17 00:00:00 2001 From: Hamza Shili <98858609+HamzaShili65@users.noreply.github.com> Date: Tue, 12 Aug 2025 10:57:50 -0700 Subject: [PATCH 10/26] Vault 37087 plugin quality test configure plugin and seal unseal vault with enos (#188) * add module to verify that vault is sealed * move plugin dir clean up and copying plugin binary out of plugin registration logic * add module references and descriptions for sealing and unsealing vault * make plugin_dir in vault dynamic * rename enos-scenario-openldap.hcl to 
enos-scenario-openldap-smoke.hcl * add restart scenario --- enos/enos-descriptions.hcl | 13 + enos/enos-modules.hcl | 22 + enos/enos-scenario-openldap-restart.hcl | 782 ++++++++++++++++++ ...p.hcl => enos-scenario-openldap-smoke.hcl} | 71 +- enos/modules/ec2_bootstrap_tools/main.tf | 9 +- enos/modules/setup_plugin/main.tf | 25 +- .../setup_plugin/scripts/plugin-copy.sh | 42 + .../setup_plugin/scripts/plugin-register.sh | 19 - .../vault_wait_for_cluster_sealed/main.tf | 62 ++ .../scripts/verify-vault-node-sealed.sh | 66 ++ 10 files changed, 1051 insertions(+), 60 deletions(-) create mode 100644 enos/enos-scenario-openldap-restart.hcl rename enos/{enos-scenario-openldap.hcl => enos-scenario-openldap-smoke.hcl} (96%) create mode 100644 enos/modules/setup_plugin/scripts/plugin-copy.sh create mode 100644 enos/modules/vault_wait_for_cluster_sealed/main.tf create mode 100644 enos/modules/vault_wait_for_cluster_sealed/scripts/verify-vault-node-sealed.sh diff --git a/enos/enos-descriptions.hcl b/enos/enos-descriptions.hcl index 60e7150..1c609a9 100644 --- a/enos/enos-descriptions.hcl +++ b/enos/enos-descriptions.hcl @@ -92,6 +92,11 @@ globals { Must have the 'edition' variant to be set to any Enterprise edition. EOF + restart_all_vault_nodes = <<-EOF + Restart all Vault nodes in the cluster. This is useful for testing the Vault cluster's + resilience to node restarts and ensuring that the cluster can recover and maintain its state. + EOF + static_role_crud_api = <<-EOF Tests the lifecycle of a static role via the Vault CRUD API. EOF @@ -100,6 +105,10 @@ globals { Build, register, and enable the Vault plugin. EOF + unseal_vault = <<-EOF + Unseal the Vault cluster using the configured seal mechanism. +EOF + verify_log_secrets = <<-EOF Verify that the vault audit log and systemd journal do not leak secret values. EOF @@ -109,6 +118,10 @@ globals { healthy and are voters. EOF + verify_vault_sealed = <<-EOF + Verify that the Vault cluster has successfully sealed. 
+ EOF + verify_vault_unsealed = <<-EOF Verify that the Vault cluster has successfully unsealed. EOF diff --git a/enos/enos-modules.hcl b/enos/enos-modules.hcl index b1df9aa..d811cb4 100644 --- a/enos/enos-modules.hcl +++ b/enos/enos-modules.hcl @@ -111,6 +111,10 @@ module "seal_pkcs11" { common_tags = var.tags } +module "seal_vault" { + source = "./modules/seal_vault" +} + // Register, and enable the Vault plugin module "setup_plugin" { source = "./modules/setup_plugin" @@ -155,12 +159,30 @@ module "vault_get_cluster_ips" { vault_install_dir = var.vault_install_dir } +module "vault_unseal_replication_followers" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_unseal_replication_followers?ref=${var.vault_repo_ref}" + + vault_install_dir = var.vault_install_dir +} + +module "vault_wait_for_cluster_sealed" { + source = "./modules/vault_wait_for_cluster_sealed" + + vault_install_dir = var.vault_install_dir +} + module "vault_wait_for_cluster_unsealed" { source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_wait_for_cluster_unsealed?ref=${var.vault_repo_ref}" vault_install_dir = var.vault_install_dir } +module "verify_log_secrets" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/verify_log_secrets?ref=${var.vault_repo_ref}" + + radar_license_path = var.vault_radar_license_path != null ? abspath(var.vault_radar_license_path) : null +} + module "vault_verify_raft_auto_join_voter" { source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_verify_raft_auto_join_voter?ref=${var.vault_repo_ref}" diff --git a/enos/enos-scenario-openldap-restart.hcl b/enos/enos-scenario-openldap-restart.hcl new file mode 100644 index 0000000..909948e --- /dev/null +++ b/enos/enos-scenario-openldap-restart.hcl @@ -0,0 +1,782 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +scenario "openldap_restart" { + description = <<-EOF + The scenario verifies that the Vault OpenLDAP secrets engine plugin works correctly after a restart of the Vault cluster. + + This scenario creates a Vault cluster with the OpenLDAP secrets engine plugin installed and configured, and starts an OpenLDAP server. + It then tests the plugin by creating static and dynamic roles, verifying that they can be created, read, updated, and deleted via the Vault API. + After that, it restarts all Vault nodes and verifies that the plugin still works correctly after the restart. + + # How to run this scenario + + For general instructions on running a scenario, refer to the Enos docs: https://eng-handbook.hashicorp.services/internal-tools/enos/running-a-scenario/ + For troubleshooting tips and common errors, see https://eng-handbook.hashicorp.services/internal-tools/enos/troubleshooting/. + + Variables required for all scenario variants: + - aws_ssh_private_key_path (more info about AWS SSH keypairs: https://eng-handbook.hashicorp.services/internal-tools/enos/getting-started/#set-your-aws-key-pair-name-and-private-key) + - aws_ssh_keypair_name + - vault_build_date* + - vault_product_version + - vault_revision* + + * If you don't already know what build date and revision you should be using, see + https://eng-handbook.hashicorp.services/internal-tools/enos/troubleshooting/#execution-error-expected-vs-got-for-vault-versioneditionrevisionbuild-date. + + Variables required for some scenario variants: + - artifactory_token (if using `artifact_source:artifactory` in your filter) + - aws_region (if different from the default value in enos-variables.hcl) + - distro_version_ (if different from the default version for your target + distro. 
See supported distros and default versions in the distro_version_ + definitions in enos-variables.hcl) + - vault_artifact_path (the path to where you have a Vault artifact already downloaded, + if using `artifact_source:crt` in your filter) + - vault_license_path (if using an ENT edition of Vault) + EOF + + matrix { + arch = global.archs + artifact_source = global.artifact_sources + ldap_artifact_source = global.ldap_artifact_sources + artifact_type = global.artifact_types + backend = global.backends + config_mode = global.config_modes + distro = global.distros + edition = global.editions + ip_version = global.ip_versions + seal = global.seals + + // Our local builder always creates bundles + exclude { + artifact_source = ["local"] + ldap_artifact_source = ["local"] + artifact_type = ["package"] + } + + // PKCS#11 can only be used on ent.hsm and ent.hsm.fips1403. + exclude { + seal = ["pkcs11"] + edition = [for e in matrix.edition : e if !strcontains(e, "hsm")] + } + + // softhsm packages not available for leap/sles. + exclude { + seal = ["pkcs11"] + distro = ["leap", "sles"] + } + } + + terraform_cli = terraform_cli.default + terraform = terraform.default + providers = [ + provider.aws.default, + provider.enos.ec2_user, + provider.enos.ubuntu + ] + + locals { + artifact_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null + ldap_artifact_path = matrix.ldap_artifact_source != "artifactory" ? abspath(var.ldap_artifact_path) : null + enos_provider = { + amzn = provider.enos.ec2_user + leap = provider.enos.ec2_user + rhel = provider.enos.ec2_user + sles = provider.enos.ec2_user + ubuntu = provider.enos.ubuntu + } + manage_service = matrix.artifact_type == "bundle" + } + + step "build_vault" { + description = global.description.build_vault + module = "build_vault_${matrix.artifact_source}" + + variables { + build_tags = var.vault_local_build_tags != null ? 
var.vault_local_build_tags : global.build_tags[matrix.edition] + artifact_path = local.artifact_path + goarch = matrix.arch + goos = "linux" + artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null + artifactory_repo = matrix.artifact_source == "artifactory" ? var.artifactory_repo : null + artifactory_token = matrix.artifact_source == "artifactory" ? var.artifactory_token : null + arch = matrix.artifact_source == "artifactory" ? matrix.arch : null + product_version = var.vault_product_version + artifact_type = matrix.artifact_type + distro = matrix.artifact_source == "artifactory" ? matrix.distro : null + edition = matrix.artifact_source == "artifactory" ? matrix.edition : null + revision = var.vault_revision + } + } + + step "ec2_info" { + description = global.description.ec2_info + module = module.ec2_info + } + + step "create_vpc" { + description = global.description.create_vpc + module = module.create_vpc + + variables { + common_tags = global.tags + ip_version = matrix.ip_version + } + } + + step "read_vault_license" { + description = global.description.read_vault_license + skip_step = matrix.edition == "ce" + module = module.read_license + + variables { + file_name = global.vault_license_path + } + } + + step "create_seal_key" { + description = global.description.create_seal_key + module = "seal_${matrix.seal}" + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_id = step.create_vpc.id + common_tags = global.tags + } + } + + step "create_vault_cluster_targets" { + description = global.description.create_vault_cluster_targets + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_tag_key = global.vault_tag_key + common_tags = global.tags + seal_key_names = 
step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_vault_cluster" { + description = global.description.create_vault_cluster + module = module.vault_cluster + depends_on = [ + step.build_vault, + step.create_vault_cluster_targets, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + // verified in modules + quality.vault_artifact_bundle, + quality.vault_artifact_deb, + quality.vault_artifact_rpm, + quality.vault_audit_log, + quality.vault_audit_socket, + quality.vault_audit_syslog, + quality.vault_autojoin_aws, + quality.vault_config_env_variables, + quality.vault_config_file, + quality.vault_config_log_level, + quality.vault_init, + quality.vault_license_required_ent, + quality.vault_listener_ipv4, + quality.vault_listener_ipv6, + quality.vault_service_start, + quality.vault_storage_backend_raft, + // verified in enos_vault_start resource + quality.vault_api_sys_config_read, + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_health_read, + quality.vault_api_sys_host_info_read, + quality.vault_api_sys_replication_status_read, + quality.vault_api_sys_seal_status_api_read_matches_sys_health, + quality.vault_api_sys_storage_raft_autopilot_configuration_read, + quality.vault_api_sys_storage_raft_autopilot_state_read, + quality.vault_api_sys_storage_raft_configuration_read, + quality.vault_cli_status_exit_code, + quality.vault_service_systemd_notified, + quality.vault_service_systemd_unit, + ] + + variables { + artifactory_release = matrix.artifact_source == "artifactory" ? 
step.build_vault.vault_artifactory_release : null + backend_cluster_name = null + backend_cluster_tag_key = global.backend_tag_key + cluster_name = step.create_vault_cluster_targets.cluster_name + config_mode = matrix.config_mode + enable_audit_devices = var.vault_enable_audit_devices + hosts = step.create_vault_cluster_targets.hosts + install_dir = global.vault_install_dir[matrix.artifact_type] + ip_version = matrix.ip_version + license = matrix.edition != "ce" ? step.read_vault_license.license : null + local_artifact_path = local.artifact_path + manage_service = local.manage_service + packages = concat(global.packages, global.distro_packages[matrix.distro][global.distro_version[matrix.distro]]) + seal_attributes = step.create_seal_key.attributes + seal_type = matrix.seal + storage_backend = matrix.backend + } + } + + step "bootstrap_vault_cluster_targets" { + description = global.description.bootstrap_vault_cluster_targets + module = module.bootstrap_vault_cluster_targets + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + unseal_keys = step.create_vault_cluster.unseal_keys_b64 + threshold = step.create_vault_cluster.unseal_threshold + } + } + + step "get_local_metadata" { + description = global.description.get_local_metadata + skip_step = matrix.artifact_source != "local" + module = module.get_local_metadata + } + + // Wait for our cluster to elect a leader + step "wait_for_new_leader" { + description = global.description.wait_for_cluster_to_have_leader + module = module.vault_wait_for_leader + depends_on = [step.create_vault_cluster, + step.bootstrap_vault_cluster_targets] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_leader_read, + quality.vault_unseal_ha_leader_election, + ] + + variables { + timeout = 120 // seconds 
+ ip_version = matrix.ip_version + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "get_vault_cluster_ips" { + description = global.description.get_vault_cluster_ip_addresses + module = module.vault_get_cluster_ips + depends_on = [step.wait_for_new_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_leader_read, + quality.vault_cli_operator_members, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + + step "verify_vault_unsealed" { + description = global.description.verify_vault_unsealed + module = module.vault_wait_for_cluster_unsealed + depends_on = [step.wait_for_new_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_seal_awskms, + quality.vault_seal_pkcs11, + quality.vault_seal_shamir, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + } + } + + step "verify_vault_version" { + description = global.description.verify_vault_version + module = module.vault_verify_version + depends_on = [step.verify_vault_unsealed] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_version_history_keys, + quality.vault_api_sys_version_history_key_info, + quality.vault_version_build_date, + quality.vault_version_edition, + quality.vault_version_release, + ] + + variables { + 
hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_edition = matrix.edition + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version + vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision + vault_build_date = matrix.artifact_source == "local" ? step.get_local_metadata.build_date : var.vault_build_date + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_raft_auto_join_voter" { + description = global.description.verify_raft_cluster_all_nodes_are_voters + skip_step = matrix.backend != "raft" + module = module.vault_verify_raft_auto_join_voter + depends_on = [ + step.verify_vault_unsealed, + step.get_vault_cluster_ips + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = quality.vault_raft_voters + + variables { + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "build_ldap" { + description = global.description.build_ldap + module = "build_ldap_${matrix.ldap_artifact_source}" + + variables { + goarch = matrix.arch + goos = "linux" + artifactory_host = matrix.ldap_artifact_source == "artifactory" ? var.artifactory_host : null + artifactory_repo = matrix.ldap_artifact_source == "artifactory" ? var.plugin_artifactory_repo : null + artifactory_token = matrix.ldap_artifact_source == "artifactory" ? var.artifactory_token : null + arch = matrix.ldap_artifact_source == "artifactory" ? matrix.arch : null + artifact_type = matrix.ldap_artifact_source == "artifactory" ? 
"bundle" : null + product_version = var.ldap_plugin_version + revision = var.ldap_revision + plugin_name = var.plugin_name + makefile_dir = matrix.ldap_artifact_source == "local" ? var.makefile_dir : null + plugin_dest_dir = matrix.ldap_artifact_source == "local" ? var.plugin_dest_dir : null + } + } + + step "create_ldap_server_target" { + description = global.description.create_ldap_server_target + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_tag_key = global.ldap_tag_key + common_tags = global.tags + vpc_id = step.create_vpc.id + instance_count = 1 + } + } + + step "create_ldap_server" { + description = global.description.create_ldap_server + module = module.create_backend_server + depends_on = [step.create_ldap_server_target] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + hosts = step.create_ldap_server_target.hosts + ldap_tag = var.ldap_tag + ldap_port = global.ports.ldap.port + } + } + + step "setup_plugin" { + description = global.description.setup_plugin + module = module.setup_plugin + depends_on = [ + step.get_vault_cluster_ips, + step.create_ldap_server, + step.verify_vault_unsealed, + step.build_ldap + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + artifactory_release = matrix.ldap_artifact_source == "artifactory" ? step.build_ldap.ldap_artifactory_release : null + release = matrix.ldap_artifact_source == "releases" ? { version = var.ldap_plugin_version, edition = "ce" } : null + hosts = step.create_vault_cluster_targets.hosts + local_artifact_path = matrix.ldap_artifact_source == "local" ? 
local.ldap_artifact_path : null + + + vault_leader_ip = step.get_vault_cluster_ips.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + + plugin_name = var.plugin_name + plugin_dir_vault = var.plugin_dir_vault + plugin_mount_path = var.plugin_mount_path + } + } + + step "configure_plugin" { + description = global.description.configure_plugin + module = module.configure_plugin + depends_on = [step.setup_plugin] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_leader_ip = step.get_vault_cluster_ips.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + + plugin_mount_path = var.plugin_mount_path + ldap_host = step.create_ldap_server.ldap_ip_address + ldap_port = step.create_ldap_server.ldap_port + ldap_base_dn = var.ldap_base_dn + ldap_bind_pass = var.ldap_bind_pass + ldap_schema = var.ldap_schema + } + } + + step "test_static_role_crud_api" { + description = global.description.static_role_crud_api + module = module.static_role_crud_api + depends_on = [step.configure_plugin] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_leader_ip = step.get_vault_cluster_ips.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + plugin_mount_path = var.plugin_mount_path + ldap_host = step.create_ldap_server.ldap_ip_address + ldap_port = step.create_ldap_server.ldap_port + ldap_base_dn = var.ldap_base_dn + ldap_bind_pass = var.ldap_bind_pass + ldap_user_role_name = var.ldap_user_role_name + ldap_username = var.ldap_username + ldap_user_old_password = var.ldap_user_old_password + } + } + + step "test_dynamic_role_crud_api" { + description = global.description.dynamic_role_crud_api + module = module.dynamic_role_crud_api + depends_on = 
[step.configure_plugin] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_leader_ip = step.get_vault_cluster_ips.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + hosts = step.create_vault_cluster_targets.hosts + + plugin_mount_path = var.plugin_mount_path + ldap_host = step.create_ldap_server.ldap_ip_address + ldap_port = step.create_ldap_server.ldap_port + ldap_base_dn = var.ldap_base_dn + dynamic_role_ldif_templates_path = var.dynamic_role_ldif_templates_path + ldap_dynamic_user_role_name = var.ldap_dynamic_user_role_name + } + } + + step "verify_log_secrets" { + skip_step = !var.vault_enable_audit_devices || !var.verify_log_secrets + + description = global.description.verify_log_secrets + module = module.verify_log_secrets + depends_on = [ + step.verify_vault_unsealed, + step.test_static_role_crud_api, + step.test_dynamic_role_crud_api + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_audit_log_secrets, + quality.vault_journal_secrets, + quality.vault_radar_index_create, + quality.vault_radar_scan_file, + ] + + variables { + audit_log_file_path = step.create_vault_cluster.audit_device_file_path + leader_host = step.get_vault_cluster_ips.leader_host + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "restart_all_vault_nodes" { + description = global.description.restart_all_vault_nodes + module = module.restart_vault + depends_on = [ + step.get_vault_cluster_ips, + step.test_dynamic_role_crud_api, + step.verify_raft_auto_join_voter, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + } + 
} + + step "verify_vault_sealed_after_restart" { + description = global.description.verify_vault_sealed + module = module.vault_wait_for_cluster_sealed + depends_on = [ + step.restart_all_vault_nodes + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + } + } + + step "unseal_vault" { + description = global.description.unseal_vault + module = module.vault_unseal_replication_followers + depends_on = [step.verify_vault_sealed_after_restart] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_seal_type = matrix.seal + vault_unseal_keys = step.create_vault_cluster.unseal_keys_hex + } + } + + step "verify_vault_unsealed_after_restart" { + description = global.description.verify_vault_unsealed + module = module.vault_wait_for_cluster_unsealed + depends_on = [step.unseal_vault] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_seal_awskms, + quality.vault_seal_pkcs11, + quality.vault_seal_shamir, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + } + } + + step "get_vault_cluster_ips_after_restart" { + description = global.description.get_vault_cluster_ip_addresses + module = module.vault_get_cluster_ips + depends_on = [step.verify_vault_unsealed_after_restart] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_leader_read, + quality.vault_cli_operator_members, 
+ ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "test_static_role_crud_api_after_restart" { + description = global.description.static_role_crud_api + module = module.static_role_crud_api + depends_on = [step.get_vault_cluster_ips_after_restart] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_leader_ip = step.get_vault_cluster_ips_after_restart.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + plugin_mount_path = var.plugin_mount_path + ldap_host = step.create_ldap_server.ldap_ip_address + ldap_port = step.create_ldap_server.ldap_port + ldap_base_dn = var.ldap_base_dn + ldap_bind_pass = var.ldap_bind_pass + ldap_user_role_name = var.ldap_user_role_name + ldap_username = var.ldap_username + ldap_user_old_password = var.ldap_user_old_password + } + } + + step "test_dynamic_role_crud_api_after_restart" { + description = global.description.dynamic_role_crud_api + module = module.dynamic_role_crud_api + depends_on = [ + step.get_vault_cluster_ips_after_restart + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_leader_ip = step.get_vault_cluster_ips_after_restart.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + hosts = step.create_vault_cluster_targets.hosts + + plugin_mount_path = var.plugin_mount_path + ldap_host = step.create_ldap_server.ldap_ip_address + ldap_port = step.create_ldap_server.ldap_port + ldap_base_dn = var.ldap_base_dn + dynamic_role_ldif_templates_path = var.dynamic_role_ldif_templates_path + ldap_dynamic_user_role_name = 
var.ldap_dynamic_user_role_name + } + } + + + output "audit_device_file_path" { + description = "The file path for the file audit device, if enabled" + value = step.create_vault_cluster.audit_device_file_path + } + + output "cluster_name" { + description = "The Vault cluster name" + value = step.create_vault_cluster.cluster_name + } + + output "hosts" { + description = "The Vault cluster target hosts" + value = step.create_vault_cluster.hosts + } + + output "private_ips" { + description = "The Vault cluster private IPs" + value = step.create_vault_cluster.private_ips + } + + output "public_ips" { + description = "The Vault cluster public IPs" + value = step.create_vault_cluster.public_ips + } + + output "root_token" { + description = "The Vault cluster root token" + value = step.create_vault_cluster.root_token + } + + output "recovery_key_shares" { + description = "The Vault cluster recovery key shares" + value = step.create_vault_cluster.recovery_key_shares + } + + output "recovery_keys_b64" { + description = "The Vault cluster recovery keys b64" + value = step.create_vault_cluster.recovery_keys_b64 + } + + output "recovery_keys_hex" { + description = "The Vault cluster recovery keys hex" + value = step.create_vault_cluster.recovery_keys_hex + } + + output "seal_key_attributes" { + description = "The Vault cluster seal attributes" + value = step.create_seal_key.attributes + } + + output "unseal_keys_b64" { + description = "The Vault cluster unseal keys" + value = step.create_vault_cluster.unseal_keys_b64 + } + + output "unseal_keys_hex" { + description = "The Vault cluster unseal keys hex" + value = step.create_vault_cluster.unseal_keys_hex + } +} \ No newline at end of file diff --git a/enos/enos-scenario-openldap.hcl b/enos/enos-scenario-openldap-smoke.hcl similarity index 96% rename from enos/enos-scenario-openldap.hcl rename to enos/enos-scenario-openldap-smoke.hcl index 1c38336..f5ea9d5 100644 --- a/enos/enos-scenario-openldap.hcl +++ 
b/enos/enos-scenario-openldap-smoke.hcl @@ -1,15 +1,14 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -scenario "openldap" { +scenario "openldap_smoke" { description = <<-EOF The scenario deploys a Vault cluster and a test OpenLDAP server to act as the LDAP backend for integration. It enables and configures the OpenLDAP secrets engine plugin in Vault, connecting it to the deployed LDAP server, then performs plugin configuration and usage tests to verify correct integration and expected functionality of the secrets engine. This scenario validates that the Vault OpenLDAP secrets engine plugin works as expected after a fresh installation, - covering plugin setup, configuration, and end-to-end workflow testing. - //TODO: add testing for static and dynamic roles + covering plugin setup, configuration, and end-to-end workflow testing of the static roles, and dynamic roles API endpoints # How to run this scenario @@ -283,7 +282,7 @@ scenario "openldap" { } } - step "get_leader_ip" { + step "get_vault_cluster_ips" { description = global.description.get_vault_cluster_ip_addresses module = module.vault_get_cluster_ips depends_on = [step.wait_for_new_leader] @@ -359,6 +358,30 @@ scenario "openldap" { } } + step "verify_raft_auto_join_voter" { + description = global.description.verify_raft_cluster_all_nodes_are_voters + skip_step = matrix.backend != "raft" + module = module.vault_verify_raft_auto_join_voter + depends_on = [ + step.verify_vault_unsealed, + step.get_vault_cluster_ips + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = quality.vault_raft_voters + + variables { + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + step "build_ldap" { description = global.description.build_ldap module = 
"build_ldap_${matrix.ldap_artifact_source}" @@ -417,7 +440,7 @@ scenario "openldap" { description = global.description.setup_plugin module = module.setup_plugin depends_on = [ - step.get_leader_ip, + step.get_vault_cluster_ips, step.create_ldap_server, step.verify_vault_unsealed, step.build_ldap @@ -434,7 +457,7 @@ scenario "openldap" { local_artifact_path = matrix.ldap_artifact_source == "local" ? local.ldap_artifact_path : null - vault_leader_ip = step.get_leader_ip.leader_host.public_ip + vault_leader_ip = step.get_vault_cluster_ips.leader_host.public_ip vault_addr = step.create_vault_cluster.api_addr_localhost vault_root_token = step.create_vault_cluster.root_token @@ -454,7 +477,7 @@ scenario "openldap" { } variables { - vault_leader_ip = step.get_leader_ip.leader_host.public_ip + vault_leader_ip = step.get_vault_cluster_ips.leader_host.public_ip vault_addr = step.create_vault_cluster.api_addr_localhost vault_root_token = step.create_vault_cluster.root_token @@ -477,7 +500,7 @@ scenario "openldap" { } variables { - vault_leader_ip = step.get_leader_ip.leader_host.public_ip + vault_leader_ip = step.get_vault_cluster_ips.leader_host.public_ip vault_addr = step.create_vault_cluster.api_addr_localhost vault_root_token = step.create_vault_cluster.root_token plugin_mount_path = var.plugin_mount_path @@ -494,14 +517,14 @@ scenario "openldap" { step "test_dynamic_role_crud_api" { description = global.description.dynamic_role_crud_api module = module.dynamic_role_crud_api - depends_on = [step.setup_plugin] + depends_on = [step.configure_plugin] providers = { enos = local.enos_provider[matrix.distro] } variables { - vault_leader_ip = step.get_leader_ip.leader_host.public_ip + vault_leader_ip = step.get_vault_cluster_ips.leader_host.public_ip vault_addr = step.create_vault_cluster.api_addr_localhost vault_root_token = step.create_vault_cluster.root_token hosts = step.create_vault_cluster_targets.hosts @@ -515,36 +538,15 @@ scenario "openldap" { } } - - step 
"verify_raft_auto_join_voter" { - description = global.description.verify_raft_cluster_all_nodes_are_voters - skip_step = matrix.backend != "raft" - module = module.vault_verify_raft_auto_join_voter - depends_on = [step.verify_vault_unsealed] - - providers = { - enos = local.enos_provider[matrix.distro] - } - - verifies = quality.vault_raft_voters - - variables { - hosts = step.create_vault_cluster_targets.hosts - ip_version = matrix.ip_version - vault_addr = step.create_vault_cluster.api_addr_localhost - vault_install_dir = global.vault_install_dir[matrix.artifact_type] - vault_root_token = step.create_vault_cluster.root_token - } - } - - step "verify_log_secrets" { skip_step = !var.vault_enable_audit_devices || !var.verify_log_secrets description = global.description.verify_log_secrets module = module.verify_log_secrets depends_on = [ - step.verify_secrets_engines_read, + step.verify_vault_unsealed, + step.test_static_role_crud_api, + step.test_dynamic_role_crud_api ] providers = { @@ -566,7 +568,6 @@ scenario "openldap" { } } - output "audit_device_file_path" { description = "The file path for the file audit device, if enabled" value = step.create_vault_cluster.audit_device_file_path diff --git a/enos/modules/ec2_bootstrap_tools/main.tf b/enos/modules/ec2_bootstrap_tools/main.tf index e6c08fd..c1be6c1 100644 --- a/enos/modules/ec2_bootstrap_tools/main.tf +++ b/enos/modules/ec2_bootstrap_tools/main.tf @@ -55,10 +55,11 @@ resource "enos_remote_exec" "create_plugin_directory" { # Add plugin directory to the config file resource "enos_remote_exec" "add_plugin_directory_to_config" { + depends_on = [enos_remote_exec.create_plugin_directory] for_each = var.hosts inline = [ - "echo 'plugin_directory = \"/etc/vault/plugins\"' | sudo tee -a /etc/vault.d/vault.hcl" + "echo \"plugin_directory = \\\"${var.plugin_dir_vault}\\\"\" | sudo tee -a /etc/vault.d/vault.hcl" ] transport = { @@ -70,6 +71,8 @@ resource "enos_remote_exec" "add_plugin_directory_to_config" { # Restart 
Vault service on all hosts resource "enos_remote_exec" "restart_vault" { + depends_on = [enos_remote_exec.add_plugin_directory_to_config] + for_each = var.hosts inline = [ @@ -85,10 +88,10 @@ resource "enos_remote_exec" "restart_vault" { # Unseal Vault resource "enos_remote_exec" "unseal_vault" { - for_each = var.hosts - depends_on = [enos_remote_exec.restart_vault] + for_each = var.hosts + scripts = [abspath("${path.module}/scripts/vault-unseal.sh")] environment = { diff --git a/enos/modules/setup_plugin/main.tf b/enos/modules/setup_plugin/main.tf index 72ab849..9c43509 100644 --- a/enos/modules/setup_plugin/main.tf +++ b/enos/modules/setup_plugin/main.tf @@ -25,9 +25,28 @@ resource "enos_bundle_install" "ldap" { } } -# Step 2: Register the plugin -resource "enos_remote_exec" "plugin_register" { +# Step 2: Clean up the plugin directory and copy the plugin binary there +resource "enos_remote_exec" "plugin_copy" { depends_on = [enos_bundle_install.ldap] + for_each = var.hosts + scripts = [abspath("${path.module}/scripts/plugin-copy.sh")] + environment = { + PLUGIN_BINARY_SRC = "/tmp/${var.plugin_name}" + PLUGIN_DIR_VAULT = var.plugin_dir_vault + PLUGIN_NAME = var.plugin_name + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + } + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +# Step 3: Register the plugin +resource "enos_remote_exec" "plugin_register" { + depends_on = [enos_remote_exec.plugin_copy] scripts = [abspath("${path.module}/scripts/plugin-register.sh")] environment = { PLUGIN_BINARY_SRC = "/tmp/${var.plugin_name}" @@ -43,7 +62,7 @@ resource "enos_remote_exec" "plugin_register" { } } -# Step 3: Enable the plugin +# Step 4: Enable the plugin resource "enos_remote_exec" "plugin_enable" { depends_on = [enos_remote_exec.plugin_register] scripts = [abspath("${path.module}/scripts/plugin-enable.sh")] diff --git a/enos/modules/setup_plugin/scripts/plugin-copy.sh b/enos/modules/setup_plugin/scripts/plugin-copy.sh new file mode 
100644 index 0000000..ac9aeda --- /dev/null +++ b/enos/modules/setup_plugin/scripts/plugin-copy.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 +set -e + +# Required ENV vars: +# PLUGIN_BINARY_SRC - Where the plugin binary is (built artifact) +# PLUGIN_DIR_VAULT - Vault's plugin directory +# PLUGIN_NAME - Name to register in Vault + +export VAULT_ADDR +export VAULT_TOKEN + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$PLUGIN_BINARY_SRC" ]] && fail "PLUGIN_BINARY_SRC env variable has not been set" +[[ -z "$PLUGIN_DIR_VAULT" ]] && fail "PLUGIN_DIR_VAULT env variable has not been set" +[[ -z "$PLUGIN_NAME" ]] && fail "PLUGIN_NAME env variable has not been set" + +echo "[register] Registering plugin: $PLUGIN_NAME" + +# Determine plugin binary source path (handle directories) +if [[ -d "$PLUGIN_BINARY_SRC" ]]; then + BINARY_PATH="$PLUGIN_BINARY_SRC/$PLUGIN_NAME" +else + BINARY_PATH="$PLUGIN_BINARY_SRC" +fi + +# Ensure the Vault plugin directory exists +mkdir -p "${PLUGIN_DIR_VAULT}" + +# Clean up any previous plugin binary +sudo rm -f "${PLUGIN_DIR_VAULT}/${PLUGIN_NAME}" || true + +# Copy the binary to Vault's plugin directory +sudo cp "${BINARY_PATH}" "${PLUGIN_DIR_VAULT}/${PLUGIN_NAME}" + +# Set permissions to ensure Vault can execute the plugin binary +sudo chmod 755 "${PLUGIN_DIR_VAULT}/${PLUGIN_NAME}" \ No newline at end of file diff --git a/enos/modules/setup_plugin/scripts/plugin-register.sh b/enos/modules/setup_plugin/scripts/plugin-register.sh index cb8910e..fed6bae 100755 --- a/enos/modules/setup_plugin/scripts/plugin-register.sh +++ b/enos/modules/setup_plugin/scripts/plugin-register.sh @@ -22,25 +22,6 @@ fail() { echo "[register] Registering plugin: $PLUGIN_NAME" -# Determine plugin binary source path (handle directories) -if [[ -d "$PLUGIN_BINARY_SRC" ]]; then - BINARY_PATH="$PLUGIN_BINARY_SRC/$PLUGIN_NAME" -else - BINARY_PATH="$PLUGIN_BINARY_SRC" -fi - -# Ensure the Vault plugin directory 
exists -mkdir -p "${PLUGIN_DIR_VAULT}" - -# Clean up any previous plugin binary -sudo rm -f "${PLUGIN_DIR_VAULT}/${PLUGIN_NAME}" || true - -# Copy the binary to Vault's plugin directory -sudo cp "${BINARY_PATH}" "${PLUGIN_DIR_VAULT}/${PLUGIN_NAME}" - -# Set permissions to ensure Vault can execute the plugin binary -sudo chmod 755 "${PLUGIN_DIR_VAULT}/${PLUGIN_NAME}" - # Calculate shasum SHASUM="$(shasum -a 256 "${PLUGIN_DIR_VAULT}/${PLUGIN_NAME}" | awk '{print $1}')" if [[ -z "$SHASUM" ]]; then diff --git a/enos/modules/vault_wait_for_cluster_sealed/main.tf b/enos/modules/vault_wait_for_cluster_sealed/main.tf new file mode 100644 index 0000000..17f9e5b --- /dev/null +++ b/enos/modules/vault_wait_for_cluster_sealed/main.tf @@ -0,0 +1,62 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "hosts" { + description = "The Vault cluster instances to verify sealed" + type = map(object({ + ipv6 = string + private_ip = string + public_ip = string + })) +} + +variable "retry_interval" { + description = "Seconds to wait between retries" + type = number + default = 2 +} + +variable "timeout" { + description = "Max seconds to wait before timing out" + type = number + default = 60 +} + +variable "vault_addr" { + description = "Vault API address" + type = string +} + +variable "vault_install_dir" { + description = "Directory where the Vault binary is installed" + type = string +} + +resource "enos_remote_exec" "verify_node_sealed" { + for_each = var.hosts + + scripts = [abspath("${path.module}/scripts/verify-vault-node-sealed.sh")] + + environment = { + HOST_IPV4 = each.value.public_ip + HOST_IPV6 = each.value.ipv6 + RETRY_INTERVAL = var.retry_interval + TIMEOUT_SECONDS = var.timeout + VAULT_ADDR = var.vault_addr + VAULT_INSTALL_DIR = var.vault_install_dir + } + + transport = { + ssh = { + host = each.value.public_ip + } + } +} \ No 
newline at end of file diff --git a/enos/modules/vault_wait_for_cluster_sealed/scripts/verify-vault-node-sealed.sh b/enos/modules/vault_wait_for_cluster_sealed/scripts/verify-vault-node-sealed.sh new file mode 100644 index 0000000..8a600ec --- /dev/null +++ b/enos/modules/vault_wait_for_cluster_sealed/scripts/verify-vault-node-sealed.sh @@ -0,0 +1,66 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set" +[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +getStatus() { + $binpath status -format json +} + +isSealed() { + local status ret + status=$(getStatus) + ret=$? + + if [[ $ret -eq 1 ]]; then + echo "failed to get vault status" 1>&2 + return 1 + fi + + if [[ -z "$status" ]]; then + echo "vault status output empty" 1>&2 + return 1 + fi + + if [[ $ret -eq 2 ]]; then + echo "vault is sealed" 1>&2 + return 2 + fi + + echo "vault is unsealed" + return 0 +} + +begin_time=$(date +%s) +end_time=$((begin_time + TIMEOUT_SECONDS)) +while [ "$(date +%s)" -lt "$end_time" ]; do + echo "waiting for vault to be sealed..." 
+ + if isSealed; sealed_rc=$?; [ $sealed_rc -eq 2 ]; then + exit 0 + fi + + sleep "$RETRY_INTERVAL" +done + +if [ -n "$HOST_IPV6" ]; then + fail "timed out waiting for Vault cluster on ${HOST_IPV6} to be sealed" +fi +if [ -n "$HOST_IPV4" ]; then + fail "timed out waiting for Vault cluster on ${HOST_IPV4} to be sealed" +fi +fail "timed out waiting for Vault cluster to be sealed" From 2eeec4ba3c1ae511d3aaa0113a5456a997be388f Mon Sep 17 00:00:00 2001 From: Hamza Shili <98858609+HamzaShili65@users.noreply.github.com> Date: Tue, 12 Aug 2025 11:08:35 -0700 Subject: [PATCH 11/26] Vault 38807 plugin quality configure plugin and test library api crud with enos (#189) * add module to verify that vault is sealed * move plugin dir clean up and copying plugin binary out of plugin registration logic * add module references and descriptions for sealing and unsealing vault * make plugin_dir in vault dynamic * rename enos-scenario-openldap.hcl to enos-scenario-openldap-smoke.hcl * add restart scenario * add module for testing library endpoint api * add module refs and description for testing library crud api * add steps to test library endpoint api for both smoke and restart scenarios * add variables for the library endpoint test step * fmt --- enos/enos-descriptions.hcl | 4 + enos/enos-modules.hcl | 4 + enos/enos-scenario-openldap-restart.hcl | 55 +++++- enos/enos-scenario-openldap-smoke.hcl | 28 ++- enos/enos-variables.hcl | 12 ++ enos/modules/ec2_bootstrap_tools/main.tf | 2 +- enos/modules/library_crud_api/main.tf | 31 ++++ .../library_crud_api/scripts/library.sh | 165 ++++++++++++++++++ enos/modules/library_crud_api/variables.tf | 47 +++++ 9 files changed, 344 insertions(+), 4 deletions(-) create mode 100644 enos/modules/library_crud_api/main.tf create mode 100644 enos/modules/library_crud_api/scripts/library.sh create mode 100644 enos/modules/library_crud_api/variables.tf diff --git a/enos/enos-descriptions.hcl b/enos/enos-descriptions.hcl index 1c609a9..cf5cd00 100644 --- 
a/enos/enos-descriptions.hcl +++ b/enos/enos-descriptions.hcl @@ -86,6 +86,10 @@ globals { and follower nodes. EOF + library_crud_api = <<-EOF + Tests the lifecycle of a dynamic role via the Vault CRUD API. + EOF + read_vault_license = <<-EOF When deploying Vault Enterprise, ensure a Vault Enterprise license is present on disk and read its contents so that we can utilize it when configuring the Vault Enterprise cluster. diff --git a/enos/enos-modules.hcl b/enos/enos-modules.hcl index d811cb4..4ddc632 100644 --- a/enos/enos-modules.hcl +++ b/enos/enos-modules.hcl @@ -124,6 +124,10 @@ module "static_role_crud_api" { source = "./modules/static_role_crud_api" } +module "library_crud_api" { + source = "./modules/library_crud_api" +} + // create target instances using ec2:RunInstances module "target_ec2_instances" { source = "git::https://github.com/hashicorp/vault.git//enos/modules/target_ec2_instances?ref=${var.vault_repo_ref}" diff --git a/enos/enos-scenario-openldap-restart.hcl b/enos/enos-scenario-openldap-restart.hcl index 909948e..1e4ce45 100644 --- a/enos/enos-scenario-openldap-restart.hcl +++ b/enos/enos-scenario-openldap-restart.hcl @@ -260,7 +260,7 @@ scenario "openldap_restart" { description = global.description.wait_for_cluster_to_have_leader module = module.vault_wait_for_leader depends_on = [step.create_vault_cluster, - step.bootstrap_vault_cluster_targets] + step.bootstrap_vault_cluster_targets] providers = { enos = local.enos_provider[matrix.distro] @@ -537,6 +537,31 @@ scenario "openldap_restart" { } } + step "test_library_crud_api" { + description = global.description.library_crud_api + module = module.library_crud_api + depends_on = [ + step.configure_plugin, + step.test_static_role_crud_api + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_leader_ip = step.get_vault_cluster_ips.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = 
step.create_vault_cluster.root_token + plugin_mount_path = var.plugin_mount_path + ldap_host = step.create_ldap_server.ldap_ip_address + ldap_port = step.create_ldap_server.ldap_port + ldap_base_dn = var.ldap_base_dn + library_set_name = var.library_set_name + service_account_names = var.service_account_names + } + } + step "verify_log_secrets" { skip_step = !var.vault_enable_audit_devices || !var.verify_log_secrets @@ -572,8 +597,10 @@ scenario "openldap_restart" { module = module.restart_vault depends_on = [ step.get_vault_cluster_ips, + step.test_static_role_crud_api, step.test_dynamic_role_crud_api, - step.verify_raft_auto_join_voter, + step.test_library_crud_api, + step.verify_raft_auto_join_voter ] providers = { @@ -719,6 +746,30 @@ scenario "openldap_restart" { } } + step "test_library_crud_api_after_restart" { + description = global.description.library_crud_api + module = module.library_crud_api + depends_on = [ + step.get_vault_cluster_ips_after_restart, + step.test_static_role_crud_api_after_restart + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_leader_ip = step.get_vault_cluster_ips_after_restart.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + plugin_mount_path = var.plugin_mount_path + ldap_host = step.create_ldap_server.ldap_ip_address + ldap_port = step.create_ldap_server.ldap_port + ldap_base_dn = var.ldap_base_dn + library_set_name = var.library_set_name + service_account_names = var.service_account_names + } + } output "audit_device_file_path" { description = "The file path for the file audit device, if enabled" diff --git a/enos/enos-scenario-openldap-smoke.hcl b/enos/enos-scenario-openldap-smoke.hcl index f5ea9d5..eaaee2f 100644 --- a/enos/enos-scenario-openldap-smoke.hcl +++ b/enos/enos-scenario-openldap-smoke.hcl @@ -538,6 +538,31 @@ scenario "openldap_smoke" { } } + step "test_library_crud_api" { + 
description = global.description.library_crud_api + module = module.library_crud_api + depends_on = [ + step.configure_plugin, + step.test_static_role_crud_api + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_leader_ip = step.get_vault_cluster_ips.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + plugin_mount_path = var.plugin_mount_path + ldap_host = step.create_ldap_server.ldap_ip_address + ldap_port = step.create_ldap_server.ldap_port + ldap_base_dn = var.ldap_base_dn + library_set_name = var.library_set_name + service_account_names = var.service_account_names + } + } + step "verify_log_secrets" { skip_step = !var.vault_enable_audit_devices || !var.verify_log_secrets @@ -546,7 +571,8 @@ scenario "openldap_smoke" { depends_on = [ step.verify_vault_unsealed, step.test_static_role_crud_api, - step.test_dynamic_role_crud_api + step.test_dynamic_role_crud_api, + step.test_library_crud_api ] providers = { diff --git a/enos/enos-variables.hcl b/enos/enos-variables.hcl index e88f74f..a178293 100644 --- a/enos/enos-variables.hcl +++ b/enos/enos-variables.hcl @@ -139,6 +139,12 @@ variable "ldap_user_role_name" { default = "mary" } +variable "library_set_name" { + description = "The name of the library set to use for library CRUD API tests" + type = string + default = "dev-team" +} + variable "makefile_dir" { description = "Directory containing the Makefile for plugin build" type = string @@ -181,6 +187,12 @@ variable "project_name" { default = "vault-plugin-secrets-openldap-enos-integration" } +variable "service_account_names" { + description = "List of service account names to create for library CRUD API tests" + type = list(string) + default = ["staticuser", "bob.johnson", "mary.smith"] +} + variable "tags" { description = "Tags that will be applied to infrastructure resources that support tagging" type = map(string) diff --git 
a/enos/modules/ec2_bootstrap_tools/main.tf b/enos/modules/ec2_bootstrap_tools/main.tf index c1be6c1..d2339b3 100644 --- a/enos/modules/ec2_bootstrap_tools/main.tf +++ b/enos/modules/ec2_bootstrap_tools/main.tf @@ -56,7 +56,7 @@ resource "enos_remote_exec" "create_plugin_directory" { # Add plugin directory to the config file resource "enos_remote_exec" "add_plugin_directory_to_config" { depends_on = [enos_remote_exec.create_plugin_directory] - for_each = var.hosts + for_each = var.hosts inline = [ "echo \"plugin_directory = \\\"${var.plugin_dir_vault}\\\"\" | sudo tee -a /etc/vault.d/vault.hcl" diff --git a/enos/modules/library_crud_api/main.tf b/enos/modules/library_crud_api/main.tf new file mode 100644 index 0000000..fe179f8 --- /dev/null +++ b/enos/modules/library_crud_api/main.tf @@ -0,0 +1,31 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +resource "enos_remote_exec" "library_crud_api_test" { + scripts = ["${path.module}/scripts/library.sh"] + + environment = { + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + PLUGIN_PATH = var.plugin_mount_path + LDAP_HOST = var.ldap_host + LDAP_PORT = var.ldap_port + LDAP_BASE_DN = var.ldap_base_dn + LIBRARY_SET_NAME = var.library_set_name + SERVICE_ACCOUNT_NAMES = join(",", var.service_account_names) + } + + transport = { + ssh = { + host = var.vault_leader_ip + } + } +} diff --git a/enos/modules/library_crud_api/scripts/library.sh b/enos/modules/library_crud_api/scripts/library.sh new file mode 100644 index 0000000..42915ec --- /dev/null +++ b/enos/modules/library_crud_api/scripts/library.sh @@ -0,0 +1,165 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +# Required environment variables +[[ -z "$PLUGIN_PATH" ]] && fail "PLUGIN_PATH env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" +[[ -z "$LDAP_HOST" ]] && fail "LDAP_HOST env variable has not been set" +[[ -z "$LDAP_PORT" ]] && fail "LDAP_PORT env variable has not been set" +[[ -z "$LDAP_BASE_DN" ]] && fail "LDAP_BASE_DN env variable has not been set" +[[ -z "$LIBRARY_SET_NAME" ]] && fail "LIBRARY_SET_NAME env variable has not been set" +[[ -z "$SERVICE_ACCOUNT_NAMES" ]] && fail "SERVICE_ACCOUNT_NAMES env variable has not been set" + +export VAULT_ADDR +export VAULT_TOKEN + +LIB_PATH="${PLUGIN_PATH}/library/${LIBRARY_SET_NAME}" +STATUS_PATH="${LIB_PATH}/status" +CHECKOUT_PATH="${LIB_PATH}/check-out" +CHECKIN_PATH="${LIB_PATH}/check-in" +MANAGE_CHECKIN_PATH="${PLUGIN_PATH}/library/manage/${LIBRARY_SET_NAME}/check-in" + +# Verify SERVICE_ACCOUNT_NAMES parsing +IFS=',' read -r -a SA_LIST <<< "$SERVICE_ACCOUNT_NAMES" +if [[ ${#SA_LIST[@]} -lt 1 ]]; then + fail "SERVICE_ACCOUNT_NAMES must contain at least one account" +fi + +# Rotate root credentials +echo "==> Rotating root credentials" +vault write -f "${PLUGIN_PATH}/rotate-root" + +# Create library set +echo "==> Creating library set ${LIBRARY_SET_NAME}" +vault write "${LIB_PATH}" \ + service_account_names="${SERVICE_ACCOUNT_NAMES}" \ + ttl="1h" \ + max_ttl="2h" \ + disable_check_in_enforcement=false + +# Read library set +echo "==> Reading library set" +vault read "${LIB_PATH}" + +# List all library sets and verify ours is present +echo "==> Verifying library set appears in list" +LIST_OUTPUT=$(vault list "${PLUGIN_PATH}/library" 2>/dev/null) +echo "$LIST_OUTPUT" | grep -x "${LIBRARY_SET_NAME}" >/dev/null || fail "Library set '${LIBRARY_SET_NAME}' not found in list" + +# Check status +echo "==> 
Checking library set status" +vault read "${STATUS_PATH}" + +# Check out a service account +echo "==> Checking out a service account" +CRED_JSON=$(vault write -format=json "${CHECKOUT_PATH}" ttl="30m") +SA_NAME=$(echo "$CRED_JSON" | jq -r .data.service_account_name) +SA_PW=$(echo "$CRED_JSON" | jq -r .data.password) +LEASE_ID=$(echo "$CRED_JSON" | jq -r .lease_id) + +# Validate checkout output +if [[ -z "$SA_NAME" || "$SA_NAME" == "null" ]]; then + fail "No service_account_name returned from check-out" +fi +if [[ -z "$SA_PW" || "$SA_PW" == "null" ]]; then + fail "No password returned from check-out" +fi +if [[ -z "$LEASE_ID" || "$LEASE_ID" == "null" ]]; then + fail "No lease_id returned from check-out" +fi + +# Attempt second check-out should fail +echo "==> Verifying no second check-out is allowed" +if vault write -format=json "${CHECKOUT_PATH}" 2>/dev/null; then + fail "Unexpectedly succeeded second check-out: account wasn't exclusive" +else + echo "[OK] Second check-out is correctly unavailable" +fi + +# Status after checkout +echo "==> Status after check-out" +vault read "${STATUS_PATH}" + +# Renew the lease +echo "==> Renewing lease ${LEASE_ID}" +RENEW_JSON=$(vault lease renew -format=json "${LEASE_ID}") +RENEW_TTL=$(echo "$RENEW_JSON" | jq -r .lease_duration) +if [[ -z "$RENEW_TTL" || "$RENEW_TTL" == "null" ]]; then + fail "Lease renew failed: no lease_duration returned" +fi +# Revoke the lease (auto check-in) +echo "==> Revoking lease ${LEASE_ID} to auto check-in" +vault lease revoke "${LEASE_ID}" +sleep 2 +# Verify account available after revoke +echo "==> Verifying account is available after lease revoke" +POST_REVOKE_AVAIL=$(vault read -format=json "${STATUS_PATH}" | jq -r ".data[\"$SA_NAME\"].available") +if [[ "$POST_REVOKE_AVAIL" != "true" ]]; then + fail "Account '$SA_NAME' should be available after lease revoke" +fi +# Attempt check-in on already available account (should succeed with empty check_ins) +echo "==> Checking in already available account 
(expect no check_ins)" +CI_JSON=$(vault write -format=json "${CHECKIN_PATH}" service_account_names="${SA_NAME}") +CI_COUNT=$(echo "$CI_JSON" | jq -r '.data.check_ins | length') +if [[ "$CI_COUNT" -ne 0 ]]; then + fail "Expected 0 check_ins when checking in an already available account, got $CI_COUNT" +fi + +# Check the account back in +echo "==> Checking in ${SA_NAME}" +vault write "${CHECKIN_PATH}" service_account_names="${SA_NAME}" + +# Status after check-in +echo "==> Status after check-in" +vault read "${STATUS_PATH}" + +# Force check-in of all accounts +echo "==> Forcing manage-level check-in of all accounts" +vault write "${MANAGE_CHECKIN_PATH}" service_account_names="${SERVICE_ACCOUNT_NAMES}" + +# After force check-in, verify both accounts available +echo "==> Checking status after manage-level check-in" +STATUS_AFTER_MANAGE=$(vault read -format=json "${STATUS_PATH}") +for acct in "${SA_LIST[@]}"; do + avail=$(echo "$STATUS_AFTER_MANAGE" | jq -r ".data[\"$acct\"].available") + if [[ "$avail" != "true" ]]; then + fail "Account '$acct' should be available after manage-level check-in" + fi +done + +# Test TTL expiry automatic check-in +echo "==> Testing TTL expiry automatic check-in" +TTL_TEST_JSON=$(vault write -format=json "${CHECKOUT_PATH}" ttl="10s") +TTL_NAME=$(echo "$TTL_TEST_JSON" | jq -r .data.service_account_name) +echo "Checked out ${TTL_NAME} with 10s TTL, waiting 12s" +sleep 12 +POST_TTL_AVAIL=$(vault read -format=json "${STATUS_PATH}" | jq -r ".data[\"$TTL_NAME\"].available") +if [[ "$POST_TTL_AVAIL" != "true" ]]; then + fail "Account '$TTL_NAME' should be available after TTL expiry" +fi + +# Delete library set +echo "==> Deleting library set" +vault delete "${LIB_PATH}" + +# Confirm deletion and absence from list +echo "==> Confirming deletion" +if vault read "${LIB_PATH}" 2>/dev/null; then + fail "Library set still exists after deletion!" +else + echo "[OK] Library set deleted successfully." 
+fi +LIST_AFTER_DEL=$(vault list "${PLUGIN_PATH}/library" 2>/dev/null || true) + +# Ensure the set no longer appears +if echo "$LIST_AFTER_DEL" | grep -x "${LIBRARY_SET_NAME}" >/dev/null; then + fail "Library set '${LIBRARY_SET_NAME}' still in list after deletion" +fi diff --git a/enos/modules/library_crud_api/variables.tf b/enos/modules/library_crud_api/variables.tf new file mode 100644 index 0000000..92ac875 --- /dev/null +++ b/enos/modules/library_crud_api/variables.tf @@ -0,0 +1,47 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +variable "vault_leader_ip" { + type = string + description = "Public IP of the Vault leader node" +} + +variable "plugin_mount_path" { + type = string + description = "Mount path for the plugin" +} + +variable "ldap_host" { + type = string + description = "The LDAP server host" +} + +variable "ldap_port" { + type = string + description = "The LDAP server port" +} + +variable "ldap_base_dn" { + type = string + description = "The common DN suffix (e.g., dc=example,dc=com)" +} + +variable "library_set_name" { + type = string + description = "Name of the LDAP library set to create" +} + +variable "service_account_names" { + type = list(string) + description = "List of service account UIDs (under ou=users) for the library set" +} + +variable "vault_addr" { + type = string + description = "The Vault API address" +} + +variable "vault_root_token" { + type = string + description = "The Vault cluster root token" +} From ed377fe5ffcc97c030c1fbb94928fa1e9f36b18d Mon Sep 17 00:00:00 2001 From: Hamza Shili <98858609+HamzaShili65@users.noreply.github.com> Date: Mon, 18 Aug 2025 07:35:37 -0700 Subject: [PATCH 12/26] Vault 38809 plugin quality test configure plugin and vault leader change with enos (#191) * add scenario for leader change case * add modules refs, descriptions, and qualities for leader change case --- enos/enos-descriptions.hcl | 7 +- enos/enos-modules.hcl | 6 + enos/enos-qualities.hcl | 11 + 
enos/enos-scenario-openldap-leader-change.hcl | 827 ++++++++++++++++++ 4 files changed, 850 insertions(+), 1 deletion(-) create mode 100644 enos/enos-scenario-openldap-leader-change.hcl diff --git a/enos/enos-descriptions.hcl b/enos/enos-descriptions.hcl index cf5cd00..e1597de 100644 --- a/enos/enos-descriptions.hcl +++ b/enos/enos-descriptions.hcl @@ -111,7 +111,12 @@ globals { unseal_vault = <<-EOF Unseal the Vault cluster using the configured seal mechanism. -EOF + EOF + + vault_leader_step_down = <<-EOF + Force the Vault cluster leader to step down which forces the Vault cluster to perform a leader + election. + EOF  verify_log_secrets = <<-EOF Verify that the vault audit log and systemd journal do not leak secret values. diff --git a/enos/enos-modules.hcl b/enos/enos-modules.hcl index 4ddc632..868cf9d 100644 --- a/enos/enos-modules.hcl +++ b/enos/enos-modules.hcl @@ -163,6 +163,12 @@ module "vault_get_cluster_ips" { vault_install_dir = var.vault_install_dir } +module "vault_step_down" { + source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_step_down?ref=${var.vault_repo_ref}" + + vault_install_dir = var.vault_install_dir +} + module "vault_unseal_replication_followers" { source = "git::https://github.com/hashicorp/vault.git//enos/modules/vault_unseal_replication_followers?ref=${var.vault_repo_ref}" diff --git a/enos/enos-qualities.hcl b/enos/enos-qualities.hcl index 6731a6b..6ca45e9 100644 --- a/enos/enos-qualities.hcl +++ b/enos/enos-qualities.hcl @@ -41,6 +41,13 @@ quality "vault_api_sys_seal_status_api_read_matches_sys_health" { EOF } +quality "vault_api_sys_step_down_steps_down" { + description = <<-EOF + The v1/sys/step-down Vault API forces the cluster leader to step down and initiates a new leader + election + EOF +} + quality "vault_api_sys_storage_raft_autopilot_configuration_read" { description = <<-EOF The /sys/storage/raft/autopilot/configuration Vault API returns the autopilot configuration of @@ -112,6 +119,10 @@ quality 
"vault_cli_operator_members" { description = "The 'vault operator members' command returns the expected list of members" } +quality "vault_cli_operator_step_down" { + description = "The 'vault operator step-down' command forces the cluster leader to step down" +} + quality "vault_cli_status_exit_code" { description = <<-EOF The 'vault status' command exits with the correct code depending on expected seal status diff --git a/enos/enos-scenario-openldap-leader-change.hcl b/enos/enos-scenario-openldap-leader-change.hcl new file mode 100644 index 0000000..e1c32b3 --- /dev/null +++ b/enos/enos-scenario-openldap-leader-change.hcl @@ -0,0 +1,827 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +scenario "openldap_leader_change" { + description = <<-EOF + The scenario verifies that the Vault OpenLDAP secrets engine plugin works correctly after a leader change. + + This scenario creates a Vault cluster with the OpenLDAP secrets engine plugin installed and configured, and starts an OpenLDAP server. + It then tests the plugin by creating static and dynamic roles, verifying that they can be created, read, updated, and deleted via the Vault API. + After that, it forces a Vault leader stepdown followed by a leader election and verifies that the plugin still works correctly after the leader change + + # How to run this scenario + + For general instructions on running a scenario, refer to the Enos docs: https://eng-handbook.hashicorp.services/internal-tools/enos/running-a-scenario/ + For troubleshooting tips and common errors, see https://eng-handbook.hashicorp.services/internal-tools/enos/troubleshooting/. 
+ + Variables required for all scenario variants: + - aws_ssh_private_key_path (more info about AWS SSH keypairs: https://eng-handbook.hashicorp.services/internal-tools/enos/getting-started/#set-your-aws-key-pair-name-and-private-key) + - aws_ssh_keypair_name + - vault_build_date* + - vault_product_version + - vault_revision* + + * If you don't already know what build date and revision you should be using, see + https://eng-handbook.hashicorp.services/internal-tools/enos/troubleshooting/#execution-error-expected-vs-got-for-vault-versioneditionrevisionbuild-date. + + Variables required for some scenario variants: + - artifactory_token (if using `artifact_source:artifactory` in your filter) + - aws_region (if different from the default value in enos-variables.hcl) + - distro_version_ (if different from the default version for your target + distro. See supported distros and default versions in the distro_version_ + definitions in enos-variables.hcl) + - vault_artifact_path (the path to where you have a Vault artifact already downloaded, + if using `artifact_source:crt` in your filter) + - vault_license_path (if using an ENT edition of Vault) + EOF + + matrix { + arch = global.archs + artifact_source = global.artifact_sources + ldap_artifact_source = global.ldap_artifact_sources + artifact_type = global.artifact_types + backend = global.backends + config_mode = global.config_modes + distro = global.distros + edition = global.editions + ip_version = global.ip_versions + seal = global.seals + + // Our local builder always creates bundles + exclude { + artifact_source = ["local"] + ldap_artifact_source = ["local"] + artifact_type = ["package"] + } + + // PKCS#11 can only be used on ent.hsm and ent.hsm.fips1403. + exclude { + seal = ["pkcs11"] + edition = [for e in matrix.edition : e if !strcontains(e, "hsm")] + } + + // softhsm packages not available for leap/sles. 
+ exclude { + seal = ["pkcs11"] + distro = ["leap", "sles"] + } + } + + terraform_cli = terraform_cli.default + terraform = terraform.default + providers = [ + provider.aws.default, + provider.enos.ec2_user, + provider.enos.ubuntu + ] + + locals { + artifact_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null + ldap_artifact_path = matrix.ldap_artifact_source != "artifactory" ? abspath(var.ldap_artifact_path) : null + enos_provider = { + amzn = provider.enos.ec2_user + leap = provider.enos.ec2_user + rhel = provider.enos.ec2_user + sles = provider.enos.ec2_user + ubuntu = provider.enos.ubuntu + } + manage_service = matrix.artifact_type == "bundle" + } + + step "build_vault" { + description = global.description.build_vault + module = "build_vault_${matrix.artifact_source}" + + variables { + build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition] + artifact_path = local.artifact_path + goarch = matrix.arch + goos = "linux" + artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null + artifactory_repo = matrix.artifact_source == "artifactory" ? var.artifactory_repo : null + artifactory_token = matrix.artifact_source == "artifactory" ? var.artifactory_token : null + arch = matrix.artifact_source == "artifactory" ? matrix.arch : null + product_version = var.vault_product_version + artifact_type = matrix.artifact_type + distro = matrix.artifact_source == "artifactory" ? matrix.distro : null + edition = matrix.artifact_source == "artifactory" ? 
matrix.edition : null + revision = var.vault_revision + } + } + + step "ec2_info" { + description = global.description.ec2_info + module = module.ec2_info + } + + step "create_vpc" { + description = global.description.create_vpc + module = module.create_vpc + + variables { + common_tags = global.tags + ip_version = matrix.ip_version + } + } + + step "read_vault_license" { + description = global.description.read_vault_license + skip_step = matrix.edition == "ce" + module = module.read_license + + variables { + file_name = global.vault_license_path + } + } + + step "create_seal_key" { + description = global.description.create_seal_key + module = "seal_${matrix.seal}" + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_id = step.create_vpc.id + common_tags = global.tags + } + } + + step "create_vault_cluster_targets" { + description = global.description.create_vault_cluster_targets + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_tag_key = global.vault_tag_key + common_tags = global.tags + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_vault_cluster" { + description = global.description.create_vault_cluster + module = module.vault_cluster + depends_on = [ + step.build_vault, + step.create_vault_cluster_targets, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + // verified in modules + quality.vault_artifact_bundle, + quality.vault_artifact_deb, + quality.vault_artifact_rpm, + quality.vault_audit_log, + quality.vault_audit_socket, + quality.vault_audit_syslog, + quality.vault_autojoin_aws, + quality.vault_config_env_variables, + quality.vault_config_file, + quality.vault_config_log_level, + 
quality.vault_init, + quality.vault_license_required_ent, + quality.vault_listener_ipv4, + quality.vault_listener_ipv6, + quality.vault_service_start, + quality.vault_storage_backend_raft, + // verified in enos_vault_start resource + quality.vault_api_sys_config_read, + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_health_read, + quality.vault_api_sys_host_info_read, + quality.vault_api_sys_replication_status_read, + quality.vault_api_sys_seal_status_api_read_matches_sys_health, + quality.vault_api_sys_storage_raft_autopilot_configuration_read, + quality.vault_api_sys_storage_raft_autopilot_state_read, + quality.vault_api_sys_storage_raft_configuration_read, + quality.vault_cli_status_exit_code, + quality.vault_service_systemd_notified, + quality.vault_service_systemd_unit, + ] + + variables { + artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + backend_cluster_name = null + backend_cluster_tag_key = global.backend_tag_key + cluster_name = step.create_vault_cluster_targets.cluster_name + config_mode = matrix.config_mode + enable_audit_devices = var.vault_enable_audit_devices + hosts = step.create_vault_cluster_targets.hosts + install_dir = global.vault_install_dir[matrix.artifact_type] + ip_version = matrix.ip_version + license = matrix.edition != "ce" ? 
step.read_vault_license.license : null + local_artifact_path = local.artifact_path + manage_service = local.manage_service + packages = concat(global.packages, global.distro_packages[matrix.distro][global.distro_version[matrix.distro]]) + seal_attributes = step.create_seal_key.attributes + seal_type = matrix.seal + storage_backend = matrix.backend + } + } + + step "bootstrap_vault_cluster_targets" { + description = global.description.bootstrap_vault_cluster_targets + module = module.bootstrap_vault_cluster_targets + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + unseal_keys = step.create_vault_cluster.unseal_keys_b64 + threshold = step.create_vault_cluster.unseal_threshold + } + } + + step "get_local_metadata" { + description = global.description.get_local_metadata + skip_step = matrix.artifact_source != "local" + module = module.get_local_metadata + } + + // Wait for our cluster to elect a leader + step "wait_for_new_leader" { + description = global.description.wait_for_cluster_to_have_leader + module = module.vault_wait_for_leader + depends_on = [step.create_vault_cluster, + step.bootstrap_vault_cluster_targets] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_leader_read, + quality.vault_unseal_ha_leader_election, + ] + + variables { + timeout = 120 // seconds + ip_version = matrix.ip_version + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "get_vault_cluster_ips" { + description = global.description.get_vault_cluster_ip_addresses + module = module.vault_get_cluster_ips + depends_on = [step.wait_for_new_leader] + + 
providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_leader_read, + quality.vault_cli_operator_members, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + + step "verify_vault_unsealed" { + description = global.description.verify_vault_unsealed + module = module.vault_wait_for_cluster_unsealed + depends_on = [step.wait_for_new_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_seal_awskms, + quality.vault_seal_pkcs11, + quality.vault_seal_shamir, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + } + } + + step "verify_vault_version" { + description = global.description.verify_vault_version + module = module.vault_verify_version + depends_on = [step.verify_vault_unsealed] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_version_history_keys, + quality.vault_api_sys_version_history_key_info, + quality.vault_version_build_date, + quality.vault_version_edition, + quality.vault_version_release, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_edition = matrix.edition + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version + vault_revision = matrix.artifact_source == "local" ? 
step.get_local_metadata.revision : var.vault_revision + vault_build_date = matrix.artifact_source == "local" ? step.get_local_metadata.build_date : var.vault_build_date + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_raft_auto_join_voter" { + description = global.description.verify_raft_cluster_all_nodes_are_voters + skip_step = matrix.backend != "raft" + module = module.vault_verify_raft_auto_join_voter + depends_on = [ + step.verify_vault_unsealed, + step.get_vault_cluster_ips + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = quality.vault_raft_voters + + variables { + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "build_ldap" { + description = global.description.build_ldap + module = "build_ldap_${matrix.ldap_artifact_source}" + + variables { + goarch = matrix.arch + goos = "linux" + artifactory_host = matrix.ldap_artifact_source == "artifactory" ? var.artifactory_host : null + artifactory_repo = matrix.ldap_artifact_source == "artifactory" ? var.plugin_artifactory_repo : null + artifactory_token = matrix.ldap_artifact_source == "artifactory" ? var.artifactory_token : null + arch = matrix.ldap_artifact_source == "artifactory" ? matrix.arch : null + artifact_type = matrix.ldap_artifact_source == "artifactory" ? "bundle" : null + product_version = var.ldap_plugin_version + revision = var.ldap_revision + plugin_name = var.plugin_name + makefile_dir = matrix.ldap_artifact_source == "local" ? var.makefile_dir : null + plugin_dest_dir = matrix.ldap_artifact_source == "local" ? 
var.plugin_dest_dir : null + } + } + + step "create_ldap_server_target" { + description = global.description.create_ldap_server_target + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_tag_key = global.ldap_tag_key + common_tags = global.tags + vpc_id = step.create_vpc.id + instance_count = 1 + } + } + + step "create_ldap_server" { + description = global.description.create_ldap_server + module = module.create_backend_server + depends_on = [step.create_ldap_server_target] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + hosts = step.create_ldap_server_target.hosts + ldap_tag = var.ldap_tag + ldap_port = global.ports.ldap.port + } + } + + step "setup_plugin" { + description = global.description.setup_plugin + module = module.setup_plugin + depends_on = [ + step.get_vault_cluster_ips, + step.create_ldap_server, + step.verify_vault_unsealed, + step.build_ldap + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + artifactory_release = matrix.ldap_artifact_source == "artifactory" ? step.build_ldap.ldap_artifactory_release : null + release = matrix.ldap_artifact_source == "releases" ? { version = var.ldap_plugin_version, edition = "ce" } : null + hosts = step.create_vault_cluster_targets.hosts + local_artifact_path = matrix.ldap_artifact_source == "local" ? 
local.ldap_artifact_path : null + + + vault_leader_ip = step.get_vault_cluster_ips.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + + plugin_name = var.plugin_name + plugin_dir_vault = var.plugin_dir_vault + plugin_mount_path = var.plugin_mount_path + } + } + + step "configure_plugin" { + description = global.description.configure_plugin + module = module.configure_plugin + depends_on = [step.setup_plugin] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_leader_ip = step.get_vault_cluster_ips.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + + plugin_mount_path = var.plugin_mount_path + ldap_host = step.create_ldap_server.ldap_ip_address + ldap_port = step.create_ldap_server.ldap_port + ldap_base_dn = var.ldap_base_dn + ldap_bind_pass = var.ldap_bind_pass + ldap_schema = var.ldap_schema + } + } + + step "test_static_role_crud_api" { + description = global.description.static_role_crud_api + module = module.static_role_crud_api + depends_on = [step.configure_plugin] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_leader_ip = step.get_vault_cluster_ips.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + plugin_mount_path = var.plugin_mount_path + ldap_host = step.create_ldap_server.ldap_ip_address + ldap_port = step.create_ldap_server.ldap_port + ldap_base_dn = var.ldap_base_dn + ldap_bind_pass = var.ldap_bind_pass + ldap_user_role_name = var.ldap_user_role_name + ldap_username = var.ldap_username + ldap_user_old_password = var.ldap_user_old_password + } + } + + step "test_dynamic_role_crud_api" { + description = global.description.dynamic_role_crud_api + module = module.dynamic_role_crud_api + depends_on = 
[step.configure_plugin] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_leader_ip = step.get_vault_cluster_ips.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + hosts = step.create_vault_cluster_targets.hosts + + plugin_mount_path = var.plugin_mount_path + ldap_host = step.create_ldap_server.ldap_ip_address + ldap_port = step.create_ldap_server.ldap_port + ldap_base_dn = var.ldap_base_dn + dynamic_role_ldif_templates_path = var.dynamic_role_ldif_templates_path + ldap_dynamic_user_role_name = var.ldap_dynamic_user_role_name + } + } + + step "test_library_crud_api" { + description = global.description.library_crud_api + module = module.library_crud_api + depends_on = [ + step.configure_plugin, + step.test_static_role_crud_api + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_leader_ip = step.get_vault_cluster_ips.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + plugin_mount_path = var.plugin_mount_path + ldap_host = step.create_ldap_server.ldap_ip_address + ldap_port = step.create_ldap_server.ldap_port + ldap_base_dn = var.ldap_base_dn + library_set_name = var.library_set_name + service_account_names = var.service_account_names + } + } + + step "verify_log_secrets" { + skip_step = !var.vault_enable_audit_devices || !var.verify_log_secrets + + description = global.description.verify_log_secrets + module = module.verify_log_secrets + depends_on = [ + step.verify_vault_unsealed, + step.test_static_role_crud_api, + step.test_dynamic_role_crud_api, + step.test_library_crud_api + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_audit_log_secrets, + quality.vault_journal_secrets, + quality.vault_radar_index_create, + quality.vault_radar_scan_file, + ] + + 
variables { + audit_log_file_path = step.create_vault_cluster.audit_device_file_path + leader_host = step.get_vault_cluster_ips.leader_host + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + } + } + + // Force a step down to trigger a new leader election + step "vault_leader_step_down" { + description = global.description.vault_leader_step_down + module = module.vault_step_down + depends_on = [ + step.get_vault_cluster_ips, + step.test_static_role_crud_api, + step.test_dynamic_role_crud_api, + step.test_library_crud_api + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_step_down_steps_down, + quality.vault_cli_operator_step_down, + ] + + variables { + leader_host = step.get_vault_cluster_ips.leader_host + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + // Wait for our cluster to elect a leader + step "wait_for_leader_after_step_down" { + description = global.description.wait_for_cluster_to_have_leader + module = module.vault_wait_for_leader + depends_on = [step.vault_leader_step_down] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_leader_read, + quality.vault_cli_operator_step_down, + ] + + variables { + timeout = 120 // seconds + ip_version = matrix.ip_version + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "get_vault_cluster_ips_after_step_down" { + description = global.description.get_vault_cluster_ip_addresses + module = module.vault_get_cluster_ips + depends_on = [step.wait_for_leader_after_step_down] + + providers = { + enos 
= local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_api_sys_ha_status_read, + quality.vault_api_sys_leader_read, + quality.vault_cli_operator_members, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + ip_version = matrix.ip_version + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_vault_unsealed_after_step_down" { + description = global.description.verify_vault_unsealed + module = module.vault_wait_for_cluster_unsealed + depends_on = [step.wait_for_leader_after_step_down] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + verifies = [ + quality.vault_seal_awskms, + quality.vault_seal_pkcs11, + quality.vault_seal_shamir, + ] + + variables { + hosts = step.create_vault_cluster_targets.hosts + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_install_dir = global.vault_install_dir[matrix.artifact_type] + } + } + + step "test_static_role_crud_api_after_step_down" { + description = global.description.static_role_crud_api + module = module.static_role_crud_api + depends_on = [step.verify_vault_unsealed_after_step_down] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_leader_ip = step.get_vault_cluster_ips_after_step_down.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + plugin_mount_path = var.plugin_mount_path + ldap_host = step.create_ldap_server.ldap_ip_address + ldap_port = step.create_ldap_server.ldap_port + ldap_base_dn = var.ldap_base_dn + ldap_bind_pass = var.ldap_bind_pass + ldap_user_role_name = var.ldap_user_role_name + ldap_username = var.ldap_username + ldap_user_old_password = var.ldap_user_old_password + } + } + + step "test_dynamic_role_crud_api_after_step_down" { + description = 
global.description.dynamic_role_crud_api + module = module.dynamic_role_crud_api + depends_on = [step.verify_vault_unsealed_after_step_down] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_leader_ip = step.get_vault_cluster_ips_after_step_down.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + hosts = step.create_vault_cluster_targets.hosts + + plugin_mount_path = var.plugin_mount_path + ldap_host = step.create_ldap_server.ldap_ip_address + ldap_port = step.create_ldap_server.ldap_port + ldap_base_dn = var.ldap_base_dn + dynamic_role_ldif_templates_path = var.dynamic_role_ldif_templates_path + ldap_dynamic_user_role_name = var.ldap_dynamic_user_role_name + } + } + + step "test_library_crud_api_after_step_down" { + description = global.description.library_crud_api + module = module.library_crud_api + depends_on = [ + step.verify_vault_unsealed_after_step_down, + step.test_static_role_crud_api_after_step_down + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_leader_ip = step.get_vault_cluster_ips_after_step_down.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + plugin_mount_path = var.plugin_mount_path + ldap_host = step.create_ldap_server.ldap_ip_address + ldap_port = step.create_ldap_server.ldap_port + ldap_base_dn = var.ldap_base_dn + library_set_name = var.library_set_name + service_account_names = var.service_account_names + } + } + + output "audit_device_file_path" { + description = "The file path for the file audit device, if enabled" + value = step.create_vault_cluster.audit_device_file_path + } + + output "cluster_name" { + description = "The Vault cluster name" + value = step.create_vault_cluster.cluster_name + } + + output "hosts" { + description = "The Vault cluster target hosts" + value = 
step.create_vault_cluster.hosts + } + + output "private_ips" { + description = "The Vault cluster private IPs" + value = step.create_vault_cluster.private_ips + } + + output "public_ips" { + description = "The Vault cluster public IPs" + value = step.create_vault_cluster.public_ips + } + + output "root_token" { + description = "The Vault cluster root token" + value = step.create_vault_cluster.root_token + } + + output "recovery_key_shares" { + description = "The Vault cluster recovery key shares" + value = step.create_vault_cluster.recovery_key_shares + } + + output "recovery_keys_b64" { + description = "The Vault cluster recovery keys b64" + value = step.create_vault_cluster.recovery_keys_b64 + } + + output "recovery_keys_hex" { + description = "The Vault cluster recovery keys hex" + value = step.create_vault_cluster.recovery_keys_hex + } + + output "seal_key_attributes" { + description = "The Vault cluster seal attributes" + value = step.create_seal_key.attributes + } + + output "unseal_keys_b64" { + description = "The Vault cluster unseal keys" + value = step.create_vault_cluster.unseal_keys_b64 + } + + output "unseal_keys_hex" { + description = "The Vault cluster unseal keys hex" + value = step.create_vault_cluster.unseal_keys_hex + } +} From 3c43c3beaaabd6d1d52e324b8f11b9f7477d5a5f Mon Sep 17 00:00:00 2001 From: Hamza Shili <98858609+HamzaShili65@users.noreply.github.com> Date: Wed, 20 Aug 2025 16:51:11 -0500 Subject: [PATCH 13/26] Vault 38808 plugin quality test configure plugin and test config endpoint root rotation (#193) * add scenario for leader change case * add modules refs, descriptions, and qualities for leader change case * add tf module for testing ldap secrets engine manual root_rotation * add tf module for testing ldap secrets engine periodic root_rotation * add tf module for testing ldap secrets engine scheduled root_rotation * add setup for integrating root rotation modules * fmt * takeout root rotation from scripts * integrate root rotation 
modules with smoke scenario --- enos/enos-descriptions.hcl | 4 + enos/enos-globals.hcl | 13 +-- enos/enos-modules.hcl | 12 +++ enos/enos-scenario-openldap-leader-change.hcl | 4 +- enos/enos-scenario-openldap-smoke.hcl | 53 ++++++++--- enos/enos-variables.hcl | 18 ++++ .../scripts/dynamic-role.sh | 3 - .../library_crud_api/scripts/library.sh | 4 - enos/modules/root_rotation_manual/main.tf | 30 +++++++ .../scripts/test-root-rotation-manual.sh | 46 ++++++++++ .../modules/root_rotation_manual/variables.tf | 22 +++++ enos/modules/root_rotation_period/main.tf | 27 ++++++ .../scripts/test-root-rotation-period.sh | 80 +++++++++++++++++ .../modules/root_rotation_period/variables.tf | 27 ++++++ enos/modules/root_rotation_schedule/main.tf | 27 ++++++ .../scripts/test-root-rotation-schedule.sh | 89 +++++++++++++++++++ .../root_rotation_schedule/variables.tf | 33 +++++++ .../scripts/static-role.sh | 3 - 18 files changed, 464 insertions(+), 31 deletions(-) create mode 100644 enos/modules/root_rotation_manual/main.tf create mode 100755 enos/modules/root_rotation_manual/scripts/test-root-rotation-manual.sh create mode 100644 enos/modules/root_rotation_manual/variables.tf create mode 100644 enos/modules/root_rotation_period/main.tf create mode 100755 enos/modules/root_rotation_period/scripts/test-root-rotation-period.sh create mode 100644 enos/modules/root_rotation_period/variables.tf create mode 100644 enos/modules/root_rotation_schedule/main.tf create mode 100755 enos/modules/root_rotation_schedule/scripts/test-root-rotation-schedule.sh create mode 100644 enos/modules/root_rotation_schedule/variables.tf diff --git a/enos/enos-descriptions.hcl b/enos/enos-descriptions.hcl index e1597de..e99ed00 100644 --- a/enos/enos-descriptions.hcl +++ b/enos/enos-descriptions.hcl @@ -109,6 +109,10 @@ globals { Build, register, and enable the Vault plugin. EOF + ldap_config_root_rotation = <<-EOF + Test the LDAP secrets engine's config endpoint root rotation functionality. 
+ EOF + unseal_vault = <<-EOF Unseal the Vault cluster using the configured seal mechanism. EOF diff --git a/enos/enos-globals.hcl b/enos/enos-globals.hcl index 0504052..67fa2f5 100644 --- a/enos/enos-globals.hcl +++ b/enos/enos-globals.hcl @@ -2,12 +2,13 @@ // SPDX-License-Identifier: MPL-2.0 globals { - archs = ["amd64", "arm64"] - artifact_sources = ["local", "crt", "artifactory"] - ldap_artifact_sources = ["local", "releases", "artifactory"] - artifact_types = ["bundle", "package"] - backends = ["raft"] - backend_tag_key = "VaultStorage" + archs = ["amd64", "arm64"] + artifact_sources = ["local", "crt", "artifactory"] + ldap_artifact_sources = ["local", "releases", "artifactory"] + ldap_config_root_rotation_methods = ["period", "schedule", "manual"] + artifact_types = ["bundle", "package"] + backends = ["raft"] + backend_tag_key = "VaultStorage" build_tags = { "ce" = ["ui"] "ent" = ["ui", "enterprise", "ent"] diff --git a/enos/enos-modules.hcl b/enos/enos-modules.hcl index 868cf9d..50e923d 100644 --- a/enos/enos-modules.hcl +++ b/enos/enos-modules.hcl @@ -90,6 +90,18 @@ module "restart_vault" { vault_install_dir = var.vault_install_dir } +module "root_rotation_period" { + source = "./modules/root_rotation_period" +} + +module "root_rotation_schedule" { + source = "./modules/root_rotation_schedule" +} + +module "root_rotation_manual" { + source = "./modules/root_rotation_manual" +} + module "seal_awskms" { source = "git::https://github.com/hashicorp/vault.git//enos/modules/seal_awskms?ref=${var.vault_repo_ref}" diff --git a/enos/enos-scenario-openldap-leader-change.hcl b/enos/enos-scenario-openldap-leader-change.hcl index e1c32b3..706e361 100644 --- a/enos/enos-scenario-openldap-leader-change.hcl +++ b/enos/enos-scenario-openldap-leader-change.hcl @@ -260,7 +260,7 @@ scenario "openldap_leader_change" { description = global.description.wait_for_cluster_to_have_leader module = module.vault_wait_for_leader depends_on = [step.create_vault_cluster, - 
step.bootstrap_vault_cluster_targets] + step.bootstrap_vault_cluster_targets] providers = { enos = local.enos_provider[matrix.distro] @@ -597,7 +597,7 @@ scenario "openldap_leader_change" { step "vault_leader_step_down" { description = global.description.vault_leader_step_down module = module.vault_step_down - depends_on = [ + depends_on = [ step.get_vault_cluster_ips, step.test_static_role_crud_api, step.test_dynamic_role_crud_api, diff --git a/enos/enos-scenario-openldap-smoke.hcl b/enos/enos-scenario-openldap-smoke.hcl index eaaee2f..92e4d48 100644 --- a/enos/enos-scenario-openldap-smoke.hcl +++ b/enos/enos-scenario-openldap-smoke.hcl @@ -37,16 +37,17 @@ scenario "openldap_smoke" { EOF matrix { - arch = global.archs - artifact_source = global.artifact_sources - ldap_artifact_source = global.ldap_artifact_sources - artifact_type = global.artifact_types - backend = global.backends - config_mode = global.config_modes - distro = global.distros - edition = global.editions - ip_version = global.ip_versions - seal = global.seals + arch = global.archs + artifact_source = global.artifact_sources + ldap_artifact_source = global.ldap_artifact_sources + artifact_type = global.artifact_types + backend = global.backends + config_mode = global.config_modes + distro = global.distros + edition = global.editions + ip_version = global.ip_versions + seal = global.seals + ldap_config_root_rotation_method = global.ldap_config_root_rotation_methods // Our local builder always creates bundles exclude { @@ -66,6 +67,12 @@ scenario "openldap_smoke" { seal = ["pkcs11"] distro = ["leap", "sles"] } + + // rotation manager capabilities not supported in Vault community edition + exclude { + edition = ["ce"] + ldap_config_root_rotation_method = ["period", "schedule"] + } } terraform_cli = terraform_cli.default @@ -490,10 +497,30 @@ scenario "openldap_smoke" { } } + step "test_ldap_config_root_rotation" { + description = global.description.ldap_config_root_rotation + module = 
"root_rotation_${matrix.ldap_config_root_rotation_method}" + depends_on = [step.configure_plugin] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_leader_ip = step.get_vault_cluster_ips.leader_host.public_ip + vault_addr = step.create_vault_cluster.api_addr_localhost + vault_root_token = step.create_vault_cluster.root_token + plugin_mount_path = var.plugin_mount_path + + rotation_period = matrix.ldap_config_root_rotation_method == "period" ? var.ldap_rotation_period : null + rotation_window = matrix.ldap_config_root_rotation_method == "schedule" ? var.ldap_rotation_window : null + } + } + step "test_static_role_crud_api" { description = global.description.static_role_crud_api module = module.static_role_crud_api - depends_on = [step.configure_plugin] + depends_on = [step.test_ldap_config_root_rotation] providers = { enos = local.enos_provider[matrix.distro] @@ -517,7 +544,7 @@ scenario "openldap_smoke" { step "test_dynamic_role_crud_api" { description = global.description.dynamic_role_crud_api module = module.dynamic_role_crud_api - depends_on = [step.configure_plugin] + depends_on = [step.test_ldap_config_root_rotation] providers = { enos = local.enos_provider[matrix.distro] @@ -542,7 +569,7 @@ scenario "openldap_smoke" { description = global.description.library_crud_api module = module.library_crud_api depends_on = [ - step.configure_plugin, + step.test_ldap_config_root_rotation, step.test_static_role_crud_api ] diff --git a/enos/enos-variables.hcl b/enos/enos-variables.hcl index a178293..ba54bfa 100644 --- a/enos/enos-variables.hcl +++ b/enos/enos-variables.hcl @@ -91,6 +91,12 @@ variable "ldap_bind_pass" { default = null } +variable "ldap_disable_automated_rotation" { + type = bool + default = false + description = "Enterprise: cancel upcoming rotations until unset" +} + variable "ldap_dynamic_user_role_name" { description = "The name of the LDAP dynamic user role to create" type = string @@ -109,6 +115,18 @@ variable 
"ldap_revision" { default = null } +variable "ldap_rotation_period" { + type = number + default = 0 + description = "Enterprise: time in seconds before rotating the LDAP secret engine root credential. 0 disables rotation" +} + +variable "ldap_rotation_window" { + type = number + default = 0 + description = "Enterprise: max time in seconds to complete scheduled rotation" +} + variable "ldap_schema" { description = "LDAP schema type" type = string diff --git a/enos/modules/dynamic_role_crud_api/scripts/dynamic-role.sh b/enos/modules/dynamic_role_crud_api/scripts/dynamic-role.sh index b2c326a..fa6d58d 100755 --- a/enos/modules/dynamic_role_crud_api/scripts/dynamic-role.sh +++ b/enos/modules/dynamic_role_crud_api/scripts/dynamic-role.sh @@ -29,9 +29,6 @@ fail() { export VAULT_ADDR export VAULT_TOKEN -echo "==> Rotating root credentials" -vault write -f "${PLUGIN_PATH}/rotate-root" - ROLE_PATH="${PLUGIN_PATH}/role/${ROLE_NAME}" echo "==> Creating dynamic role: ${ROLE_NAME}" diff --git a/enos/modules/library_crud_api/scripts/library.sh b/enos/modules/library_crud_api/scripts/library.sh index 42915ec..f4f8144 100644 --- a/enos/modules/library_crud_api/scripts/library.sh +++ b/enos/modules/library_crud_api/scripts/library.sh @@ -33,10 +33,6 @@ if [[ ${#SA_LIST[@]} -lt 1 ]]; then fail "SERVICE_ACCOUNT_NAMES must contain at least one account" fi -# Rotate root credentials -echo "==> Rotating root credentials" -vault write -f "${PLUGIN_PATH}/rotate-root" - # Create library set echo "==> Creating library set ${LIBRARY_SET_NAME}" vault write "${LIB_PATH}" \ diff --git a/enos/modules/root_rotation_manual/main.tf b/enos/modules/root_rotation_manual/main.tf new file mode 100644 index 0000000..bbac5b0 --- /dev/null +++ b/enos/modules/root_rotation_manual/main.tf @@ -0,0 +1,30 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "rotation_period" { default = null } +variable "rotation_window" { default = null } + +resource "enos_remote_exec" "root_rotation_manual_test" { + scripts = [abspath("${path.module}/scripts/test-root-rotation-manual.sh")] + + environment = { + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + PLUGIN_PATH = var.plugin_mount_path + } + + transport = { + ssh = { + host = var.vault_leader_ip + } + } +} + diff --git a/enos/modules/root_rotation_manual/scripts/test-root-rotation-manual.sh b/enos/modules/root_rotation_manual/scripts/test-root-rotation-manual.sh new file mode 100755 index 0000000..e1c30e3 --- /dev/null +++ b/enos/modules/root_rotation_manual/scripts/test-root-rotation-manual.sh @@ -0,0 +1,46 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 +set -euo pipefail + +fail() { + echo "$1" 1>&2 + exit 1 +} + +PLUGIN_PATH=local-secrets-ldap + +# Required env vars: PLUGIN_PATH +if [[ -z "${PLUGIN_PATH:-}" ]]; then + fail "PLUGIN_PATH env variable has not been set" +fi + +# Configure plugin for manual rotation +vault write -format=json "${PLUGIN_PATH}/config" \ + disable_automated_rotation=true \ + rotation_period=0 \ + rotation_schedule="" \ + rotation_window=0 >/dev/null + +# Read disable_automated_rotation from config +disable_automated_rotation=$(vault read -format=json "${PLUGIN_PATH}/config" | jq -r '.data.disable_automated_rotation') + +# Validate disable_automated_rotation +if [[ "$disable_automated_rotation" != "true" ]]; then + fail "[ERROR] Expected rotation_schedule=true, got $disable_automated_rotation" +fi + +# Read pre-rotation timestamp +before=$(vault read -format=json "${PLUGIN_PATH}/config" | jq -r '.data.last_bind_password_rotation') + +# Trigger manual rotation +vault write -format=json -f "${PLUGIN_PATH}/rotate-root" 
>/dev/null + +# Read post-rotation timestamp after a brief pause +after=$(vault read -format=json "${PLUGIN_PATH}/config" | jq -r '.data.last_bind_password_rotation') + +if [[ "$after" == "$before" ]]; then + fail "[ERROR] Manual rotation failed: timestamp did not change (before=$before, after=$after)" +fi + +echo "[OK] Manual rotation succeeded: timestamp updated (before=$before, after=$after)" diff --git a/enos/modules/root_rotation_manual/variables.tf b/enos/modules/root_rotation_manual/variables.tf new file mode 100644 index 0000000..0cf21c6 --- /dev/null +++ b/enos/modules/root_rotation_manual/variables.tf @@ -0,0 +1,22 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +variable "vault_addr" { + type = string + description = "Vault API address" +} + +variable "vault_root_token" { + type = string + description = "Vault cluster root token" +} + +variable "vault_leader_ip" { + type = string + description = "SSH host/IP of Vault leader for remote exec" +} + +variable "plugin_mount_path" { + type = string + description = "Mount path of the LDAP plugin in Vault" +} diff --git a/enos/modules/root_rotation_period/main.tf b/enos/modules/root_rotation_period/main.tf new file mode 100644 index 0000000..1dbfd65 --- /dev/null +++ b/enos/modules/root_rotation_period/main.tf @@ -0,0 +1,27 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "rotation_window" { default = null } + +resource "enos_remote_exec" "root_rotation_period_test" { + scripts = [abspath("${path.module}/scripts/test-root-rotation-period.sh")] + environment = { + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + PLUGIN_PATH = var.plugin_mount_path + ROTATION_PERIOD = var.rotation_period + } + transport = { + ssh = { + host = var.vault_leader_ip + } + } +} diff --git a/enos/modules/root_rotation_period/scripts/test-root-rotation-period.sh b/enos/modules/root_rotation_period/scripts/test-root-rotation-period.sh new file mode 100755 index 0000000..78c0005 --- /dev/null +++ b/enos/modules/root_rotation_period/scripts/test-root-rotation-period.sh @@ -0,0 +1,80 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 +set -euo pipefail + +fail() { + echo "$1" 1>&2 + exit 1 +} + +# Required env vars: PLUGIN_PATH, ROTATION_PERIOD +if [[ -z "${PLUGIN_PATH:-}" ]]; then fail "PLUGIN_PATH not set"; fi +if [[ -z "${ROTATION_PERIOD:-}" ]]; then fail "ROTATION_PERIOD not set"; fi + +# Configure plugin for rotation period +vault write -format=json "${PLUGIN_PATH}/config" \ + disable_automated_rotation=false \ + rotation_period="${ROTATION_PERIOD}" \ + rotation_schedule="" \ + rotation_window=0 >/dev/null + +# Add cross-platform parse_epoch helper +parse_epoch() { + python3 -c " +import sys, datetime, re +ts = sys.argv[1] +if ts == 'null': + print(0) + sys.exit(0) +# Remove Z and handle nanoseconds +if ts.endswith('Z'): + ts = ts[:-1] +match = re.match(r'(.*\.\d{6})\d*(.*)', ts) +if match: + ts = match.group(1) + match.group(2) +dt = datetime.datetime.fromisoformat(ts) +print(int(dt.timestamp())) +" "$1" +} + +# Read rotation_period from config +rotation_period=$(vault read -format=json "${PLUGIN_PATH}/config" | jq -r 
'.data.rotation_period') + +# Validate rotation_period +if [[ "$rotation_period" != "$ROTATION_PERIOD" ]]; then + fail "[ERROR] Expected rotation_period=$ROTATION_PERIOD, got $rotation_period" +fi + +# Read timestamp before rotation +before=$(vault read -format=json "${PLUGIN_PATH}/config" | jq -r '.data.last_bind_password_rotation') + +# Convert to epoch +before_epoch=$(parse_epoch "$before") + +# Wait for rotation_period + 1 seconds +echo "==> Sleeping for $((ROTATION_PERIOD + 1)) seconds for automated rotation" +sleep $((ROTATION_PERIOD + 1)) + +# Read timestamp after rotation +after=$(vault read -format=json "${PLUGIN_PATH}/config" | jq -r '.data.last_bind_password_rotation') + +after_epoch=$(parse_epoch "$after") + +# Assert a rotation occurred +if [[ "$before" == "null" ]]; then + echo "[INFO] No previous rotation timestamp found (before=null), first rotation expected." +fi +if [[ "$after" == "null" ]]; then + fail "[ERROR] No rotation occurred, after=null" +fi + +# Compute difference +diff=$((after_epoch - before_epoch)) +if [[ "$diff" -lt "$ROTATION_PERIOD" ]]; then + fail "[ERROR] Automated rotation did not occur: delta $diff < $ROTATION_PERIOD" +fi + +#final check: + +echo "[OK] Automated rotation succeeded: delta $diff >= $ROTATION_PERIOD" diff --git a/enos/modules/root_rotation_period/variables.tf b/enos/modules/root_rotation_period/variables.tf new file mode 100644 index 0000000..3434a88 --- /dev/null +++ b/enos/modules/root_rotation_period/variables.tf @@ -0,0 +1,27 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +variable "vault_addr" { + type = string + description = "Vault API address" +} + +variable "vault_root_token" { + type = string + description = "Vault cluster root token" +} + +variable "vault_leader_ip" { + type = string + description = "SSH host/IP of Vault leader for remote exec" +} + +variable "plugin_mount_path" { + type = string + description = "Mount path of the LDAP plugin in Vault" +} + +variable "rotation_period" { + type = number + description = "Automated rotation period in seconds for the LDAP root credentials" +} diff --git a/enos/modules/root_rotation_schedule/main.tf b/enos/modules/root_rotation_schedule/main.tf new file mode 100644 index 0000000..c4e7ad4 --- /dev/null +++ b/enos/modules/root_rotation_schedule/main.tf @@ -0,0 +1,27 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "rotation_period" { default = null } + +resource "enos_remote_exec" "root_rotation_schedule_test" { + scripts = [abspath("${path.module}/scripts/test-root-rotation-schedule.sh")] + environment = { + VAULT_ADDR = var.vault_addr + VAULT_TOKEN = var.vault_root_token + PLUGIN_PATH = var.plugin_mount_path + ROTATION_WINDOW = var.rotation_window + } + transport = { + ssh = { + host = var.vault_leader_ip + } + } +} diff --git a/enos/modules/root_rotation_schedule/scripts/test-root-rotation-schedule.sh b/enos/modules/root_rotation_schedule/scripts/test-root-rotation-schedule.sh new file mode 100755 index 0000000..122b022 --- /dev/null +++ b/enos/modules/root_rotation_schedule/scripts/test-root-rotation-schedule.sh @@ -0,0 +1,89 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 +set -euo pipefail + +fail() { + echo "$1" 1>&2 + exit 1 +} + +# Required env vars: PLUGIN_PATH, ROTATION_WINDOW +if [[ -z "${PLUGIN_PATH:-}" ]]; then fail "PLUGIN_PATH not set"; fi +if [[ -z "${ROTATION_WINDOW:-}" ]]; then fail "ROTATION_WINDOW not set"; fi + +# Compute cron schedule one minute from now +schedule=$(python3 - < Using cron schedule: $schedule" + +# Configure plugin for schedule-based rotation +vault write -format=json "${PLUGIN_PATH}/config" \ + disable_automated_rotation=false \ + rotation_schedule="$schedule" \ + rotation_window="${ROTATION_WINDOW}" \ + rotation_period=0 >/dev/null + +# Read rotation_schedule from config +rotation_schedule=$(vault read -format=json "${PLUGIN_PATH}/config" | jq -r '.data.rotation_schedule') + +# Validate rotation_schedule +if [[ "$rotation_schedule" != "$schedule" ]]; then + fail "[ERROR] Expected rotation_schedule=$schedule, got $rotation_schedule" +fi + +# Read rotation_window from config +rotation_window=$(vault read -format=json "${PLUGIN_PATH}/config" | jq -r '.data.rotation_window') + +# Validate rotation_window +if [[ "$rotation_window" != "$ROTATION_WINDOW" ]]; then + fail "[ERROR] Expected rotation_period=$ROTATION_WINDOW, got $rotation_window" +fi + +# Cross-platform parse_epoch helper +parse_epoch() { + python3 -c " +import sys, datetime, re +ts = sys.argv[1] +if ts == 'null': + print(0) + sys.exit(0) +# Remove Z and handle nanoseconds +if ts.endswith('Z'): + ts = ts[:-1] +match = re.match(r'(.*\.\d{6})\d*(.*)', ts) +if match: + ts = match.group(1) + match.group(2) +dt = datetime.datetime.fromisoformat(ts) +print(int(dt.timestamp())) +" "$1" +} + +# Read timestamp before window expiration +before=$(vault read -format=json "${PLUGIN_PATH}/config" | jq -r '.data.last_bind_password_rotation') +before_epoch=$(parse_epoch "$before") + +sleep 61 # Wait for the cron job to trigger + +# Read timestamp after window expiration +after=$(vault read -format=json 
"${PLUGIN_PATH}/config" | jq -r '.data.last_bind_password_rotation') +after_epoch=$(parse_epoch "$after") + +# Assert a rotation occurred +if [[ "$before" == "null" ]]; then + echo "[INFO] No previous rotation timestamp found (before=null), first rotation expected." +fi +if [[ "$after" == "null" ]]; then + fail "[ERROR] No rotation occurred, after=null" +fi + +diff=$((after_epoch - before_epoch)) +if [[ "$diff" -eq 0 ]]; then + fail "[ERROR] No rotation occurred at $after" +fi + +echo "[OK] Rotation occurred at $after" diff --git a/enos/modules/root_rotation_schedule/variables.tf b/enos/modules/root_rotation_schedule/variables.tf new file mode 100644 index 0000000..033f7de --- /dev/null +++ b/enos/modules/root_rotation_schedule/variables.tf @@ -0,0 +1,33 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +variable "vault_addr" { + type = string + description = "Vault API address" +} + +variable "vault_root_token" { + type = string + description = "Vault cluster root token" +} + +variable "vault_leader_ip" { + type = string + description = "SSH host/IP of Vault leader for remote exec" +} + +variable "plugin_mount_path" { + type = string + description = "Mount path of the LDAP plugin in Vault" +} + +variable "rotation_window" { + type = number + description = "Maximum time in seconds allowed to complete a scheduled rotation" + default = 3600 + + validation { + condition = var.rotation_window >= 3600 + error_message = "rotation_window must be at least 3600 seconds (1 hour)." 
+ } +} \ No newline at end of file diff --git a/enos/modules/static_role_crud_api/scripts/static-role.sh b/enos/modules/static_role_crud_api/scripts/static-role.sh index 0e15567..9c92d7e 100644 --- a/enos/modules/static_role_crud_api/scripts/static-role.sh +++ b/enos/modules/static_role_crud_api/scripts/static-role.sh @@ -39,9 +39,6 @@ CRED_PATH="${PLUGIN_PATH}/static-cred/${ROLE_NAME}" echo "==> LDAP_HOST: ${LDAP_HOST}" echo "==> LDAP_PORT: ${LDAP_PORT}" -echo "==> Rotating root credentials" -vault write -f "${PLUGIN_PATH}/rotate-root" - echo "==> Creating static role ${ROLE_NAME}" vault write "${ROLE_PATH}" \ dn="${LDAP_DN}" \ From c9472b2ca11817bf08057f31e147ce16612fc879 Mon Sep 17 00:00:00 2001 From: HamzaShili65 Date: Thu, 21 Aug 2025 16:44:11 -0500 Subject: [PATCH 14/26] swap docker with podman --- bootstrap/setup-docker.sh | 55 ------------------- bootstrap/setup-openldap.sh | 23 ++++++-- enos/modules/backend_servers_setup/main.tf | 19 +++---- .../backend_servers_setup/variables.tf | 22 ++++++-- .../scripts/install-shasum.sh | 49 ----------------- 5 files changed, 43 insertions(+), 125 deletions(-) delete mode 100755 bootstrap/setup-docker.sh delete mode 100644 enos/modules/ec2_bootstrap_tools/scripts/install-shasum.sh diff --git a/bootstrap/setup-docker.sh b/bootstrap/setup-docker.sh deleted file mode 100755 index 90e1954..0000000 --- a/bootstrap/setup-docker.sh +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -set -e - -# Function to check if Docker is already installed -check_docker_installed() { - if command -v docker &> /dev/null; then - echo "Docker is already installed: $(docker --version)" - exit 0 - fi -} - -# Function to detect the OS -detect_os() { - if [ -f /etc/os-release ]; then - # shellcheck disable=SC1091 - . 
/etc/os-release - echo "$ID" - else - echo "Unknown OS: /etc/os-release not found" - fi -} - -# Main logic -check_docker_installed - -os_id=$(detect_os) -echo "Installing Docker for: ${os_id}" -case "$os_id" in - amzn) - sudo dnf upgrade --refresh -y - sudo dnf install -y docker - ;; - ubuntu) - sudo apt update -y - sudo apt install apt-transport-https ca-certificates curl software-properties-common -y - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg - echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null - sudo apt update - sudo apt install docker-ce docker-ce-cli containerd.io -y - ;; - rhel | centos) - sudo yum update -y - sudo yum install -y docker - ;; - *) - echo "Unsupported or unknown OS: $os_id" - exit 1 - ;; -esac - -echo "Successfully installed Docker." -sudo docker --version \ No newline at end of file diff --git a/bootstrap/setup-openldap.sh b/bootstrap/setup-openldap.sh index 35e16ce..7b8e326 100755 --- a/bootstrap/setup-openldap.sh +++ b/bootstrap/setup-openldap.sh @@ -18,14 +18,25 @@ fail() { LDAP_HOSTNAME="${LDAP_HOSTNAME:-openldap}" +# Determine container runtime: prefer podman if installed, allow override via CONTAINER_RUNTIME +if [[ -n "$CONTAINER_RUNTIME" ]]; then + RUNTIME="$CONTAINER_RUNTIME" +elif command -v podman >/dev/null 2>&1; then + RUNTIME="sudo podman" +else + RUNTIME="sudo docker" +fi + +echo "Using container runtime: $RUNTIME" + # Pulling image echo "Pulling image: ${LDAP_DOCKER_NAME}" LDAP_DOCKER_NAME="docker.io/osixia/openldap:${IMAGE_TAG}" -docker pull "${LDAP_DOCKER_NAME}" +${RUNTIME} pull "${LDAP_DOCKER_NAME}" # Run OpenLDAP container echo "Starting OpenLDAP container..." 
-docker run -d \ +${RUNTIME} run -d \ --name openldap \ --hostname "${LDAP_HOSTNAME}" \ -p "${LDAP_PORT}:${LDAP_PORT}" \ @@ -35,21 +46,21 @@ docker run -d \ -e LDAP_ADMIN_PASSWORD="${LDAP_ADMIN_PW}" \ "${LDAP_DOCKER_NAME}" -echo "OpenLDAP server is now running in Docker!" +echo "OpenLDAP server is now running in container!" # Wait for the container to be up and running echo "Waiting for OpenLDAP to start..." sleep 5 # Check container status -status=$(docker ps --filter name=openldap --format "{{.Status}}") +status=$(${RUNTIME} ps --filter name=openldap --format "{{.Status}}") if [[ -n "$status" ]]; then echo "OpenLDAP container is running. Status: $status" else echo "OpenLDAP container is NOT running!" - echo "Check logs with: docker logs openldap" + echo "Check logs with: ${RUNTIME} logs openldap" exit 1 fi # Run ldapadd inside the container -docker exec -i openldap ldapadd -x -w "${LDAP_ADMIN_PW}" -D "cn=admin,dc=${LDAP_DOMAIN//./,dc=}" -f /dev/stdin < "${LDIF_PATH}" \ No newline at end of file +${RUNTIME} exec -i openldap ldapadd -x -w "${LDAP_ADMIN_PW}" -D "cn=admin,dc=${LDAP_DOMAIN//./,dc=}" -f /dev/stdin < "${LDIF_PATH}" diff --git a/enos/modules/backend_servers_setup/main.tf b/enos/modules/backend_servers_setup/main.tf index 29f5c5f..eec62db 100644 --- a/enos/modules/backend_servers_setup/main.tf +++ b/enos/modules/backend_servers_setup/main.tf @@ -15,27 +15,24 @@ locals { org = "example" admin_pw = "adminpassword" tag = var.ldap_tag - port = tostring(var.ldap_port) + port = var.ports.ldap.port + secure_port = var.ports.ldaps.port ip_address = var.hosts[0].public_ip private_ip = var.hosts[0].private_ip } ldif_path = "/tmp/seed.ldif" } -# Step 1: Install Docker -resource "enos_remote_exec" "setup_docker" { - scripts = [abspath("${path.module}/../../../bootstrap/setup-docker.sh")] - - transport = { - ssh = { - host = local.ldap_server.ip_address - } - } +# Step 1: We run install_packages +module "install_packages" { + source = 
"git::https://github.com/hashicorp/vault.git//enos/modules/install_packages" + hosts = var.hosts + packages = var.packages } # Step 2: Copy LDIF file for seeding LDAP resource "enos_file" "seed_ldif" { - depends_on = [enos_remote_exec.setup_docker] + depends_on = [module.install_packages] source = abspath("${path.module}/../../../bootstrap/ldif/seed.ldif") destination = local.ldif_path diff --git a/enos/modules/backend_servers_setup/variables.tf b/enos/modules/backend_servers_setup/variables.tf index 4fe4194..af1c2e3 100644 --- a/enos/modules/backend_servers_setup/variables.tf +++ b/enos/modules/backend_servers_setup/variables.tf @@ -16,8 +16,22 @@ variable "ldap_tag" { default = "1.5.0" } -variable "ldap_port" { - type = number - description = "OpenLDAP Server Port" - default = 389 +variable "ports" { + description = "Port configuration for services" + type = map(object({ + port = string + description = string + })) +} + +variable "packages" { + type = list(string) + description = "A list of packages to install via the target host package manager" + default = [] +} + +variable "vault_repo_ref" { + type = string + description = "The reference to use for the Vault repository" + default = "main" } \ No newline at end of file diff --git a/enos/modules/ec2_bootstrap_tools/scripts/install-shasum.sh b/enos/modules/ec2_bootstrap_tools/scripts/install-shasum.sh deleted file mode 100644 index 19b7396..0000000 --- a/enos/modules/ec2_bootstrap_tools/scripts/install-shasum.sh +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -set -e - -# Function to detect the OS -detect_os() { - if [ -f /etc/os-release ]; then - # shellcheck disable=SC1091 - . 
/etc/os-release - echo "$ID" - else - echo "unknown" - fi -} - -# Function to install shasum or sha1sum -install_shasum() { - OS_ID=$(detect_os) - - case "$OS_ID" in - ubuntu|debian) - sudo apt-get update - sudo apt-get install -y perl - ;; - amzn|amazon) - sudo yum install -y perl-Digest-SHA - ;; - rhel|centos|fedora) - sudo yum install -y perl-Digest-SHA - ;; - alpine) - sudo apk add --no-cache perl - ;; - *) - echo "Unsupported OS: $OS_ID" - exit 1 - ;; - esac - - # Verify installation - if ! command -v shasum >/dev/null 2>&1 && ! command -v sha1sum >/dev/null 2>&1; then - echo "Failed to install shasum or sha1sum" - exit 1 - fi -} - -install_shasum \ No newline at end of file From 8b26e27807ae35845d638343bcff5ecbdc37e1df Mon Sep 17 00:00:00 2001 From: HamzaShili65 Date: Thu, 21 Aug 2025 16:44:41 -0500 Subject: [PATCH 15/26] remove unused make targets --- enos/Makefile | 4 ---- 1 file changed, 4 deletions(-) diff --git a/enos/Makefile b/enos/Makefile index 24c66eb..bff493f 100644 --- a/enos/Makefile +++ b/enos/Makefile @@ -19,10 +19,6 @@ fmt-enos: enos fmt . enos fmt ./k8s -.PHONY: gen-enos -gen-enos: - pushd ../tools/pipeline &> /dev/null && go run ./... 
generate enos-dynamic-config -d ../../enos -f enos-dynamic-config.hcl -e ce -v $(VAULT_VERSION) -n 3 --log info && popd &> /dev/null - .PHONY: check-fmt-modules check-fmt-modules: terraform fmt -check -diff -recursive ./modules From 988f55dbb83eeea1a61b04ea4c71be0c08f68888 Mon Sep 17 00:00:00 2001 From: HamzaShili65 Date: Thu, 21 Aug 2025 16:45:32 -0500 Subject: [PATCH 16/26] keep only amzn and ubuntu distros --- enos/enos-globals.hcl | 37 +++++++++++-------------------------- 1 file changed, 11 insertions(+), 26 deletions(-) diff --git a/enos/enos-globals.hcl b/enos/enos-globals.hcl index 67fa2f5..ac2e0bc 100644 --- a/enos/enos-globals.hcl +++ b/enos/enos-globals.hcl @@ -2,7 +2,7 @@ // SPDX-License-Identifier: MPL-2.0 globals { - archs = ["amd64", "arm64"] + archs = ["amd64"] artifact_sources = ["local", "crt", "artifactory"] ldap_artifact_sources = ["local", "releases", "artifactory"] ldap_config_root_rotation_methods = ["period", "schedule", "manual"] @@ -17,37 +17,20 @@ globals { "ent.hsm.fips1403" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_3", "ent.hsm.fips1403"] } config_modes = ["env", "file"] - distros = ["amzn", "leap", "rhel", "sles", "ubuntu"] + distros = ["amzn", "ubuntu"] // Different distros may require different packages, or use different aliases for the same package distro_packages = { amzn = { - "2" = ["nc"] - "2023" = ["nc"] - } - leap = { - "15.6" = ["netcat", "openssl"] - } - rhel = { - "8.10" = ["nc"] - "9.5" = ["nc"] - } - sles = { - // When installing Vault RPM packages on a SLES AMI, the openssl package provided - // isn't named "openssl, which rpm doesn't know how to handle. Therefore we add the - // "correctly" named one in our package installation before installing Vault. 
- "15.6" = ["netcat-openbsd", "openssl"] + "2" = ["nc", "openldap-clients", "perl-Digest-SHA"] + "2023" = ["nc", "openldap-clients", "perl-Digest-SHA"] } ubuntu = { - "20.04" = ["netcat"] - "22.04" = ["netcat"] - "24.04" = ["netcat-openbsd"] + "22.04" = ["netcat", "ldap-utils", "perl"] + "24.04" = ["netcat-openbsd", "ldap-utils", "perl"] } } distro_version = { amzn = var.distro_version_amzn - leap = var.distro_version_leap - rhel = var.distro_version_rhel - sles = var.distro_version_sles ubuntu = var.distro_version_ubuntu } editions = ["ce", "ent", "ent.fips1403", "ent.hsm", "ent.hsm.fips1403"] @@ -55,9 +38,6 @@ globals { ip_versions = ["4", "6"] package_manager = { "amzn" = "yum" - "leap" = "zypper" - "rhel" = "yum" - "sles" = "zypper" "ubuntu" = "apt" } packages = ["jq"] @@ -74,6 +54,11 @@ globals { port = 389 protocol = "tcp" }, + ldaps : { + description = "LDAPS" + port = 636 + protocol = "tcp" + }, vault_agent : { description = "Vault Agent" port = 8100 From a3dc87c6a92adf61a09e38fc267fd5827c812afe Mon Sep 17 00:00:00 2001 From: HamzaShili65 Date: Thu, 21 Aug 2025 16:46:16 -0500 Subject: [PATCH 17/26] use package_install module --- enos/modules/ec2_bootstrap_tools/main.tf | 29 ++---------------------- 1 file changed, 2 insertions(+), 27 deletions(-) diff --git a/enos/modules/ec2_bootstrap_tools/main.tf b/enos/modules/ec2_bootstrap_tools/main.tf index d2339b3..eb6e6c4 100644 --- a/enos/modules/ec2_bootstrap_tools/main.tf +++ b/enos/modules/ec2_bootstrap_tools/main.tf @@ -9,33 +9,6 @@ terraform { } } -# Install Shasum on EC2 targets -resource "enos_remote_exec" "install-shasum" { - for_each = var.hosts - scripts = [abspath("${path.module}/scripts/install-shasum.sh")] - - transport = { - ssh = { - host = each.value.public_ip - } - } -} - -# Install OpenLDAP clients on EC2 targets -resource "enos_remote_exec" "install-openldap-clients" { - for_each = var.hosts - - inline = [ - "sudo yum install -y openldap-clients" - ] - - transport = { - ssh = { - host = 
each.value.public_ip - } - } -} - # Ensure the Vault plugin directory exists resource "enos_remote_exec" "create_plugin_directory" { for_each = var.hosts @@ -53,6 +26,8 @@ resource "enos_remote_exec" "create_plugin_directory" { } } +#TODO: In the future, we should use the plugin_directory attribute in enos_vault_start resource when supported. + # Add plugin directory to the config file resource "enos_remote_exec" "add_plugin_directory_to_config" { depends_on = [enos_remote_exec.create_plugin_directory] From 4ab2233bc2de0aab864810ff56c3faa306fd9701 Mon Sep 17 00:00:00 2001 From: HamzaShili65 Date: Thu, 21 Aug 2025 16:46:48 -0500 Subject: [PATCH 18/26] integrate tf module changes with scenarios --- enos/enos-scenario-openldap-leader-change.hcl | 43 +++++++++++++------ enos/enos-scenario-openldap-restart.hcl | 43 +++++++++++++------ enos/enos-scenario-openldap-smoke.hcl | 37 ++++++++++++---- 3 files changed, 90 insertions(+), 33 deletions(-) diff --git a/enos/enos-scenario-openldap-leader-change.hcl b/enos/enos-scenario-openldap-leader-change.hcl index 706e361..c9f85e5 100644 --- a/enos/enos-scenario-openldap-leader-change.hcl +++ b/enos/enos-scenario-openldap-leader-change.hcl @@ -20,6 +20,20 @@ scenario "openldap_leader_change" { - vault_build_date* - vault_product_version - vault_revision* + - ldap_revision* + - plugin_name + - plugin_dir_vault + - ldap_bind_pass + - ldap_schema + - ldap_tag + - ldap_base_dn + - ldap_user_role_name + - ldap_username + - ldap_user_old_password + - ldap_dynamic_user_role_name + - ldap_dynamic_role_ldif_templates_path + - ldap_library_set_name + - ldap_service_account_names * If you don't already know what build date and revision you should be using, see https://eng-handbook.hashicorp.services/internal-tools/enos/troubleshooting/#execution-error-expected-vs-got-for-vault-versioneditionrevisionbuild-date. 
@@ -33,6 +47,10 @@ scenario "openldap_leader_change" { - vault_artifact_path (the path to where you have a Vault artifact already downloaded, if using `artifact_source:crt` in your filter) - vault_license_path (if using an ENT edition of Vault) + - ldap_plugin_version (if using `ldap_artifact_source:releases` or `ldap_artifact_source:artifactory` in your filter) + - ldap_artifactory_repo (if using `ldap_artifact_source:artifactory` in your filter) + - ldap_rotation_period (if using `ldap_config_root_rotation_method:period` in your filter) + - ldap_rotation_window (if using `ldap_config_root_rotation_method:schedule` in your filter) EOF matrix { @@ -407,11 +425,11 @@ scenario "openldap_leader_change" { depends_on = [step.create_vpc] providers = { - enos = local.enos_provider[matrix.distro] + enos = local.enos_provider["ubuntu"] } variables { - ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["24.04"] cluster_tag_key = global.ldap_tag_key common_tags = global.tags vpc_id = step.create_vpc.id @@ -425,13 +443,14 @@ scenario "openldap_leader_change" { depends_on = [step.create_ldap_server_target] providers = { - enos = local.enos_provider[matrix.distro] + enos = local.enos_provider["ubuntu"] } variables { - hosts = step.create_ldap_server_target.hosts - ldap_tag = var.ldap_tag - ldap_port = global.ports.ldap.port + hosts = step.create_ldap_server_target.hosts + ldap_tag = var.ldap_tag + packages = concat(global.packages, global.distro_packages["ubuntu"]["24.04"], ["podman", "podman-docker"]) + ports = global.ports } } @@ -532,7 +551,7 @@ scenario "openldap_leader_change" { ldap_host = step.create_ldap_server.ldap_ip_address ldap_port = step.create_ldap_server.ldap_port ldap_base_dn = var.ldap_base_dn - dynamic_role_ldif_templates_path = var.dynamic_role_ldif_templates_path + dynamic_role_ldif_templates_path = var.ldap_dynamic_role_ldif_templates_path 
ldap_dynamic_user_role_name = var.ldap_dynamic_user_role_name } } @@ -557,8 +576,8 @@ scenario "openldap_leader_change" { ldap_host = step.create_ldap_server.ldap_ip_address ldap_port = step.create_ldap_server.ldap_port ldap_base_dn = var.ldap_base_dn - library_set_name = var.library_set_name - service_account_names = var.service_account_names + library_set_name = var.ldap_library_set_name + service_account_names = var.ldap_service_account_names } } @@ -735,7 +754,7 @@ scenario "openldap_leader_change" { ldap_host = step.create_ldap_server.ldap_ip_address ldap_port = step.create_ldap_server.ldap_port ldap_base_dn = var.ldap_base_dn - dynamic_role_ldif_templates_path = var.dynamic_role_ldif_templates_path + dynamic_role_ldif_templates_path = var.ldap_dynamic_role_ldif_templates_path ldap_dynamic_user_role_name = var.ldap_dynamic_user_role_name } } @@ -760,8 +779,8 @@ scenario "openldap_leader_change" { ldap_host = step.create_ldap_server.ldap_ip_address ldap_port = step.create_ldap_server.ldap_port ldap_base_dn = var.ldap_base_dn - library_set_name = var.library_set_name - service_account_names = var.service_account_names + library_set_name = var.ldap_library_set_name + service_account_names = var.ldap_service_account_names } } diff --git a/enos/enos-scenario-openldap-restart.hcl b/enos/enos-scenario-openldap-restart.hcl index 1e4ce45..d6ae191 100644 --- a/enos/enos-scenario-openldap-restart.hcl +++ b/enos/enos-scenario-openldap-restart.hcl @@ -20,6 +20,20 @@ scenario "openldap_restart" { - vault_build_date* - vault_product_version - vault_revision* + - ldap_revision* + - plugin_name + - plugin_dir_vault + - ldap_bind_pass + - ldap_schema + - ldap_tag + - ldap_base_dn + - ldap_user_role_name + - ldap_username + - ldap_user_old_password + - ldap_dynamic_user_role_name + - ldap_dynamic_role_ldif_templates_path + - ldap_library_set_name + - ldap_service_account_names * If you don't already know what build date and revision you should be using, see 
https://eng-handbook.hashicorp.services/internal-tools/enos/troubleshooting/#execution-error-expected-vs-got-for-vault-versioneditionrevisionbuild-date. @@ -33,6 +47,10 @@ scenario "openldap_restart" { - vault_artifact_path (the path to where you have a Vault artifact already downloaded, if using `artifact_source:crt` in your filter) - vault_license_path (if using an ENT edition of Vault) + - ldap_plugin_version (if using `ldap_artifact_source:releases` or `ldap_artifact_source:artifactory` in your filter) + - ldap_artifactory_repo (if using `ldap_artifact_source:artifactory` in your filter) + - ldap_rotation_period (if using `ldap_config_root_rotation_method:period` in your filter) + - ldap_rotation_window (if using `ldap_config_root_rotation_method:schedule` in your filter) EOF matrix { @@ -407,11 +425,11 @@ scenario "openldap_restart" { depends_on = [step.create_vpc] providers = { - enos = local.enos_provider[matrix.distro] + enos = local.enos_provider["ubuntu"] } variables { - ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["24.04"] cluster_tag_key = global.ldap_tag_key common_tags = global.tags vpc_id = step.create_vpc.id @@ -425,13 +443,14 @@ scenario "openldap_restart" { depends_on = [step.create_ldap_server_target] providers = { - enos = local.enos_provider[matrix.distro] + enos = local.enos_provider["ubuntu"] } variables { - hosts = step.create_ldap_server_target.hosts - ldap_tag = var.ldap_tag - ldap_port = global.ports.ldap.port + hosts = step.create_ldap_server_target.hosts + ldap_tag = var.ldap_tag + packages = concat(global.packages, global.distro_packages["ubuntu"]["24.04"], ["podman", "podman-docker"]) + ports = global.ports } } @@ -532,7 +551,7 @@ scenario "openldap_restart" { ldap_host = step.create_ldap_server.ldap_ip_address ldap_port = step.create_ldap_server.ldap_port ldap_base_dn = var.ldap_base_dn - dynamic_role_ldif_templates_path = 
var.dynamic_role_ldif_templates_path + dynamic_role_ldif_templates_path = var.ldap_dynamic_role_ldif_templates_path ldap_dynamic_user_role_name = var.ldap_dynamic_user_role_name } } @@ -557,8 +576,8 @@ scenario "openldap_restart" { ldap_host = step.create_ldap_server.ldap_ip_address ldap_port = step.create_ldap_server.ldap_port ldap_base_dn = var.ldap_base_dn - library_set_name = var.library_set_name - service_account_names = var.service_account_names + library_set_name = var.ldap_library_set_name + service_account_names = var.ldap_service_account_names } } @@ -741,7 +760,7 @@ scenario "openldap_restart" { ldap_host = step.create_ldap_server.ldap_ip_address ldap_port = step.create_ldap_server.ldap_port ldap_base_dn = var.ldap_base_dn - dynamic_role_ldif_templates_path = var.dynamic_role_ldif_templates_path + dynamic_role_ldif_templates_path = var.ldap_dynamic_role_ldif_templates_path ldap_dynamic_user_role_name = var.ldap_dynamic_user_role_name } } @@ -766,8 +785,8 @@ scenario "openldap_restart" { ldap_host = step.create_ldap_server.ldap_ip_address ldap_port = step.create_ldap_server.ldap_port ldap_base_dn = var.ldap_base_dn - library_set_name = var.library_set_name - service_account_names = var.service_account_names + library_set_name = var.ldap_library_set_name + service_account_names = var.ldap_service_account_names } } diff --git a/enos/enos-scenario-openldap-smoke.hcl b/enos/enos-scenario-openldap-smoke.hcl index 92e4d48..e0a7708 100644 --- a/enos/enos-scenario-openldap-smoke.hcl +++ b/enos/enos-scenario-openldap-smoke.hcl @@ -21,6 +21,20 @@ scenario "openldap_smoke" { - vault_build_date* - vault_product_version - vault_revision* + - ldap_revision* + - plugin_name + - plugin_dir_vault + - ldap_bind_pass + - ldap_schema + - ldap_tag + - ldap_base_dn + - ldap_user_role_name + - ldap_username + - ldap_user_old_password + - ldap_dynamic_user_role_name + - ldap_dynamic_role_ldif_templates_path + - ldap_library_set_name + - ldap_service_account_names * If you don't 
already know what build date and revision you should be using, see https://eng-handbook.hashicorp.services/internal-tools/enos/troubleshooting/#execution-error-expected-vs-got-for-vault-versioneditionrevisionbuild-date. @@ -34,6 +48,10 @@ scenario "openldap_smoke" { - vault_artifact_path (the path to where you have a Vault artifact already downloaded, if using `artifact_source:crt` in your filter) - vault_license_path (if using an ENT edition of Vault) + - ldap_plugin_version (if using `ldap_artifact_source:releases` or `ldap_artifact_source:artifactory` in your filter) + - ldap_artifactory_repo (if using `ldap_artifact_source:artifactory` in your filter) + - ldap_rotation_period (if using `ldap_config_root_rotation_method:period` in your filter) + - ldap_rotation_window (if using `ldap_config_root_rotation_method:schedule` in your filter) EOF matrix { @@ -415,11 +433,11 @@ scenario "openldap_smoke" { depends_on = [step.create_vpc] providers = { - enos = local.enos_provider[matrix.distro] + enos = local.enos_provider["ubuntu"] } variables { - ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["24.04"] cluster_tag_key = global.ldap_tag_key common_tags = global.tags vpc_id = step.create_vpc.id @@ -433,13 +451,14 @@ scenario "openldap_smoke" { depends_on = [step.create_ldap_server_target] providers = { - enos = local.enos_provider[matrix.distro] + enos = local.enos_provider["ubuntu"] } variables { - hosts = step.create_ldap_server_target.hosts - ldap_tag = var.ldap_tag - ldap_port = global.ports.ldap.port + hosts = step.create_ldap_server_target.hosts + ldap_tag = var.ldap_tag + packages = concat(global.packages, global.distro_packages["ubuntu"]["24.04"], ["podman", "podman-docker"]) + ports = global.ports } } @@ -560,7 +579,7 @@ scenario "openldap_smoke" { ldap_host = step.create_ldap_server.ldap_ip_address ldap_port = step.create_ldap_server.ldap_port ldap_base_dn = 
var.ldap_base_dn - dynamic_role_ldif_templates_path = var.dynamic_role_ldif_templates_path + dynamic_role_ldif_templates_path = var.ldap_dynamic_role_ldif_templates_path ldap_dynamic_user_role_name = var.ldap_dynamic_user_role_name } } @@ -585,8 +604,8 @@ scenario "openldap_smoke" { ldap_host = step.create_ldap_server.ldap_ip_address ldap_port = step.create_ldap_server.ldap_port ldap_base_dn = var.ldap_base_dn - library_set_name = var.library_set_name - service_account_names = var.service_account_names + library_set_name = var.ldap_library_set_name + service_account_names = var.ldap_service_account_names } } From 3dbd7cb11ab738fee520b360cdb27d95162b647c Mon Sep 17 00:00:00 2001 From: HamzaShili65 Date: Thu, 21 Aug 2025 16:53:05 -0500 Subject: [PATCH 19/26] fmt --- .gitignore | 1 - enos/enos.vars.hcl | 160 --------------------------------------------- 2 files changed, 161 deletions(-) delete mode 100644 enos/enos.vars.hcl diff --git a/.gitignore b/.gitignore index 91331c3..5821379 100644 --- a/.gitignore +++ b/.gitignore @@ -84,4 +84,3 @@ scripts/custom.sh # enos /enos/.enos/* /enos/enos.vars.hcl - diff --git a/enos/enos.vars.hcl b/enos/enos.vars.hcl deleted file mode 100644 index 8016ce3..0000000 --- a/enos/enos.vars.hcl +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -// artifactory_token is the token to use when authenticating to artifactory. -// artifactory_token = "yourtoken" - -// artifactory_host is the artifactory host to search for vault artifacts. -// artifactory_host = "https://artifactory.hashicorp.engineering/artifactory" - -// artifactory_repo is the artifactory repo to search for vault artifacts. 
-// artifactory_repo = "hashicorp-crt-stable-local*" - -// aws_region is the AWS region where we'll create infrastructure -// for the smoke scenario -// aws_region = "us-east-1" - -// aws_ssh_keypair_name is the AWS keypair to use for SSH -// aws_ssh_keypair_name = "enos-ci-ssh-key" - -// aws_ssh_private_key_path is the path to the AWS keypair private key -// aws_ssh_private_key_path = "./support/private_key.pem" - -// backend_log_level is the server log level for the backend. Supported values include 'trace', -// 'debug', 'info', 'warn', 'error'" -// backend_log_level = "trace" - -// backend_instance_type is the instance type to use for the Vault backend. Must support arm64 -// backend_instance_type = "t4g.small" - -// project_name is the description of the project. It will often be used to tag infrastructure -// resources. -// project_name = "vault-enos-integration" - -// distro_version_amzn is the version of Amazon Linux 2 to use for "distro:amzn" variants -// distro_version_amzn = "2" - -// distro_version_leap is the version of openSUSE Leap to use for "distro:leap" variants -// distro_version_leap = "15.5" - -// distro_version_rhel is the version of RHEL to use for "distro:rhel" variants. -// distro_version_rhel = "9.3" // or "8.9" - -// distro_version_sles is the version of SUSE SLES to use for "distro:sles" variants. -// distro_version_sles = "v15_sp5_standard" - -// distro_version_ubuntu is the version of ubuntu to use for "distro:ubuntu" variants -// distro_version_ubuntu = "22.04" // or "20.04" - -// ldap_artifact_path is the path to the LDAP plugin artifact (zip file) to be installed. -// ldap_artifact_path = "~/go/vault-plugins/vault-plugin-secrets-openldap.zip" - -// ldap_artifactory_repo is the Artifactory repository where the LDAP plugin artifact is stored. -// ldap_artifactory_repo = "hashicorp-vault-ecosystem-staging-local" - -// ldap_bind_dn is the distinguished name used to bind to the LDAP server. 
-// ldap_bind_dn = "cn=admin,dc=example,dc=com" - -// ldap_bind_pass is the password for the LDAP bind distinguished name. -// ldap_bind_pass = "adminpassword" - -// ldap_plugin_version is the version of the LDAP plugin being used. -// ldap_plugin_version = "0.15.0" - -// ldap_revision is the git SHA of the LDAP plugin artifact being tested. -// ldap_revision = "2ee1253cb5ff67196d0e4747e8aedd1c4903625f" - -// ldap_schema specifies the LDAP schema to use (e.g., openldap). -// ldap_schema = "openldap" - -// ldap_tag is the tag or version identifier for the LDAP plugin build. -// ldap_tag = "1.5.0" - -// ldap_user_dn is the base distinguished name under which user entries are located in LDAP. -// ldap_user_dn = "ou=users,dc=example,dc=com" - -// makefile_dir is the directory containing the Makefile for building the plugin. -// makefile_dir = "~/hashicorp/plugins/vault-plugin-secrets-openldap/" - -// plugin_dest_dir is the local directory where the plugin artifact will be stored. -// plugin_dest_dir = "~/go/vault-plugins" - -// plugin_dir_vault is the directory on the Vault server where plugins are installed. -// plugin_dir_vault = "/etc/vault/plugins" - -// plugin_mount_path is the mount path in Vault where the plugin will be enabled. -// plugin_mount_path = "local-secrets-ldap" - -// plugin_name is the name of the Vault plugin to be used for LDAP secrets. -// plugin_name = "vault-plugin-secrets-openldap" - -// plugin_source_type specifies the source type for the plugin (e.g., local_build, artifactory). -// plugin_source_type = "local_build" - -// tags are a map of tags that will be applied to infrastructure resources that -// support tagging. -// tags = { "Project Name" : "Vault", "Something Cool" : "Value" } - -// terraform_plugin_cache_dir is the directory to cache Terraform modules and providers. -// It must exist. 
-// terraform_plugin_cache_dir = "/Users//.terraform/plugin-cache-dir - -// ui_test_filter is the test filter to limit the ui tests to execute for the ui scenario. It will -// be appended to the ember test command as '-f=\"\"'. -// ui_test_filter = "sometest" - -// ui_run_tests sets whether to run the UI tests or not for the ui scenario. If set to false a -// cluster will be created but no tests will be run. -// ui_run_tests = true - -// vault_artifact_path is the path to CRT generated or local vault.zip bundle. When -// using the "builder:local" variant a bundle will be built from the current branch. -// In CI it will use the output of the build workflow. -// vault_artifact_path = "./dist/vault.zip" - -// vault_artifact_type is the type of Vault artifact to use when installing Vault from artifactory. -// It should be 'package' for .deb or # .rpm package and 'bundle' for .zip bundles" -// vault_artifact_type = "bundle" - -// vault_build_date is the build date for Vault artifact. Some validations will require the binary build -// date to match" -// vault_build_date = "2023-07-07T14:06:37Z" // make ci-get-date for example - -// vault_enable_audit_devices sets whether or not to enable every audit device. It true -// a file audit device will be enabled at the path /var/log/vault_audit.log, the syslog -// audit device will be enabled, and a socket audit device connecting to 127.0.0.1:9090 -// will be enabled. The netcat program is run in listening mode to provide an endpoint -// that the socket audit device can connect to. -// vault_enable_audit_devices = true - -// vault_install_dir is the directory where the vault binary will be installed on -// the remote machines. -// vault_install_dir = "/opt/vault/bin" - -// vault_local_binary_path is the path of the local binary that we're upgrading to. 
-// vault_local_binary_path = "./support/vault" - -// vault_instance_type is the instance type to use for the Vault backend -// vault_instance_type = "t3.small" - -// vault_instance_count is how many instances to create for the Vault cluster. -// vault_instance_count = 3 - -// vault_license_path is the path to a valid Vault enterprise edition license. -// This is only required for non-ce editions" -// vault_license_path = "./support/vault.hclic" - -// vault_local_build_tags override the build tags we pass to the Go compiler for builder:local variants. -// vault_local_build_tags = ["ui", "ent"] - -// vault_log_level is the server log level for Vault logs. Supported values (in order of detail) are -// trace, debug, info, warn, and err." -// vault_log_level = "trace" - -// vault_product_version is the version of Vault we are testing. Some validations will expect the vault -// binary and cluster to report this version. -// vault_product_version = "1.15.0" - -// vault_revision is the git sha of Vault artifact we are testing. Some validations will expect the vault -// binary and cluster to report this revision. -// vault_revision = "df733361af26f8bb29b63704168bbc5ab8d083de" From 7b179adca58b71de63b62dfe59ea9a6f7739c4a9 Mon Sep 17 00:00:00 2001 From: HamzaShili65 Date: Fri, 22 Aug 2025 13:46:25 -0500 Subject: [PATCH 20/26] modify readme for openldap scenarios --- enos/README.md | 241 ++++++++++++------------------------------------- 1 file changed, 58 insertions(+), 183 deletions(-) diff --git a/enos/README.md b/enos/README.md index 06d14c8..83608b5 100644 --- a/enos/README.md +++ b/enos/README.md @@ -1,71 +1,57 @@ # Enos -Enos is an quality testing framework that allows composing and executing quality -requirement scenarios as code. For Vault, it is currently used to perform -infrastructure integration testing using the artifacts that are created as part -of the `build` workflow. 
While intended to be executed via Github Actions using -the results of the `build` workflow, scenarios are also executable from a developer -machine that has the requisite dependencies and configuration. +Enos is a quality testing framework that allows composing and executing quality +requirement scenarios as code. For the OpenLDAP secrets engine Vault plugin, +scenarios are currently executable from a developer machine that has the requisite dependencies +and configuration. Future plans include executing scenarios via Github Actions. Refer to the [Enos documentation](https://github.com/hashicorp/Enos-Docs) for further information regarding installation, execution or composing Enos scenarios. -## When to use Enos -Determining whether to use `vault.NewTestCluster()` or Enos for testing a feature -or scenario is ultimately up to the author. Sometimes one, the other, or both -might be appropriate depending on the requirements. Generally, `vault.NewTestCluster()` -is going to give you faster feedback and execution time, whereas Enos is going -to give you a real-world execution and validation of the requirement. Consider -the following cases as examples of when one might opt for an Enos scenario: - -- The feature require third-party integrations. Whether that be networked - dependencies like a real Consul backend, a real KMS key to test awskms - auto-unseal, auto-join discovery using AWS tags, or Cloud hardware KMS's. -- The feature might behave differently under multiple configuration variants - and therefore should be tested with both combinations, e.g. auto-unseal and - manual shamir unseal or replication in HA mode with integrated storage or - Consul storage. -- The scenario requires coordination between multiple targets. For example, - consider the complex lifecycle event of migrating the seal type or storage, - or manually triggering a raft disaster scenario by partitioning the network - between the leader and follower nodes. 
Or perhaps an auto-pilot upgrade between - a stable version of Vault and our candidate version. -- The scenario has specific deployment strategy requirements. For example, - if we want to add a regression test for an issue that only arises when the - software is deployed in a certain manner. -- The scenario needs to use actual build artifacts that will be promoted - through the pipeline. - ## Requirements - AWS access. HashiCorp Vault developers should use Doormat. - Terraform >= 1.7 -- Enos >= v0.0.28. You can [download a release](https://github.com/hashicorp/enos/releases/) or +- Enos >= v0.4.0. You can [download a release](https://github.com/hashicorp/enos/releases/) or install it with Homebrew: ```shell brew tap hashicorp/tap && brew update && brew install hashicorp/tap/enos ``` - An SSH keypair in the AWS region you wish to run the scenario. You can use Doormat to log in to the AWS console to create or upload an existing keypair. -- A Vault artifact is downloaded from the GHA artifacts when using the `artifact_source:crt` variants, from Artifactory when using `artifact_source:artifactory`, and is built locally from the current branch when using `artifact_source:local` variant. +- A Vault artifact is downloaded from the GHA artifacts when using the `artifact_source:crt` variants or from Artifactory when using `artifact_source:artifactory`. +- An OpenLDAP plugin artifact is downloaded from releases when using the `ldap_artifact_source:releases`, from Artifactory when using `ldap_artifact_source:artifactory`, and is built locally from the current branch when using `ldap_artifact_source:local` variant. ## Scenario Variables -In CI, each scenario is executed via Github Actions and has been configured using -environment variable inputs that follow the `ENOS_VAR_varname` pattern. - For local execution you can specify all the required variables using environment variables, or you can update `enos.vars.hcl` with values and uncomment the lines. 
-Variables that are required: +Variables that are required include: * `aws_ssh_keypair_name` * `aws_ssh_private_key_path` * `vault_bundle_path` * `vault_license_path` (only required for non-OSS editions) - -See [enos.vars.hcl](./enos.vars.hcl) or [enos-variables.hcl](./enos-variables.hcl) +* `plugin_name` +* `plugin_dir_vault` +* `ldap_bind_pass` +* `ldap_schema` +* `ldap_tag` +* `ldap_base_dn` +* `ldap_user_role_name` +* `ldap_username` +* `ldap_user_old_password` +* `ldap_dynamic_user_role_name` +* `ldap_dynamic_role_ldif_templates_path` +* `ldap_library_set_name` +* `ldap_service_account_names` + +See [enos.vars.hcl](template_enos.vars.hcl) or [enos-variables.hcl](./enos-variables.hcl) for further descriptions of the variables. Additional variable information can also be found in the [Scenario Outlines](#scenario_outlines) +**[Future Work]** In CI, each scenario should be executed via Github Actions and should be configured using +environment variable inputs that follow the `ENOS_VAR_varname` pattern. + ## Scenario Outlines Enos is capable of producing an outline of each scenario that is defined in a given directory. These scenarios often include a description of what behavior the scenario performs, which variants are @@ -76,7 +62,7 @@ You can generate outlines of all scenarios or specify one via it's name. From the `enos` directory: ```bash -enos scenario outline smoke +enos scenario outline openldap_smoke ``` There are also HTML versions available for an improved reading experience: @@ -91,169 +77,58 @@ From the `enos` directory: ```bash # List all available scenarios enos scenario list -# Run the smoke or upgrade scenario with an artifact that is built locally. Make sure -# the local machine has been configured as detailed in the requirements -# section. This will execute the scenario and clean up any resources if successful. 
-enos scenario run smoke artifact_source:local -enos scenario run upgrade artifact_source:local -# To run the same scenario variants that are run in CI, refer to the scenarios listed -# in json files under .github/enos-run-matrices directory, -# adding `artifact_source:local` to run locally. -enos scenario run smoke backend:consul consul_version:1.12.3 distro:ubuntu seal:awskms artifact_source:local arch:amd64 edition:oss +# Run the smoke or restart scenario with a Vault artifact from Artifactory and an +# openLDAP secrets engine plugin artifact that is built locally. +# Make sure the local machine has been configured as detailed in the requirements section. +# This will execute the scenario and clean up any resources if successful. +enos scenario run openldap_smoke artifact_source:artifactory ldap_artifact_source:local +enos scenario run openldap_restart artifact_source:artifactory ldap_artifact_source:local +# To run a specific variant of a scenario, you can specify the variant values. +enos scenario run openldap_smoke arch:amd64 artifact_source:artifactory artifact_type:package config_mode:env \ + distro:amzn edition:ent ip_version:4 seal:shamir ldap_artifact_source:local ldap_config_root_rotation_method:manual # Launch an individual scenario but leave infrastructure up after execution -enos scenario launch smoke artifact_source:local +enos scenario launch openldap_smoke artifact_source:artifactory ldap_artifact_source:local # Check an individual scenario for validity. This is useful during scenario # authoring and debugging. -enos scenario validate smoke artifact_source:local +enos scenario validate openldap_smoke artifact_source:artifactory ldap_artifact_source:local # If you've run the tests and desire to see the outputs, such as the URL or # credentials, you can run the output command to see them. Please note that # after "run" or destroy there will be no "outputs" as the infrastructure # will have been destroyed and state cleared. 
-enos scenario output smoke artifact_source:local +enos scenario output openldap_smoke artifact_source:artifactory ldap_artifact_source:local # Explicitly destroy all existing infrastructure -enos scenario destroy smoke artifact_source:local +enos scenario destroy openldap_smoke artifact_source:artifactory ldap_artifact_source:local ``` Refer to the [Enos documentation](https://github.com/hashicorp/Enos-Docs) for further information regarding installation, execution or composing scenarios. -## UI Tests -The [`ui` scenario](./enos-scenario-ui.hcl) creates a Vault cluster (deployed to AWS) using a version -built from the current checkout of the project. Once the cluster is available the UI acceptance tests -are run in a headless browser. -### Variables -In addition to the required variables that must be set, as described in the [Scenario Variables](#Scenario Variables), -the `ui` scenario has two optional variables: - -**ui_test_filter** - An optional test filter to limit the tests that are run, i.e. `'!enterprise'`. -To set a filter export the variable as follows: -```shell -> export ENOS_VAR_ui_test_filter="some filter" -``` -**ui_run_tests** - An optional boolean variable to run or not run the tests. The default value is true. -Setting this value to false is useful in the case where you want to create a cluster, but run the tests -manually. The section [Running the Tests](#Running the Tests) describes the different ways to run the -'UI' acceptance tests. - -### Running the Tests -The UI tests can be run fully automated or manually. 
-#### Fully Automated -The following will deploy the cluster, run the tests, and subsequently tear down the cluster: -```shell -> export ENOS_VAR_ui_test_filter="some filter" # <-- optional -> cd enos -> enos scenario ui run edition:oss -``` -#### Manually -The UI tests can be run manually as follows: -```shell -> export ENOS_VAR_ui_test_filter="some filter" # <-- optional -> export ENOS_VAR_ui_run_tests=false -> cd enos -> enos scenario ui launch edition:oss -# once complete the scenario will output a set of environment variables that must be exported. The -# output will look as follows: -export TEST_FILTER='some filter>' \ -export VAULT_ADDR='http://:8200' \ -export VAULT_TOKEN='' \ -export VAULT_UNSEAL_KEYS='["","",""]' -# copy and paste the above into the terminal to export the values -> cd ../ui -> yarn test:enos # run headless -# or -> yarn test:enos -s # run manually in a web browser -# once testing is complete -> cd ../enos -> enos scenario ui destroy edition:oss -``` - # Variants -Both scenarios support a matrix of variants. In order to achieve broad coverage while -keeping test run time reasonable, the variants executed by the `enos-run` Github -Actions are tailored to maximize variant distribution per scenario. +Both scenarios support a matrix of variants. -## `artifact_source:crt` -This variant is designed for use in Github Actions. The `enos-run.yml` workflow -downloads the artifact built by the `build.yml` workflow, unzips it, and sets the -`vault_bundle_path` to the zip file and the `vault_local_binary_path` to the binary. +## `ldap_artifact_source:local` +This variant is for running the Enos scenario locally. It builds the plugin binary +from the current branch, placing the binary at the `ldap_artifact_path`. -## `artifact_source:local` -This variant is for running the Enos scenario locally. 
It builds the Vault bundle -from the current branch, placing the bundle at the `vault_bundle_path` and the -unzipped Vault binary at the `vault_local_binary_path`. +## `ldap_artifact_source:releases` +This variant is for running the Enos scenario to test an artifact from HashiCorp releases. It requires the following Enos variables to be set: +* `ldap_plugin_version` +* `ldap_revision` -## `artifact_source:artifactory` +## `ldap_artifact_source:artifactory` This variant is for running the Enos scenario to test an artifact from Artifactory. It requires following Enos variables to be set: * `artifactory_username` * `artifactory_token` * `aws_ssh_keypair_name` * `aws_ssh_private_key_path` -* `vault_product_version` -* `vault_revision` - -# CI Bootstrap -In order to execute any of the scenarios in this repository, it is first necessary to bootstrap the -CI AWS account with the required permissions, service quotas and supporting AWS resources. There are -two Terraform modules which are used for this purpose, [service-user-iam](./ci/service-user-iam) for -the account permissions, and service quotas and [bootstrap](./ci/bootstrap) for the supporting resources. - -**Supported Regions** - enos scenarios are supported in the following regions: -`"us-east-1", "us-east-2", "us-west-1", "us-west-2"` - -## Bootstrap Process -These steps should be followed to bootstrap this repo for enos scenario execution: - -### Set up CI service user IAM role and Service Quotas -The service user that is used when executing enos scenarios from any GitHub Action workflow must have -a properly configured IAM role granting the access required to create resources in AWS. Additionally, -service quotas need to be adjusted to ensure that normal use of the ci account does not cause any -service quotas to be exceeded. The [service-user-iam](./ci/service-user-iam) module contains the IAM -Policy and Role for that grants this access as well as the service quota increase requests to adjust -the service quotas. 
This module should be updated whenever a new AWS resource type is required for a -scenario or a service quota limit needs to be increased. Since this is persistent and cannot be created -and destroyed each time a scenario is run, the Terraform state will be managed by Terraform Cloud. -Here are the steps to configure the GitHub Actions service user: - -#### Pre-requisites -- Full access to the CI AWS account is required. - -**Notes:** -- For help with access to Terraform Cloud and the CI Account, contact the QT team on Slack (#team-quality) - for an invite. After receiving an invite to Terraform Cloud, a personal access token can be created - by clicking `User Settings` --> `Tokens` --> `Create an API token`. -- Access to the AWS account can be done via Doormat, at: https://doormat.hashicorp.services/. - - For the vault repo the account is: `vault_ci` and for the vault-enterprise repo, the account is: - `vault-enterprise_ci`. - - Access can be requested by clicking: `Cloud Access` --> `AWS` --> `Request Account Access`. +* `ldap_plugin_version` +* `ldap_revision` +* `ldap_artifactory_repo` -1. **Create the Terraform Cloud Workspace** - The name of the workspace to be created depends on the - repository for which it is being created, but the pattern is: `-ci-enos-service-user-iam`, - e.g. `vault-ci-enos-service-user-iam`. It is important that the execution mode for the workspace be set - to `local`. For help on setting up the workspace, contact the QT team on Slack (#team-quality) +Refer to the **Variants** section in the [Vault README on GitHub](https://github.com/hashicorp/vault/blob/main/README.md). +for further information regarding Vault's `artifact_source` matrix variants.
+Note: `artifact_source:local` isn't supported in this project since we never build Vault locally. - -2. **Execute the Terraform module** -```shell -> cd ./enos/ci/service-user-iam -> export TF_WORKSPACE=-ci-enos-service-user-iam -> export TF_TOKEN_app_terraform_io= -> export TF_VAR_repository= -> terraform init -> terraform plan -> terraform apply -auto-approve -``` - -### Bootstrap the CI resources -Bootstrapping of the resources in the CI account is accomplished via the GitHub Actions workflow: -[enos-bootstrap-ci](../.github/workflows/enos-bootstrap-ci.yml). Before this workflow can be run a -workspace must be created as follows: - -1. **Create the Terraform Cloud Workspace** - The name workspace to be created depends on the repository - for which it is being created, but the pattern is: `-ci-bootstrap`, e.g. - `vault-ci-bootstrap`. It is important that the execution mode for the workspace be set to - `local`. For help on setting up the workspace, contact the QT team on Slack (#team-quality). - -Once the workspace has been created, changes to the bootstrap module will automatically be applied via -the GitHub PR workflow. Each time a PR is created for changes to files within that module the module -will be planned via the workflow described above. If the plan is ok and the PR is merged, the module -will automatically be applied via the same workflow. +**[Future Work]** In order to achieve broad coverage while +keeping test run time reasonable, the variants executed by the `enos-run` Github +Actions (CI) should use `enos scenario sample` to maximize variant distribution per scenario. 
\ No newline at end of file From 049e240253c443007eff495d53fa226f4446ee61 Mon Sep 17 00:00:00 2001 From: HamzaShili65 Date: Fri, 22 Aug 2025 13:47:07 -0500 Subject: [PATCH 21/26] remove value for env var --- .../root_rotation_manual/scripts/test-root-rotation-manual.sh | 2 -- 1 file changed, 2 deletions(-) diff --git a/enos/modules/root_rotation_manual/scripts/test-root-rotation-manual.sh b/enos/modules/root_rotation_manual/scripts/test-root-rotation-manual.sh index e1c30e3..5eea376 100755 --- a/enos/modules/root_rotation_manual/scripts/test-root-rotation-manual.sh +++ b/enos/modules/root_rotation_manual/scripts/test-root-rotation-manual.sh @@ -8,8 +8,6 @@ fail() { exit 1 } -PLUGIN_PATH=local-secrets-ldap - # Required env vars: PLUGIN_PATH if [[ -z "${PLUGIN_PATH:-}" ]]; then fail "PLUGIN_PATH env variable has not been set" From c943b95150ba933b6b7b359349e9b59926d57686 Mon Sep 17 00:00:00 2001 From: HamzaShili65 Date: Fri, 22 Aug 2025 13:47:30 -0500 Subject: [PATCH 22/26] upgrade terraform version to 1.7.0 --- enos/enos-terraform.hcl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/enos/enos-terraform.hcl b/enos/enos-terraform.hcl index b1f1fc0..085c1de 100644 --- a/enos/enos-terraform.hcl +++ b/enos/enos-terraform.hcl @@ -17,7 +17,7 @@ terraform_cli "dev" { } terraform "default" { - required_version = ">= 1.2.0" + required_version = ">= 1.7.0" required_providers { aws = { From 6eb6f26963ce7899ca1b6f935ba85f4f0fbc93cb Mon Sep 17 00:00:00 2001 From: HamzaShili65 Date: Fri, 22 Aug 2025 13:48:33 -0500 Subject: [PATCH 23/26] remove unsued distros --- enos/enos-scenario-openldap-leader-change.hcl | 8 -------- enos/enos-scenario-openldap-restart.hcl | 8 -------- enos/enos-scenario-openldap-smoke.hcl | 10 +--------- 3 files changed, 1 insertion(+), 25 deletions(-) diff --git a/enos/enos-scenario-openldap-leader-change.hcl b/enos/enos-scenario-openldap-leader-change.hcl index c9f85e5..320f7bd 100644 --- a/enos/enos-scenario-openldap-leader-change.hcl +++ 
b/enos/enos-scenario-openldap-leader-change.hcl @@ -78,11 +78,6 @@ scenario "openldap_leader_change" { edition = [for e in matrix.edition : e if !strcontains(e, "hsm")] } - // softhsm packages not available for leap/sles. - exclude { - seal = ["pkcs11"] - distro = ["leap", "sles"] - } } terraform_cli = terraform_cli.default @@ -98,9 +93,6 @@ scenario "openldap_leader_change" { ldap_artifact_path = matrix.ldap_artifact_source != "artifactory" ? abspath(var.ldap_artifact_path) : null enos_provider = { amzn = provider.enos.ec2_user - leap = provider.enos.ec2_user - rhel = provider.enos.ec2_user - sles = provider.enos.ec2_user ubuntu = provider.enos.ubuntu } manage_service = matrix.artifact_type == "bundle" diff --git a/enos/enos-scenario-openldap-restart.hcl b/enos/enos-scenario-openldap-restart.hcl index d6ae191..ddc8733 100644 --- a/enos/enos-scenario-openldap-restart.hcl +++ b/enos/enos-scenario-openldap-restart.hcl @@ -78,11 +78,6 @@ scenario "openldap_restart" { edition = [for e in matrix.edition : e if !strcontains(e, "hsm")] } - // softhsm packages not available for leap/sles. - exclude { - seal = ["pkcs11"] - distro = ["leap", "sles"] - } } terraform_cli = terraform_cli.default @@ -98,9 +93,6 @@ scenario "openldap_restart" { ldap_artifact_path = matrix.ldap_artifact_source != "artifactory" ? abspath(var.ldap_artifact_path) : null enos_provider = { amzn = provider.enos.ec2_user - leap = provider.enos.ec2_user - rhel = provider.enos.ec2_user - sles = provider.enos.ec2_user ubuntu = provider.enos.ubuntu } manage_service = matrix.artifact_type == "bundle" diff --git a/enos/enos-scenario-openldap-smoke.hcl b/enos/enos-scenario-openldap-smoke.hcl index e0a7708..c630249 100644 --- a/enos/enos-scenario-openldap-smoke.hcl +++ b/enos/enos-scenario-openldap-smoke.hcl @@ -80,17 +80,12 @@ scenario "openldap_smoke" { edition = [for e in matrix.edition : e if !strcontains(e, "hsm")] } - // softhsm packages not available for leap/sles. 
- exclude { - seal = ["pkcs11"] - distro = ["leap", "sles"] - } - // rotation manager capabilities not supported in Vault community edition exclude { edition = ["ce"] ldap_config_root_rotation_method = ["period", "schedule"] } + } terraform_cli = terraform_cli.default @@ -106,9 +101,6 @@ scenario "openldap_smoke" { ldap_artifact_path = matrix.ldap_artifact_source != "artifactory" ? abspath(var.ldap_artifact_path) : null enos_provider = { amzn = provider.enos.ec2_user - leap = provider.enos.ec2_user - rhel = provider.enos.ec2_user - sles = provider.enos.ec2_user ubuntu = provider.enos.ubuntu } manage_service = matrix.artifact_type == "bundle" From 30fa2aed6b92db087edf85f0a171d0d44ecac589 Mon Sep 17 00:00:00 2001 From: HamzaShili65 Date: Fri, 22 Aug 2025 13:48:52 -0500 Subject: [PATCH 24/26] add necessary env variables --- Makefile | 43 ++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 40 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index b480a49..c9d51d0 100644 --- a/Makefile +++ b/Makefile @@ -17,6 +17,7 @@ LDAP_DOMAIN ?= example.com LDAP_ORG ?= example LDAP_ADMIN_PW ?= adminpassword IMAGE_TAG ?= 1.3.0 +LDAP_HOST ?= 127.0.0.1 LDAP_PORT ?= 389 LDIF_PATH ?= $(PWD)/bootstrap/ldif/seed.ldif @@ -30,6 +31,17 @@ LDAP_BIND_PASS ?= adminpassword LDAP_USER_DN ?= ou=users,dc=example,dc=com LDAP_SCHEMA ?= openldap +#plugin endpoints tests +ROTATION_PERIOD ?= 10 +ROTATION_WINDOW ?= 3600 +LDAP_DN ?= uid=mary.smith,ou=users,dc=example,dc=com +LDAP_USERNAME ?= mary.smith +LDAP_OLD_PASSWORD ?= defaultpassword +LDIF_PATH ?= $(PWD)/enos/modules/dynamic_role_crud_api/ldif +LDAP_BASE_DN ?= dc=example,dc=com +LIBRARY_SET_NAME ?= staticuser bob.johnson mary.smith +SERVICE_ACCOUNT_NAMES ?= dev-team + export LDAP_DOMAIN export LDAP_ORG export LDAP_ADMIN_PW @@ -47,6 +59,16 @@ export LDAP_BIND_PASS export LDAP_USER_DN export LDAP_SCHEMA export LDIF_PATH +export LDAP_HOST +export ROTATION_PERIOD +export ROTATION_WINDOW +export LDAP_DN +export LDAP_USERNAME +export 
LDAP_OLD_PASSWORD +export LDIF_PATH +export LDAP_BASE_DN +export LIBRARY_SET_NAME +export SERVICE_ACCOUNT_NAMES .PHONY: default default: dev @@ -88,7 +110,6 @@ fmt: .PHONY: setup-env setup-env: - cd bootstrap && ./setup-docker.sh cd bootstrap && ./setup-openldap.sh .PHONY: plugin-build @@ -115,13 +136,29 @@ configure: plugin-build plugin-register plugin-enable plugin-configure teardown-env: cd bootstrap && ./teardown-env.sh +.PHONY: manual-root-rotation-test +manual-root-rotation-test: + cd enos/modules/root_rotation_manual && ./scripts/test-root-rotation-manual.sh + +.PHONY: periodic-root-rotation-test +periodic-root-rotation-test: + cd enos/modules/root_rotation_period && ./scripts/test-root-rotation-period.sh + +.PHONY: scheduled-root-rotation-test +scheduled-root-rotation-test: + cd enos/modules/root_rotation_schedule && ./scripts/test-root-rotation-schedule.sh + .PHONY: static-role-test static-role-test: - cd enos/modules/static_role_crud_api && ./scripts/static-role.sh + ROLE_NAME=mary cd enos/modules/static_role_crud_api && ./scripts/static-role.sh .PHONY: dynamic-role-test dynamic-role-test: - cd enos/modules/dynamic_role_crud_api && ./scripts/dynamic-role.sh + ROLE_NAME=adam cd enos/modules/dynamic_role_crud_api && ./scripts/dynamic-role.sh + +.PHONY: library-test +library-test: + cd enos/modules/library_crud_api && ./scripts/library.sh .PHONY: teardown-env teardown-env: From 204c23ec4f590e981415ea3d4ad3ab08c2bb26aa Mon Sep 17 00:00:00 2001 From: HamzaShili65 Date: Fri, 22 Aug 2025 13:51:34 -0500 Subject: [PATCH 25/26] add template file for enos vars --- enos/template_enos.vars.hcl | 176 ++++++++++++++++++++++++++++++++++++ 1 file changed, 176 insertions(+) create mode 100644 enos/template_enos.vars.hcl diff --git a/enos/template_enos.vars.hcl b/enos/template_enos.vars.hcl new file mode 100644 index 0000000..8d639bd --- /dev/null +++ b/enos/template_enos.vars.hcl @@ -0,0 +1,176 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ========================= IMPORTANT ================================= +// COPY this file to an `enos*.vars.hcl` and fill in the required values. +// ===================================================================== + +// artifactory_token is the token to use when authenticating to artifactory. +// artifactory_token = "yourtoken" + +// artifactory_host is the artifactory host to search for vault artifacts. +// artifactory_host = "https://artifactory.hashicorp.engineering/artifactory" + +// artifactory_repo is the artifactory repo to search for vault artifacts. +// artifactory_repo = "hashicorp-crt-stable-local*" + +// aws_region is the AWS region where we'll create infrastructure +// for the smoke scenario +// aws_region = "us-east-1" + +// aws_ssh_keypair_name is the AWS keypair to use for SSH +// aws_ssh_keypair_name = "enos-ci-ssh-key" + +// aws_ssh_private_key_path is the path to the AWS keypair private key +// aws_ssh_private_key_path = "./support/private_key.pem" + +// backend_log_level is the server log level for the backend. Supported values include 'trace', +// 'debug', 'info', 'warn', 'error'" +// backend_log_level = "trace" + +// backend_instance_type is the instance type to use for the Vault backend. Must support arm64 +// backend_instance_type = "t4g.small" + +// project_name is the description of the project. It will often be used to tag infrastructure +// resources. +// project_name = "vault-openldap-se-enos-integration" + +// distro_version_amzn is the version of Amazon Linux 2 to use for "distro:amzn" variants +// distro_version_amzn = "2" + +// distro_version_ubuntu is the version of ubuntu to use for "distro:ubuntu" variants +// distro_version_ubuntu = "22.04" // or "24.04" + +// ldap_artifact_path is the path to the LDAP plugin artifact (zip file) to be installed. 
+// ldap_artifact_path = "~/go/vault-plugins/vault-plugin-secrets-openldap.zip" + +// ldap_artifactory_repo is the Artifactory repository where the LDAP plugin artifact is stored. +// ldap_artifactory_repo = "hashicorp-vault-ecosystem-staging-local" + +// ldap_base_dn is the base distinguished name for the LDAP directory. +// ldap_base_dn = "dc=example,dc=com" + +// ldap_bind_pass is the password for the LDAP bind distinguished name. +// ldap_bind_pass = "adminpassword" + +// ldap_dynamic_role_ldif_templates_path is the path to the LDIF templates for dynamic roles. +// ldap_dynamic_role_ldif_templates_path = "/tmp" + +// ldap_dynamic_user_role_name is the name of the dynamic role for LDAP users. +// ldap_dynamic_user_role_name = "adam" + +// ldap_library_set_name is the name of the library set to use for the LDAP plugin. +// ldap_library_set_name = "dev-team" + +// ldap_plugin_version is the version of the LDAP plugin being used. +// ldap_plugin_version = "0.15.0" + +// ldap_revision is the git SHA of the LDAP plugin artifact being tested. +// ldap_revision = "2ee1253cb5ff67196d0e4747e8aedd1c4903625f" + +// ldap_rotation_period is the period after which the LDAP root creds will be rotated. +// ldap_rotation_period = "10" // (in seconds) + +// ldap_rotation_window is the time window during which the LDAP root creds can be rotated. +// ldap_rotation_window = "3600" // (in seconds) + +// ldap_schema specifies the LDAP schema to use (e.g., openldap). +// ldap_schema = "openldap" + +// ldap_service_account_names is a list of service account names to be used with the LDAP plugin. +// ldap_service_account_names = ["staticuser", "bob.johnson", "mary.smith"] + +// ldap_tag is the tag or version identifier for the LDAP plugin build. +// ldap_tag = "1.3.0" + +// ldap_username is the username for the LDAP user to authenticate. +// ldap_username = "mary" + +// ldap_user_old_password is the old password for the LDAP user. 
+// ldap_user_old_password = "defaultpassword" + +// ldap_user_role_name is the name of the role on the Vault side. +// ldap_user_role_name = "mary" + +// makefile_dir is the directory containing the Makefile for building the plugin. +// makefile_dir = "/Users//hashicorp/plugins/vault-plugin-secrets-openldap/" + +// plugin_dest_dir is the local directory where the plugin artifact will be stored. +// plugin_dest_dir = "/Users//go/vault-plugins" + +// plugin_dir_vault is the directory on the Vault server where plugins are installed. +// plugin_dir_vault = "/etc/vault/plugins" + +// plugin_mount_path is the mount path in Vault where the plugin will be enabled. +// plugin_mount_path = "local-secrets-ldap" + +// plugin_name is the name of the Vault plugin to be used for LDAP secrets. +// plugin_name = "vault-plugin-secrets-openldap" + +// tags are a map of tags that will be applied to infrastructure resources that +// support tagging. +// tags = { "Project Name" : "Vault", "Something Cool" : "Value" } + +// terraform_plugin_cache_dir is the directory to cache Terraform modules and providers. +// It must exist. +// terraform_plugin_cache_dir = "/Users//.terraform/plugin-cache-dir + +// ui_test_filter is the test filter to limit the ui tests to execute for the ui scenario. It will +// be appended to the ember test command as '-f=\"\"'. +// ui_test_filter = "sometest" + +// ui_run_tests sets whether to run the UI tests or not for the ui scenario. If set to false a +// cluster will be created but no tests will be run. +// ui_run_tests = true + +// vault_artifact_path is the path to CRT generated or local vault.zip bundle. When +// using the "builder:local" variant a bundle will be built from the current branch. +// In CI it will use the output of the build workflow. +// vault_artifact_path = "./dist/vault.zip" + +// vault_artifact_type is the type of Vault artifact to use when installing Vault from artifactory. 
+// It should be 'package' for .deb or # .rpm package and 'bundle' for .zip bundles" +// vault_artifact_type = "bundle" + +// vault_build_date is the build date for Vault artifact. Some validations will require the binary build +// date to match" +// vault_build_date = "2023-07-07T14:06:37Z" // make ci-get-date for example + +// vault_enable_audit_devices sets whether or not to enable every audit device. It true +// a file audit device will be enabled at the path /var/log/vault_audit.log, the syslog +// audit device will be enabled, and a socket audit device connecting to 127.0.0.1:9090 +// will be enabled. The netcat program is run in listening mode to provide an endpoint +// that the socket audit device can connect to. +// vault_enable_audit_devices = true + +// vault_install_dir is the directory where the vault binary will be installed on +// the remote machines. +// vault_install_dir = "/opt/vault/bin" + +// vault_local_binary_path is the path of the local binary that we're upgrading to. +// vault_local_binary_path = "./support/vault" + +// vault_instance_type is the instance type to use for the Vault backend +// vault_instance_type = "t3.small" + +// vault_instance_count is how many instances to create for the Vault cluster. +// vault_instance_count = 3 + +// vault_license_path is the path to a valid Vault enterprise edition license. +// This is only required for non-ce editions" +// vault_license_path = "./support/vault.hclic" + +// vault_local_build_tags override the build tags we pass to the Go compiler for builder:local variants. +// vault_local_build_tags = ["ui", "ent"] + +// vault_log_level is the server log level for Vault logs. Supported values (in order of detail) are +// trace, debug, info, warn, and err." +// vault_log_level = "trace" + +// vault_product_version is the version of Vault we are testing. Some validations will expect the vault +// binary and cluster to report this version. 
+// vault_product_version = "1.15.0" + +// vault_revision is the git sha of Vault artifact we are testing. Some validations will expect the vault +// binary and cluster to report this revision. +// vault_revision = "df733361af26f8bb29b63704168bbc5ab8d083de" \ No newline at end of file From d50818aaf21ce03fd0d29ddbf554f7afcfaabe2e Mon Sep 17 00:00:00 2001 From: HamzaShili65 Date: Fri, 22 Aug 2025 14:08:17 -0500 Subject: [PATCH 26/26] change vars names --- .gitignore | 5 ++++- enos/enos-variables.hcl | 6 +++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/.gitignore b/.gitignore index 15ebab5..6f26910 100644 --- a/.gitignore +++ b/.gitignore @@ -41,6 +41,9 @@ website/build .vagrant/ Vagrantfile +# Configs +*.hcl + .DS_Store .idea .vscode @@ -80,7 +83,7 @@ scripts/custom.sh # enos /enos/.enos/* -/enos/enos.vars.hcl +/enos/enos*.vars.hcl **/.terraform/* .terraform.lock.hcl diff --git a/enos/enos-variables.hcl b/enos/enos-variables.hcl index ba54bfa..3b0e2b7 100644 --- a/enos/enos-variables.hcl +++ b/enos/enos-variables.hcl @@ -68,7 +68,7 @@ variable "distro_version_ubuntu" { default = "24.04" // or "20.04", "22.04" } -variable "dynamic_role_ldif_templates_path" { +variable "ldap_dynamic_role_ldif_templates_path" { description = "LDIF templates path for dynamic role CRUD API tests" default = "/tmp" } @@ -157,7 +157,7 @@ variable "ldap_user_role_name" { default = "mary" } -variable "library_set_name" { +variable "ldap_library_set_name" { description = "The name of the library set to use for library CRUD API tests" type = string default = "dev-team" @@ -205,7 +205,7 @@ variable "project_name" { default = "vault-plugin-secrets-openldap-enos-integration" } -variable "service_account_names" { +variable "ldap_service_account_names" { description = "List of service account names to create for library CRUD API tests" type = list(string) default = ["staticuser", "bob.johnson", "mary.smith"]