Add support for testing `+ent.hsm` and `+ent.hsm.fips1402` Vault editions with `pkcs11` seal types utilizing a shared `softhsm` token. Softhsm2 is a software HSM that loads seal keys from local disk via pkcs11. The pkcs11 seal setup is fairly complex: we have to create one or more shared tokens with various keys and distribute them to all nodes in the cluster before starting Vault, and we have to ensure that each set's labels are unique (a hedged sketch of this setup follows the change list below). We also make a few quality-of-life updates: use globals for variants that rarely change and update base versions for various scenarios.

* Add a `seal_pkcs11` module for creating a `pkcs11` seal key using `softhsm2` as the backing implementation.
* Require the latest enos provider to gain access to the `enos_user` resource, ensuring correct ownership and permissions of the `softhsm2` data directory and files.
* Add the `pkcs11` seal to all scenarios that support configuring a seal type.
* Extract system package installation out of the `vault_cluster` module and into its own reusable `install_package` module.
* Fix a bug in the local builder variant that mangled the path. This likely slipped in during the migration to auto-version bumping.
* Fix an issue where restarting Vault nodes with a socket seal would fail because a seal socket sync wasn't available on all nodes. Now we start the socket listener on all nodes so that any node can become primary and write audit output to the socket listener.
* Remove unused attributes from some verify modules.
* Go back to using cheaper AWS regions.
* Use globals for variants.
* Update the initial Vault version for the `upgrade` and `autopilot` scenarios.
* Update the Consul versions for all scenarios that support a Consul storage backend.

Signed-off-by: Ryan Cragun <me@ryan.ec>
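For context, the gist of what the `seal_pkcs11` module automates is roughly: initialize a `softhsm2` token on each node and point Vault's `pkcs11` seal stanza at it. The sketch below is illustrative only; the token label, PINs, library path, and file locations are assumptions, and the real key creation and distribution logic lives in the Enos module, not here.

```bash
# Illustrative sketch, not the module's actual implementation. Labels, PINs,
# and paths below are made up for demonstration.

# Initialize a softhsm2 token in the first free slot; the label must be unique
# per token set so nodes don't collide when sharing the token.
sudo softhsm2-util --init-token --free \
  --label "vault-seal-1" \
  --so-pin "1234" \
  --pin "1234"

# Point Vault's pkcs11 seal at the token (hypothetical file location and values;
# generate_key asks Vault to create the seal keys if they don't exist yet).
cat <<'EOF' | sudo tee /etc/vault.d/seal-pkcs11.hcl
seal "pkcs11" {
  lib            = "/usr/lib/softhsm/libsofthsm2.so"
  token_label    = "vault-seal-1"
  pin            = "1234"
  key_label      = "vault-seal-key"
  hmac_key_label = "vault-hmac-key"
  generate_key   = "true"
}
EOF
```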
#!/bin/bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1

set -e

fail() {
  echo "$1" 1>&2
  exit 1
}

[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set"
[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set"
[[ -z "$PACKAGES" ]] && fail "PACKAGES env variable has not been set"

install_packages() {
  if [ "$PACKAGES" = "__skip" ]; then
    return 0
  fi

  echo "Installing Dependencies: $PACKAGES"
  if [ -f /etc/debian_version ]; then
    # Do our best not to race with cloud-init: check whether the ec2 sources
    # have been written yet, but don't fail if they haven't. If cloud-init
    # hasn't finished and the install fails, the retry loop below tries again.
    grep ec2 /etc/apt/sources.list || true

    cd /tmp
    sudo apt update
    # shellcheck disable=2068
    sudo apt install -y ${PACKAGES[@]}
  else
    cd /tmp
    # shellcheck disable=2068
    sudo yum -y install ${PACKAGES[@]}
  fi
}

begin_time=$(date +%s)
end_time=$((begin_time + TIMEOUT_SECONDS))
while [ "$(date +%s)" -lt "$end_time" ]; do
  if install_packages; then
    exit 0
  fi

  sleep "$RETRY_INTERVAL"
done

fail "Timed out waiting for packages to install"
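A hypothetical invocation of the script, assuming it is saved as `install-packages.sh` (the variable values, package names, and filename are illustrative; in practice the `install_package` Enos module renders and runs it on each target host):

```bash
# Retry installs every 5 seconds for up to 5 minutes.
RETRY_INTERVAL=5 TIMEOUT_SECONDS=300 PACKAGES="softhsm2 opensc" \
  bash ./install-packages.sh

# The "__skip" sentinel makes install_packages a no-op, so the script exits 0
# on the first pass without touching apt or yum.
RETRY_INTERVAL=5 TIMEOUT_SECONDS=300 PACKAGES=__skip bash ./install-packages.sh
```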