Backport Add transit test using managed keys into ce/main (#14534)

Also includes https://github.com/hashicorp/vault-enterprise/pull/14540
This commit is contained in:
Vault Automation 2026-05-06 10:05:40 -06:00 committed by GitHub
parent 01c243241c
commit daeade6ba1
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
14 changed files with 666 additions and 531 deletions

View File

@ -338,7 +338,33 @@ jobs:
# The dev mode binary has to exist for binary tests that are dispatched on the last runner.
env:
GOPRIVATE: github.com/hashicorp/*
run: time make prep dev
run: |
set -exo pipefail
time make prep dev
mv bin/vault vault-binary
- if: inputs.binary-tests && matrix.id == inputs.total-runners
id: build-docker-image
name: Build Docker image with custom vault binary
run: |
set -exo pipefail
if [ "${{ needs.test-matrix.outputs.is-ent-branch }}" == "true" ]; then
go run ./tools/testimagemaker/ -source=docker.io/hashicorp/vault-enterprise:latest -target=hashicorp/vault-enterprise-ci:latest -binary=./vault-binary
go run ./tools/testimagemaker/ -source=docker.io/hashicorp/vault-enterprise:2.0.0-ent.hsm -target=hashicorp/vault-enterprise-ci:latest-hsm -binary=./vault-hsm-binary -hsm
# Verify the images were built successfully
docker images hashicorp/vault-enterprise-ci:latest
echo "image=hashicorp/vault-enterprise-ci:latest" >> "$GITHUB_OUTPUT"
docker images hashicorp/vault-enterprise-ci:latest-hsm
echo "hsmimage=hashicorp/vault-enterprise-ci:latest-hsm" >> "$GITHUB_OUTPUT"
else
go run ./tools/testimagemaker/ -source=docker.io/hashicorp/vault:latest -target=hashicorp/vault-ci:latest -binary=./vault-binary
# Verify the image was built successfully
docker images hashicorp/vault-ci:latest
echo "image=hashicorp/vault-ci:latest" >> "$GITHUB_OUTPUT"
fi
- if: needs.test-matrix.outputs.is-ent-repo != 'true'
# Enterprise repo runners do not allow sudo, so can't install gVisor there yet.
name: Install gVisor
@ -402,17 +428,26 @@ jobs:
# The docker/binary tests are more expensive, and we've had problems with timeouts when running at full
# parallelism. The default if -p isn't specified is to use NumCPUs, which seems fine for regular tests.
package_parallelism=""
test_parallelism="${{ inputs.go-test-parallelism }}"
if [ -f vault-hsm-binary ]; then
VAULT_HSM_BINARY="$(pwd)/vault-hsm-binary"
export VAULT_HSM_BINARY
# The image will be preferred by most docker tests, since it doesn't require the extra
# overhead of mutating an image to add the current binary. We still populate $VAULT_BINARY
# for the sake of exec tests like TestSysPprof_Exec.
if [ -f vault-binary ]; then
VAULT_BINARY=$(pwd)/vault-binary
export VAULT_BINARY
export VAULT_IMAGE=${{ steps.build-docker-image.outputs.image }}
fi
if [ -f bin/vault ]; then
VAULT_BINARY="$(pwd)/bin/vault"
export VAULT_BINARY
if [ -f vault-hsm-binary ]; then
VAULT_HSM_BINARY=$(pwd)/vault-hsm-binary
export VAULT_HSM_BINARY
export VAULT_HSM_IMAGE=${{ steps.build-docker-image.outputs.hsmimage }}
fi
if [ -f vault-binary ] || [ -f vault-hsm-binary ]; then
package_parallelism="-p 2"
test_parallelism=4
fi
# If running Go tests on the enterprise repo, add a flag to rerun failed tests.
@ -436,7 +471,7 @@ jobs:
$package_parallelism \
-tags "${{ inputs.go-tags }}" \
-timeout=${{ inputs.go-test-timeout }} \
-parallel=${{ inputs.go-test-parallelism }} \
-parallel="$test_parallelism" \
${{ inputs.extra-flags }} \
- if: (github.repository == 'hashicorp/vault' || github.repository == 'hashicorp/vault-enterprise') && (success() || failure())
name: Prepare datadog-ci

View File

@ -6,11 +6,11 @@ package pkiext_binary
import (
"context"
"fmt"
"os"
"testing"
"time"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/helper/testhelpers/testimages"
dockhelper "github.com/hashicorp/vault/sdk/helper/docker"
"github.com/hashicorp/vault/sdk/helper/testcluster"
"github.com/hashicorp/vault/sdk/helper/testcluster/docker"
@ -23,17 +23,11 @@ type VaultPkiCluster struct {
}
func NewVaultPkiCluster(t *testing.T) *VaultPkiCluster {
binary := os.Getenv("VAULT_BINARY")
if binary == "" {
t.Skip("only running docker test when $VAULT_BINARY present")
}
repo, tag := testimages.GetImageRepoAndTag(t, false)
opts := &docker.DockerClusterOptions{
ImageRepo: "docker.mirror.hashicorp.services/hashicorp/vault",
// We're replacing the binary anyway, so we're not too particular about
// the docker image version tag.
ImageTag: "latest",
VaultBinary: binary,
ImageRepo: repo,
ImageTag: tag,
ClusterOptions: testcluster.ClusterOptions{
VaultNodeConfig: &testcluster.VaultNodeConfig{
LogLevel: "TRACE",

View File

@ -0,0 +1,252 @@
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: MPL-2.0
package testimages
import (
"context"
"fmt"
"io"
"os"
"strings"
"testing"
"github.com/hashicorp/vault/helper/constants"
dockhelper "github.com/hashicorp/vault/sdk/helper/docker"
"github.com/stretchr/testify/require"
)
// GetImageRepoAndTag returns an image repo and tag that can be used to start a vault
// node via docker. Env vars are used as inputs: either VAULT_BINARY and VAULT_IMAGE
// if hsm is false, or VAULT_HSM_BINARY and VAULT_HSM_IMAGE if hsm is true.
//
// If a matching image var is set, we split that on ":" and return the two pieces
// as the repo and tag. If instead a matching binary var is set, we create an image
// using a vault-enterprise docker image as a starting point, then add softhsm
// (when hsm is true) and the specified binary to it. If neither the image or binary var
// are set, we fail the test.
//
// For devs on their workstations, they can either create an image or a binary and
// set the env vars appropriately. Creating an hsm linux binary is more challenging and
// time-consuming than creating a regular binary, so we don't want to impose that
// on people running tests that don't require one.
//
// See also tools/testimagemaker for a way to build an image for this purpose
// from the CLI.
func GetImageRepoAndTag(t *testing.T, hsm bool) (string, string) {
	t.Helper()

	repo, tag, buildOutput, err := CreateOrReturnDockerImage(hsm)
	if err != nil {
		// Surface whatever the build produced before failing the test.
		if buildOutput != nil {
			t.Logf("docker image create output: %s", buildOutput)
		}
		require.NoError(t, err)
	}
	t.Cleanup(func() {
		// When image build fails, it doesn't always return an error, but
		// typically the error is visible in the output, so log it for any
		// failed test.
		if t.Failed() && buildOutput != nil {
			t.Logf("docker image create output: %s", buildOutput)
		}
	})
	return repo, tag
}
// CreateOrReturnDockerImage looks at the VAULT_(HSM_)BINARY and
// VAULT_(HSM_)IMAGE env vars. If the image var is populated, it is split on
// its last ":" and the two pieces are returned as the repo and tag (splitting
// on the last colon allows registry hosts with ports, e.g.
// localhost:5000/vault:latest). If only the binary var is populated, an image
// is created based on the latest hsm image
// (TODO: currently hardcoded as "docker.io/hashicorp/vault-enterprise:2.0.0-ent.hsm")
// by installing SoftHSM and the binary on top of that image.
// If neither is populated an error is returned.
func CreateOrReturnDockerImage(hsm bool) (repo string, tag string, output []byte, err error) {
	binVar, imgVar := "VAULT_BINARY", "VAULT_IMAGE"
	if hsm {
		binVar, imgVar = "VAULT_HSM_BINARY", "VAULT_HSM_IMAGE"
	}
	bin, img := os.Getenv(binVar), os.Getenv(imgVar)
	switch {
	case bin == "" && img == "":
		return "", "", nil, fmt.Errorf("no docker image or binary provided")
	case img != "":
		// Ignore the binary if an image is specified. Split on the last ":"
		// so registry references containing a port are still accepted; the
		// colon must introduce a non-empty tag (no "/" after it).
		idx := strings.LastIndex(img, ":")
		if idx <= 0 || idx == len(img)-1 || strings.Contains(img[idx+1:], "/") {
			return "", "", nil, fmt.Errorf("bad input image format %q", img)
		}
		return img[:idx], img[idx+1:], nil, nil
	default:
		base := "hashicorp/vault"
		if constants.IsEnterprise {
			base += "-enterprise"
		}
		// Assign the named returns directly instead of shadowing them.
		repo = base + "-ci"
		tag = "latest"
		source := "docker.io/" + base + ":latest"
		if hsm {
			// HSM images must start from the enterprise HSM variant.
			source = "docker.io/hashicorp/vault-enterprise:2.0.0-ent.hsm"
			tag = "latest-hsm"
		}
		target := fmt.Sprintf("%s:%s", repo, tag)
		if hsm {
			output, err = CreateHSMDockerImage(source, target, bin)
		} else {
			output, err = CreateNonHSMDockerImage(source, target, bin)
		}
		return repo, tag, output, err
	}
}
// createBuildContextWithBinary returns a docker build context containing the
// file at vaultBinary as an executable named "vault".
func createBuildContextWithBinary(vaultBinary string) (dockhelper.BuildContext, error) {
	f, err := os.Open(vaultBinary)
	if err != nil {
		return nil, fmt.Errorf("error opening vault binary file: %w", err)
	}
	// Close the descriptor once read; the original leaked it.
	defer f.Close()
	data, err := io.ReadAll(f)
	if err != nil {
		return nil, fmt.Errorf("error reading vault binary file: %w", err)
	}
	bCtx := dockhelper.NewBuildContext()
	bCtx["vault"] = &dockhelper.FileContents{
		Data: data,
		Mode: 0o755,
	}
	return bCtx, nil
}
// createDockerImage builds an image named toImage from the given Dockerfile
// contents and build context, returning the raw docker build output.
func createDockerImage(toImage, containerFile string, bCtx dockhelper.BuildContext) ([]byte, error) {
	dockerAPI, err := dockhelper.NewDockerAPI()
	if err != nil {
		return nil, err
	}
	buildOutput, buildErr := dockhelper.BuildImage(context.Background(), dockerAPI, containerFile, bCtx,
		dockhelper.BuildRemove(true),
		dockhelper.BuildForceRemove(true),
		dockhelper.BuildPullParent(true),
		dockhelper.BuildTags([]string{toImage}))
	if buildErr != nil {
		// Include the build output in the error; docker problems frequently
		// only show up there.
		return nil, fmt.Errorf("error building docker image: %w (output: %s)", buildErr, buildOutput)
	}
	return buildOutput, nil
}
func CreateNonHSMDockerImage(fromImage, toImage, vaultBinary string) ([]byte, error) {
bCtx := dockhelper.NewBuildContext()
var err error
bCtx, err = createBuildContextWithBinary(vaultBinary)
if err != nil {
return nil, err
}
containerFile := fmt.Sprintf(`
FROM %s
USER root
COPY vault /bin/vault
USER vault
CMD ["server", "-dev"]
`, fromImage)
return createDockerImage(toImage, containerFile, bCtx)
}
// CreateHSMDockerImage creates a new vault-enterprise hsm docker image from an existing
// hsm image. The new image includes softhsm, and optionally a new vault binary.
// SoftHSM is compiled in a builder stage and its library, util, and config are
// copied into the final image. The docker build output is returned for logging.
func CreateHSMDockerImage(fromImage, toImage, vaultBinary string) ([]byte, error) {
	bCtx := dockhelper.NewBuildContext()
	// Only emit the COPY instruction for the vault binary when one was
	// actually supplied; an unconditional COPY would make the docker build
	// fail when the build context contains no "vault" file (the documented
	// "optionally a new vault binary" case).
	vaultCopy := ""
	if vaultBinary != "" {
		var err error
		bCtx, err = createBuildContextWithBinary(vaultBinary)
		if err != nil {
			return nil, err
		}
		vaultCopy = "COPY vault /bin/vault\n"
	}
	// Entry-point wrapper: initializes a SoftHSM token slot once, records its
	// id in /vault/file/hsm-slot, then chains to the stock entrypoint.
	bCtx["setup-softhsm.sh"] = &dockhelper.FileContents{
		Data: []byte(`#!/bin/bash
mkdir -p /vault/file/softhsm/tokens
# only create a new slot if there isn't an existing one
if [ ! -e /vault/file/hsm-slot ]; then
softhsm2-util --init-token --slot 0 --so-pin=12345 --pin=12345 --label "vault" | grep -oE '[0-9]+$' > /vault/file/hsm-slot
fi
exec docker-entrypoint.sh "$@"
`),
		Mode: 0o755,
	}
	// Repo definitions used by the builder stage to install build tooling.
	bCtx["centos-stream.repo"] = &dockhelper.FileContents{
		Data: []byte(`
[centos-10-baseos]
name=CentOS Stream 10 - BaseOS
baseurl=https://mirror.stream.centos.org/10-stream/BaseOS/$basearch/os/
gpgcheck=0
enabled=1
[centos-10-appstream]
name=CentOS Stream 10 - AppStream
baseurl=https://mirror.stream.centos.org/10-stream/AppStream/$basearch/os/
gpgcheck=0
enabled=1
`),
		Mode: 0o644,
	}
	containerFile := fmt.Sprintf(`FROM %s AS builder
USER root
COPY centos-stream.repo /etc/yum.repos.d
RUN microdnf install -y tar gzip wget make gcc gcc-c++ openssl-devel sudo microdnf automake autoconf libtool pkg-config
RUN pwd
RUN wget https://github.com/softhsm/SoftHSMv2/archive/refs/tags/2.7.0.tar.gz
RUN echo "be14a5820ec457eac5154462ffae51ba5d8a643f6760514d4b4b83a77be91573 2.7.0.tar.gz" | sha256sum -c
RUN tar -xzf 2.7.0.tar.gz
# disable GOST cryptography as it requires extra plugins
RUN cd SoftHSMv2-2.7.0 && sh autogen.sh && ./configure --disable-gost && make
FROM %s
USER root
COPY --from=builder /SoftHSMv2-2.7.0/src/lib/.libs/libsofthsm2.so /usr/lib64/libsofthsm2.so
COPY --from=builder /SoftHSMv2-2.7.0/src/bin/util/softhsm2-util /usr/bin/softhsm2-util
COPY --from=builder /SoftHSMv2-2.7.0/src/lib/common/softhsm2.conf /etc/softhsm2.conf
RUN mkdir /usr/local/lib/softhsm && ln /usr/lib64/libsofthsm2.so /usr/local/lib/softhsm/libsofthsm2.so
# Put the tokens under /vault/file since that's the data volume, and if we want
# to start a cluster using a pre-existing volume (i.e. resuming from a previous
# cluster) we need the tokens in order to unseal/unsealwrap.
RUN sed -i 's|directories.tokendir = .*|directories.tokendir = /vault/file/softhsm/tokens|g' /etc/softhsm2.conf
RUN sed -i 's/log.level = ERROR/log.level = DEBUG/' /etc/softhsm2.conf
COPY setup-softhsm.sh /usr/local/bin/setup-softhsm.sh
%sUSER vault
CMD ["server", "-dev"]
ENTRYPOINT ["setup-softhsm.sh"]
`, fromImage, fromImage, vaultCopy)
	return createDockerImage(toImage, containerFile, bCtx)
}
// PKCS#11 parameters for the SoftHSM token baked into images built by
// CreateHSMDockerImage; they mirror the values used in setup-softhsm.sh and
// the Dockerfile above.
const (
	// PKCS11Library is where the Dockerfile installs libsofthsm2.so.
	PKCS11Library = "/usr/lib64/libsofthsm2.so"
	// PKCS11Pin matches the --pin value passed to softhsm2-util.
	PKCS11Pin = "12345"
	// PKCS11TokenLabel matches the --label value passed to softhsm2-util.
	PKCS11TokenLabel = "vault"
)

View File

@ -20,7 +20,6 @@ func DefaultOptions(t *testing.T) *DockerClusterOptions {
ImageRepo: "hashicorp/vault",
ImageTag: "latest",
VaultBinary: os.Getenv("VAULT_BINARY"),
Envs: []string{"SKIP_SETCAP=true"},
ClusterOptions: testcluster.ClusterOptions{
NumCores: 3,
ClusterName: strings.ReplaceAll(t.Name(), "/", "-"),

View File

@ -89,6 +89,27 @@ func UnsealNode(ctx context.Context, cluster VaultCluster, nodeIdx int) error {
return NodeHealthy(ctx, cluster, nodeIdx)
}
// UnsealNodeWithOptions submits every barrier (or recovery) key to the given
// node's unseal endpoint, passing through the reset and migrate options, and
// then waits for the node to report healthy.
func UnsealNodeWithOptions(ctx context.Context, cluster VaultCluster, nodeIdx int, reset, migrate bool) error {
	nodes := cluster.Nodes()
	if nodeIdx >= len(nodes) {
		return fmt.Errorf("invalid nodeIdx %d for cluster", nodeIdx)
	}
	client := nodes[nodeIdx].APIClient()
	for _, unsealKey := range cluster.GetBarrierOrRecoveryKeys() {
		opts := &api.UnsealOpts{
			Key:     hex.EncodeToString(unsealKey),
			Reset:   reset,
			Migrate: migrate,
		}
		if _, err := client.Sys().UnsealWithOptionsWithContext(ctx, opts); err != nil {
			return err
		}
	}
	return NodeHealthy(ctx, cluster, nodeIdx)
}
func UnsealAllNodes(ctx context.Context, cluster VaultCluster) error {
for i := range cluster.Nodes() {
if err := UnsealNode(ctx, cluster, i); err != nil {

View File

@ -15,5 +15,6 @@ rules:
- "*_test.go"
- "cmd/*.go"
- "cmd/**/*.go"
- "tools/*/main.go"
- sdk/database/dbplugin/server.go # effectively a cmd
- sdk/database/dbplugin/v5/plugin_server.go # effectively a cmd

View File

@ -0,0 +1,42 @@
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: MPL-2.0
package main
import (
"flag"
"fmt"
"os"
"github.com/hashicorp/vault/helper/testhelpers/testimages"
)
// main builds a docker test image from a source image plus a vault binary,
// exposing testimages.Create*DockerImage from the CLI.
func main() {
	var (
		source = flag.String("source", "", "Source image name")
		target = flag.String("target", "", "Target image name")
		binary = flag.String("binary", "", "Binary path")
		hsm    = flag.Bool("hsm", false, "HSM style image")
	)
	flag.Parse()

	if *source == "" || *target == "" || *binary == "" {
		fmt.Fprintf(os.Stderr, "Error: all of the flags -source, -target, and -binary are required\n\n")
		flag.Usage()
		os.Exit(1)
	}

	build := testimages.CreateNonHSMDockerImage
	if *hsm {
		build = testimages.CreateHSMDockerImage
	}
	output, err := build(*source, *target, *binary)
	// Print the build output even on failure; docker problems often only
	// appear there.
	fmt.Println(string(output))
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error: %s\n", err.Error())
		os.Exit(1)
	}
}

View File

@ -15,7 +15,7 @@ import (
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/helper/testhelpers/consul"
"github.com/hashicorp/vault/helper/testhelpers/corehelpers"
"github.com/hashicorp/vault/helper/testhelpers/testimages"
"github.com/hashicorp/vault/sdk/helper/testcluster"
"github.com/hashicorp/vault/sdk/helper/testcluster/docker"
"github.com/stretchr/testify/require"
@ -34,27 +34,19 @@ func TestConsulFencing_PartitionedLeaderCantWrite(t *testing.T) {
consulStorage := consul.NewClusterStorage()
// Create cluster logger that will write cluster logs to a file in CI.
logger := corehelpers.NewTestLogger(t)
logger.SetLevel(hclog.Trace)
clusterOpts := docker.DefaultOptions(t)
// We can use an enterprise image here because we are swapping out the binary anyway.
clusterOpts.ImageRepo = "hashicorp/vault-enterprise"
clusterOpts.ClusterOptions.Logger = logger
clusterOpts.VaultBinary = ""
clusterOpts.ImageRepo, clusterOpts.ImageTag = testimages.GetImageRepoAndTag(t, false)
clusterOpts.Storage = consulStorage
logger.Info("==> starting cluster")
c, err := docker.NewDockerCluster(ctx, clusterOpts)
require.NoError(t, err)
logger.Info(" ✅ done.", "root_token", c.GetRootToken(),
"consul_token", consulStorage.Config().Token)
logger.Info("==> waiting for leader")
leaderIdx, err := testcluster.WaitForActiveNode(ctx, c)
require.NoError(t, err)
logger := c.Logger.Named("test")
leader := c.Nodes()[leaderIdx]
leaderClient := leader.APIClient()
@ -64,7 +56,6 @@ func TestConsulFencing_PartitionedLeaderCantWrite(t *testing.T) {
}
// Mount a KV v2 backend
logger.Info("==> mounting KV")
err = leaderClient.Sys().Mount("/test", &api.MountInput{
Type: "kv-v2",
})
@ -300,7 +291,6 @@ func waitForKVv2Upgrade(t *testing.T, ctx context.Context, client *api.Client, p
if err == nil {
return
}
t.Logf("waitForKVv2Upgrade: write failed: %s", err)
select {
case <-ctx.Done():
t.Fatalf("context cancelled waiting for KVv2 (%s) upgrade to complete: %s",

View File

@ -5,12 +5,12 @@ package misc
import (
"context"
"os"
"path"
"testing"
"github.com/go-test/deep"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/helper/testhelpers/testimages"
"github.com/hashicorp/vault/sdk/helper/testcluster"
"github.com/hashicorp/vault/sdk/helper/testcluster/docker"
"github.com/mitchellh/mapstructure"
@ -26,26 +26,20 @@ func TestRecovery_Docker(t *testing.T) {
ctx := context.TODO()
t.Parallel()
binary := os.Getenv("VAULT_BINARY")
if binary == "" {
t.Skip("only running docker test when $VAULT_BINARY present")
}
repo, tag := testimages.GetImageRepoAndTag(t, false)
opts := &docker.DockerClusterOptions{
ImageRepo: "hashicorp/vault",
ImageRepo: repo,
DisableMlock: true,
// We're replacing the binary anyway, so we're not too particular about
// the docker image version tag.
ImageTag: "latest",
VaultBinary: binary,
ImageTag: tag,
ClusterOptions: testcluster.ClusterOptions{
NumCores: 1,
VaultNodeConfig: &testcluster.VaultNodeConfig{
LogLevel: "TRACE",
// If you want the test to run faster locally, you could
// uncomment this performance_multiplier change.
//StorageOptions: map[string]string{
// "performance_multiplier": "1",
//},
StorageOptions: map[string]string{
"performance_multiplier": "1",
},
},
},
}
@ -130,6 +124,7 @@ func TestRecovery_Docker(t *testing.T) {
newOpts := *opts
opts := &newOpts
opts.Args = []string{"-recovery"}
opts.SkipUnsealWaitActiveNode = true
opts.StartProbe = func(client *api.Client) error {
// In recovery mode almost no paths are supported, and pretty much
// the only ones that don't require a recovery token are the ones used

View File

@ -18,6 +18,7 @@ import (
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/helper/testhelpers"
sealhelper "github.com/hashicorp/vault/helper/testhelpers/seal"
"github.com/hashicorp/vault/helper/testhelpers/testimages"
"github.com/hashicorp/vault/sdk/helper/testcluster"
"github.com/hashicorp/vault/sdk/helper/testcluster/docker"
rafttest "github.com/hashicorp/vault/vault/external_tests/raft"
@ -28,6 +29,8 @@ import (
// uses docker containers for the vault nodes.
func TestRaft_Configuration_Docker(t *testing.T) {
t.Parallel()
repo, tag := testimages.GetImageRepoAndTag(t, false)
transit := sealhelper.NewTransitDockerSealServer(t)
for _, tc := range []struct {
@ -43,21 +46,13 @@ func TestRaft_Configuration_Docker(t *testing.T) {
t.Skip("only running docker test when $VAULT_BINARY present")
}
opts := &docker.DockerClusterOptions{
ImageRepo: "hashicorp/vault",
DisableMlock: true,
// We're replacing the binary anyway, so we're not too particular about
// the docker image version tag.
ImageTag: "latest",
VaultBinary: binary,
ImageRepo: repo,
ImageTag: tag,
ClusterOptions: testcluster.ClusterOptions{
VaultNodeConfig: &testcluster.VaultNodeConfig{
Seal: tc.seals,
LogLevel: "TRACE",
// If you want the test to run faster locally, you could
// uncomment this performance_multiplier change.
//StorageOptions: map[string]string{
// "performance_multiplier": "1",
//},
},
},
}
@ -147,17 +142,11 @@ func stabilize(t *testing.T, client *api.Client) {
// nodes that use raft-wal (and vice-versa)
// Having a cluster of mixed nodes, some using raft-boltdb and some using raft-wal, is not a problem.
func TestDocker_LogStore_Boltdb_To_Raftwal_And_Back(t *testing.T) {
binary := os.Getenv("VAULT_BINARY")
if binary == "" {
t.Skip("only running docker test when $VAULT_BINARY present")
}
repo, tag := testimages.GetImageRepoAndTag(t, false)
opts := &docker.DockerClusterOptions{
ImageRepo: "hashicorp/vault",
DisableMlock: true,
// We're replacing the binary anyway, so we're not too particular about
// the docker image version tag.
ImageTag: "latest",
VaultBinary: binary,
ImageRepo: repo,
ImageTag: tag,
ClusterOptions: testcluster.ClusterOptions{
VaultNodeConfig: &testcluster.VaultNodeConfig{
LogLevel: "TRACE",
@ -342,17 +331,11 @@ func TestDocker_LogStore_Boltdb_To_Raftwal_And_Back(t *testing.T) {
// by performing a snapshot restore from one cluster to another, and checking no data loss
func TestRaft_LogStore_Migration_Snapshot(t *testing.T) {
t.Parallel()
binary := os.Getenv("VAULT_BINARY")
if binary == "" {
t.Skip("only running docker test when $VAULT_BINARY present")
}
repo, tag := testimages.GetImageRepoAndTag(t, false)
opts := &docker.DockerClusterOptions{
ImageRepo: "hashicorp/vault",
DisableMlock: true,
// We're replacing the binary anyway, so we're not too particular about
// the docker image version tag.
ImageTag: "latest",
VaultBinary: binary,
ImageRepo: repo,
ImageTag: tag,
ClusterOptions: testcluster.ClusterOptions{
NumCores: 1,
VaultNodeConfig: &testcluster.VaultNodeConfig{

View File

@ -5,250 +5,46 @@ package seal_binary
import (
"bufio"
"context"
"fmt"
"io"
"net/url"
"maps"
"os"
"path"
"strconv"
"strings"
"sync"
"testing"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/api"
dockhelper "github.com/hashicorp/vault/sdk/helper/docker"
"github.com/hashicorp/vault/sdk/helper/testcluster"
"github.com/hashicorp/vault/sdk/helper/testcluster/docker"
client "github.com/moby/moby/client"
)
const (
containerConfig = `
{
"storage": {
"file": {
"path": "/tmp"
}
},
"disable_mlock": true,
"listener": [{
"tcp": {
"address": "0.0.0.0:8200",
"tls_disable": "true"
}
}],
"api_addr": "http://0.0.0.0:8200",
"cluster_addr": "http://0.0.0.0:8201",
%s
}`
sealConfig = `
"seal": [
%s
]
`
transitParameters = `
"address": "%s",
"token": "%s",
"mount_path": "%s",
"key_name": "%s",
"name": "%s"
`
transitStanza = `
{
"transit": {
%s,
"priority": %d,
"disabled": %s
}
}
`
// recoveryModeFileName serves as a signal for the softhsmSetupScript to add the `-recovery` flag
// when launching Vault.
recoveryModeFileName = "start-in-recovery-mode"
recoveryModeFileDir = "/root/"
recoveryModeFileContents = "Script setup-softhsm.sh looks for this file and starts vault in recovery mode if it sees it"
)
type transitContainerConfig struct {
Address string
Token string
MountPaths []string
KeyNames []string
}
// createBuildContextWithBinary returns a docker build context containing the
// file at vaultBinary as an executable named "vault".
func createBuildContextWithBinary(vaultBinary string) (dockhelper.BuildContext, error) {
	f, err := os.Open(vaultBinary)
	if err != nil {
		return nil, fmt.Errorf("error opening vault binary file: %w", err)
	}
	// Close the descriptor once read; the original leaked it.
	defer f.Close()
	data, err := io.ReadAll(f)
	if err != nil {
		return nil, fmt.Errorf("error reading vault binary file: %w", err)
	}
	bCtx := dockhelper.NewBuildContext()
	bCtx["vault"] = &dockhelper.FileContents{
		Data: data,
		Mode: 0o755,
	}
	return bCtx, nil
}
// createDockerImage builds a new image tagged imageRepo:imageTag from the
// given Dockerfile contents and build context.
func createDockerImage(imageRepo, imageTag, containerFile string, bCtx dockhelper.BuildContext) error {
	// NOTE(review): the runner's ImageTag is pinned to "latest" while the
	// built image is tagged with the imageTag parameter below — presumably
	// the runner tag only names the base image for the service; confirm the
	// asymmetry is intended.
	runner, err := dockhelper.NewServiceRunner(dockhelper.RunOptions{
		ContainerName: "vault",
		ImageRepo:     imageRepo,
		ImageTag:      "latest",
	})
	if err != nil {
		return fmt.Errorf("error creating runner: %w", err)
	}
	_, err = runner.BuildImage(context.Background(), containerFile, bCtx,
		dockhelper.BuildRemove(true),
		dockhelper.BuildForceRemove(true),
		dockhelper.BuildPullParent(true),
		dockhelper.BuildTags([]string{fmt.Sprintf("%s:%s", imageRepo, imageTag)}))
	if err != nil {
		return fmt.Errorf("error building docker image: %w", err)
	}
	return nil
}
// createContainerWithConfig starts a vault server container from the given
// image, with the supplied JSON config delivered via VAULT_LOCAL_CONFIG.
// This passes the config in an environment variable, so any changes to local.json
// on the container will be overwritten if the container restarts.
func createContainerWithConfig(config string, imageRepo, imageTag string, logConsumer func(s string)) (*dockhelper.Service, *dockhelper.Runner, error) {
	runner, err := dockhelper.NewServiceRunner(dockhelper.RunOptions{
		ContainerName: "vault",
		ImageRepo:     imageRepo,
		ImageTag:      imageTag,
		Cmd: []string{
			"server", "-log-level=trace",
		},
		Ports: []string{"8200/tcp"},
		Env: []string{
			fmt.Sprintf("VAULT_LICENSE=%s", os.Getenv("VAULT_LICENSE")),
			fmt.Sprintf("VAULT_LOCAL_CONFIG=%s", config),
			"SKIP_SETCAP=true",
		},
		LogConsumer: logConsumer,
		// Keep the container around after it exits so its state/logs can be
		// inspected by the test.
		DoNotAutoRemove: true,
	})
	if err != nil {
		return nil, nil, fmt.Errorf("error creating runner: %w", err)
	}
	// The service is considered up as soon as the mapped 8200 port is known;
	// readiness of vault itself is left to the caller.
	svc, err := runner.StartService(context.Background(), func(ctx context.Context, host string, port int) (dockhelper.ServiceConfig, error) {
		return *dockhelper.NewServiceURL(url.URL{Scheme: "http", Host: fmt.Sprintf("%s:%d", host, port)}), nil
	})
	if err != nil {
		return nil, nil, fmt.Errorf("could not start docker vault: %w", err)
	}
	return svc, runner, nil
}
// createContainerFromImage starts a vault container from the given image with
// an empty VAULT_LOCAL_CONFIG, delegating to createContainerWithConfig.
func createContainerFromImage(imageRepo, imageTag string, logConsumer func(s string)) (*dockhelper.Service, *dockhelper.Runner, error) {
	return createContainerWithConfig("", imageRepo, imageTag, logConsumer)
}
func createTransitTestContainer(imageRepo, imageTag string, numKeys int) (*dockhelper.Service, *transitContainerConfig, error) {
rootToken, err := uuid.GenerateUUID()
if err != nil {
return nil, nil, fmt.Errorf("error generating UUID: %w", err)
}
mountPaths := make([]string, numKeys)
keyNames := make([]string, numKeys)
for i := range mountPaths {
mountPaths[i], err = uuid.GenerateUUID()
if err != nil {
return nil, nil, fmt.Errorf("error generating UUID: %w", err)
}
keyNames[i], err = uuid.GenerateUUID()
if err != nil {
return nil, nil, fmt.Errorf("error generating UUID: %w", err)
// init copies the CI license env var into VAULT_LICENSE so that enterprise
// containers started by these tests are licensed without extra setup.
func init() {
	if signed := os.Getenv("VAULT_LICENSE_CI"); signed != "" {
		if err := os.Setenv("VAULT_LICENSE", signed); err != nil {
			panic(err.Error())
		}
	}
}
runner, err := dockhelper.NewServiceRunner(dockhelper.RunOptions{
ContainerName: "vault",
ImageRepo: imageRepo,
ImageTag: imageTag,
Cmd: []string{
"server", "-log-level=trace", "-dev", fmt.Sprintf("-dev-root-token-id=%s", rootToken),
"-dev-listen-address=0.0.0.0:8200",
},
Env: []string{fmt.Sprintf("VAULT_LICENSE=%s", os.Getenv("VAULT_LICENSE")), "SKIP_SETCAP=true"},
Ports: []string{"8200/tcp"},
})
if err != nil {
return nil, nil, fmt.Errorf("could not create runner: %w", err)
}
// withPriorityAndDisabled returns a copy of seal whose config has the given
// priority and disabled values set; the original seal's config map is not
// mutated.
func withPriorityAndDisabled(priority int, disabled bool, seal testcluster.VaultNodeSealConfig) testcluster.VaultNodeSealConfig {
	modified := seal
	modified.Config = maps.Clone(seal.Config)
	if modified.Config == nil {
		// maps.Clone returns nil for a nil map; writing to a nil map would
		// panic, so allocate one for the two keys we set below.
		modified.Config = make(map[string]string, 2)
	}
	modified.Config["disabled"] = strconv.FormatBool(disabled)
	modified.Config["priority"] = strconv.Itoa(priority)
	return modified
}
svc, err := runner.StartService(context.Background(), func(ctx context.Context, host string, port int) (dockhelper.ServiceConfig, error) {
c := *dockhelper.NewServiceURL(url.URL{Scheme: "http", Host: fmt.Sprintf("%s:%d", host, port)})
type seal struct {
base func(name string, idx int) testcluster.VaultNodeSealConfig
index int
disabled bool
priority int
}
clientConfig := api.DefaultConfig()
clientConfig.Address = c.URL().String()
vault, err := api.NewClient(clientConfig)
if err != nil {
return nil, err
}
vault.SetToken(rootToken)
// Set up transit mounts and keys
for i := range mountPaths {
if err := vault.Sys().Mount(mountPaths[i], &api.MountInput{
Type: "transit",
}); err != nil {
return nil, err
}
if _, err := vault.Logical().Write(path.Join(mountPaths[i], "keys", keyNames[i]), map[string]interface{}{}); err != nil {
return nil, err
}
}
return c, nil
})
if err != nil {
return nil, nil, fmt.Errorf("could not start docker vault: %w", err)
}
mapping, err := runner.GetNetworkAndAddresses(svc.Container.Name)
if err != nil {
svc.Cleanup()
return nil, nil, fmt.Errorf("failed to get container network information: %w", err)
}
if len(mapping) != 1 {
svc.Cleanup()
return nil, nil, fmt.Errorf("expected 1 network mapping, got %d", len(mapping))
}
var ip string
for _, ip = range mapping {
// capture the container IP address from the map
}
return svc,
&transitContainerConfig{
Address: fmt.Sprintf("http://%s:8200", ip),
Token: rootToken,
MountPaths: mountPaths,
KeyNames: keyNames,
}, nil
type step struct {
expectedSealType string
seals []seal
}
func validateVaultStatusAndSealType(client *api.Client, expectedSealType string) error {
@ -268,95 +64,11 @@ func validateVaultStatusAndSealType(client *api.Client, expectedSealType string)
return nil
}
// testClient returns an api client configured to talk to the vault server at
// the given address.
func testClient(address string) (*api.Client, error) {
	cfg := api.DefaultConfig()
	cfg.Address = address
	return api.NewClient(cfg)
}
// initializeVault initializes the vault behind client and returns the barrier
// keys (shamir) or recovery keys (auto-seal) along with the root token. For a
// shamir seal it also unseals vault with the single generated key.
func initializeVault(client *api.Client, sealType string) ([]string, string, error) {
	if sealType == "shamir" {
		resp, err := client.Sys().Init(&api.InitRequest{
			SecretThreshold: 1,
			SecretShares:    1,
		})
		if err != nil {
			return nil, "", err
		}
		if _, err := client.Sys().Unseal(resp.Keys[0]); err != nil {
			return nil, "", err
		}
		return resp.Keys, resp.RootToken, nil
	}

	// Auto-seal: vault unseals itself, so only init and return the recovery
	// keys and root token.
	resp, err := client.Sys().Init(&api.InitRequest{
		RecoveryShares:    1,
		RecoveryThreshold: 1,
	})
	if err != nil {
		return nil, "", err
	}
	return resp.RecoveryKeys, resp.RootToken, nil
}
// copyConfigToContainer tars up the given build context and copies its
// contents into the container's /vault/config directory.
func copyConfigToContainer(containerID string, bCtx dockhelper.BuildContext, runner *dockhelper.Runner) error {
	tar, err := bCtx.ToTarball()
	if err != nil {
		return fmt.Errorf("error creating config tarball: %w", err)
	}
	_, err = runner.DockerAPI.CopyToContainer(context.Background(), containerID, client.CopyToContainerOptions{
		DestinationPath: "/vault/config",
		Content:         tar,
	})
	if err != nil {
		return fmt.Errorf("error copying config to container: %w", err)
	}
	return nil
}
// copyRecoveryModeTriggerToContainer drops the recovery-mode marker file into
// the container at recoveryModeFileDir; setup-softhsm.sh looks for this file
// and adds the -recovery flag when launching vault.
func copyRecoveryModeTriggerToContainer(containerID string, runner *dockhelper.Runner) error {
	bCtx := dockhelper.NewBuildContext()
	bCtx[recoveryModeFileName] = &dockhelper.FileContents{
		Data: []byte(recoveryModeFileContents),
		Mode: 0o644,
	}
	tar, err := bCtx.ToTarball()
	if err != nil {
		return fmt.Errorf("error creating config tarball: %w", err)
	}
	_, err = runner.DockerAPI.CopyToContainer(context.Background(), containerID, client.CopyToContainerOptions{
		DestinationPath: recoveryModeFileDir,
		Content:         tar,
	})
	if err != nil {
		// Fixed typo in the message: "revovery" -> "recovery".
		return fmt.Errorf("error copying recovery mode trigger file to container: %w", err)
	}
	return nil
}
func dockerOptions(t *testing.T, repo, tag string) *docker.DockerClusterOptions {
opts := docker.DefaultOptions(t)
opts.NumCores = 1
opts.VaultBinary = os.Getenv("VAULT_BINARY")
opts.ImageRepo, opts.ImageTag = repo, tag
opts.VaultBinary = ""
// Probably not reliable in CI with multi-node clusters, but we're assuming callers
// of this func won't change NumCores to be >1.
opts.VaultNodeConfig.StorageOptions = map[string]string{

View File

@ -0,0 +1,24 @@
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1
//go:build !enterprise
package seal_binary
import (
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/sdk/helper/testcluster"
"github.com/stretchr/testify/assert"
)
// pkcsWrapper is the CE stub for the enterprise-only PKCS#11 seal helper; it
// returns an empty seal config.
func pkcsWrapper(string, int) testcluster.VaultNodeSealConfig {
	return testcluster.VaultNodeSealConfig{}
}
// getRewrappedEntryCount is the CE stub for the enterprise-only seal-rewrap
// counter; it always reports zero entries and no error.
func getRewrappedEntryCount(client *api.Client) (uint32, error) {
	return 0, nil
}
// verifyRewrappedEntryCount is the CE stub for the enterprise-only seal-rewrap
// verification; it performs no checks and returns zero.
func verifyRewrappedEntryCount(t *assert.CollectT, client *api.Client, initialProcessedEntries uint32) uint32 {
	return 0
}

View File

@ -1,178 +1,269 @@
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1
//go:build !enterprise
package seal_binary
import (
"context"
"fmt"
"os"
"strconv"
"testing"
"time"
dockhelper "github.com/hashicorp/vault/sdk/helper/docker"
dockerclient "github.com/moby/moby/client"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/helper/constants"
sealhelper "github.com/hashicorp/vault/helper/testhelpers/seal"
"github.com/hashicorp/vault/helper/testhelpers/testimages"
"github.com/hashicorp/vault/sdk/helper/testcluster/docker"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestSealReloadSIGHUP(t *testing.T) {
binary := os.Getenv("VAULT_BINARY")
if binary == "" {
t.Skip("only running docker test with $VAULT_BINARY present")
transit := sealhelper.NewTransitDockerSealServer(t)
repo, tag := testimages.GetImageRepoAndTag(t, constants.IsEnterprise)
type testCase struct {
name string
steps []step
disableMultiseal bool
}
transitContainer, transitConfig, err := createTransitTestContainer("hashicorp/vault", "latest", 2)
if err != nil {
t.Fatalf("error creating vault container: %s", err)
}
defer transitContainer.Cleanup()
firstTransitKeyConfig := fmt.Sprintf(transitParameters,
transitConfig.Address,
transitConfig.Token,
transitConfig.MountPaths[0],
transitConfig.KeyNames[0],
"transit-seal-1",
)
secondTransitKeyConfig := fmt.Sprintf(transitParameters,
transitConfig.Address,
transitConfig.Token,
transitConfig.MountPaths[1],
transitConfig.KeyNames[1],
"transit-seal-2",
)
testCases := map[string]struct {
sealStanzas []string
expectedSealTypes []string
}{
"migrate transit to transit": {
sealStanzas: []string{
fmt.Sprintf(transitStanza, firstTransitKeyConfig, 1, "false"),
fmt.Sprintf(transitStanza, firstTransitKeyConfig, 2, "true") + "," +
fmt.Sprintf(transitStanza, secondTransitKeyConfig, 1, "false"),
fmt.Sprintf(transitStanza, secondTransitKeyConfig, 1, "false"),
testCases := []testCase{
{
name: "transit to transit",
steps: []step{
{
"transit", []seal{
{base: transit.Seal, index: 0, priority: 1},
},
}, {
"multiseal", []seal{
{base: transit.Seal, index: 0, priority: 2, disabled: true},
{base: transit.Seal, index: 1, priority: 1},
},
}, {
"transit", []seal{
{base: transit.Seal, index: 1, priority: 1},
},
},
},
expectedSealTypes: []string{
"transit",
"transit",
"transit",
}, {
name: "transit to transit no multiseal",
steps: []step{
{
"transit", []seal{
{base: transit.Seal, index: 0, priority: 1},
},
}, {
"transit", []seal{
{base: transit.Seal, index: 0, priority: 2, disabled: true},
{base: transit.Seal, index: 1, priority: 1},
},
}, {
"transit", []seal{
{base: transit.Seal, index: 1, priority: 1},
},
},
},
},
"migrate shamir to transit fails": {
sealStanzas: []string{
"",
fmt.Sprintf(transitStanza, firstTransitKeyConfig, 1, "false"),
disableMultiseal: true,
}, {
name: "transit to pkcs11",
steps: []step{
{
"transit", []seal{
{base: transit.Seal, priority: 1},
},
}, {
"multiseal", []seal{
{base: transit.Seal, priority: 1, disabled: true},
{base: pkcsWrapper, priority: 2},
},
}, {
"pkcs11", []seal{
{base: pkcsWrapper, priority: 1},
},
},
},
expectedSealTypes: []string{
"shamir",
"shamir",
}, {
name: "pkcs11 to transit",
steps: []step{
{
"pkcs11", []seal{
{base: pkcsWrapper, priority: 1},
},
}, {
"multiseal", []seal{
{base: pkcsWrapper, priority: 2, disabled: true},
{base: transit.Seal, priority: 1},
},
}, {
"transit", []seal{
{base: transit.Seal, priority: 1},
},
},
},
},
"migrate transit to shamir fails": {
sealStanzas: []string{
fmt.Sprintf(transitStanza, firstTransitKeyConfig, 1, "false"),
"",
}, {
name: "two transit seals",
steps: []step{
{
"transit", []seal{
{base: transit.Seal, priority: 1},
},
}, {
"multiseal", []seal{
{base: transit.Seal, index: 0, priority: 1},
{base: transit.Seal, index: 1, priority: 2},
},
},
},
expectedSealTypes: []string{
"transit",
"transit",
}, {
name: "pkcs11 seal and transit seal",
steps: []step{
{
"transit", []seal{
{base: transit.Seal, priority: 1},
},
}, {
"multiseal", []seal{
{base: transit.Seal, priority: 1},
{base: pkcsWrapper, priority: 2},
},
},
},
},
"replacing seal fails": {
sealStanzas: []string{
fmt.Sprintf(transitStanza, firstTransitKeyConfig, 1, "false"),
fmt.Sprintf(transitStanza, secondTransitKeyConfig, 1, "false"),
}, {
name: "three seals",
steps: []step{
{
"transit", []seal{
{base: transit.Seal, index: 0, priority: 1},
},
}, {
"multiseal", []seal{
{base: transit.Seal, index: 0, priority: 1},
{base: pkcsWrapper, priority: 2},
},
}, {
"multiseal", []seal{
{base: transit.Seal, index: 0, priority: 1},
{base: pkcsWrapper, priority: 2},
{base: transit.Seal, index: 1, priority: 3},
},
},
},
expectedSealTypes: []string{
"transit",
"transit",
}, {
name: "remove enabled seal",
steps: []step{
{
"transit", []seal{
{base: transit.Seal, priority: 1},
},
}, {
"multiseal", []seal{
{base: transit.Seal, priority: 1},
{base: pkcsWrapper, priority: 2},
},
}, {
"pkcs11", []seal{
{base: pkcsWrapper, priority: 2},
},
},
},
},
"more than one seal fails": {
sealStanzas: []string{
fmt.Sprintf(transitStanza, firstTransitKeyConfig, 1, "false"),
fmt.Sprintf(transitStanza, firstTransitKeyConfig, 1, "false") + "," +
fmt.Sprintf(transitStanza, secondTransitKeyConfig, 2, "false"),
}, {
name: "shamir to transit fails",
steps: []step{
{
"shamir", nil,
}, {
"shamir", []seal{
{base: transit.Seal, priority: 1},
},
},
},
expectedSealTypes: []string{
"transit",
"transit",
}, {
name: "transit to shamir fails",
steps: []step{
{
"transit", []seal{{base: transit.Seal, priority: 1}},
}, {
"transit", nil,
},
},
}, {
name: "replacing seal fails",
steps: []step{
{
"transit", []seal{{base: transit.Seal, index: 0, priority: 1}},
}, {
"transit", []seal{{base: transit.Seal, index: 1, priority: 1}},
},
},
},
}
containerFile := `
FROM hashicorp/vault:latest
COPY vault /bin/vault
`
bCtx, err := createBuildContextWithBinary(os.Getenv("VAULT_BINARY"))
if err != nil {
t.Fatalf("error creating build context: %s", err)
}
err = createDockerImage("hashicorp/vault", "test-image", containerFile, bCtx)
if err != nil {
t.Fatalf("error creating docker image: %s", err)
isEnterpriseCase := func(tc testCase) bool {
for _, step := range tc.steps {
if step.expectedSealType == "multiseal" || step.expectedSealType == "pkcs11" {
return true
}
}
return false
}
for name, test := range testCases {
t.Run(name, func(t *testing.T) {
var sealList string
if test.sealStanzas[0] != "" {
sealList = fmt.Sprintf(sealConfig, test.sealStanzas[0])
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
if isEnterpriseCase(tc) && !constants.IsEnterprise {
t.Skip("Skipping enterprise tests")
}
vaultConfig := fmt.Sprintf(containerConfig, sealList)
svc, runner, err := createContainerWithConfig(vaultConfig, "hashicorp/vault", "test-image", func(s string) { t.Log(s) })
if err != nil {
t.Fatalf("error creating container: %s", err)
opts := dockerOptions(t, repo, tag)
for _, seal := range tc.steps[0].seals {
vncseal := withPriorityAndDisabled(seal.priority, seal.disabled, seal.base(tc.name, seal.index))
opts.VaultNodeConfig.Seal = append(opts.VaultNodeConfig.Seal, vncseal)
}
defer svc.Cleanup()
time.Sleep(5 * time.Second)
client, err := testClient(svc.Config.URL().String())
if err != nil {
t.Fatalf("err: %s", err)
if tc.steps[0].expectedSealType != "shamir" && !tc.disableMultiseal {
opts.VaultNodeConfig.EnableMultiSeal = true
}
cluster := docker.NewTestDockerCluster(t, opts)
node := cluster.Nodes()[0].(*docker.DockerClusterNode)
client := node.APIClient()
lastRewrappedEntryCount, err := getRewrappedEntryCount(client)
require.NoError(t, err)
_, token, err := initializeVault(client, test.expectedSealTypes[0])
if err != nil {
t.Fatalf("error initializing vault: %s", err)
}
client.SetToken(token)
// kv mounts are sealwrapped. In order to make sure that we don't get fooled
// by the rewrap status endpoint saying "not in progress" prior to a rewrap
// being started, we're going to arrange for there to be an extra key to wrap
// each iteration, by creating a new kv entry each iteration.
require.NoError(t, client.Sys().Mount("kv", &api.MountInput{
Type: "kv",
}))
client.Logical().Write("kv/0", map[string]any{"1": 1})
for i := range test.sealStanzas {
if test.sealStanzas[i] != "" {
sealList = fmt.Sprintf(sealList, test.sealStanzas[i])
expectFailure := len(tc.steps) < 3
for i := 1; i < len(tc.steps); i++ {
if tc.steps[i].expectedSealType != "shamir" && !tc.disableMultiseal {
opts.VaultNodeConfig.EnableMultiSeal = true
}
vaultConfig = fmt.Sprintf(containerConfig, sealList)
configCtx := dockhelper.NewBuildContext()
configCtx["local.json"] = &dockhelper.FileContents{
Data: []byte(vaultConfig),
Mode: 0o644,
opts.VaultNodeConfig.Seal = nil
for _, seal := range tc.steps[i].seals {
opts.VaultNodeConfig.Seal = append(opts.VaultNodeConfig.Seal,
withPriorityAndDisabled(seal.priority, seal.disabled, seal.base(tc.name, seal.index)))
}
require.NoError(t, node.UpdateConfig(t.Context(), opts))
require.NoError(t, node.Signal(t.Context(), "SIGHUP"))
err = copyConfigToContainer(svc.Container.ID, bCtx, runner)
if err != nil {
t.Fatalf("error copying over config file: %s", err)
}
require.EventuallyWithT(t, func(ct *assert.CollectT) {
if !tc.disableMultiseal && !expectFailure && tc.steps[i].expectedSealType != "shamir" {
lastRewrappedEntryCount = verifyRewrappedEntryCount(ct, client, lastRewrappedEntryCount+1)
}
_, err = runner.DockerAPI.ContainerKill(context.Background(), svc.Container.ID, dockerclient.ContainerKillOptions{
Signal: "SIGHUP",
})
if err != nil {
t.Fatalf("error sending SIGHUP: %s", err)
}
resp, err := client.Sys().SealStatusWithContext(t.Context())
require.NoError(t, err)
assert.Equal(ct, resp.Type, tc.steps[i].expectedSealType)
assert.False(ct, resp.Sealed)
}, 20*time.Second, time.Second/2)
err = validateVaultStatusAndSealType(client, test.expectedSealTypes[i])
if err != nil {
t.Fatalf("seal type check failed: %s", err)
}
client.Logical().Write("kv/"+strconv.Itoa(i), map[string]any{"1": 1})
}
})
}

View File

@ -4,11 +4,11 @@
package system_binary
import (
"os"
"testing"
"time"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/helper/testhelpers/testimages"
"github.com/hashicorp/vault/sdk/helper/testcluster"
"github.com/hashicorp/vault/sdk/helper/testcluster/docker"
"github.com/stretchr/testify/require"
@ -60,19 +60,15 @@ func waitForRekeyInConfig(t *testing.T, client *api.Client, rootToken string, sh
// between requiring authentication and not requiring authentication by using
// the enable_unauthenticated_access config option and reloading the config.
func TestSysRekey_ConfigReload(t *testing.T) {
binary := os.Getenv("VAULT_BINARY")
if binary == "" {
t.Skip("only running docker test when $VAULT_BINARY present")
}
repo, tag := testimages.GetImageRepoAndTag(t, false)
nodeConfig := &testcluster.VaultNodeConfig{
LogLevel: "TRACE",
}
opts := &docker.DockerClusterOptions{
ImageRepo: "hashicorp/vault",
ImageTag: "latest",
VaultBinary: binary,
DisableMlock: true,
ImageRepo: repo,
ImageTag: tag,
ClusterOptions: testcluster.ClusterOptions{
NumCores: 1,
VaultNodeConfig: nodeConfig,