mirror of
https://github.com/hashicorp/vault.git
synced 2026-05-12 00:13:45 +02:00
Merge remote-tracking branch 'remotes/from/ce/release/2.x.x' into release/2.x.x
This commit is contained in:
commit
3d3e2e1aaf
51
.github/workflows/test-go.yml
vendored
51
.github/workflows/test-go.yml
vendored
@ -338,7 +338,33 @@ jobs:
|
||||
# The dev mode binary has to exist for binary tests that are dispatched on the last runner.
|
||||
env:
|
||||
GOPRIVATE: github.com/hashicorp/*
|
||||
run: time make prep dev
|
||||
run: |
|
||||
set -exo pipefail
|
||||
time make prep dev
|
||||
mv bin/vault vault-binary
|
||||
- if: inputs.binary-tests && matrix.id == inputs.total-runners
|
||||
id: build-docker-image
|
||||
name: Build Docker image with custom vault binary
|
||||
run: |
|
||||
set -exo pipefail
|
||||
|
||||
if [ "${{ needs.test-matrix.outputs.is-ent-branch }}" == "true" ]; then
|
||||
go run ./tools/testimagemaker/ -source=docker.io/hashicorp/vault-enterprise:latest -target=hashicorp/vault-enterprise-ci:latest -binary=./vault-binary
|
||||
go run ./tools/testimagemaker/ -source=docker.io/hashicorp/vault-enterprise:2.0.0-ent.hsm -target=hashicorp/vault-enterprise-ci:latest-hsm -binary=./vault-hsm-binary -hsm
|
||||
|
||||
# Verify the images were built successfully
|
||||
docker images hashicorp/vault-enterprise-ci:latest
|
||||
echo "image=hashicorp/vault-enterprise-ci:latest" >> "$GITHUB_OUTPUT"
|
||||
|
||||
docker images hashicorp/vault-enterprise-ci:latest-hsm
|
||||
echo "hsmimage=hashicorp/vault-enterprise-ci:latest-hsm" >> "$GITHUB_OUTPUT"
|
||||
else
|
||||
go run ./tools/testimagemaker/ -source=docker.io/hashicorp/vault:latest -target=hashicorp/vault-ci:latest -binary=./vault-binary
|
||||
|
||||
# Verify the images was built successfully
|
||||
docker images hashicorp/vault-ci:latest
|
||||
echo "image=hashicorp/vault-ci:latest" >> "$GITHUB_OUTPUT"
|
||||
fi
|
||||
- if: needs.test-matrix.outputs.is-ent-repo != 'true'
|
||||
# Enterprise repo runners do not allow sudo, so can't install gVisor there yet.
|
||||
name: Install gVisor
|
||||
@ -402,17 +428,26 @@ jobs:
|
||||
# The docker/binary tests are more expensive, and we've had problems with timeouts when running at full
|
||||
# parallelism. The default if -p isn't specified is to use NumCPUs, which seems fine for regular tests.
|
||||
package_parallelism=""
|
||||
test_parallelism="${{ inputs.go-test-parallelism }}"
|
||||
|
||||
if [ -f vault-hsm-binary ]; then
|
||||
VAULT_HSM_BINARY="$(pwd)/vault-hsm-binary"
|
||||
export VAULT_HSM_BINARY
|
||||
# The image will be preferred by most docker tests, since it doesn't require the extra
|
||||
# overhead of mutating an image to add the current binary. We still populate $VAULT_BINARY
|
||||
# for the sake of exec tests like TestSysPprof_Exec.
|
||||
if [ -f vault-binary ]; then
|
||||
VAULT_BINARY=$(pwd)/vault-binary
|
||||
export VAULT_BINARY
|
||||
export VAULT_IMAGE=${{ steps.build-docker-image.outputs.image }}
|
||||
fi
|
||||
|
||||
if [ -f bin/vault ]; then
|
||||
VAULT_BINARY="$(pwd)/bin/vault"
|
||||
export VAULT_BINARY
|
||||
if [ -f vault-hsm-binary ]; then
|
||||
VAULT_HSM_BINARY=$(pwd)/vault-hsm-binary
|
||||
export VAULT_HSM_BINARY
|
||||
export VAULT_HSM_IMAGE=${{ steps.build-docker-image.outputs.hsmimage }}
|
||||
fi
|
||||
|
||||
if [ -f vault-binary ] || [ -f vault-hsm-binary ]; then
|
||||
package_parallelism="-p 2"
|
||||
test_parallelism=4
|
||||
fi
|
||||
|
||||
# If running Go tests on the enterprise repo, add a flag to rerun failed tests.
|
||||
@ -436,7 +471,7 @@ jobs:
|
||||
$package_parallelism \
|
||||
-tags "${{ inputs.go-tags }}" \
|
||||
-timeout=${{ inputs.go-test-timeout }} \
|
||||
-parallel=${{ inputs.go-test-parallelism }} \
|
||||
-parallel="$test_parallelism" \
|
||||
${{ inputs.extra-flags }} \
|
||||
- if: (github.repository == 'hashicorp/vault' || github.repository == 'hashicorp/vault-enterprise') && (success() || failure())
|
||||
name: Prepare datadog-ci
|
||||
|
||||
@ -96,25 +96,26 @@ func sealStatusRequestWithContext(ctx context.Context, c *Sys, r *Request) (*Sea
|
||||
}
|
||||
|
||||
type SealStatusResponse struct {
|
||||
Type string `json:"type"`
|
||||
Initialized bool `json:"initialized"`
|
||||
Sealed bool `json:"sealed"`
|
||||
T int `json:"t"`
|
||||
N int `json:"n"`
|
||||
Progress int `json:"progress"`
|
||||
Nonce string `json:"nonce"`
|
||||
Version string `json:"version"`
|
||||
BuildDate string `json:"build_date"`
|
||||
Migration bool `json:"migration"`
|
||||
ClusterName string `json:"cluster_name,omitempty"`
|
||||
ClusterID string `json:"cluster_id,omitempty"`
|
||||
RecoverySeal bool `json:"recovery_seal"`
|
||||
RecoverySealType string `json:"recovery_seal_type,omitempty"`
|
||||
StorageType string `json:"storage_type,omitempty"`
|
||||
HCPLinkStatus string `json:"hcp_link_status,omitempty"`
|
||||
HCPLinkResourceID string `json:"hcp_link_resource_ID,omitempty"`
|
||||
RemovedFromCluster *bool `json:"removed_from_cluster,omitempty"`
|
||||
Warnings []string `json:"warnings,omitempty"`
|
||||
Type string `json:"type"`
|
||||
Initialized bool `json:"initialized"`
|
||||
Sealed bool `json:"sealed"`
|
||||
T int `json:"t"`
|
||||
N int `json:"n"`
|
||||
Progress int `json:"progress"`
|
||||
Nonce string `json:"nonce"`
|
||||
Version string `json:"version"`
|
||||
BuildDate string `json:"build_date"`
|
||||
Migration bool `json:"migration"`
|
||||
ClusterName string `json:"cluster_name,omitempty"`
|
||||
ClusterID string `json:"cluster_id,omitempty"`
|
||||
RecoverySeal bool `json:"recovery_seal"`
|
||||
RecoverySealType string `json:"recovery_seal_type,omitempty"`
|
||||
StorageType string `json:"storage_type,omitempty"`
|
||||
HCPLinkStatus string `json:"hcp_link_status,omitempty"`
|
||||
HCPLinkResourceID string `json:"hcp_link_resource_ID,omitempty"`
|
||||
RemovedFromCluster *bool `json:"removed_from_cluster,omitempty"`
|
||||
Warnings []string `json:"warnings,omitempty"`
|
||||
MigrationDoneAtEpoch int64 `json:"migration_done_at_epoch,omitempty"`
|
||||
}
|
||||
|
||||
type UnsealOpts struct {
|
||||
|
||||
@ -6,11 +6,11 @@ package pkiext_binary
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/vault/api"
|
||||
"github.com/hashicorp/vault/helper/testhelpers/testimages"
|
||||
dockhelper "github.com/hashicorp/vault/sdk/helper/docker"
|
||||
"github.com/hashicorp/vault/sdk/helper/testcluster"
|
||||
"github.com/hashicorp/vault/sdk/helper/testcluster/docker"
|
||||
@ -23,17 +23,11 @@ type VaultPkiCluster struct {
|
||||
}
|
||||
|
||||
func NewVaultPkiCluster(t *testing.T) *VaultPkiCluster {
|
||||
binary := os.Getenv("VAULT_BINARY")
|
||||
if binary == "" {
|
||||
t.Skip("only running docker test when $VAULT_BINARY present")
|
||||
}
|
||||
repo, tag := testimages.GetImageRepoAndTag(t, false)
|
||||
|
||||
opts := &docker.DockerClusterOptions{
|
||||
ImageRepo: "docker.mirror.hashicorp.services/hashicorp/vault",
|
||||
// We're replacing the binary anyway, so we're not too particular about
|
||||
// the docker image version tag.
|
||||
ImageTag: "latest",
|
||||
VaultBinary: binary,
|
||||
ImageRepo: repo,
|
||||
ImageTag: tag,
|
||||
ClusterOptions: testcluster.ClusterOptions{
|
||||
VaultNodeConfig: &testcluster.VaultNodeConfig{
|
||||
LogLevel: "TRACE",
|
||||
|
||||
3
changelog/_14271.txt
Normal file
3
changelog/_14271.txt
Normal file
@ -0,0 +1,3 @@
|
||||
```release-note:improvement
|
||||
core/seal (enterprise): Make it possible for new nodes to join a cluster configured with Seal High Availability.
|
||||
```
|
||||
3
changelog/_14334.txt
Normal file
3
changelog/_14334.txt
Normal file
@ -0,0 +1,3 @@
|
||||
```release-note:improvement
|
||||
api: Add migration_done_at_epoch to sys/seal-status response.
|
||||
```
|
||||
3
changelog/_14335.txt
Normal file
3
changelog/_14335.txt
Normal file
@ -0,0 +1,3 @@
|
||||
```release-note:bug
|
||||
sdk: Small bugfixes relating to docker test container cleanup and image building.
|
||||
```
|
||||
9
changelog/_14350.txt
Normal file
9
changelog/_14350.txt
Normal file
@ -0,0 +1,9 @@
|
||||
```release-note:improvement
|
||||
sdk: Expand support for docker test cluster options like seals, kms libraries, and entropy augmentation. DockerClusterNode.UpdateConfig now takes a full set of cluster options instead of just node config.
|
||||
```
|
||||
```release-note:improvement
|
||||
core (Enterprise): Sanitized config now shows kms_library config.
|
||||
```
|
||||
```release-note:Bug
|
||||
core (Enterprise): Fix parsing of kms_libary and entropy config in JSON format.
|
||||
```
|
||||
@ -560,7 +560,7 @@ func (c *ServerCommand) runRecoveryMode() int {
|
||||
return 1
|
||||
}
|
||||
|
||||
hasPartialPaths, err := hasPartiallyWrappedPaths(ctx, backend)
|
||||
hasPartialPaths, err := vault.HasPartiallyWrappedPaths(ctx, backend)
|
||||
if err != nil {
|
||||
c.UI.Error(fmt.Sprintf("Cannot determine if there are partially seal wrapped entries in storage: %v", err))
|
||||
return 1
|
||||
@ -1935,7 +1935,7 @@ func (c *ServerCommand) configureSeals(ctx context.Context, config *server.Confi
|
||||
return nil, nil, fmt.Errorf("Error getting seal generation info: %v", err)
|
||||
}
|
||||
|
||||
hasPartialPaths, err := hasPartiallyWrappedPaths(ctx, backend)
|
||||
hasPartialPaths, err := vault.HasPartiallyWrappedPaths(ctx, backend)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("Cannot determine if there are partially seal wrapped entries in storage: %v", err)
|
||||
}
|
||||
@ -2752,25 +2752,16 @@ func (c *ServerCommand) computeSealGenerationInfo(existingSealGenInfo *vaultseal
|
||||
Enabled: multisealEnabled,
|
||||
}
|
||||
|
||||
if multisealEnabled || (existingSealGenInfo != nil && existingSealGenInfo.Enabled) {
|
||||
err := newSealGenInfo.Validate(existingSealGenInfo, hasPartiallyWrappedPaths)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Validate multi seal concerns of the seal configuration. Note that at this
|
||||
// point Vault is starting up, not initializing (as in "vault operator init").
|
||||
err := vaultseal.ValidateMultiSealGenerationInfo(false, newSealGenInfo, existingSealGenInfo, hasPartiallyWrappedPaths)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return newSealGenInfo, nil
|
||||
}
|
||||
|
||||
func hasPartiallyWrappedPaths(ctx context.Context, backend physical.Backend) (bool, error) {
|
||||
paths, err := vault.GetPartiallySealWrappedPaths(ctx, backend)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return len(paths) > 0, nil
|
||||
}
|
||||
|
||||
func initHaBackend(c *ServerCommand, config *server.Config, coreConfig *vault.CoreConfig, backend physical.Backend) (bool, error) {
|
||||
// Initialize the separate HA storage backend, if it exists
|
||||
var ok bool
|
||||
|
||||
@ -330,11 +330,13 @@ func testLoadConfigFile_json2(t *testing.T, entropy *configutil.Entropy) {
|
||||
|
||||
func testParseEntropy(t *testing.T, oss bool) {
|
||||
tests := []struct {
|
||||
name string
|
||||
inConfig string
|
||||
outErr error
|
||||
outEntropy configutil.Entropy
|
||||
}{
|
||||
{
|
||||
name: "good",
|
||||
inConfig: `entropy "seal" {
|
||||
mode = "augmentation"
|
||||
}`,
|
||||
@ -342,18 +344,21 @@ func testParseEntropy(t *testing.T, oss bool) {
|
||||
outEntropy: configutil.Entropy{Mode: configutil.EntropyAugmentation},
|
||||
},
|
||||
{
|
||||
name: "bad mode",
|
||||
inConfig: `entropy "seal" {
|
||||
mode = "a_mode_that_is_not_supported"
|
||||
}`,
|
||||
outErr: fmt.Errorf("the specified entropy mode %q is not supported", "a_mode_that_is_not_supported"),
|
||||
},
|
||||
{
|
||||
name: "bad device",
|
||||
inConfig: `entropy "device_that_is_not_supported" {
|
||||
mode = "augmentation"
|
||||
}`,
|
||||
outErr: fmt.Errorf("only the %q type of external entropy is supported", "seal"),
|
||||
},
|
||||
{
|
||||
name: "duplicate section",
|
||||
inConfig: `entropy "seal" {
|
||||
mode = "augmentation"
|
||||
}
|
||||
@ -362,6 +367,15 @@ func testParseEntropy(t *testing.T, oss bool) {
|
||||
}`,
|
||||
outErr: fmt.Errorf("only one %q block is permitted", "entropy"),
|
||||
},
|
||||
{
|
||||
name: "json",
|
||||
inConfig: `{
|
||||
"entropy": {
|
||||
"seal": {"mode": "augmentation"}
|
||||
}`,
|
||||
outErr: nil,
|
||||
outEntropy: configutil.Entropy{Mode: configutil.EntropyAugmentation},
|
||||
},
|
||||
}
|
||||
|
||||
config := Config{
|
||||
@ -369,25 +383,27 @@ func testParseEntropy(t *testing.T, oss bool) {
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
obj, _ := hcl.Parse(strings.TrimSpace(test.inConfig))
|
||||
list, _ := obj.Node.(*ast.ObjectList)
|
||||
objList := list.Filter("entropy")
|
||||
err := configutil.ParseEntropy(config.SharedConfig, objList, "entropy")
|
||||
// validate the error, both should be nil or have the same Error()
|
||||
switch {
|
||||
case oss:
|
||||
if config.Entropy != nil {
|
||||
t.Fatalf("parsing Entropy should not be possible in oss but got a non-nil config.Entropy: %#v", config.Entropy)
|
||||
}
|
||||
case err != nil && test.outErr != nil:
|
||||
if err.Error() != test.outErr.Error() {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
obj, _ := hcl.Parse(strings.TrimSpace(test.inConfig))
|
||||
list, _ := obj.Node.(*ast.ObjectList)
|
||||
objList := list.Filter("entropy")
|
||||
err := configutil.ParseEntropy(config.SharedConfig, objList, "entropy")
|
||||
// validate the error, both should be nil or have the same Error()
|
||||
switch {
|
||||
case oss:
|
||||
if config.Entropy != nil {
|
||||
t.Fatalf("parsing Entropy should not be possible in oss but got a non-nil config.Entropy: %#v", config.Entropy)
|
||||
}
|
||||
case err != nil && test.outErr != nil:
|
||||
if err.Error() != test.outErr.Error() {
|
||||
t.Fatalf("error mismatch: expected %#v got %#v", err, test.outErr)
|
||||
}
|
||||
case err != test.outErr:
|
||||
t.Fatalf("error mismatch: expected %#v got %#v", err, test.outErr)
|
||||
case err == nil && config.Entropy != nil && *config.Entropy != test.outEntropy:
|
||||
t.Fatalf("entropy config mismatch: expected %#v got %#v", test.outEntropy, *config.Entropy)
|
||||
}
|
||||
case err != test.outErr:
|
||||
t.Fatalf("error mismatch: expected %#v got %#v", err, test.outErr)
|
||||
case err == nil && config.Entropy != nil && *config.Entropy != test.outEntropy:
|
||||
t.Fatalf("entropy config mismatch: expected %#v got %#v", test.outEntropy, *config.Entropy)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -4,8 +4,10 @@
|
||||
package sealhelper
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/vault/api"
|
||||
@ -14,9 +16,12 @@ import (
|
||||
"github.com/hashicorp/vault/helper/testhelpers/teststorage"
|
||||
"github.com/hashicorp/vault/http"
|
||||
"github.com/hashicorp/vault/internalshared/configutil"
|
||||
"github.com/hashicorp/vault/sdk/helper/testcluster"
|
||||
"github.com/hashicorp/vault/sdk/helper/testcluster/docker"
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
"github.com/hashicorp/vault/vault"
|
||||
"github.com/hashicorp/vault/vault/seal"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
type TransitSealServer struct {
|
||||
@ -36,7 +41,6 @@ func NewTransitSealServer(t testing.TB, idx int) *TransitSealServer {
|
||||
}
|
||||
teststorage.InmemBackendSetup(conf, opts)
|
||||
cluster := vault.NewTestCluster(t, conf, opts)
|
||||
cluster.Start()
|
||||
|
||||
if err := cluster.Cores[0].Client.Sys().Mount("transit", &api.MountInput{
|
||||
Type: "transit",
|
||||
@ -79,3 +83,62 @@ func (tss *TransitSealServer) MakeSeal(t testing.TB, key string) (vault.Seal, er
|
||||
}
|
||||
return vault.NewAutoSeal(access), nil
|
||||
}
|
||||
|
||||
type TransitDockerSealServer struct {
|
||||
cluster *docker.DockerCluster
|
||||
t *testing.T
|
||||
}
|
||||
|
||||
func NewTransitDockerSealServer(t *testing.T) *TransitDockerSealServer {
|
||||
opts := docker.DefaultOptions(t)
|
||||
opts.NumCores = 1
|
||||
opts.ImageRepo, opts.ImageTag = "hashicorp/vault", "latest"
|
||||
opts.VaultNodeConfig.StorageOptions = map[string]string{
|
||||
"performance_multiplier": "1",
|
||||
}
|
||||
opts.DisableTLS = true // simplify, this way we don't have to deal with ca
|
||||
opts.ClusterName = strings.ReplaceAll(t.Name()+"-transit", "/", "-")
|
||||
return &TransitDockerSealServer{t: t, cluster: docker.NewTestDockerCluster(t, opts)}
|
||||
}
|
||||
|
||||
func (tc *TransitDockerSealServer) APIClient() *api.Client {
|
||||
return tc.cluster.Nodes()[0].APIClient()
|
||||
}
|
||||
|
||||
func (tc *TransitDockerSealServer) SealWithPriorityAndDisabled(name string, idx int, disabled bool, priority int) testcluster.VaultNodeSealConfig {
|
||||
seal := tc.Seal(name, idx)
|
||||
seal.Config["disabled"] = strconv.FormatBool(disabled)
|
||||
seal.Config["priority"] = strconv.Itoa(priority)
|
||||
return seal
|
||||
}
|
||||
|
||||
// Seal creates a seal using the given mount name and an idx that identifies a key.
|
||||
// The mount and key will be created.
|
||||
func (tc *TransitDockerSealServer) Seal(name string, idx int) testcluster.VaultNodeSealConfig {
|
||||
client := tc.cluster.Nodes()[0].APIClient()
|
||||
if m, _ := client.Sys().GetMount(name); m == nil {
|
||||
require.NoError(tc.t, client.Sys().Mount(name, &api.MountInput{
|
||||
Type: "transit",
|
||||
}))
|
||||
}
|
||||
|
||||
keyName := fmt.Sprintf("transit-seal-%d", idx+1)
|
||||
|
||||
_, err := client.Logical().Write(path.Join(name, "keys", keyName), nil)
|
||||
require.NoError(tc.t, err)
|
||||
|
||||
return testcluster.VaultNodeSealConfig{
|
||||
Type: "transit",
|
||||
Config: map[string]string{
|
||||
// For another docker container to talk to this cluster they
|
||||
// must use the real api address, not the remapped localhost
|
||||
// address test code uses.
|
||||
"address": tc.cluster.Nodes()[0].(*docker.DockerClusterNode).RealAPIAddr,
|
||||
"token": tc.cluster.GetRootToken(),
|
||||
"mount_path": name,
|
||||
"key_name": keyName,
|
||||
"name": strings.ReplaceAll(name, " ", "_") + "-" + keyName,
|
||||
"priority": "1",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
252
helper/testhelpers/testimages/hsm.go
Normal file
252
helper/testhelpers/testimages/hsm.go
Normal file
@ -0,0 +1,252 @@
|
||||
// Copyright IBM Corp. 2016, 2025
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package testimages
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/vault/helper/constants"
|
||||
dockhelper "github.com/hashicorp/vault/sdk/helper/docker"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// GetImageRepoAndTag returns an image repo and tag that can be used to start a vault
|
||||
// node via docker. Env vars are used as inputs: either VAULT_BINARY and VAULT_IMAGE
|
||||
// if hsm is false, or VAULT_HSM_BINARY and VAULT_HSM_IMAGE if hsm is true.
|
||||
//
|
||||
// If a matching image var is set, we split that on ":" and return the two pieces
|
||||
// as the repo and tag. If instead a matching binary var is set, we create an image
|
||||
// using a vault-enterprise docker image as a starting point, then add softhsm
|
||||
// (when hsm is true) and the specified binary to it. If neither the image or binary var
|
||||
// are set, we fail the test.
|
||||
//
|
||||
// For devs on their workstations, they can either create an image or a binary and
|
||||
// set the env vars appropriately. Creating an hsm linux binary is more challenging and
|
||||
// time-consuming than creating a regular binary, so we don't want to impose that
|
||||
// on people running tests that don't require one.
|
||||
//
|
||||
// See also tools/testimagemaker for a way to build an image for this purpose
|
||||
// from the CLI.
|
||||
func GetImageRepoAndTag(t *testing.T, hsm bool) (string, string) {
|
||||
t.Helper()
|
||||
repo, tag, output, err := CreateOrReturnDockerImage(hsm)
|
||||
if err != nil && output != nil {
|
||||
t.Logf("docker image create output: %s", output)
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
// t.Logf("used bin=%s (%q) and img=%s (%q) to create %s:%s", bin, os.Getenv(bin), img, os.Getenv(img), repo, tag)
|
||||
t.Cleanup(func() {
|
||||
// When image build fails, it doesn't always return an error, but typically the error
|
||||
// is visible in the output
|
||||
if t.Failed() && output != nil {
|
||||
t.Logf("docker image create output: %s", output)
|
||||
}
|
||||
})
|
||||
|
||||
return repo, tag
|
||||
}
|
||||
|
||||
// CreateOrReturnDockerImage looks at the vaultBinary and vaultImage params.
|
||||
// If vaultImage is populated, it is split by ":" and the two pieces are returned
|
||||
// as the repo and tag. If vault_binary is populated, an image is created based on
|
||||
// the latest hsm image.
|
||||
// (TODO: currently hardcoded as "docker.io/hashicorp/vault-enterprise:2.0.0-ent.hsm")
|
||||
// This is done by installing SoftHSM and the vaultBinary on top of that image.
|
||||
// If neither is populated an error is returned.
|
||||
func CreateOrReturnDockerImage(hsm bool) (repo string, tag string, output []byte, err error) {
|
||||
binVar, imgVar := "VAULT_BINARY", "VAULT_IMAGE"
|
||||
if hsm {
|
||||
binVar, imgVar = "VAULT_HSM_BINARY", "VAULT_HSM_IMAGE"
|
||||
}
|
||||
bin, img := os.Getenv(binVar), os.Getenv(imgVar)
|
||||
switch {
|
||||
case bin == "" && img == "":
|
||||
return "", "", nil, fmt.Errorf("no docker image or binary provided")
|
||||
case img != "":
|
||||
// Ignore the binary if an image is specified
|
||||
pieces := strings.Split(img, ":")
|
||||
if len(pieces) != 2 {
|
||||
return "", "", nil, fmt.Errorf("bad input image format %q", img)
|
||||
}
|
||||
return pieces[0], pieces[1], nil, nil
|
||||
default:
|
||||
base := "hashicorp/vault"
|
||||
if constants.IsEnterprise {
|
||||
base += "-enterprise"
|
||||
}
|
||||
repo := base + "-ci"
|
||||
tag := "latest"
|
||||
source := "docker.io/" + base + ":latest"
|
||||
if hsm {
|
||||
source = "docker.io/hashicorp/vault-enterprise:2.0.0-ent.hsm"
|
||||
tag = "latest-hsm"
|
||||
}
|
||||
target := fmt.Sprintf("%s:%s", repo, tag)
|
||||
var output []byte
|
||||
var err error
|
||||
if hsm {
|
||||
output, err = CreateHSMDockerImage(source, target, bin)
|
||||
} else {
|
||||
output, err = CreateNonHSMDockerImage(source, target, bin)
|
||||
}
|
||||
return repo, tag, output, err
|
||||
}
|
||||
}
|
||||
|
||||
func createBuildContextWithBinary(vaultBinary string) (dockhelper.BuildContext, error) {
|
||||
f, err := os.Open(vaultBinary)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error opening vault binary file: %w", err)
|
||||
}
|
||||
data, err := io.ReadAll(f)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error reading vault binary file: %w", err)
|
||||
}
|
||||
|
||||
bCtx := dockhelper.NewBuildContext()
|
||||
bCtx["vault"] = &dockhelper.FileContents{
|
||||
Data: data,
|
||||
Mode: 0o755,
|
||||
}
|
||||
|
||||
return bCtx, nil
|
||||
}
|
||||
|
||||
// createDockerImage creates an image named toImage from the given context and Dockerfile.
|
||||
func createDockerImage(toImage, containerFile string, bCtx dockhelper.BuildContext) ([]byte, error) {
|
||||
client, err := dockhelper.NewDockerAPI()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
output, err := dockhelper.BuildImage(context.Background(), client, containerFile, bCtx,
|
||||
dockhelper.BuildRemove(true),
|
||||
dockhelper.BuildForceRemove(true),
|
||||
dockhelper.BuildPullParent(true),
|
||||
dockhelper.BuildTags([]string{toImage}))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error building docker image: %w (output: %s)", err, output)
|
||||
}
|
||||
|
||||
return output, nil
|
||||
}
|
||||
|
||||
func CreateNonHSMDockerImage(fromImage, toImage, vaultBinary string) ([]byte, error) {
|
||||
bCtx := dockhelper.NewBuildContext()
|
||||
var err error
|
||||
bCtx, err = createBuildContextWithBinary(vaultBinary)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
containerFile := fmt.Sprintf(`
|
||||
FROM %s
|
||||
USER root
|
||||
|
||||
COPY vault /bin/vault
|
||||
|
||||
USER vault
|
||||
CMD ["server", "-dev"]
|
||||
`, fromImage)
|
||||
return createDockerImage(toImage, containerFile, bCtx)
|
||||
}
|
||||
|
||||
// CreateHSMDockerImage creates a new vault-enterprise hsm docker image from an existing
|
||||
// hsm image. The new image includes softhsm, and optionally a new vault binary.
|
||||
func CreateHSMDockerImage(fromImage, toImage, vaultBinary string) ([]byte, error) {
|
||||
bCtx := dockhelper.NewBuildContext()
|
||||
if vaultBinary != "" {
|
||||
var err error
|
||||
bCtx, err = createBuildContextWithBinary(vaultBinary)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
bCtx["setup-softhsm.sh"] = &dockhelper.FileContents{
|
||||
Data: []byte(`#!/bin/bash
|
||||
|
||||
mkdir -p /vault/file/softhsm/tokens
|
||||
|
||||
# only create a new slot if there isn't an existing one
|
||||
if [ ! -e /vault/file/hsm-slot ]; then
|
||||
softhsm2-util --init-token --slot 0 --so-pin=12345 --pin=12345 --label "vault" | grep -oE '[0-9]+$' > /vault/file/hsm-slot
|
||||
fi
|
||||
|
||||
exec docker-entrypoint.sh "$@"
|
||||
`),
|
||||
Mode: 0o755,
|
||||
}
|
||||
|
||||
bCtx["centos-stream.repo"] = &dockhelper.FileContents{
|
||||
Data: []byte(`
|
||||
[centos-10-baseos]
|
||||
name=CentOS Stream 10 - BaseOS
|
||||
baseurl=https://mirror.stream.centos.org/10-stream/BaseOS/$basearch/os/
|
||||
gpgcheck=0
|
||||
enabled=1
|
||||
|
||||
[centos-10-appstream]
|
||||
name=CentOS Stream 10 - AppStream
|
||||
baseurl=https://mirror.stream.centos.org/10-stream/AppStream/$basearch/os/
|
||||
gpgcheck=0
|
||||
enabled=1
|
||||
`),
|
||||
Mode: 0o644,
|
||||
}
|
||||
|
||||
containerFile := fmt.Sprintf(`FROM %s AS builder
|
||||
USER root
|
||||
|
||||
COPY centos-stream.repo /etc/yum.repos.d
|
||||
|
||||
RUN microdnf install -y tar gzip wget make gcc gcc-c++ openssl-devel sudo microdnf automake autoconf libtool pkg-config
|
||||
|
||||
RUN pwd
|
||||
|
||||
RUN wget https://github.com/softhsm/SoftHSMv2/archive/refs/tags/2.7.0.tar.gz
|
||||
RUN echo "be14a5820ec457eac5154462ffae51ba5d8a643f6760514d4b4b83a77be91573 2.7.0.tar.gz" | sha256sum -c
|
||||
RUN tar -xzf 2.7.0.tar.gz
|
||||
|
||||
# disable GOST cryptography as it requires extra plugins
|
||||
RUN cd SoftHSMv2-2.7.0 && sh autogen.sh && ./configure --disable-gost && make
|
||||
|
||||
FROM %s
|
||||
|
||||
USER root
|
||||
|
||||
COPY --from=builder /SoftHSMv2-2.7.0/src/lib/.libs/libsofthsm2.so /usr/lib64/libsofthsm2.so
|
||||
COPY --from=builder /SoftHSMv2-2.7.0/src/bin/util/softhsm2-util /usr/bin/softhsm2-util
|
||||
COPY --from=builder /SoftHSMv2-2.7.0/src/lib/common/softhsm2.conf /etc/softhsm2.conf
|
||||
RUN mkdir /usr/local/lib/softhsm && ln /usr/lib64/libsofthsm2.so /usr/local/lib/softhsm/libsofthsm2.so
|
||||
|
||||
# Put the tokens under /vault/file since that's the data volume, and if we want
|
||||
# to start a cluster using a pre-existing volume (i.e. resuming from a previous
|
||||
# cluster) we need the tokens in order to unseal/unsealwrap.
|
||||
RUN sed -i 's|directories.tokendir = .*|directories.tokendir = /vault/file/softhsm/tokens|g' /etc/softhsm2.conf
|
||||
|
||||
RUN sed -i 's/log.level = ERROR/log.level = DEBUG/' /etc/softhsm2.conf
|
||||
|
||||
COPY setup-softhsm.sh /usr/local/bin/setup-softhsm.sh
|
||||
|
||||
COPY vault /bin/vault
|
||||
|
||||
USER vault
|
||||
CMD ["server", "-dev"]
|
||||
ENTRYPOINT ["setup-softhsm.sh"]
|
||||
`, fromImage, fromImage)
|
||||
return createDockerImage(toImage, containerFile, bCtx)
|
||||
}
|
||||
|
||||
const (
|
||||
PKCS11Library = "/usr/lib64/libsofthsm2.so"
|
||||
PKCS11Pin = "12345"
|
||||
PKCS11TokenLabel = "vault"
|
||||
)
|
||||
@ -251,9 +251,15 @@ func (d *Runner) StartNewService(ctx context.Context, addSuffix, forceLocalAddr
|
||||
}
|
||||
}
|
||||
|
||||
cleanup := func() {
|
||||
cleanup := func(ctx context.Context) {
|
||||
for i := 0; i < 10; i++ {
|
||||
_, err := d.DockerAPI.ContainerRemove(ctx, result.Container.ID, client.ContainerRemoveOptions{Force: true})
|
||||
// It seems podman does a STOP, then waits 10s, and then resorts to KILL.
|
||||
// We don't have that patience, so we'll just start with a KILL.
|
||||
_, _ = d.DockerAPI.ContainerKill(ctx, result.Container.ID, client.ContainerKillOptions{})
|
||||
_, err := d.DockerAPI.ContainerRemove(ctx, result.Container.ID, client.ContainerRemoveOptions{
|
||||
RemoveVolumes: true,
|
||||
Force: true,
|
||||
})
|
||||
if err == nil || errdefs.IsNotFound(err) {
|
||||
return
|
||||
}
|
||||
@ -290,14 +296,14 @@ func (d *Runner) StartNewService(ctx context.Context, addSuffix, forceLocalAddr
|
||||
}, bo)
|
||||
if err != nil {
|
||||
if !d.RunOptions.DoNotAutoRemove {
|
||||
cleanup()
|
||||
cleanup(ctx)
|
||||
}
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
return &Service{
|
||||
Config: config,
|
||||
Cleanup: cleanup,
|
||||
Cleanup: func() { cleanup(context.TODO()) },
|
||||
Container: result.Container,
|
||||
StartResult: result,
|
||||
}, result.Container.ID, nil
|
||||
@ -781,7 +787,6 @@ func (bCtx *BuildContext) ToTarball() (io.Reader, error) {
|
||||
var err error
|
||||
buffer := new(bytes.Buffer)
|
||||
tarBuilder := tar.NewWriter(buffer)
|
||||
defer tarBuilder.Close()
|
||||
|
||||
now := time.Now()
|
||||
for filepath, contents := range *bCtx {
|
||||
@ -820,6 +825,9 @@ func (bCtx *BuildContext) ToTarball() (io.Reader, error) {
|
||||
}
|
||||
}
|
||||
|
||||
if err := tarBuilder.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return bytes.NewReader(buffer.Bytes()), nil
|
||||
}
|
||||
|
||||
@ -900,6 +908,7 @@ func BuildImage(ctx context.Context, api *client.Client, containerfile string, c
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to build image: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
output, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
|
||||
@ -118,6 +118,9 @@ func (dc *DockerCluster) GetRecoveryKeys() [][]byte {
|
||||
}
|
||||
|
||||
func (dc *DockerCluster) GetBarrierOrRecoveryKeys() [][]byte {
|
||||
if r := dc.GetRecoveryKeys(); len(r) > 0 {
|
||||
return r
|
||||
}
|
||||
return dc.GetBarrierKeys()
|
||||
}
|
||||
|
||||
@ -173,16 +176,23 @@ func (n *DockerClusterNode) Name() string {
|
||||
return n.Cluster.ClusterName + "-" + n.NodeID
|
||||
}
|
||||
|
||||
func (dc *DockerCluster) setupNode0(ctx context.Context) error {
|
||||
func (dc *DockerCluster) setupNode0(ctx context.Context, hasSealConfig bool) error {
|
||||
client := dc.ClusterNodes[0].client
|
||||
|
||||
var resp *api.InitResponse
|
||||
var err error
|
||||
req := &api.InitRequest{
|
||||
SecretShares: 3,
|
||||
SecretThreshold: 3,
|
||||
}
|
||||
if hasSealConfig {
|
||||
req = &api.InitRequest{
|
||||
RecoveryShares: 3,
|
||||
RecoveryThreshold: 3,
|
||||
}
|
||||
}
|
||||
for ctx.Err() == nil {
|
||||
resp, err = client.Sys().Init(&api.InitRequest{
|
||||
SecretShares: 3,
|
||||
SecretThreshold: 3,
|
||||
})
|
||||
resp, err = client.Sys().Init(req)
|
||||
if err == nil && resp != nil {
|
||||
break
|
||||
}
|
||||
@ -215,7 +225,9 @@ func (dc *DockerCluster) setupNode0(ctx context.Context) error {
|
||||
client.SetToken(dc.rootToken)
|
||||
dc.ClusterNodes[0].client = client
|
||||
|
||||
err = testcluster.UnsealNode(ctx, dc, 0)
|
||||
if !hasSealConfig {
|
||||
err = testcluster.UnsealNode(ctx, dc, 0)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -424,7 +436,7 @@ func NewTestDockerClusterWithErr(t *testing.T, opts *DockerClusterOptions) (*Doc
|
||||
opts.ClusterName = strings.ReplaceAll(t.Name(), "/", "-")
|
||||
}
|
||||
if opts.Logger == nil {
|
||||
opts.Logger = logging.NewVaultLogger(log.Trace).Named(t.Name())
|
||||
opts.Logger = logging.NewVaultLogger(log.Trace).Named(opts.ClusterName)
|
||||
}
|
||||
if opts.NetworkName == "" {
|
||||
opts.NetworkName = os.Getenv("TEST_DOCKER_NETWORK_NAME")
|
||||
@ -467,6 +479,9 @@ func NewDockerCluster(ctx context.Context, opts *DockerClusterOptions) (*DockerC
|
||||
storage: opts.Storage,
|
||||
disableMlock: opts.DisableMlock,
|
||||
disableTLS: opts.DisableTLS,
|
||||
barrierKeys: opts.BarrierKeys,
|
||||
recoveryKeys: opts.RecoveryKeys,
|
||||
rootToken: opts.RootToken,
|
||||
}
|
||||
|
||||
if err := dc.setupDockerCluster(ctx, opts); err != nil {
|
||||
@ -622,7 +637,9 @@ func (n *DockerClusterNode) Cleanup() {
|
||||
|
||||
// Stop kills the container of the node
|
||||
func (n *DockerClusterNode) Stop() {
|
||||
n.Logger.Trace("stopping node")
|
||||
n.cleanupContainer()
|
||||
n.Logger.Trace("node stopped")
|
||||
}
|
||||
|
||||
func (n *DockerClusterNode) cleanup() error {
|
||||
@ -655,17 +672,7 @@ func (n *DockerClusterNode) createTLSDisabledListenerConfig() map[string]interfa
|
||||
}}
|
||||
}
|
||||
|
||||
func (n *DockerClusterNode) Start(ctx context.Context, opts *DockerClusterOptions) error {
|
||||
if n.DataVolumeName == "" {
|
||||
vol, err := n.DockerAPI.VolumeCreate(ctx, docker.VolumeCreateOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
n.DataVolumeName = vol.Volume.Name
|
||||
n.cleanupVolume = func() {
|
||||
_, _ = n.DockerAPI.VolumeRemove(ctx, vol.Volume.Name, docker.VolumeRemoveOptions{})
|
||||
}
|
||||
}
|
||||
func (n *DockerClusterNode) writeConfig(opts *DockerClusterOptions) ([]string, error) {
|
||||
vaultCfg := map[string]interface{}{}
|
||||
var listenerConfig []map[string]interface{}
|
||||
|
||||
@ -698,7 +705,7 @@ func (n *DockerClusterNode) Start(ctx context.Context, opts *DockerClusterOption
|
||||
for _, suite := range config.TLSCipherSuites {
|
||||
name, err := tlsutil.GetCipherName(suite)
|
||||
if err != nil {
|
||||
return fmt.Errorf("bad TLSCipherSuite %d on listener %d: %w", suite, i, err)
|
||||
return nil, fmt.Errorf("bad TLSCipherSuite %d on listener %d: %w", suite, i, err)
|
||||
}
|
||||
suites = append(suites, name)
|
||||
}
|
||||
@ -707,7 +714,7 @@ func (n *DockerClusterNode) Start(ctx context.Context, opts *DockerClusterOption
|
||||
listenerConfig = append(listenerConfig, cfg)
|
||||
portStr := fmt.Sprintf("%d/tcp", config.Port)
|
||||
if strutil.StrListContains(ports, portStr) {
|
||||
return fmt.Errorf("duplicate port %d specified", config.Port)
|
||||
return nil, fmt.Errorf("duplicate port %d specified", config.Port)
|
||||
}
|
||||
ports = append(ports, portStr)
|
||||
}
|
||||
@ -735,12 +742,45 @@ func (n *DockerClusterNode) Start(ctx context.Context, opts *DockerClusterOption
|
||||
storageOpts = opts.Storage.Opts()
|
||||
}
|
||||
|
||||
if opts != nil && opts.VaultNodeConfig != nil {
|
||||
if opts.VaultNodeConfig != nil {
|
||||
for k, v := range opts.VaultNodeConfig.StorageOptions {
|
||||
if _, ok := storageOpts[k].(string); !ok {
|
||||
storageOpts[k] = v
|
||||
}
|
||||
}
|
||||
if len(opts.VaultNodeConfig.Seal) > 0 {
|
||||
var seals []map[string]any
|
||||
for _, seal := range opts.VaultNodeConfig.Seal {
|
||||
seals = append(seals, map[string]any{
|
||||
seal.Type: seal.Config,
|
||||
})
|
||||
}
|
||||
vaultCfg["seal"] = seals
|
||||
}
|
||||
if len(opts.VaultNodeConfig.KMSLibrary) > 0 {
|
||||
libs := []map[string][]map[string]any{}
|
||||
for _, kmsl := range opts.VaultNodeConfig.KMSLibrary {
|
||||
libs = append(libs, map[string][]map[string]any{
|
||||
kmsl.Type: {
|
||||
{
|
||||
"name": kmsl.Name,
|
||||
"library": kmsl.Library,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
vaultCfg["kms_library"] = libs
|
||||
}
|
||||
if opts.VaultNodeConfig.Entropy != nil {
|
||||
vaultCfg["entropy"] = []map[string]map[string]any{
|
||||
{
|
||||
"seal": map[string]any{
|
||||
"seal_name": opts.VaultNodeConfig.Entropy.SealName,
|
||||
"mode": "augmentation",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
vaultCfg["storage"] = map[string]interface{}{
|
||||
storageType: storageOpts,
|
||||
@ -759,43 +799,66 @@ func (n *DockerClusterNode) Start(ctx context.Context, opts *DockerClusterOption
|
||||
|
||||
vaultCfg["administrative_namespace_path"] = opts.AdministrativeNamespacePath
|
||||
|
||||
systemJSON, err := json.Marshal(vaultCfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = os.WriteFile(filepath.Join(n.WorkDir, "system.json"), systemJSON, 0o644)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if opts.VaultNodeConfig != nil {
|
||||
localCfg := *opts.VaultNodeConfig
|
||||
if opts.VaultNodeConfig.LicensePath != "" {
|
||||
b, err := os.ReadFile(opts.VaultNodeConfig.LicensePath)
|
||||
if err != nil || len(b) == 0 {
|
||||
return fmt.Errorf("unable to read LicensePath at %q: %w", opts.VaultNodeConfig.LicensePath, err)
|
||||
return nil, fmt.Errorf("unable to read LicensePath at %q: %w", opts.VaultNodeConfig.LicensePath, err)
|
||||
}
|
||||
localCfg.LicensePath = "/vault/config/license"
|
||||
dest := filepath.Join(n.WorkDir, "license")
|
||||
err = os.WriteFile(dest, b, 0o644)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error writing license to %q: %w", dest, err)
|
||||
return nil, fmt.Errorf("error writing license to %q: %w", dest, err)
|
||||
}
|
||||
}
|
||||
localJSON, err := json.Marshal(localCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var conf map[string]interface{}
|
||||
if err := json.Unmarshal(localJSON, &conf); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for k, v := range conf {
|
||||
vaultCfg[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
userJSON, err := json.Marshal(localCfg)
|
||||
configJSON, err := json.Marshal(vaultCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = os.WriteFile(filepath.Join(n.WorkDir, "config.json"), configJSON, 0o644)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
n.Logger.Trace("node config", "config.json", string(configJSON))
|
||||
return ports, nil
|
||||
}
|
||||
|
||||
func (n *DockerClusterNode) Start(ctx context.Context, opts *DockerClusterOptions) error {
|
||||
if n.DataVolumeName == "" {
|
||||
vol, err := n.DockerAPI.VolumeCreate(ctx, docker.VolumeCreateOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = os.WriteFile(filepath.Join(n.WorkDir, "user.json"), userJSON, 0o644)
|
||||
if err != nil {
|
||||
return err
|
||||
n.DataVolumeName = vol.Volume.Name
|
||||
n.Logger.Trace("created volume", "name", n.DataVolumeName)
|
||||
n.cleanupVolume = func() {
|
||||
n.Logger.Trace("cleanup volume", "name", n.DataVolumeName)
|
||||
_, _ = n.DockerAPI.VolumeRemove(ctx, vol.Volume.Name, docker.VolumeRemoveOptions{})
|
||||
}
|
||||
}
|
||||
ports, err := n.writeConfig(opts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !opts.DisableTLS {
|
||||
// Create a temporary cert so vault will start up
|
||||
err = n.setupCert("127.0.0.1")
|
||||
err := n.setupCert("127.0.0.1")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -851,10 +914,7 @@ func (n *DockerClusterNode) Start(ctx context.Context, opts *DockerClusterOption
|
||||
|
||||
if opts.DisableTLS {
|
||||
postStartFunc = func(containerID string, realIP string) error {
|
||||
// If we signal Vault before it installs its sighup handler, it'll die.
|
||||
wg.Wait()
|
||||
n.Logger.Trace("running poststart", "containerID", containerID, "IP", realIP)
|
||||
return n.runner.RefreshFiles(ctx, containerID)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
@ -904,6 +964,11 @@ func (n *DockerClusterNode) Start(ctx context.Context, opts *DockerClusterOption
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
protocol := "https"
|
||||
if opts.DisableTLS {
|
||||
protocol = "http"
|
||||
}
|
||||
svc, _, err := r.StartNewService(ctx, false, false, func(ctx context.Context, host string, port int) (dockhelper.ServiceConfig, error) {
|
||||
config, err := n.apiConfig()
|
||||
if err != nil {
|
||||
@ -1033,21 +1098,14 @@ func (n *DockerClusterNode) Signal(ctx context.Context, signal string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
func (n *DockerClusterNode) UpdateConfig(ctx context.Context, config *testcluster.VaultNodeConfig) error {
|
||||
// Marshal the config to JSON
|
||||
configJSON, err := json.Marshal(config)
|
||||
func (n *DockerClusterNode) UpdateConfig(ctx context.Context, opts *DockerClusterOptions) error {
|
||||
_, err := n.writeConfig(opts)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal config: %w", err)
|
||||
}
|
||||
|
||||
// Write the config to the work directory
|
||||
configPath := filepath.Join(n.WorkDir, "user.json")
|
||||
if err := os.WriteFile(configPath, configJSON, 0o644); err != nil {
|
||||
return fmt.Errorf("failed to write config file: %w", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Copy the updated config to the container
|
||||
if err := dockhelper.CopyToContainer(ctx, n.DockerAPI, n.Container.ID, configPath, "/vault/config/user.json"); err != nil {
|
||||
if err := dockhelper.CopyToContainer(ctx, n.DockerAPI, n.Container.ID, n.WorkDir, "/vault/config"); err != nil {
|
||||
return fmt.Errorf("failed to copy config to container: %w", err)
|
||||
}
|
||||
|
||||
@ -1265,17 +1323,34 @@ func (dc *DockerCluster) setupDockerCluster(ctx context.Context, opts *DockerClu
|
||||
if opts.SkipInit {
|
||||
continue
|
||||
}
|
||||
hasSealConfig := opts.VaultNodeConfig != nil && len(opts.VaultNodeConfig.Seal) > 0
|
||||
if i == 0 {
|
||||
if err := dc.setupNode0(ctx); err != nil {
|
||||
if err := dc.setupNode0(ctx, hasSealConfig); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := dc.joinNode(ctx, i, 0); err != nil {
|
||||
if err := dc.joinNode(ctx, i, 0, hasSealConfig); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if opts.SkipInit && !opts.SkipUnsealWaitActiveNode {
|
||||
if len(opts.VaultNodeConfig.Seal) == 0 {
|
||||
if err := testcluster.UnsealAllNodes(ctx, dc); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if _, err := testcluster.WaitForActiveNode(ctx, dc); err != nil {
|
||||
return err
|
||||
}
|
||||
status, err := dc.ClusterNodes[0].APIClient().Sys().SealStatusWithContext(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dc.ID = status.ClusterID
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -1288,7 +1363,7 @@ func (dc *DockerCluster) AddNode(ctx context.Context, opts *DockerClusterOptions
|
||||
return err
|
||||
}
|
||||
|
||||
return dc.joinNode(ctx, len(dc.ClusterNodes)-1, leaderIdx)
|
||||
return dc.joinNode(ctx, len(dc.ClusterNodes)-1, leaderIdx, len(opts.VaultNodeConfig.Seal) > 0)
|
||||
}
|
||||
|
||||
const MaxContainerNameLen = 63
|
||||
@ -1378,7 +1453,7 @@ func copyDirContents(to string, from string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dc *DockerCluster) joinNode(ctx context.Context, nodeIdx int, leaderIdx int) error {
|
||||
func (dc *DockerCluster) joinNode(ctx context.Context, nodeIdx int, leaderIdx int, autoseal bool) error {
|
||||
if dc.storage != nil && dc.storage.Type() != "raft" {
|
||||
// Storage is not raft so nothing to do but unseal.
|
||||
return testcluster.UnsealNode(ctx, dc, nodeIdx)
|
||||
@ -1409,6 +1484,9 @@ func (dc *DockerCluster) joinNode(ctx context.Context, nodeIdx int, leaderIdx in
|
||||
return fmt.Errorf("failed to join cluster: %w", err)
|
||||
}
|
||||
|
||||
if autoseal {
|
||||
return nil
|
||||
}
|
||||
return testcluster.UnsealNode(ctx, dc, nodeIdx)
|
||||
}
|
||||
|
||||
|
||||
@ -20,7 +20,6 @@ func DefaultOptions(t *testing.T) *DockerClusterOptions {
|
||||
ImageRepo: "hashicorp/vault",
|
||||
ImageTag: "latest",
|
||||
VaultBinary: os.Getenv("VAULT_BINARY"),
|
||||
Envs: []string{"SKIP_SETCAP=true"},
|
||||
ClusterOptions: testcluster.ClusterOptions{
|
||||
NumCores: 3,
|
||||
ClusterName: strings.ReplaceAll(t.Name(), "/", "-"),
|
||||
|
||||
@ -45,11 +45,7 @@ type VaultNodeConfig struct {
|
||||
// DisableMlock bool `hcl:"disable_mlock"`
|
||||
|
||||
// Not configurable yet:
|
||||
// Listeners []*Listener `hcl:"-"`
|
||||
// Seals []*KMS `hcl:"-"`
|
||||
// Entropy *Entropy `hcl:"-"`
|
||||
// Telemetry *Telemetry `hcl:"telemetry"`
|
||||
// HCPLinkConf *HCPLinkConfig `hcl:"cloud"`
|
||||
// PidFile string `hcl:"pid_file"`
|
||||
// ServiceRegistrationType string
|
||||
// ServiceRegistrationOptions map[string]string
|
||||
@ -58,6 +54,9 @@ type VaultNodeConfig struct {
|
||||
AdditionalListeners []VaultNodeListenerConfig `json:"-"`
|
||||
CustomListenerConfigOpts map[string]interface{} `json:"-"`
|
||||
AdditionalTCPPorts []int `json:"-"`
|
||||
Seal []VaultNodeSealConfig `json:"-"`
|
||||
KMSLibrary []VaultNodeKMSLibrary `json:"-"`
|
||||
Entropy *VaultNodeEntropy `json:"-"`
|
||||
|
||||
DefaultMaxRequestDuration time.Duration `json:"default_max_request_duration"`
|
||||
LogFormat string `json:"log_format"`
|
||||
@ -84,6 +83,7 @@ type VaultNodeConfig struct {
|
||||
LicensePath string `json:"license_path"`
|
||||
FeatureFlags []string `json:"feature_flags,omitempty"`
|
||||
EnableUnauthenticatedAccess []string `json:"enable_unauthenticated_access,omitempty"`
|
||||
EnableMultiSeal bool `json:"enable_multiseal"`
|
||||
}
|
||||
|
||||
type ClusterNode struct {
|
||||
@ -100,6 +100,10 @@ type ClusterOptions struct {
|
||||
ClusterName string
|
||||
KeepStandbysSealed bool
|
||||
SkipInit bool
|
||||
SkipUnsealWaitActiveNode bool
|
||||
BarrierKeys [][]byte
|
||||
RecoveryKeys [][]byte
|
||||
RootToken string
|
||||
CACert []byte
|
||||
NumCores int
|
||||
TmpDir string
|
||||
@ -118,6 +122,21 @@ type VaultNodeListenerConfig struct {
|
||||
TLSCipherSuites []uint16
|
||||
}
|
||||
|
||||
type VaultNodeSealConfig struct {
|
||||
Type string
|
||||
Config map[string]string
|
||||
}
|
||||
|
||||
type VaultNodeKMSLibrary struct {
|
||||
Type string
|
||||
Name string
|
||||
Library string
|
||||
}
|
||||
|
||||
type VaultNodeEntropy struct {
|
||||
SealName string
|
||||
}
|
||||
|
||||
type CA struct {
|
||||
CACert *x509.Certificate
|
||||
CACertBytes []byte
|
||||
|
||||
@ -89,6 +89,27 @@ func UnsealNode(ctx context.Context, cluster VaultCluster, nodeIdx int) error {
|
||||
return NodeHealthy(ctx, cluster, nodeIdx)
|
||||
}
|
||||
|
||||
func UnsealNodeWithOptions(ctx context.Context, cluster VaultCluster, nodeIdx int, reset, migrate bool) error {
|
||||
if nodeIdx >= len(cluster.Nodes()) {
|
||||
return fmt.Errorf("invalid nodeIdx %d for cluster", nodeIdx)
|
||||
}
|
||||
node := cluster.Nodes()[nodeIdx]
|
||||
client := node.APIClient()
|
||||
|
||||
for _, key := range cluster.GetBarrierOrRecoveryKeys() {
|
||||
_, err := client.Sys().UnsealWithOptionsWithContext(ctx, &api.UnsealOpts{
|
||||
Key: hex.EncodeToString(key),
|
||||
Reset: reset,
|
||||
Migrate: migrate,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return NodeHealthy(ctx, cluster, nodeIdx)
|
||||
}
|
||||
|
||||
func UnsealAllNodes(ctx context.Context, cluster VaultCluster) error {
|
||||
for i := range cluster.Nodes() {
|
||||
if err := UnsealNode(ctx, cluster, i); err != nil {
|
||||
|
||||
@ -15,5 +15,6 @@ rules:
|
||||
- "*_test.go"
|
||||
- "cmd/*.go"
|
||||
- "cmd/**/*.go"
|
||||
- "tools/*/main.go"
|
||||
- sdk/database/dbplugin/server.go # effectively a cmd
|
||||
- sdk/database/dbplugin/v5/plugin_server.go # effectively a cmd
|
||||
|
||||
42
tools/testimagemaker/main.go
Normal file
42
tools/testimagemaker/main.go
Normal file
@ -0,0 +1,42 @@
|
||||
// Copyright IBM Corp. 2016, 2025
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/hashicorp/vault/helper/testhelpers/testimages"
|
||||
)
|
||||
|
||||
func main() {
|
||||
var source, target, binary string
|
||||
var hsm bool
|
||||
|
||||
flag.StringVar(&source, "source", "", "Source image name")
|
||||
flag.StringVar(&target, "target", "", "Target image name")
|
||||
flag.StringVar(&binary, "binary", "", "Binary path")
|
||||
flag.BoolVar(&hsm, "hsm", false, "HSM style image")
|
||||
flag.Parse()
|
||||
|
||||
if source == "" || target == "" || binary == "" {
|
||||
fmt.Fprintf(os.Stderr, "Error: all of the flags -source, -target, and -binary are required\n\n")
|
||||
flag.Usage()
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
var output []byte
|
||||
var err error
|
||||
if hsm {
|
||||
output, err = testimages.CreateHSMDockerImage(source, target, binary)
|
||||
} else {
|
||||
output, err = testimages.CreateNonHSMDockerImage(source, target, binary)
|
||||
}
|
||||
fmt.Println(string(output))
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error: %s\n", err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
@ -1337,7 +1337,7 @@ func NewCore(conf *CoreConfig) (*Core, error) {
|
||||
|
||||
// For recovery mode we've now configured enough to return early.
|
||||
if c.recoveryMode {
|
||||
checkResult, err := c.checkForSealMigration(context.Background(), conf.UnwrapSeal)
|
||||
checkResult, _, err := c.checkForSealMigration(context.Background(), conf.UnwrapSeal)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error checking if a seal migration is needed: %w", err)
|
||||
}
|
||||
@ -1840,7 +1840,7 @@ func (c *Core) unsealFragment(key []byte, migrate bool) error {
|
||||
return fmt.Errorf("can't perform a seal migration while joining a raft cluster")
|
||||
}
|
||||
if !migrate && c.migrationInfo != nil {
|
||||
done, err := c.sealMigrated(ctx)
|
||||
done, _, err := c.sealMigrated(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error checking to see if seal is migrated: %w", err)
|
||||
}
|
||||
@ -1876,6 +1876,10 @@ func (c *Core) unsealFragment(key []byte, migrate bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := c.ValidateMultiSealConfig(ctx, false); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sealToUse := c.seal
|
||||
if migrate {
|
||||
c.logger.Info("unsealing using migration seal")
|
||||
@ -2072,26 +2076,28 @@ func (c *Core) getUnsealKey(ctx context.Context, seal Seal) ([]byte, error) {
|
||||
// For the auto->auto same seal migration scenario, it will return false even
|
||||
// if the preceding conditions are true but we cannot decrypt the master key
|
||||
// in storage using the configured seal.
|
||||
func (c *Core) sealMigrated(ctx context.Context) (bool, error) {
|
||||
// When no error is returned, returns a string that gives more information about
|
||||
// why the bool return value is set as it is.
|
||||
func (c *Core) sealMigrated(ctx context.Context) (bool, string, error) {
|
||||
sealMigDone := c.sealMigrationDone.Load()
|
||||
if sealMigDone != nil && !sealMigDone.IsZero() {
|
||||
return true, nil
|
||||
return true, "sealMigrationDone nonzero", nil
|
||||
}
|
||||
|
||||
existBarrierSealConfig, existRecoverySealConfig, err := c.PhysicalSealConfigs(ctx)
|
||||
if err != nil {
|
||||
return false, err
|
||||
return false, "", err
|
||||
}
|
||||
|
||||
if !c.seal.BarrierSealConfigType().IsSameAs(existBarrierSealConfig.Type) {
|
||||
return false, nil
|
||||
return false, "barrier seal config type in seal matches what's in storage", nil
|
||||
}
|
||||
if c.seal.RecoveryKeySupported() && !SealConfigTypeRecovery.IsSameAs(existRecoverySealConfig.Type) {
|
||||
return false, nil
|
||||
return false, "recovery seal config type in seal matches what's in storage", nil
|
||||
}
|
||||
|
||||
if c.seal.BarrierSealConfigType() != c.migrationInfo.seal.BarrierSealConfigType() {
|
||||
return true, nil
|
||||
return true, "barrier seal config type in seal doesn't match what's in storage", nil
|
||||
}
|
||||
|
||||
// The above checks can handle the auto->shamir and shamir->auto
|
||||
@ -2103,13 +2109,13 @@ func (c *Core) sealMigrated(ctx context.Context) (bool, error) {
|
||||
|
||||
switch {
|
||||
case len(keys) > 0 && err == nil:
|
||||
return true, nil
|
||||
return true, "seal has stored keys", nil
|
||||
case len(keysMig) > 0 && errMig == nil:
|
||||
return false, nil
|
||||
return false, "migration seal has stored keys", nil
|
||||
case errors.Is(err, &ErrDecrypt{}) && errors.Is(errMig, &ErrDecrypt{}):
|
||||
return false, fmt.Errorf("decrypt error, neither the old nor new seal can read stored keys: old seal err=%v, new seal err=%v", errMig, err)
|
||||
return false, "", fmt.Errorf("decrypt error, neither the old nor new seal can read stored keys: old seal err=%v, new seal err=%v", errMig, err)
|
||||
default:
|
||||
return false, fmt.Errorf("neither the old nor new seal can read stored keys: old seal err=%v, new seal err=%v", errMig, err)
|
||||
return false, "", fmt.Errorf("neither the old nor new seal can read stored keys: old seal err=%v, new seal err=%v", errMig, err)
|
||||
}
|
||||
}
|
||||
|
||||
@ -2121,17 +2127,17 @@ func (c *Core) migrateSeal(ctx context.Context) error {
|
||||
return c.migrateMultiSealConfig(ctx)
|
||||
}
|
||||
|
||||
ok, err := c.sealMigrated(ctx)
|
||||
ok, info, err := c.sealMigrated(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error checking if seal is migrated or not: %w", err)
|
||||
}
|
||||
|
||||
if ok {
|
||||
c.logger.Info("migration is already performed")
|
||||
c.logger.Info("migration is already performed", "info", info)
|
||||
return nil
|
||||
}
|
||||
|
||||
c.logger.Info("seal migration initiated")
|
||||
c.logger.Info("seal migration initiated", "info", info)
|
||||
|
||||
switch {
|
||||
case c.migrationInfo.seal.RecoveryKeySupported() && c.seal.RecoveryKeySupported():
|
||||
@ -3345,16 +3351,16 @@ const (
|
||||
sealMigrationCheckDoNotAjust
|
||||
)
|
||||
|
||||
func (c *Core) checkForSealMigration(ctx context.Context, unwrapSeal Seal) (sealMigrationCheckResult, error) {
|
||||
func (c *Core) checkForSealMigration(ctx context.Context, unwrapSeal Seal) (sealMigrationCheckResult, string, error) {
|
||||
existBarrierSealConfig, _, err := c.PhysicalSealConfigs(ctx)
|
||||
if err != nil {
|
||||
return sealMigrationCheckError, fmt.Errorf("Error checking for existing seal: %s", err)
|
||||
return sealMigrationCheckError, "", fmt.Errorf("Error checking for existing seal: %s", err)
|
||||
}
|
||||
|
||||
// If we don't have an existing config or if it's the deprecated auto seal
|
||||
// which needs an upgrade, skip out
|
||||
if existBarrierSealConfig == nil || existBarrierSealConfig.Type == WrapperTypeHsmAutoDeprecated.String() {
|
||||
return sealMigrationCheckSkip, nil
|
||||
return sealMigrationCheckSkip, "no seal config or deprecated", nil
|
||||
}
|
||||
|
||||
if unwrapSeal == nil {
|
||||
@ -3368,28 +3374,28 @@ func (c *Core) checkForSealMigration(ctx context.Context, unwrapSeal Seal) (seal
|
||||
case storedType == configuredType:
|
||||
// We have the same barrier type and the unwrap seal is nil so we're not
|
||||
// migrating from same to same, IOW we assume it's not a migration.
|
||||
return sealMigrationCheckDoNotAjust, nil
|
||||
return sealMigrationCheckDoNotAjust, "same barrier and unwrap seal is nil", nil
|
||||
case configuredType == SealConfigTypeShamir:
|
||||
// The stored barrier config is not shamir, there is no disabled seal
|
||||
// in config, and either no configured seal (which equates to Shamir)
|
||||
// or an explicitly configured Shamir seal.
|
||||
return sealMigrationCheckError, fmt.Errorf("cannot seal migrate from %q to Shamir, no disabled seal in configuration",
|
||||
return sealMigrationCheckError, "", fmt.Errorf("cannot seal migrate from %q to Shamir, no disabled seal in configuration",
|
||||
existBarrierSealConfig.Type)
|
||||
case storedType == SealConfigTypeShamir:
|
||||
// The configured seal is not Shamir, the stored seal config is Shamir.
|
||||
// This is a migration away from Shamir.
|
||||
|
||||
return sealMigrationCheckAdjust, nil
|
||||
return sealMigrationCheckAdjust, "configured seal is not shamir and stored seal config is", nil
|
||||
case configuredType == SealConfigTypeMultiseal && c.IsMultisealEnabled():
|
||||
// We are going from a single non-shamir seal to multiseal, and multi seal is supported.
|
||||
// This scenario is not considered a migration in the sense of requiring an unwrapSeal,
|
||||
// but we will update the stored SealConfig later (see Core.migrateMultiSealConfig).
|
||||
|
||||
return sealMigrationCheckDoNotAjust, nil
|
||||
return sealMigrationCheckDoNotAjust, "single non-shamir to multiseal", nil
|
||||
case configuredType == SealConfigTypeMultiseal:
|
||||
// The configured seal is multiseal and we know the stored type is not shamir, thus
|
||||
// we are going from auto seal to multiseal.
|
||||
return sealMigrationCheckError, fmt.Errorf("cannot seal migrate from %q to %q, multiple seals are not supported",
|
||||
return sealMigrationCheckError, "", fmt.Errorf("cannot seal migrate from %q to %q, multiple seals are not supported",
|
||||
existBarrierSealConfig.Type, c.seal.BarrierSealConfigType())
|
||||
case storedType == SealConfigTypeMultiseal:
|
||||
// The stored type is multiseal and we know the type the configured type is not shamir,
|
||||
@ -3398,12 +3404,12 @@ func (c *Core) checkForSealMigration(ctx context.Context, unwrapSeal Seal) (seal
|
||||
// This scenario is not considered a migration in the sense of requiring an unwrapSeal,
|
||||
// but we will update the stored SealConfig later (see Core.migrateMultiSealConfig).
|
||||
|
||||
return sealMigrationCheckDoNotAjust, nil
|
||||
return sealMigrationCheckDoNotAjust, "multiseal to autoseal", nil
|
||||
default:
|
||||
// We know at this point that there is a configured non-Shamir seal,
|
||||
// that it does not match the stored non-Shamir seal config, and that
|
||||
// there is no explicitly disabled seal stanza.
|
||||
return sealMigrationCheckError, fmt.Errorf("cannot seal migrate from %q to %q, no disabled seal in configuration",
|
||||
return sealMigrationCheckError, "", fmt.Errorf("cannot seal migrate from %q to %q, no disabled seal in configuration",
|
||||
existBarrierSealConfig.Type, c.seal.BarrierSealConfigType())
|
||||
}
|
||||
} else {
|
||||
@ -3411,9 +3417,9 @@ func (c *Core) checkForSealMigration(ctx context.Context, unwrapSeal Seal) (seal
|
||||
// in the config and disabled.
|
||||
|
||||
if unwrapSeal.BarrierSealConfigType() == SealConfigTypeShamir {
|
||||
return sealMigrationCheckError, errors.New("Shamir seals cannot be set disabled (they should simply not be set)")
|
||||
return sealMigrationCheckError, "", errors.New("Shamir seals cannot be set disabled (they should simply not be set)")
|
||||
}
|
||||
return sealMigrationCheckDoNotAjust, nil
|
||||
return sealMigrationCheckDoNotAjust, "unchanged", nil
|
||||
}
|
||||
}
|
||||
|
||||
@ -3439,7 +3445,7 @@ func (c *Core) checkForSealMigration(ctx context.Context, unwrapSeal Seal) (seal
|
||||
func (c *Core) adjustForSealMigration(unwrapSeal Seal) error {
|
||||
ctx := context.Background()
|
||||
|
||||
checkResult, err := c.checkForSealMigration(ctx, unwrapSeal)
|
||||
checkResult, _, err := c.checkForSealMigration(ctx, unwrapSeal)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -3723,7 +3729,7 @@ func (c *Core) IsSealMigrated(lock bool) bool {
|
||||
c.stateLock.RLock()
|
||||
defer c.stateLock.RUnlock()
|
||||
}
|
||||
done, _ := c.sealMigrated(context.Background())
|
||||
done, _, _ := c.sealMigrated(context.Background())
|
||||
return done
|
||||
}
|
||||
|
||||
|
||||
@ -15,7 +15,7 @@ import (
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/vault/api"
|
||||
"github.com/hashicorp/vault/helper/testhelpers/consul"
|
||||
"github.com/hashicorp/vault/helper/testhelpers/corehelpers"
|
||||
"github.com/hashicorp/vault/helper/testhelpers/testimages"
|
||||
"github.com/hashicorp/vault/sdk/helper/testcluster"
|
||||
"github.com/hashicorp/vault/sdk/helper/testcluster/docker"
|
||||
"github.com/stretchr/testify/require"
|
||||
@ -34,27 +34,19 @@ func TestConsulFencing_PartitionedLeaderCantWrite(t *testing.T) {
|
||||
|
||||
consulStorage := consul.NewClusterStorage()
|
||||
|
||||
// Create cluster logger that will write cluster logs to a file in CI.
|
||||
logger := corehelpers.NewTestLogger(t)
|
||||
logger.SetLevel(hclog.Trace)
|
||||
|
||||
clusterOpts := docker.DefaultOptions(t)
|
||||
// We can use an enterprise image here because we are swapping out the binary anyway.
|
||||
clusterOpts.ImageRepo = "hashicorp/vault-enterprise"
|
||||
clusterOpts.ClusterOptions.Logger = logger
|
||||
clusterOpts.VaultBinary = ""
|
||||
clusterOpts.ImageRepo, clusterOpts.ImageTag = testimages.GetImageRepoAndTag(t, false)
|
||||
|
||||
clusterOpts.Storage = consulStorage
|
||||
|
||||
logger.Info("==> starting cluster")
|
||||
c, err := docker.NewDockerCluster(ctx, clusterOpts)
|
||||
require.NoError(t, err)
|
||||
logger.Info(" ✅ done.", "root_token", c.GetRootToken(),
|
||||
"consul_token", consulStorage.Config().Token)
|
||||
|
||||
logger.Info("==> waiting for leader")
|
||||
leaderIdx, err := testcluster.WaitForActiveNode(ctx, c)
|
||||
require.NoError(t, err)
|
||||
|
||||
logger := c.Logger.Named("test")
|
||||
|
||||
leader := c.Nodes()[leaderIdx]
|
||||
leaderClient := leader.APIClient()
|
||||
|
||||
@ -64,7 +56,6 @@ func TestConsulFencing_PartitionedLeaderCantWrite(t *testing.T) {
|
||||
}
|
||||
|
||||
// Mount a KV v2 backend
|
||||
logger.Info("==> mounting KV")
|
||||
err = leaderClient.Sys().Mount("/test", &api.MountInput{
|
||||
Type: "kv-v2",
|
||||
})
|
||||
@ -300,7 +291,6 @@ func waitForKVv2Upgrade(t *testing.T, ctx context.Context, client *api.Client, p
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
t.Logf("waitForKVv2Upgrade: write failed: %s", err)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
t.Fatalf("context cancelled waiting for KVv2 (%s) upgrade to complete: %s",
|
||||
|
||||
@ -5,12 +5,12 @@ package misc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
"github.com/go-test/deep"
|
||||
"github.com/hashicorp/vault/api"
|
||||
"github.com/hashicorp/vault/helper/testhelpers/testimages"
|
||||
"github.com/hashicorp/vault/sdk/helper/testcluster"
|
||||
"github.com/hashicorp/vault/sdk/helper/testcluster/docker"
|
||||
"github.com/mitchellh/mapstructure"
|
||||
@ -26,26 +26,20 @@ func TestRecovery_Docker(t *testing.T) {
|
||||
ctx := context.TODO()
|
||||
|
||||
t.Parallel()
|
||||
binary := os.Getenv("VAULT_BINARY")
|
||||
if binary == "" {
|
||||
t.Skip("only running docker test when $VAULT_BINARY present")
|
||||
}
|
||||
repo, tag := testimages.GetImageRepoAndTag(t, false)
|
||||
opts := &docker.DockerClusterOptions{
|
||||
ImageRepo: "hashicorp/vault",
|
||||
ImageRepo: repo,
|
||||
DisableMlock: true,
|
||||
// We're replacing the binary anyway, so we're not too particular about
|
||||
// the docker image version tag.
|
||||
ImageTag: "latest",
|
||||
VaultBinary: binary,
|
||||
ImageTag: tag,
|
||||
ClusterOptions: testcluster.ClusterOptions{
|
||||
NumCores: 1,
|
||||
VaultNodeConfig: &testcluster.VaultNodeConfig{
|
||||
LogLevel: "TRACE",
|
||||
// If you want the test to run faster locally, you could
|
||||
// uncomment this performance_multiplier change.
|
||||
//StorageOptions: map[string]string{
|
||||
// "performance_multiplier": "1",
|
||||
//},
|
||||
StorageOptions: map[string]string{
|
||||
"performance_multiplier": "1",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
@ -130,6 +124,7 @@ func TestRecovery_Docker(t *testing.T) {
|
||||
newOpts := *opts
|
||||
opts := &newOpts
|
||||
opts.Args = []string{"-recovery"}
|
||||
opts.SkipUnsealWaitActiveNode = true
|
||||
opts.StartProbe = func(client *api.Client) error {
|
||||
// In recovery mode almost no paths are supported, and pretty much
|
||||
// the only ones that don't require a recovery token are the ones used
|
||||
|
||||
@ -17,6 +17,8 @@ import (
|
||||
autopilot "github.com/hashicorp/raft-autopilot"
|
||||
"github.com/hashicorp/vault/api"
|
||||
"github.com/hashicorp/vault/helper/testhelpers"
|
||||
sealhelper "github.com/hashicorp/vault/helper/testhelpers/seal"
|
||||
"github.com/hashicorp/vault/helper/testhelpers/testimages"
|
||||
"github.com/hashicorp/vault/sdk/helper/testcluster"
|
||||
"github.com/hashicorp/vault/sdk/helper/testcluster/docker"
|
||||
rafttest "github.com/hashicorp/vault/vault/external_tests/raft"
|
||||
@ -27,36 +29,42 @@ import (
|
||||
// uses docker containers for the vault nodes.
|
||||
func TestRaft_Configuration_Docker(t *testing.T) {
|
||||
t.Parallel()
|
||||
binary := os.Getenv("VAULT_BINARY")
|
||||
if binary == "" {
|
||||
t.Skip("only running docker test when $VAULT_BINARY present")
|
||||
}
|
||||
opts := &docker.DockerClusterOptions{
|
||||
ImageRepo: "hashicorp/vault",
|
||||
DisableMlock: true,
|
||||
// We're replacing the binary anyway, so we're not too particular about
|
||||
// the docker image version tag.
|
||||
ImageTag: "latest",
|
||||
VaultBinary: binary,
|
||||
ClusterOptions: testcluster.ClusterOptions{
|
||||
VaultNodeConfig: &testcluster.VaultNodeConfig{
|
||||
LogLevel: "TRACE",
|
||||
// If you want the test to run faster locally, you could
|
||||
// uncomment this performance_multiplier change.
|
||||
//StorageOptions: map[string]string{
|
||||
// "performance_multiplier": "1",
|
||||
//},
|
||||
},
|
||||
},
|
||||
}
|
||||
cluster := docker.NewTestDockerCluster(t, opts)
|
||||
defer cluster.Cleanup()
|
||||
rafttest.Raft_Configuration_Test(t, cluster)
|
||||
repo, tag := testimages.GetImageRepoAndTag(t, false)
|
||||
|
||||
if err := cluster.AddNode(context.TODO(), opts); err != nil {
|
||||
t.Fatal(err)
|
||||
transit := sealhelper.NewTransitDockerSealServer(t)
|
||||
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
seals []testcluster.VaultNodeSealConfig
|
||||
}{
|
||||
{"shamir", nil},
|
||||
{"autoseal", []testcluster.VaultNodeSealConfig{transit.Seal("test", 1)}},
|
||||
} {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
binary := os.Getenv("VAULT_BINARY")
|
||||
if binary == "" {
|
||||
t.Skip("only running docker test when $VAULT_BINARY present")
|
||||
}
|
||||
opts := &docker.DockerClusterOptions{
|
||||
DisableMlock: true,
|
||||
ImageRepo: repo,
|
||||
ImageTag: tag,
|
||||
ClusterOptions: testcluster.ClusterOptions{
|
||||
VaultNodeConfig: &testcluster.VaultNodeConfig{
|
||||
Seal: tc.seals,
|
||||
LogLevel: "TRACE",
|
||||
},
|
||||
},
|
||||
}
|
||||
cluster := docker.NewTestDockerCluster(t, opts)
|
||||
rafttest.Raft_Configuration_Test(t, cluster)
|
||||
|
||||
if err := cluster.AddNode(context.Background(), opts); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
rafttest.Raft_Configuration_Test(t, cluster)
|
||||
})
|
||||
}
|
||||
rafttest.Raft_Configuration_Test(t, cluster)
|
||||
}
|
||||
|
||||
// removeRaftNode removes a node from the raft configuration using the leader client
|
||||
@ -134,17 +142,11 @@ func stabilize(t *testing.T, client *api.Client) {
|
||||
// nodes that use raft-wal (and vice-versa)
|
||||
// Having a cluster of mixed nodes, some using raft-boltdb and some using raft-wal, is not a problem.
|
||||
func TestDocker_LogStore_Boltdb_To_Raftwal_And_Back(t *testing.T) {
|
||||
binary := os.Getenv("VAULT_BINARY")
|
||||
if binary == "" {
|
||||
t.Skip("only running docker test when $VAULT_BINARY present")
|
||||
}
|
||||
repo, tag := testimages.GetImageRepoAndTag(t, false)
|
||||
opts := &docker.DockerClusterOptions{
|
||||
ImageRepo: "hashicorp/vault",
|
||||
DisableMlock: true,
|
||||
// We're replacing the binary anyway, so we're not too particular about
|
||||
// the docker image version tag.
|
||||
ImageTag: "latest",
|
||||
VaultBinary: binary,
|
||||
ImageRepo: repo,
|
||||
ImageTag: tag,
|
||||
ClusterOptions: testcluster.ClusterOptions{
|
||||
VaultNodeConfig: &testcluster.VaultNodeConfig{
|
||||
LogLevel: "TRACE",
|
||||
@ -329,17 +331,11 @@ func TestDocker_LogStore_Boltdb_To_Raftwal_And_Back(t *testing.T) {
|
||||
// by performing a snapshot restore from one cluster to another, and checking no data loss
|
||||
func TestRaft_LogStore_Migration_Snapshot(t *testing.T) {
|
||||
t.Parallel()
|
||||
binary := os.Getenv("VAULT_BINARY")
|
||||
if binary == "" {
|
||||
t.Skip("only running docker test when $VAULT_BINARY present")
|
||||
}
|
||||
repo, tag := testimages.GetImageRepoAndTag(t, false)
|
||||
opts := &docker.DockerClusterOptions{
|
||||
ImageRepo: "hashicorp/vault",
|
||||
DisableMlock: true,
|
||||
// We're replacing the binary anyway, so we're not too particular about
|
||||
// the docker image version tag.
|
||||
ImageTag: "latest",
|
||||
VaultBinary: binary,
|
||||
ImageRepo: repo,
|
||||
ImageTag: tag,
|
||||
ClusterOptions: testcluster.ClusterOptions{
|
||||
NumCores: 1,
|
||||
VaultNodeConfig: &testcluster.VaultNodeConfig{
|
||||
|
||||
@ -4,246 +4,47 @@
|
||||
package seal_binary
|
||||
|
||||
import (
|
||||
"context"
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"maps"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/go-uuid"
|
||||
"github.com/hashicorp/vault/api"
|
||||
dockhelper "github.com/hashicorp/vault/sdk/helper/docker"
|
||||
client "github.com/moby/moby/client"
|
||||
"github.com/hashicorp/vault/sdk/helper/testcluster"
|
||||
"github.com/hashicorp/vault/sdk/helper/testcluster/docker"
|
||||
)
|
||||
|
||||
const (
|
||||
containerConfig = `
|
||||
{
|
||||
"storage": {
|
||||
"file": {
|
||||
"path": "/tmp"
|
||||
}
|
||||
},
|
||||
|
||||
"disable_mlock": true,
|
||||
|
||||
"listener": [{
|
||||
"tcp": {
|
||||
"address": "0.0.0.0:8200",
|
||||
"tls_disable": "true"
|
||||
}
|
||||
}],
|
||||
|
||||
"api_addr": "http://0.0.0.0:8200",
|
||||
"cluster_addr": "http://0.0.0.0:8201",
|
||||
%s
|
||||
}`
|
||||
|
||||
sealConfig = `
|
||||
"seal": [
|
||||
%s
|
||||
]
|
||||
`
|
||||
|
||||
transitParameters = `
|
||||
"address": "%s",
|
||||
"token": "%s",
|
||||
"mount_path": "%s",
|
||||
"key_name": "%s",
|
||||
"name": "%s"
|
||||
`
|
||||
|
||||
transitStanza = `
|
||||
{
|
||||
"transit": {
|
||||
%s,
|
||||
"priority": %d,
|
||||
"disabled": %s
|
||||
}
|
||||
}
|
||||
`
|
||||
// recoveryModeFileName serves as a signal for the softhsmSetupScript to add the `-recovery` flag
|
||||
// when launching Vault.
|
||||
recoveryModeFileName = "start-in-recovery-mode"
|
||||
recoveryModeFileDir = "/root/"
|
||||
recoveryModeFileContents = "Script setup-softhsm.sh looks for this file and starts vault in recovery mode if it sees it"
|
||||
)
|
||||
|
||||
type transitContainerConfig struct {
|
||||
Address string
|
||||
Token string
|
||||
MountPaths []string
|
||||
KeyNames []string
|
||||
}
|
||||
|
||||
func createBuildContextWithBinary(vaultBinary string) (dockhelper.BuildContext, error) {
|
||||
f, err := os.Open(vaultBinary)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error opening vault binary file: %w", err)
|
||||
}
|
||||
data, err := io.ReadAll(f)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error reading vault binary file: %w", err)
|
||||
}
|
||||
|
||||
bCtx := dockhelper.NewBuildContext()
|
||||
bCtx["vault"] = &dockhelper.FileContents{
|
||||
Data: data,
|
||||
Mode: 0o755,
|
||||
}
|
||||
|
||||
return bCtx, nil
|
||||
}
|
||||
|
||||
func createDockerImage(imageRepo, imageTag, containerFile string, bCtx dockhelper.BuildContext) error {
|
||||
runner, err := dockhelper.NewServiceRunner(dockhelper.RunOptions{
|
||||
ContainerName: "vault",
|
||||
ImageRepo: imageRepo,
|
||||
ImageTag: "latest",
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating runner: %w", err)
|
||||
}
|
||||
|
||||
_, err = runner.BuildImage(context.Background(), containerFile, bCtx,
|
||||
dockhelper.BuildRemove(true),
|
||||
dockhelper.BuildForceRemove(true),
|
||||
dockhelper.BuildPullParent(true),
|
||||
dockhelper.BuildTags([]string{fmt.Sprintf("%s:%s", imageRepo, imageTag)}))
|
||||
if err != nil {
|
||||
return fmt.Errorf("error building docker image: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// This passes the config in an environment variable, so any changes to local.json
|
||||
// on the container will be overwritten if the container restarts
|
||||
func createContainerWithConfig(config string, imageRepo, imageTag string, logConsumer func(s string)) (*dockhelper.Service, *dockhelper.Runner, error) {
|
||||
runner, err := dockhelper.NewServiceRunner(dockhelper.RunOptions{
|
||||
ContainerName: "vault",
|
||||
ImageRepo: imageRepo,
|
||||
ImageTag: imageTag,
|
||||
Cmd: []string{
|
||||
"server", "-log-level=trace",
|
||||
},
|
||||
Ports: []string{"8200/tcp"},
|
||||
Env: []string{
|
||||
fmt.Sprintf("VAULT_LICENSE=%s", os.Getenv("VAULT_LICENSE")),
|
||||
fmt.Sprintf("VAULT_LOCAL_CONFIG=%s", config),
|
||||
"SKIP_SETCAP=true",
|
||||
},
|
||||
LogConsumer: logConsumer,
|
||||
DoNotAutoRemove: true,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("error creating runner: %w", err)
|
||||
}
|
||||
|
||||
svc, err := runner.StartService(context.Background(), func(ctx context.Context, host string, port int) (dockhelper.ServiceConfig, error) {
|
||||
return *dockhelper.NewServiceURL(url.URL{Scheme: "http", Host: fmt.Sprintf("%s:%d", host, port)}), nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("could not start docker vault: %w", err)
|
||||
}
|
||||
|
||||
return svc, runner, nil
|
||||
}
|
||||
|
||||
func createContainerFromImage(imageRepo, imageTag string, logConsumer func(s string)) (*dockhelper.Service, *dockhelper.Runner, error) {
|
||||
return createContainerWithConfig("", imageRepo, imageTag, logConsumer)
|
||||
}
|
||||
|
||||
func createTransitTestContainer(imageRepo, imageTag string, numKeys int) (*dockhelper.Service, *transitContainerConfig, error) {
|
||||
rootToken, err := uuid.GenerateUUID()
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("error generating UUID: %w", err)
|
||||
}
|
||||
|
||||
mountPaths := make([]string, numKeys)
|
||||
keyNames := make([]string, numKeys)
|
||||
|
||||
for i := range mountPaths {
|
||||
mountPaths[i], err = uuid.GenerateUUID()
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("error generating UUID: %w", err)
|
||||
}
|
||||
|
||||
keyNames[i], err = uuid.GenerateUUID()
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("error generating UUID: %w", err)
|
||||
func init() {
|
||||
if signed := os.Getenv("VAULT_LICENSE_CI"); signed != "" {
|
||||
if err := os.Setenv("VAULT_LICENSE", signed); err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
runner, err := dockhelper.NewServiceRunner(dockhelper.RunOptions{
|
||||
ContainerName: "vault",
|
||||
ImageRepo: imageRepo,
|
||||
ImageTag: imageTag,
|
||||
Cmd: []string{
|
||||
"server", "-log-level=trace", "-dev", fmt.Sprintf("-dev-root-token-id=%s", rootToken),
|
||||
"-dev-listen-address=0.0.0.0:8200",
|
||||
},
|
||||
Env: []string{fmt.Sprintf("VAULT_LICENSE=%s", os.Getenv("VAULT_LICENSE")), "SKIP_SETCAP=true"},
|
||||
Ports: []string{"8200/tcp"},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("could not create runner: %w", err)
|
||||
}
|
||||
func withPriorityAndDisabled(priority int, disabled bool, seal testcluster.VaultNodeSealConfig) testcluster.VaultNodeSealConfig {
|
||||
modified := seal
|
||||
modified.Config = maps.Clone(seal.Config)
|
||||
modified.Config["disabled"] = strconv.FormatBool(disabled)
|
||||
modified.Config["priority"] = strconv.Itoa(priority)
|
||||
return modified
|
||||
}
|
||||
|
||||
svc, err := runner.StartService(context.Background(), func(ctx context.Context, host string, port int) (dockhelper.ServiceConfig, error) {
|
||||
c := *dockhelper.NewServiceURL(url.URL{Scheme: "http", Host: fmt.Sprintf("%s:%d", host, port)})
|
||||
type seal struct {
|
||||
base func(name string, idx int) testcluster.VaultNodeSealConfig
|
||||
index int
|
||||
disabled bool
|
||||
priority int
|
||||
}
|
||||
|
||||
clientConfig := api.DefaultConfig()
|
||||
clientConfig.Address = c.URL().String()
|
||||
vault, err := api.NewClient(clientConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
vault.SetToken(rootToken)
|
||||
|
||||
// Set up transit mounts and keys
|
||||
for i := range mountPaths {
|
||||
if err := vault.Sys().Mount(mountPaths[i], &api.MountInput{
|
||||
Type: "transit",
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if _, err := vault.Logical().Write(path.Join(mountPaths[i], "keys", keyNames[i]), map[string]interface{}{}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return c, nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("could not start docker vault: %w", err)
|
||||
}
|
||||
|
||||
mapping, err := runner.GetNetworkAndAddresses(svc.Container.Name)
|
||||
if err != nil {
|
||||
svc.Cleanup()
|
||||
return nil, nil, fmt.Errorf("failed to get container network information: %w", err)
|
||||
}
|
||||
|
||||
if len(mapping) != 1 {
|
||||
svc.Cleanup()
|
||||
return nil, nil, fmt.Errorf("expected 1 network mapping, got %d", len(mapping))
|
||||
}
|
||||
|
||||
var ip string
|
||||
for _, ip = range mapping {
|
||||
// capture the container IP address from the map
|
||||
}
|
||||
|
||||
return svc,
|
||||
&transitContainerConfig{
|
||||
Address: fmt.Sprintf("http://%s:8200", ip),
|
||||
Token: rootToken,
|
||||
MountPaths: mountPaths,
|
||||
KeyNames: keyNames,
|
||||
}, nil
|
||||
type step struct {
|
||||
expectedSealType string
|
||||
seals []seal
|
||||
}
|
||||
|
||||
func validateVaultStatusAndSealType(client *api.Client, expectedSealType string) error {
|
||||
@ -263,86 +64,134 @@ func validateVaultStatusAndSealType(client *api.Client, expectedSealType string)
|
||||
return nil
|
||||
}
|
||||
|
||||
func testClient(address string) (*api.Client, error) {
|
||||
clientConfig := api.DefaultConfig()
|
||||
clientConfig.Address = address
|
||||
testClient, err := api.NewClient(clientConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
func dockerOptions(t *testing.T, repo, tag string) *docker.DockerClusterOptions {
|
||||
opts := docker.DefaultOptions(t)
|
||||
opts.NumCores = 1
|
||||
opts.ImageRepo, opts.ImageTag = repo, tag
|
||||
opts.VaultBinary = ""
|
||||
// Probably not reliable in CI with multi-node clusters, but we're assuming callers
|
||||
// of this func won't change NumCores to be >1.
|
||||
opts.VaultNodeConfig.StorageOptions = map[string]string{
|
||||
"performance_multiplier": "1",
|
||||
}
|
||||
return testClient, nil
|
||||
return opts
|
||||
}
|
||||
|
||||
func initializeVault(client *api.Client, sealType string) ([]string, string, error) {
|
||||
var keys []string
|
||||
var token string
|
||||
|
||||
if sealType == "shamir" {
|
||||
initResp, err := client.Sys().Init(&api.InitRequest{
|
||||
SecretThreshold: 1,
|
||||
SecretShares: 1,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
keys = initResp.Keys
|
||||
token = initResp.RootToken
|
||||
|
||||
_, err = client.Sys().Unseal(initResp.Keys[0])
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
} else {
|
||||
initResp, err := client.Sys().Init(&api.InitRequest{
|
||||
RecoveryShares: 1,
|
||||
RecoveryThreshold: 1,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
keys = initResp.RecoveryKeys
|
||||
token = initResp.RootToken
|
||||
}
|
||||
|
||||
return keys, token, nil
|
||||
type logScanner struct {
|
||||
wg sync.WaitGroup
|
||||
l sync.Mutex
|
||||
ch chan string
|
||||
pw *io.PipeWriter
|
||||
stop chan struct{}
|
||||
}
|
||||
|
||||
func copyConfigToContainer(containerID string, bCtx dockhelper.BuildContext, runner *dockhelper.Runner) error {
|
||||
tar, err := bCtx.ToTarball()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating config tarball: %w", err)
|
||||
func newLogScanner(t *testing.T, underlying io.Writer, bufLines int) (*logScanner, io.Writer) {
|
||||
pr, pw := io.Pipe()
|
||||
ls := &logScanner{
|
||||
ch: make(chan string, bufLines),
|
||||
pw: pw,
|
||||
stop: make(chan struct{}),
|
||||
}
|
||||
|
||||
_, err = runner.DockerAPI.CopyToContainer(context.Background(), containerID, client.CopyToContainerOptions{
|
||||
DestinationPath: "/vault/config",
|
||||
Content: tar,
|
||||
ls.wg.Add(1)
|
||||
go func() {
|
||||
defer ls.wg.Done()
|
||||
// bufio.Scanner is perfect here because hclog writes each log entry
|
||||
// ending with a newline character.
|
||||
scanner := bufio.NewScanner(pr)
|
||||
|
||||
// scanner.Scan() will block until a new line is written to the pipe,
|
||||
// and it will exit automatically when pw.Close() is called.
|
||||
for scanner.Scan() {
|
||||
logLine := scanner.Text()
|
||||
underlying.Write([]byte(logLine + "\n"))
|
||||
select {
|
||||
case <-ls.stop:
|
||||
return
|
||||
case ls.ch <- logLine:
|
||||
}
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
t.Fatalf("Scanner error: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
t.Cleanup(func() {
|
||||
if err := ls.Close(); err != nil {
|
||||
t.Logf("Error closing scanner: %v", err)
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error copying config to container: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
return ls, pw
|
||||
}
|
||||
|
||||
func copyRecoveryModeTriggerToContainer(containerID string, runner *dockhelper.Runner) error {
|
||||
bCtx := dockhelper.NewBuildContext()
|
||||
bCtx[recoveryModeFileName] = &dockhelper.FileContents{
|
||||
Data: []byte(recoveryModeFileContents),
|
||||
Mode: 0o644,
|
||||
func (ls *logScanner) Lines() <-chan string {
|
||||
return ls.ch
|
||||
}
|
||||
|
||||
func (ls *logScanner) Close() error {
|
||||
ls.l.Lock()
|
||||
defer ls.l.Unlock()
|
||||
|
||||
close(ls.stop)
|
||||
err := ls.pw.Close()
|
||||
ls.wg.Wait()
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
type logMatcher struct {
|
||||
targets map[string]bool
|
||||
lines <-chan string
|
||||
done chan struct{}
|
||||
l sync.RWMutex
|
||||
}
|
||||
|
||||
func newLogMatcher(lines <-chan string, targets []string) *logMatcher {
|
||||
tmap := make(map[string]bool)
|
||||
for _, target := range targets {
|
||||
tmap[target] = false
|
||||
}
|
||||
tar, err := bCtx.ToTarball()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating config tarball: %w", err)
|
||||
lm := &logMatcher{
|
||||
targets: tmap,
|
||||
lines: lines,
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
|
||||
_, err = runner.DockerAPI.CopyToContainer(context.Background(), containerID, client.CopyToContainerOptions{
|
||||
DestinationPath: recoveryModeFileDir,
|
||||
Content: tar,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error copying revovery mode trigger file to container: %w", err)
|
||||
}
|
||||
return nil
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-lm.done:
|
||||
return
|
||||
case line := <-lines:
|
||||
for target, ok := range tmap {
|
||||
if !ok && strings.Contains(line, target) {
|
||||
lm.l.Lock()
|
||||
tmap[target] = true
|
||||
lm.l.Unlock()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return lm
|
||||
}
|
||||
|
||||
func (lm *logMatcher) stop() {
|
||||
close(lm.done)
|
||||
}
|
||||
|
||||
func (lm *logMatcher) missing() []string {
|
||||
lm.l.RLock()
|
||||
defer lm.l.RUnlock()
|
||||
|
||||
var ret []string
|
||||
for target, found := range lm.targets {
|
||||
if !found {
|
||||
ret = append(ret, target)
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
24
vault/external_tests/seal_binary/seal_docker_util_oss.go
Normal file
24
vault/external_tests/seal_binary/seal_docker_util_oss.go
Normal file
@ -0,0 +1,24 @@
|
||||
// Copyright IBM Corp. 2016, 2025
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
//go:build !enterprise
|
||||
|
||||
package seal_binary
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/vault/api"
|
||||
"github.com/hashicorp/vault/sdk/helper/testcluster"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func pkcsWrapper(string, int) testcluster.VaultNodeSealConfig {
|
||||
return testcluster.VaultNodeSealConfig{}
|
||||
}
|
||||
|
||||
func getRewrappedEntryCount(client *api.Client) (uint32, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func verifyRewrappedEntryCount(t *assert.CollectT, client *api.Client, initialProcessedEntries uint32) uint32 {
|
||||
return 0
|
||||
}
|
||||
@ -1,178 +1,269 @@
|
||||
// Copyright IBM Corp. 2016, 2025
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
//go:build !enterprise
|
||||
|
||||
package seal_binary
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
dockhelper "github.com/hashicorp/vault/sdk/helper/docker"
|
||||
dockerclient "github.com/moby/moby/client"
|
||||
"github.com/hashicorp/vault/api"
|
||||
"github.com/hashicorp/vault/helper/constants"
|
||||
sealhelper "github.com/hashicorp/vault/helper/testhelpers/seal"
|
||||
"github.com/hashicorp/vault/helper/testhelpers/testimages"
|
||||
"github.com/hashicorp/vault/sdk/helper/testcluster/docker"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestSealReloadSIGHUP(t *testing.T) {
|
||||
binary := os.Getenv("VAULT_BINARY")
|
||||
if binary == "" {
|
||||
t.Skip("only running docker test with $VAULT_BINARY present")
|
||||
transit := sealhelper.NewTransitDockerSealServer(t)
|
||||
|
||||
repo, tag := testimages.GetImageRepoAndTag(t, constants.IsEnterprise)
|
||||
|
||||
type testCase struct {
|
||||
name string
|
||||
steps []step
|
||||
disableMultiseal bool
|
||||
}
|
||||
|
||||
transitContainer, transitConfig, err := createTransitTestContainer("hashicorp/vault", "latest", 2)
|
||||
if err != nil {
|
||||
t.Fatalf("error creating vault container: %s", err)
|
||||
}
|
||||
defer transitContainer.Cleanup()
|
||||
|
||||
firstTransitKeyConfig := fmt.Sprintf(transitParameters,
|
||||
transitConfig.Address,
|
||||
transitConfig.Token,
|
||||
transitConfig.MountPaths[0],
|
||||
transitConfig.KeyNames[0],
|
||||
"transit-seal-1",
|
||||
)
|
||||
|
||||
secondTransitKeyConfig := fmt.Sprintf(transitParameters,
|
||||
transitConfig.Address,
|
||||
transitConfig.Token,
|
||||
transitConfig.MountPaths[1],
|
||||
transitConfig.KeyNames[1],
|
||||
"transit-seal-2",
|
||||
)
|
||||
|
||||
testCases := map[string]struct {
|
||||
sealStanzas []string
|
||||
expectedSealTypes []string
|
||||
}{
|
||||
"migrate transit to transit": {
|
||||
sealStanzas: []string{
|
||||
fmt.Sprintf(transitStanza, firstTransitKeyConfig, 1, "false"),
|
||||
fmt.Sprintf(transitStanza, firstTransitKeyConfig, 2, "true") + "," +
|
||||
fmt.Sprintf(transitStanza, secondTransitKeyConfig, 1, "false"),
|
||||
fmt.Sprintf(transitStanza, secondTransitKeyConfig, 1, "false"),
|
||||
testCases := []testCase{
|
||||
{
|
||||
name: "transit to transit",
|
||||
steps: []step{
|
||||
{
|
||||
"transit", []seal{
|
||||
{base: transit.Seal, index: 0, priority: 1},
|
||||
},
|
||||
}, {
|
||||
"multiseal", []seal{
|
||||
{base: transit.Seal, index: 0, priority: 2, disabled: true},
|
||||
{base: transit.Seal, index: 1, priority: 1},
|
||||
},
|
||||
}, {
|
||||
"transit", []seal{
|
||||
{base: transit.Seal, index: 1, priority: 1},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedSealTypes: []string{
|
||||
"transit",
|
||||
"transit",
|
||||
"transit",
|
||||
}, {
|
||||
name: "transit to transit no multiseal",
|
||||
steps: []step{
|
||||
{
|
||||
"transit", []seal{
|
||||
{base: transit.Seal, index: 0, priority: 1},
|
||||
},
|
||||
}, {
|
||||
"transit", []seal{
|
||||
{base: transit.Seal, index: 0, priority: 2, disabled: true},
|
||||
{base: transit.Seal, index: 1, priority: 1},
|
||||
},
|
||||
}, {
|
||||
"transit", []seal{
|
||||
{base: transit.Seal, index: 1, priority: 1},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"migrate shamir to transit fails": {
|
||||
sealStanzas: []string{
|
||||
"",
|
||||
fmt.Sprintf(transitStanza, firstTransitKeyConfig, 1, "false"),
|
||||
disableMultiseal: true,
|
||||
}, {
|
||||
name: "transit to pkcs11",
|
||||
steps: []step{
|
||||
{
|
||||
"transit", []seal{
|
||||
{base: transit.Seal, priority: 1},
|
||||
},
|
||||
}, {
|
||||
"multiseal", []seal{
|
||||
{base: transit.Seal, priority: 1, disabled: true},
|
||||
{base: pkcsWrapper, priority: 2},
|
||||
},
|
||||
}, {
|
||||
"pkcs11", []seal{
|
||||
{base: pkcsWrapper, priority: 1},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedSealTypes: []string{
|
||||
"shamir",
|
||||
"shamir",
|
||||
}, {
|
||||
name: "pkcs11 to transit",
|
||||
steps: []step{
|
||||
{
|
||||
"pkcs11", []seal{
|
||||
{base: pkcsWrapper, priority: 1},
|
||||
},
|
||||
}, {
|
||||
"multiseal", []seal{
|
||||
{base: pkcsWrapper, priority: 2, disabled: true},
|
||||
{base: transit.Seal, priority: 1},
|
||||
},
|
||||
}, {
|
||||
"transit", []seal{
|
||||
{base: transit.Seal, priority: 1},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"migrate transit to shamir fails": {
|
||||
sealStanzas: []string{
|
||||
fmt.Sprintf(transitStanza, firstTransitKeyConfig, 1, "false"),
|
||||
"",
|
||||
}, {
|
||||
name: "two transit seals",
|
||||
steps: []step{
|
||||
{
|
||||
"transit", []seal{
|
||||
{base: transit.Seal, priority: 1},
|
||||
},
|
||||
}, {
|
||||
"multiseal", []seal{
|
||||
{base: transit.Seal, index: 0, priority: 1},
|
||||
{base: transit.Seal, index: 1, priority: 2},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedSealTypes: []string{
|
||||
"transit",
|
||||
"transit",
|
||||
}, {
|
||||
name: "pkcs11 seal and transit seal",
|
||||
steps: []step{
|
||||
{
|
||||
"transit", []seal{
|
||||
{base: transit.Seal, priority: 1},
|
||||
},
|
||||
}, {
|
||||
"multiseal", []seal{
|
||||
{base: transit.Seal, priority: 1},
|
||||
{base: pkcsWrapper, priority: 2},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"replacing seal fails": {
|
||||
sealStanzas: []string{
|
||||
fmt.Sprintf(transitStanza, firstTransitKeyConfig, 1, "false"),
|
||||
fmt.Sprintf(transitStanza, secondTransitKeyConfig, 1, "false"),
|
||||
}, {
|
||||
name: "three seals",
|
||||
steps: []step{
|
||||
{
|
||||
"transit", []seal{
|
||||
{base: transit.Seal, index: 0, priority: 1},
|
||||
},
|
||||
}, {
|
||||
"multiseal", []seal{
|
||||
{base: transit.Seal, index: 0, priority: 1},
|
||||
{base: pkcsWrapper, priority: 2},
|
||||
},
|
||||
}, {
|
||||
"multiseal", []seal{
|
||||
{base: transit.Seal, index: 0, priority: 1},
|
||||
{base: pkcsWrapper, priority: 2},
|
||||
{base: transit.Seal, index: 1, priority: 3},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedSealTypes: []string{
|
||||
"transit",
|
||||
"transit",
|
||||
}, {
|
||||
name: "remove enabled seal",
|
||||
steps: []step{
|
||||
{
|
||||
"transit", []seal{
|
||||
{base: transit.Seal, priority: 1},
|
||||
},
|
||||
}, {
|
||||
"multiseal", []seal{
|
||||
{base: transit.Seal, priority: 1},
|
||||
{base: pkcsWrapper, priority: 2},
|
||||
},
|
||||
}, {
|
||||
"pkcs11", []seal{
|
||||
{base: pkcsWrapper, priority: 2},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"more than one seal fails": {
|
||||
sealStanzas: []string{
|
||||
fmt.Sprintf(transitStanza, firstTransitKeyConfig, 1, "false"),
|
||||
fmt.Sprintf(transitStanza, firstTransitKeyConfig, 1, "false") + "," +
|
||||
fmt.Sprintf(transitStanza, secondTransitKeyConfig, 2, "false"),
|
||||
}, {
|
||||
name: "shamir to transit fails",
|
||||
steps: []step{
|
||||
{
|
||||
"shamir", nil,
|
||||
}, {
|
||||
"shamir", []seal{
|
||||
{base: transit.Seal, priority: 1},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedSealTypes: []string{
|
||||
"transit",
|
||||
"transit",
|
||||
}, {
|
||||
name: "transit to shamir fails",
|
||||
steps: []step{
|
||||
{
|
||||
"transit", []seal{{base: transit.Seal, priority: 1}},
|
||||
}, {
|
||||
"transit", nil,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
name: "replacing seal fails",
|
||||
steps: []step{
|
||||
{
|
||||
"transit", []seal{{base: transit.Seal, index: 0, priority: 1}},
|
||||
}, {
|
||||
"transit", []seal{{base: transit.Seal, index: 1, priority: 1}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
containerFile := `
|
||||
FROM hashicorp/vault:latest
|
||||
COPY vault /bin/vault
|
||||
`
|
||||
bCtx, err := createBuildContextWithBinary(os.Getenv("VAULT_BINARY"))
|
||||
if err != nil {
|
||||
t.Fatalf("error creating build context: %s", err)
|
||||
}
|
||||
err = createDockerImage("hashicorp/vault", "test-image", containerFile, bCtx)
|
||||
if err != nil {
|
||||
t.Fatalf("error creating docker image: %s", err)
|
||||
isEnterpriseCase := func(tc testCase) bool {
|
||||
for _, step := range tc.steps {
|
||||
if step.expectedSealType == "multiseal" || step.expectedSealType == "pkcs11" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
for name, test := range testCases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
var sealList string
|
||||
if test.sealStanzas[0] != "" {
|
||||
sealList = fmt.Sprintf(sealConfig, test.sealStanzas[0])
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
if isEnterpriseCase(tc) && !constants.IsEnterprise {
|
||||
t.Skip("Skipping enterprise tests")
|
||||
}
|
||||
|
||||
vaultConfig := fmt.Sprintf(containerConfig, sealList)
|
||||
|
||||
svc, runner, err := createContainerWithConfig(vaultConfig, "hashicorp/vault", "test-image", func(s string) { t.Log(s) })
|
||||
if err != nil {
|
||||
t.Fatalf("error creating container: %s", err)
|
||||
opts := dockerOptions(t, repo, tag)
|
||||
for _, seal := range tc.steps[0].seals {
|
||||
vncseal := withPriorityAndDisabled(seal.priority, seal.disabled, seal.base(tc.name, seal.index))
|
||||
opts.VaultNodeConfig.Seal = append(opts.VaultNodeConfig.Seal, vncseal)
|
||||
}
|
||||
defer svc.Cleanup()
|
||||
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
client, err := testClient(svc.Config.URL().String())
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
if tc.steps[0].expectedSealType != "shamir" && !tc.disableMultiseal {
|
||||
opts.VaultNodeConfig.EnableMultiSeal = true
|
||||
}
|
||||
cluster := docker.NewTestDockerCluster(t, opts)
|
||||
node := cluster.Nodes()[0].(*docker.DockerClusterNode)
|
||||
client := node.APIClient()
|
||||
lastRewrappedEntryCount, err := getRewrappedEntryCount(client)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, token, err := initializeVault(client, test.expectedSealTypes[0])
|
||||
if err != nil {
|
||||
t.Fatalf("error initializing vault: %s", err)
|
||||
}
|
||||
client.SetToken(token)
|
||||
// kv mounts are sealwrapped. In order to make sure that we don't get fooled
|
||||
// by the rewrap status endpoint saying "not in progress" prior to a rewrap
|
||||
// being started, we're going to arrange for there to be an extra key to wrap
|
||||
// each iteration, by creating a new kv entry each iteration.
|
||||
require.NoError(t, client.Sys().Mount("kv", &api.MountInput{
|
||||
Type: "kv",
|
||||
}))
|
||||
client.Logical().Write("kv/0", map[string]any{"1": 1})
|
||||
|
||||
for i := range test.sealStanzas {
|
||||
if test.sealStanzas[i] != "" {
|
||||
sealList = fmt.Sprintf(sealList, test.sealStanzas[i])
|
||||
expectFailure := len(tc.steps) < 3
|
||||
for i := 1; i < len(tc.steps); i++ {
|
||||
if tc.steps[i].expectedSealType != "shamir" && !tc.disableMultiseal {
|
||||
opts.VaultNodeConfig.EnableMultiSeal = true
|
||||
}
|
||||
|
||||
vaultConfig = fmt.Sprintf(containerConfig, sealList)
|
||||
configCtx := dockhelper.NewBuildContext()
|
||||
configCtx["local.json"] = &dockhelper.FileContents{
|
||||
Data: []byte(vaultConfig),
|
||||
Mode: 0o644,
|
||||
opts.VaultNodeConfig.Seal = nil
|
||||
for _, seal := range tc.steps[i].seals {
|
||||
opts.VaultNodeConfig.Seal = append(opts.VaultNodeConfig.Seal,
|
||||
withPriorityAndDisabled(seal.priority, seal.disabled, seal.base(tc.name, seal.index)))
|
||||
}
|
||||
require.NoError(t, node.UpdateConfig(t.Context(), opts))
|
||||
require.NoError(t, node.Signal(t.Context(), "SIGHUP"))
|
||||
|
||||
err = copyConfigToContainer(svc.Container.ID, bCtx, runner)
|
||||
if err != nil {
|
||||
t.Fatalf("error copying over config file: %s", err)
|
||||
}
|
||||
require.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
if !tc.disableMultiseal && !expectFailure && tc.steps[i].expectedSealType != "shamir" {
|
||||
lastRewrappedEntryCount = verifyRewrappedEntryCount(ct, client, lastRewrappedEntryCount+1)
|
||||
}
|
||||
|
||||
_, err = runner.DockerAPI.ContainerKill(context.Background(), svc.Container.ID, dockerclient.ContainerKillOptions{
|
||||
Signal: "SIGHUP",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("error sending SIGHUP: %s", err)
|
||||
}
|
||||
resp, err := client.Sys().SealStatusWithContext(t.Context())
|
||||
require.NoError(t, err)
|
||||
assert.Equal(ct, resp.Type, tc.steps[i].expectedSealType)
|
||||
assert.False(ct, resp.Sealed)
|
||||
}, 20*time.Second, time.Second/2)
|
||||
|
||||
err = validateVaultStatusAndSealType(client, test.expectedSealTypes[i])
|
||||
if err != nil {
|
||||
t.Fatalf("seal type check failed: %s", err)
|
||||
}
|
||||
client.Logical().Write("kv/"+strconv.Itoa(i), map[string]any{"1": 1})
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@ -4,11 +4,11 @@
|
||||
package system_binary
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/vault/api"
|
||||
"github.com/hashicorp/vault/helper/testhelpers/testimages"
|
||||
"github.com/hashicorp/vault/sdk/helper/testcluster"
|
||||
"github.com/hashicorp/vault/sdk/helper/testcluster/docker"
|
||||
"github.com/stretchr/testify/require"
|
||||
@ -60,19 +60,15 @@ func waitForRekeyInConfig(t *testing.T, client *api.Client, rootToken string, sh
|
||||
// between requiring authentication and not requiring authentication by using
|
||||
// the enable_unauthenticated_access config option and reloading the config.
|
||||
func TestSysRekey_ConfigReload(t *testing.T) {
|
||||
binary := os.Getenv("VAULT_BINARY")
|
||||
if binary == "" {
|
||||
t.Skip("only running docker test when $VAULT_BINARY present")
|
||||
}
|
||||
repo, tag := testimages.GetImageRepoAndTag(t, false)
|
||||
|
||||
nodeConfig := &testcluster.VaultNodeConfig{
|
||||
LogLevel: "TRACE",
|
||||
}
|
||||
opts := &docker.DockerClusterOptions{
|
||||
ImageRepo: "hashicorp/vault",
|
||||
ImageTag: "latest",
|
||||
VaultBinary: binary,
|
||||
DisableMlock: true,
|
||||
ImageRepo: repo,
|
||||
ImageTag: tag,
|
||||
ClusterOptions: testcluster.ClusterOptions{
|
||||
NumCores: 1,
|
||||
VaultNodeConfig: nodeConfig,
|
||||
@ -115,7 +111,7 @@ func TestSysRekey_ConfigReload(t *testing.T) {
|
||||
nodeConfig.EnableUnauthenticatedAccess = []string{"rekey"}
|
||||
|
||||
// Update the config and copy it to the container
|
||||
err := node.UpdateConfig(t.Context(), nodeConfig)
|
||||
err := node.UpdateConfig(t.Context(), opts)
|
||||
require.NoError(t, err, "failed to update config")
|
||||
|
||||
// Send SIGHUP to reload the configuration
|
||||
@ -154,7 +150,7 @@ func TestSysRekey_ConfigReload(t *testing.T) {
|
||||
nodeConfig.EnableUnauthenticatedAccess = nil
|
||||
|
||||
// Update the config and copy it to the container
|
||||
err := node.UpdateConfig(t.Context(), nodeConfig)
|
||||
err := node.UpdateConfig(t.Context(), opts)
|
||||
require.NoError(t, err, "failed to update config")
|
||||
|
||||
// Send SIGHUP to reload the configuration
|
||||
|
||||
@ -163,6 +163,10 @@ func (c *Core) Initialize(ctx context.Context, initParams *InitParams) (*InitRes
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := c.ValidateMultiSealConfig(ctx, true); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
atomic.StoreUint32(&initInProgress, 1)
|
||||
defer atomic.StoreUint32(&initInProgress, 0)
|
||||
barrierConfig := initParams.BarrierConfig
|
||||
@ -443,6 +447,23 @@ func (c *Core) Initialize(ctx context.Context, initParams *InitParams) (*InitRes
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// ValidateMultiSealConfig is an utility method for verifying SealGenerationInfo.
|
||||
// Its purpose is to read the existing SealGenerationInfo from storage, if any,
|
||||
// and to determine whether there are partially wrapped paths.
|
||||
// Argument onInit indicates whether Vault is being initialized and thus creating
|
||||
// the initial barrier seal.
|
||||
func (c *Core) ValidateMultiSealConfig(ctx context.Context, onInit bool) error {
|
||||
existingSgi, err := PhysicalSealGenInfo(ctx, c.PhysicalAccess())
|
||||
if err != nil {
|
||||
return fmt.Errorf("error reading existing seal generation info from storage: %w", err)
|
||||
}
|
||||
hasPartiallyWrappedPaths, err := HasPartiallyWrappedPaths(ctx, c.PhysicalAccess())
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot determine whether partially wrapped entries in storage: %w", err)
|
||||
}
|
||||
return seal.ValidateMultiSealGenerationInfo(onInit, c.seal.GetAccess().GetSealGenerationInfo(), existingSgi, hasPartiallyWrappedPaths)
|
||||
}
|
||||
|
||||
// UnsealWithStoredKeys performs auto-unseal using stored keys. An error
|
||||
// return value of "nil" implies the Vault instance is unsealed.
|
||||
//
|
||||
|
||||
@ -6025,25 +6025,26 @@ func (b *SystemBackend) pathInternalOpenAPI(ctx context.Context, req *logical.Re
|
||||
}
|
||||
|
||||
// SealStatusResponse is the JSON body returned for seal-status requests,
// describing the current seal configuration and unseal progress.
type SealStatusResponse struct {
	Type               string   `json:"type"`
	Initialized        bool     `json:"initialized"`
	Sealed             bool     `json:"sealed"`
	T                  int      `json:"t"`
	N                  int      `json:"n"`
	Progress           int      `json:"progress"`
	Nonce              string   `json:"nonce"`
	Version            string   `json:"version"`
	BuildDate          string   `json:"build_date"`
	Migration          bool     `json:"migration"`
	ClusterName        string   `json:"cluster_name,omitempty"`
	ClusterID          string   `json:"cluster_id,omitempty"`
	RecoverySeal       bool     `json:"recovery_seal"`
	StorageType        string   `json:"storage_type,omitempty"`
	HCPLinkStatus      string   `json:"hcp_link_status,omitempty"`
	HCPLinkResourceID  string   `json:"hcp_link_resource_ID,omitempty"`
	Warnings           []string `json:"warnings,omitempty"`
	RecoverySealType   string   `json:"recovery_seal_type,omitempty"`
	RemovedFromCluster *bool    `json:"removed_from_cluster,omitempty"`
	// MigrationDoneAtEpoch is the Unix timestamp recorded when a seal
	// migration finished (populated from core.sealMigrationDone); omitted
	// when zero.
	MigrationDoneAtEpoch int64 `json:"migration_done_at_epoch,omitempty"`
}
|
||||
|
||||
type SealBackendStatus struct {
|
||||
@ -6156,6 +6157,9 @@ func (core *Core) GetSealStatus(ctx context.Context, lock bool) (*SealStatusResp
|
||||
RecoverySealType: recoverySealType,
|
||||
StorageType: core.StorageType(),
|
||||
}
|
||||
if p := core.sealMigrationDone.Load(); p != nil {
|
||||
s.MigrationDoneAtEpoch = p.Unix()
|
||||
}
|
||||
|
||||
if resourceIDonHCP != "" {
|
||||
s.HCPLinkStatus = hcpLinkStatus
|
||||
|
||||
@ -1022,6 +1022,8 @@ func (c *Core) getRaftChallenge(leaderInfo *raft.LeaderJoinInfo) (*raftInformati
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// We compare here the local seal configuration to that of the leader we are trying to join,
|
||||
// thus there is no need to call ValidateSealGenerationInfo.
|
||||
if !CompatibleSealTypes(sealConfig.Type, c.seal.BarrierSealConfigType().String()) {
|
||||
return nil, fmt.Errorf("incompatible seal types between raft leader (%s) and follower (%s)", sealConfig.Type, c.seal.BarrierSealConfigType())
|
||||
}
|
||||
|
||||
@ -8,16 +8,13 @@ import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
metrics "github.com/armon/go-metrics"
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
wrapping "github.com/hashicorp/go-kms-wrapping/v2"
|
||||
"github.com/hashicorp/go-kms-wrapping/v2/aead"
|
||||
@ -62,174 +59,6 @@ type SealGenerationInfo struct {
|
||||
Enabled bool
|
||||
}
|
||||
|
||||
// Validate is used to sanity check the seal generation info being created
|
||||
func (sgi *SealGenerationInfo) Validate(existingSgi *SealGenerationInfo, hasPartiallyWrappedPaths bool) error {
|
||||
existingSealsLen := 0
|
||||
numConfiguredSeals := len(sgi.Seals)
|
||||
configuredSealNameAndType := sealNameAndTypeAsStr(sgi.Seals)
|
||||
|
||||
// If no previous generation info exists, make sure we perform the initial migration/setup
|
||||
// check for enabled configured seals to allow an old style seal migration configuration
|
||||
if existingSgi == nil {
|
||||
if numConfiguredSeals > 1 {
|
||||
return fmt.Errorf("Initializing a cluster or enabling multi-seal on an existing "+
|
||||
"cluster must occur with a single seal before adding additional seals\n"+
|
||||
"Configured seals: %v", configuredSealNameAndType)
|
||||
}
|
||||
|
||||
// No point in comparing anything more as we don't have any information around the
|
||||
// existing seal if any actually existed
|
||||
return nil
|
||||
}
|
||||
|
||||
// Validate that we're in a safe spot with respect to disabling multiseal
|
||||
if existingSgi.Enabled && !sgi.Enabled {
|
||||
if len(existingSgi.Seals) > 1 {
|
||||
return fmt.Errorf("multi-seal is disabled but previous configuration had multiple seals. re-enable and migrate to a single seal before disabling multi-seal")
|
||||
} else if !existingSgi.IsRewrapped() {
|
||||
return fmt.Errorf("multi-seal is disabled but previous storage was not fully re-wrapped, re-enable multi-seal and allow rewrapping to complete before disabling multi-seal")
|
||||
}
|
||||
}
|
||||
|
||||
existingSealNameAndType := sealNameAndTypeAsStr(existingSgi.Seals)
|
||||
previousShamirConfigured := false
|
||||
|
||||
if sgi.Generation == existingSgi.Generation {
|
||||
if !haveMatchingSeals(sgi.Seals, existingSgi.Seals) {
|
||||
return fmt.Errorf("existing seal generation is the same, but the configured seals are different\n"+
|
||||
"Existing seals: %v\n"+
|
||||
"Configured seals: %v", existingSealNameAndType, configuredSealNameAndType)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
existingSealsLen = len(existingSgi.Seals)
|
||||
for _, sealKmsConfig := range existingSgi.Seals {
|
||||
if sealKmsConfig.Type == wrapping.WrapperTypeShamir.String() {
|
||||
previousShamirConfigured = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !previousShamirConfigured && (!existingSgi.IsRewrapped() || hasPartiallyWrappedPaths) && os.Getenv("VAULT_SEAL_REWRAP_SAFETY") != "disable" {
|
||||
return errors.New("cannot make seal config changes while seal re-wrap is in progress, please revert any seal configuration changes")
|
||||
}
|
||||
|
||||
numSealsToAdd := 0
|
||||
// With a previously configured shamir seal, we are either going from [shamir]->[auto]
|
||||
// or [shamir]->[another shamir] (since we do not allow multiple shamir
|
||||
// seals, and, mixed shamir and auto seals). Also, we do not allow shamir seals to
|
||||
// be set disabled, so, the number of seals to add is always going to be the length
|
||||
// of new seal configs.
|
||||
if previousShamirConfigured {
|
||||
numSealsToAdd = numConfiguredSeals
|
||||
} else {
|
||||
numSealsToAdd = numConfiguredSeals - existingSealsLen
|
||||
}
|
||||
|
||||
numSealsToDelete := existingSealsLen - numConfiguredSeals
|
||||
switch {
|
||||
case numSealsToAdd > 1:
|
||||
return fmt.Errorf("cannot add more than one seal\n"+
|
||||
"Existing seals: %v\n"+
|
||||
"Configured seals: %v", existingSealNameAndType, configuredSealNameAndType)
|
||||
|
||||
case numSealsToDelete > 1:
|
||||
return fmt.Errorf("cannot delete more than one seal\n"+
|
||||
"Existing seals: %v\n"+
|
||||
"Configured seals: %v", existingSealNameAndType, configuredSealNameAndType)
|
||||
|
||||
case !previousShamirConfigured && existingSgi != nil && !haveCommonSeal(existingSgi.Seals, sgi.Seals):
|
||||
// With a previously configured shamir seal, we are either going from [shamir]->[auto] or [shamir]->[another shamir],
|
||||
// in which case we cannot have a common seal because shamir seals cannot be set to disabled, they can only be deleted.
|
||||
return fmt.Errorf("must have at least one seal in common with the old generation\n"+
|
||||
"Existing seals: %v\n"+
|
||||
"Configured seals: %v", existingSealNameAndType, configuredSealNameAndType)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func sealNameAndTypeAsStr(seals []*configutil.KMS) string {
|
||||
info := []string{}
|
||||
for _, seal := range seals {
|
||||
info = append(info, fmt.Sprintf("Name: %s Type: %s", seal.Name, seal.Type))
|
||||
}
|
||||
return fmt.Sprintf("[%s]", strings.Join(info, ", "))
|
||||
}
|
||||
|
||||
// haveMatchingSeals verifies that we have the corresponding matching seals by name and type, config and other
|
||||
// properties are ignored in the comparison
|
||||
func haveMatchingSeals(existingSealKmsConfigs, newSealKmsConfigs []*configutil.KMS) bool {
|
||||
if len(existingSealKmsConfigs) != len(newSealKmsConfigs) {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, existingSealKmsConfig := range existingSealKmsConfigs {
|
||||
found := false
|
||||
for _, newSealKmsConfig := range newSealKmsConfigs {
|
||||
if cmp.Equal(existingSealKmsConfig, newSealKmsConfig, compareKMSConfigByNameAndType()) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// haveCommonSeal verifies that we have at least one matching seal across
|
||||
// the inputs by name and type, config and other properties are ignored in
|
||||
// the comparison
|
||||
func haveCommonSeal(existingSealKmsConfigs, newSealKmsConfigs []*configutil.KMS) bool {
|
||||
for _, existingSealKmsConfig := range existingSealKmsConfigs {
|
||||
for _, newSealKmsConfig := range newSealKmsConfigs {
|
||||
// Technically we might be matching the "wrong" seal if the old seal was renamed to
|
||||
// "transit-disabled" and we have a new seal named transit. There isn't any way for
|
||||
// us to properly distinguish between them
|
||||
if cmp.Equal(existingSealKmsConfig, newSealKmsConfig, compareKMSConfigByNameAndType()) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We might have renamed a disabled seal that was previously used so attempt to match by
|
||||
// removing the "-disabled" suffix
|
||||
for _, seal := range findRenamedDisabledSeals(newSealKmsConfigs) {
|
||||
clonedSeal := seal.Clone()
|
||||
clonedSeal.Name = strings.TrimSuffix(clonedSeal.Name, configutil.KmsRenameDisabledSuffix)
|
||||
|
||||
for _, existingSealKmsConfig := range existingSealKmsConfigs {
|
||||
if cmp.Equal(existingSealKmsConfig, clonedSeal, compareKMSConfigByNameAndType()) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func findRenamedDisabledSeals(configs []*configutil.KMS) []*configutil.KMS {
|
||||
disabledSeals := []*configutil.KMS{}
|
||||
for _, seal := range configs {
|
||||
if seal.Disabled && strings.HasSuffix(seal.Name, configutil.KmsRenameDisabledSuffix) {
|
||||
disabledSeals = append(disabledSeals, seal)
|
||||
}
|
||||
}
|
||||
return disabledSeals
|
||||
}
|
||||
|
||||
func compareKMSConfigByNameAndType() cmp.Option {
|
||||
// We only match based on name and type to avoid configuration changes such
|
||||
// as a Vault token change in the config map from eliminating the match and
|
||||
// preventing startup on a matching seal.
|
||||
return cmp.Comparer(func(a, b *configutil.KMS) bool {
|
||||
return a.Name == b.Name && a.Type == b.Type
|
||||
})
|
||||
}
|
||||
|
||||
// SetRewrapped updates the SealGenerationInfo's rewrapped status to the provided value.
|
||||
func (sgi *SealGenerationInfo) SetRewrapped(value bool) {
|
||||
sgi.rewrapped.Store(value)
|
||||
|
||||
10
vault/seal/seal_generation_validation_ce.go
Normal file
10
vault/seal/seal_generation_validation_ce.go
Normal file
@ -0,0 +1,10 @@
|
||||
// Copyright IBM Corp. 2016, 2025
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
//go:build !enterprise
|
||||
|
||||
package seal
|
||||
|
||||
// ValidateMultiSealGenerationInfo is the non-enterprise (!enterprise build
// tag) stub of the multi-seal generation validation: in this build the check
// is a no-op and always reports success. The enterprise build presumably
// supplies the real implementation — confirm against the ent sources.
func ValidateMultiSealGenerationInfo(_ bool, _, _ *SealGenerationInfo, _ bool) error {
	return nil
}
|
||||
16
vault/seal_util_ce.go
Normal file
16
vault/seal_util_ce.go
Normal file
@ -0,0 +1,16 @@
|
||||
// Copyright IBM Corp. 2016, 2025
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
//go:build !enterprise
|
||||
|
||||
package vault
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/hashicorp/vault/sdk/physical"
|
||||
)
|
||||
|
||||
// HasPartiallyWrappedPaths is the non-enterprise (!enterprise build tag)
// stub: in this build it never reports partially wrapped storage entries and
// never errors. The enterprise build presumably supplies the real storage
// scan — confirm against the ent sources.
func HasPartiallyWrappedPaths(_ context.Context, _ physical.Backend) (bool, error) {
	return false, nil
}
|
||||
Loading…
x
Reference in New Issue
Block a user