Merge remote-tracking branch 'remotes/from/ce/main'

This commit is contained in:
hc-github-team-secure-vault-core 2026-04-29 18:26:05 +00:00
commit 7fec40165a
6 changed files with 392 additions and 80 deletions

9
changelog/_14350.txt Normal file
View File

@ -0,0 +1,9 @@
```release-note:improvement
sdk: Expand support for docker test cluster options like seals, kms libraries, and entropy augmentation. DockerClusterNode.UpdateConfig now takes a full set of cluster options instead of just node config.
```
```release-note:improvement
core (Enterprise): Sanitized config now shows kms_library config.
```
```release-note:bug
core (Enterprise): Fix parsing of kms_library and entropy config in JSON format.
```

View File

@ -330,11 +330,13 @@ func testLoadConfigFile_json2(t *testing.T, entropy *configutil.Entropy) {
func testParseEntropy(t *testing.T, oss bool) {
tests := []struct {
name string
inConfig string
outErr error
outEntropy configutil.Entropy
}{
{
name: "good",
inConfig: `entropy "seal" {
mode = "augmentation"
}`,
@ -342,18 +344,21 @@ func testParseEntropy(t *testing.T, oss bool) {
outEntropy: configutil.Entropy{Mode: configutil.EntropyAugmentation},
},
{
name: "bad mode",
inConfig: `entropy "seal" {
mode = "a_mode_that_is_not_supported"
}`,
outErr: fmt.Errorf("the specified entropy mode %q is not supported", "a_mode_that_is_not_supported"),
},
{
name: "bad device",
inConfig: `entropy "device_that_is_not_supported" {
mode = "augmentation"
}`,
outErr: fmt.Errorf("only the %q type of external entropy is supported", "seal"),
},
{
name: "duplicate section",
inConfig: `entropy "seal" {
mode = "augmentation"
}
@ -362,6 +367,15 @@ func testParseEntropy(t *testing.T, oss bool) {
}`,
outErr: fmt.Errorf("only one %q block is permitted", "entropy"),
},
{
name: "json",
inConfig: `{
"entropy": {
"seal": {"mode": "augmentation"}
}`,
outErr: nil,
outEntropy: configutil.Entropy{Mode: configutil.EntropyAugmentation},
},
}
config := Config{
@ -369,25 +383,27 @@ func testParseEntropy(t *testing.T, oss bool) {
}
for _, test := range tests {
obj, _ := hcl.Parse(strings.TrimSpace(test.inConfig))
list, _ := obj.Node.(*ast.ObjectList)
objList := list.Filter("entropy")
err := configutil.ParseEntropy(config.SharedConfig, objList, "entropy")
// validate the error, both should be nil or have the same Error()
switch {
case oss:
if config.Entropy != nil {
t.Fatalf("parsing Entropy should not be possible in oss but got a non-nil config.Entropy: %#v", config.Entropy)
}
case err != nil && test.outErr != nil:
if err.Error() != test.outErr.Error() {
t.Run(test.name, func(t *testing.T) {
obj, _ := hcl.Parse(strings.TrimSpace(test.inConfig))
list, _ := obj.Node.(*ast.ObjectList)
objList := list.Filter("entropy")
err := configutil.ParseEntropy(config.SharedConfig, objList, "entropy")
// validate the error, both should be nil or have the same Error()
switch {
case oss:
if config.Entropy != nil {
t.Fatalf("parsing Entropy should not be possible in oss but got a non-nil config.Entropy: %#v", config.Entropy)
}
case err != nil && test.outErr != nil:
if err.Error() != test.outErr.Error() {
t.Fatalf("error mismatch: expected %#v got %#v", err, test.outErr)
}
case err != test.outErr:
t.Fatalf("error mismatch: expected %#v got %#v", err, test.outErr)
case err == nil && config.Entropy != nil && *config.Entropy != test.outEntropy:
t.Fatalf("entropy config mismatch: expected %#v got %#v", test.outEntropy, *config.Entropy)
}
case err != test.outErr:
t.Fatalf("error mismatch: expected %#v got %#v", err, test.outErr)
case err == nil && config.Entropy != nil && *config.Entropy != test.outEntropy:
t.Fatalf("entropy config mismatch: expected %#v got %#v", test.outEntropy, *config.Entropy)
}
})
}
}

View File

@ -118,6 +118,9 @@ func (dc *DockerCluster) GetRecoveryKeys() [][]byte {
}
// GetBarrierOrRecoveryKeys returns the cluster's recovery keys when any
// exist, and falls back to the barrier keys otherwise.
func (dc *DockerCluster) GetBarrierOrRecoveryKeys() [][]byte {
	if r := dc.GetRecoveryKeys(); len(r) > 0 {
		return r
	}
	return dc.GetBarrierKeys()
}
@ -173,16 +176,23 @@ func (n *DockerClusterNode) Name() string {
return n.Cluster.ClusterName + "-" + n.NodeID
}
func (dc *DockerCluster) setupNode0(ctx context.Context) error {
func (dc *DockerCluster) setupNode0(ctx context.Context, hasSealConfig bool) error {
client := dc.ClusterNodes[0].client
var resp *api.InitResponse
var err error
req := &api.InitRequest{
SecretShares: 3,
SecretThreshold: 3,
}
if hasSealConfig {
req = &api.InitRequest{
RecoveryShares: 3,
RecoveryThreshold: 3,
}
}
for ctx.Err() == nil {
resp, err = client.Sys().Init(&api.InitRequest{
SecretShares: 3,
SecretThreshold: 3,
})
resp, err = client.Sys().Init(req)
if err == nil && resp != nil {
break
}
@ -215,7 +225,9 @@ func (dc *DockerCluster) setupNode0(ctx context.Context) error {
client.SetToken(dc.rootToken)
dc.ClusterNodes[0].client = client
err = testcluster.UnsealNode(ctx, dc, 0)
if !hasSealConfig {
err = testcluster.UnsealNode(ctx, dc, 0)
}
if err != nil {
return err
}
@ -424,7 +436,7 @@ func NewTestDockerClusterWithErr(t *testing.T, opts *DockerClusterOptions) (*Doc
opts.ClusterName = strings.ReplaceAll(t.Name(), "/", "-")
}
if opts.Logger == nil {
opts.Logger = logging.NewVaultLogger(log.Trace).Named(t.Name())
opts.Logger = logging.NewVaultLogger(log.Trace).Named(opts.ClusterName)
}
if opts.NetworkName == "" {
opts.NetworkName = os.Getenv("TEST_DOCKER_NETWORK_NAME")
@ -467,6 +479,9 @@ func NewDockerCluster(ctx context.Context, opts *DockerClusterOptions) (*DockerC
storage: opts.Storage,
disableMlock: opts.DisableMlock,
disableTLS: opts.DisableTLS,
barrierKeys: opts.BarrierKeys,
recoveryKeys: opts.RecoveryKeys,
rootToken: opts.RootToken,
}
if err := dc.setupDockerCluster(ctx, opts); err != nil {
@ -622,7 +637,9 @@ func (n *DockerClusterNode) Cleanup() {
// Stop kills the container of the node
func (n *DockerClusterNode) Stop() {
n.Logger.Trace("stopping node")
n.cleanupContainer()
n.Logger.Trace("node stopped")
}
func (n *DockerClusterNode) cleanup() error {
@ -655,17 +672,7 @@ func (n *DockerClusterNode) createTLSDisabledListenerConfig() map[string]interfa
}}
}
func (n *DockerClusterNode) Start(ctx context.Context, opts *DockerClusterOptions) error {
if n.DataVolumeName == "" {
vol, err := n.DockerAPI.VolumeCreate(ctx, docker.VolumeCreateOptions{})
if err != nil {
return err
}
n.DataVolumeName = vol.Volume.Name
n.cleanupVolume = func() {
_, _ = n.DockerAPI.VolumeRemove(ctx, vol.Volume.Name, docker.VolumeRemoveOptions{})
}
}
func (n *DockerClusterNode) writeConfig(opts *DockerClusterOptions) ([]string, error) {
vaultCfg := map[string]interface{}{}
var listenerConfig []map[string]interface{}
@ -698,7 +705,7 @@ func (n *DockerClusterNode) Start(ctx context.Context, opts *DockerClusterOption
for _, suite := range config.TLSCipherSuites {
name, err := tlsutil.GetCipherName(suite)
if err != nil {
return fmt.Errorf("bad TLSCipherSuite %d on listener %d: %w", suite, i, err)
return nil, fmt.Errorf("bad TLSCipherSuite %d on listener %d: %w", suite, i, err)
}
suites = append(suites, name)
}
@ -707,7 +714,7 @@ func (n *DockerClusterNode) Start(ctx context.Context, opts *DockerClusterOption
listenerConfig = append(listenerConfig, cfg)
portStr := fmt.Sprintf("%d/tcp", config.Port)
if strutil.StrListContains(ports, portStr) {
return fmt.Errorf("duplicate port %d specified", config.Port)
return nil, fmt.Errorf("duplicate port %d specified", config.Port)
}
ports = append(ports, portStr)
}
@ -735,12 +742,45 @@ func (n *DockerClusterNode) Start(ctx context.Context, opts *DockerClusterOption
storageOpts = opts.Storage.Opts()
}
if opts != nil && opts.VaultNodeConfig != nil {
if opts.VaultNodeConfig != nil {
for k, v := range opts.VaultNodeConfig.StorageOptions {
if _, ok := storageOpts[k].(string); !ok {
storageOpts[k] = v
}
}
if len(opts.VaultNodeConfig.Seal) > 0 {
var seals []map[string]any
for _, seal := range opts.VaultNodeConfig.Seal {
seals = append(seals, map[string]any{
seal.Type: seal.Config,
})
}
vaultCfg["seal"] = seals
}
if len(opts.VaultNodeConfig.KMSLibrary) > 0 {
libs := []map[string][]map[string]any{}
for _, kmsl := range opts.VaultNodeConfig.KMSLibrary {
libs = append(libs, map[string][]map[string]any{
kmsl.Type: {
{
"name": kmsl.Name,
"library": kmsl.Library,
},
},
})
}
vaultCfg["kms_library"] = libs
}
if opts.VaultNodeConfig.Entropy != nil {
vaultCfg["entropy"] = []map[string]map[string]any{
{
"seal": map[string]any{
"seal_name": opts.VaultNodeConfig.Entropy.SealName,
"mode": "augmentation",
},
},
}
}
}
vaultCfg["storage"] = map[string]interface{}{
storageType: storageOpts,
@ -759,43 +799,66 @@ func (n *DockerClusterNode) Start(ctx context.Context, opts *DockerClusterOption
vaultCfg["administrative_namespace_path"] = opts.AdministrativeNamespacePath
systemJSON, err := json.Marshal(vaultCfg)
if err != nil {
return err
}
err = os.WriteFile(filepath.Join(n.WorkDir, "system.json"), systemJSON, 0o644)
if err != nil {
return err
}
if opts.VaultNodeConfig != nil {
localCfg := *opts.VaultNodeConfig
if opts.VaultNodeConfig.LicensePath != "" {
b, err := os.ReadFile(opts.VaultNodeConfig.LicensePath)
if err != nil || len(b) == 0 {
return fmt.Errorf("unable to read LicensePath at %q: %w", opts.VaultNodeConfig.LicensePath, err)
return nil, fmt.Errorf("unable to read LicensePath at %q: %w", opts.VaultNodeConfig.LicensePath, err)
}
localCfg.LicensePath = "/vault/config/license"
dest := filepath.Join(n.WorkDir, "license")
err = os.WriteFile(dest, b, 0o644)
if err != nil {
return fmt.Errorf("error writing license to %q: %w", dest, err)
return nil, fmt.Errorf("error writing license to %q: %w", dest, err)
}
}
localJSON, err := json.Marshal(localCfg)
if err != nil {
return nil, err
}
var conf map[string]interface{}
if err := json.Unmarshal(localJSON, &conf); err != nil {
return nil, err
}
for k, v := range conf {
vaultCfg[k] = v
}
}
}
userJSON, err := json.Marshal(localCfg)
configJSON, err := json.Marshal(vaultCfg)
if err != nil {
return nil, err
}
err = os.WriteFile(filepath.Join(n.WorkDir, "config.json"), configJSON, 0o644)
if err != nil {
return nil, err
}
n.Logger.Trace("node config", "config.json", string(configJSON))
return ports, nil
}
func (n *DockerClusterNode) Start(ctx context.Context, opts *DockerClusterOptions) error {
if n.DataVolumeName == "" {
vol, err := n.DockerAPI.VolumeCreate(ctx, docker.VolumeCreateOptions{})
if err != nil {
return err
}
err = os.WriteFile(filepath.Join(n.WorkDir, "user.json"), userJSON, 0o644)
if err != nil {
return err
n.DataVolumeName = vol.Volume.Name
n.Logger.Trace("created volume", "name", n.DataVolumeName)
n.cleanupVolume = func() {
n.Logger.Trace("cleanup volume", "name", n.DataVolumeName)
_, _ = n.DockerAPI.VolumeRemove(ctx, vol.Volume.Name, docker.VolumeRemoveOptions{})
}
}
ports, err := n.writeConfig(opts)
if err != nil {
return err
}
if !opts.DisableTLS {
// Create a temporary cert so vault will start up
err = n.setupCert("127.0.0.1")
err := n.setupCert("127.0.0.1")
if err != nil {
return err
}
@ -851,10 +914,7 @@ func (n *DockerClusterNode) Start(ctx context.Context, opts *DockerClusterOption
if opts.DisableTLS {
postStartFunc = func(containerID string, realIP string) error {
// If we signal Vault before it installs its sighup handler, it'll die.
wg.Wait()
n.Logger.Trace("running poststart", "containerID", containerID, "IP", realIP)
return n.runner.RefreshFiles(ctx, containerID)
return nil
}
}
@ -904,6 +964,11 @@ func (n *DockerClusterNode) Start(ctx context.Context, opts *DockerClusterOption
return err
}
}
protocol := "https"
if opts.DisableTLS {
protocol = "http"
}
svc, _, err := r.StartNewService(ctx, false, false, func(ctx context.Context, host string, port int) (dockhelper.ServiceConfig, error) {
config, err := n.apiConfig()
if err != nil {
@ -1033,21 +1098,14 @@ func (n *DockerClusterNode) Signal(ctx context.Context, signal string) error {
return err
}
func (n *DockerClusterNode) UpdateConfig(ctx context.Context, config *testcluster.VaultNodeConfig) error {
// Marshal the config to JSON
configJSON, err := json.Marshal(config)
func (n *DockerClusterNode) UpdateConfig(ctx context.Context, opts *DockerClusterOptions) error {
_, err := n.writeConfig(opts)
if err != nil {
return fmt.Errorf("failed to marshal config: %w", err)
}
// Write the config to the work directory
configPath := filepath.Join(n.WorkDir, "user.json")
if err := os.WriteFile(configPath, configJSON, 0o644); err != nil {
return fmt.Errorf("failed to write config file: %w", err)
return err
}
// Copy the updated config to the container
if err := dockhelper.CopyToContainer(ctx, n.DockerAPI, n.Container.ID, configPath, "/vault/config/user.json"); err != nil {
if err := dockhelper.CopyToContainer(ctx, n.DockerAPI, n.Container.ID, n.WorkDir, "/vault/config"); err != nil {
return fmt.Errorf("failed to copy config to container: %w", err)
}
@ -1266,16 +1324,33 @@ func (dc *DockerCluster) setupDockerCluster(ctx context.Context, opts *DockerClu
continue
}
if i == 0 {
if err := dc.setupNode0(ctx); err != nil {
hasSealConfig := opts.VaultNodeConfig != nil && len(opts.VaultNodeConfig.Seal) > 0
if err := dc.setupNode0(ctx, hasSealConfig); err != nil {
return err
}
} else {
if err := dc.joinNode(ctx, i, 0); err != nil {
if err := dc.joinNode(ctx, i, 0, false); err != nil {
return err
}
}
}
if opts.SkipInit && !opts.SkipUnsealWaitActiveNode {
if len(opts.VaultNodeConfig.Seal) == 0 {
if err := testcluster.UnsealAllNodes(ctx, dc); err != nil {
return err
}
}
if _, err := testcluster.WaitForActiveNode(ctx, dc); err != nil {
return err
}
status, err := dc.ClusterNodes[0].APIClient().Sys().SealStatusWithContext(ctx)
if err != nil {
return err
}
dc.ID = status.ClusterID
}
return nil
}
@ -1288,7 +1363,7 @@ func (dc *DockerCluster) AddNode(ctx context.Context, opts *DockerClusterOptions
return err
}
return dc.joinNode(ctx, len(dc.ClusterNodes)-1, leaderIdx)
return dc.joinNode(ctx, len(dc.ClusterNodes)-1, leaderIdx, len(opts.VaultNodeConfig.Seal) > 0)
}
const MaxContainerNameLen = 63
@ -1378,7 +1453,7 @@ func copyDirContents(to string, from string) error {
return nil
}
func (dc *DockerCluster) joinNode(ctx context.Context, nodeIdx int, leaderIdx int) error {
func (dc *DockerCluster) joinNode(ctx context.Context, nodeIdx int, leaderIdx int, autoseal bool) error {
if dc.storage != nil && dc.storage.Type() != "raft" {
// Storage is not raft so nothing to do but unseal.
return testcluster.UnsealNode(ctx, dc, nodeIdx)
@ -1409,6 +1484,9 @@ func (dc *DockerCluster) joinNode(ctx context.Context, nodeIdx int, leaderIdx in
return fmt.Errorf("failed to join cluster: %w", err)
}
if autoseal {
return nil
}
return testcluster.UnsealNode(ctx, dc, nodeIdx)
}

View File

@ -45,11 +45,7 @@ type VaultNodeConfig struct {
// DisableMlock bool `hcl:"disable_mlock"`
// Not configurable yet:
// Listeners []*Listener `hcl:"-"`
// Seals []*KMS `hcl:"-"`
// Entropy *Entropy `hcl:"-"`
// Telemetry *Telemetry `hcl:"telemetry"`
// HCPLinkConf *HCPLinkConfig `hcl:"cloud"`
// PidFile string `hcl:"pid_file"`
// ServiceRegistrationType string
// ServiceRegistrationOptions map[string]string
@ -58,6 +54,9 @@ type VaultNodeConfig struct {
AdditionalListeners []VaultNodeListenerConfig `json:"-"`
CustomListenerConfigOpts map[string]interface{} `json:"-"`
AdditionalTCPPorts []int `json:"-"`
Seal []VaultNodeSealConfig `json:"-"`
KMSLibrary []VaultNodeKMSLibrary `json:"-"`
Entropy *VaultNodeEntropy `json:"-"`
DefaultMaxRequestDuration time.Duration `json:"default_max_request_duration"`
LogFormat string `json:"log_format"`
@ -84,6 +83,7 @@ type VaultNodeConfig struct {
LicensePath string `json:"license_path"`
FeatureFlags []string `json:"feature_flags,omitempty"`
EnableUnauthenticatedAccess []string `json:"enable_unauthenticated_access,omitempty"`
EnableMultiSeal bool `json:"enable_multiseal"`
}
type ClusterNode struct {
@ -100,6 +100,10 @@ type ClusterOptions struct {
ClusterName string
KeepStandbysSealed bool
SkipInit bool
SkipUnsealWaitActiveNode bool
BarrierKeys [][]byte
RecoveryKeys [][]byte
RootToken string
CACert []byte
NumCores int
TmpDir string
@ -118,6 +122,21 @@ type VaultNodeListenerConfig struct {
TLSCipherSuites []uint16
}
// VaultNodeSealConfig describes one seal stanza for a test cluster node's
// config file. Type is the seal type (e.g. "transit") and Config holds the
// stanza's key/value options.
type VaultNodeSealConfig struct {
	Type   string
	Config map[string]string
}
// VaultNodeKMSLibrary describes one kms_library stanza for a test cluster
// node's config file: the stanza type, its name, and the library path.
type VaultNodeKMSLibrary struct {
	Type    string
	Name    string
	Library string
}
// VaultNodeEntropy describes an entropy augmentation stanza; SealName is the
// name of the seal used as the entropy source.
type VaultNodeEntropy struct {
	SealName string
}
type CA struct {
CACert *x509.Certificate
CACertBytes []byte

View File

@ -4,17 +4,25 @@
package seal_binary
import (
"bufio"
"context"
"fmt"
"io"
"net/url"
"os"
"path"
"strconv"
"strings"
"sync"
"testing"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/api"
dockhelper "github.com/hashicorp/vault/sdk/helper/docker"
"github.com/hashicorp/vault/sdk/helper/testcluster"
"github.com/hashicorp/vault/sdk/helper/testcluster/docker"
client "github.com/moby/moby/client"
"github.com/stretchr/testify/require"
)
const (
@ -346,3 +354,185 @@ func copyRecoveryModeTriggerToContainer(containerID string, runner *dockhelper.R
}
return nil
}
// dockerOptions builds single-node DockerClusterOptions for the given image
// repo/tag, honoring the VAULT_BINARY env var and pinning the storage
// performance_multiplier so tests behave consistently.
func dockerOptions(t *testing.T, repo, tag string) *docker.DockerClusterOptions {
	o := docker.DefaultOptions(t)
	o.NumCores = 1
	o.VaultBinary = os.Getenv("VAULT_BINARY")
	o.ImageRepo = repo
	o.ImageTag = tag
	// Probably not reliable in CI with multi-node clusters, but we're
	// assuming callers of this func won't change NumCores to be >1.
	o.VaultNodeConfig.StorageOptions = map[string]string{
		"performance_multiplier": "1",
	}
	return o
}
// transitCluster wraps a docker-based Vault cluster that serves as a transit
// seal backend for other test clusters, along with the owning test for
// assertions.
type transitCluster struct {
	cluster *docker.DockerCluster
	t       *testing.T
}
// newTransitCluster starts a hashicorp/vault:latest docker cluster intended
// for use as a transit seal backend, named after the current test.
func newTransitCluster(t *testing.T) *transitCluster {
	o := dockerOptions(t, "hashicorp/vault", "latest")
	// simplify, this way we don't have to deal with ca
	o.DisableTLS = true
	o.ClusterName = strings.ReplaceAll(t.Name()+"-transit", "/", "-")
	c := docker.NewTestDockerCluster(t, o)
	return &transitCluster{
		t:       t,
		cluster: c,
	}
}
// SealWithPriorityAndDisabled builds a transit seal config via Seal, then
// overrides its "priority" and "disabled" stanza options.
func (tc *transitCluster) SealWithPriorityAndDisabled(name string, idx int, disabled bool, priority int) testcluster.VaultNodeSealConfig {
	cfg := tc.Seal(name, idx)
	cfg.Config["priority"] = strconv.Itoa(priority)
	cfg.Config["disabled"] = strconv.FormatBool(disabled)
	return cfg
}
// Seal creates a seal using the given mount name and an idx that identifies a key.
// The mount and key will be created.
func (tc *transitCluster) Seal(name string, idx int) testcluster.VaultNodeSealConfig {
	c := tc.cluster.Nodes()[0].APIClient()

	// Mount the transit engine only if it isn't there already.
	if mount, _ := c.Sys().GetMount(name); mount == nil {
		err := c.Sys().Mount(name, &api.MountInput{Type: "transit"})
		require.NoError(tc.t, err)
	}

	keyName := fmt.Sprintf("transit-seal-%d", idx+1)
	_, err := c.Logical().Write(path.Join(name, "keys", keyName), nil)
	require.NoError(tc.t, err)

	cfg := map[string]string{
		// For another docker container to talk to this cluster they
		// must use the real api address, not the remapped localhost
		// address test code uses.
		"address":    tc.cluster.Nodes()[0].(*docker.DockerClusterNode).RealAPIAddr,
		"token":      tc.cluster.GetRootToken(),
		"mount_path": name,
		"key_name":   keyName,
		"name":       strings.ReplaceAll(name, " ", "_") + "-" + keyName,
		"priority":   "1",
	}
	return testcluster.VaultNodeSealConfig{
		Type:   "transit",
		Config: cfg,
	}
}
type logScanner struct {
wg sync.WaitGroup
l sync.Mutex
ch chan string
pw *io.PipeWriter
stop chan struct{}
}
func newLogScanner(t *testing.T, underlying io.Writer, bufLines int) (*logScanner, io.Writer) {
pr, pw := io.Pipe()
ls := &logScanner{
ch: make(chan string, bufLines),
pw: pw,
stop: make(chan struct{}),
}
ls.wg.Add(1)
go func() {
defer ls.wg.Done()
// bufio.Scanner is perfect here because hclog writes each log entry
// ending with a newline character.
scanner := bufio.NewScanner(pr)
// scanner.Scan() will block until a new line is written to the pipe,
// and it will exit automatically when pw.Close() is called.
for scanner.Scan() {
logLine := scanner.Text()
underlying.Write([]byte(logLine + "\n"))
select {
case <-ls.stop:
return
case ls.ch <- logLine:
}
}
if err := scanner.Err(); err != nil {
t.Fatalf("Scanner error: %v", err)
}
}()
t.Cleanup(func() {
if err := ls.Close(); err != nil {
t.Logf("Error closing scanner: %v", err)
}
})
return ls, pw
}
func (ls *logScanner) Lines() <-chan string {
return ls.ch
}
func (ls *logScanner) Close() error {
ls.l.Lock()
defer ls.l.Unlock()
close(ls.stop)
err := ls.pw.Close()
ls.wg.Wait()
return err
}
type logMatcher struct {
targets map[string]bool
lines <-chan string
done chan struct{}
l sync.RWMutex
}
func newLogMatcher(lines <-chan string, targets []string) *logMatcher {
tmap := make(map[string]bool)
for _, target := range targets {
tmap[target] = false
}
lm := &logMatcher{
targets: tmap,
lines: lines,
done: make(chan struct{}),
}
go func() {
for {
select {
case <-lm.done:
return
case line := <-lines:
for target, ok := range tmap {
if !ok && strings.Contains(line, target) {
lm.l.Lock()
tmap[target] = true
lm.l.Unlock()
}
}
}
}
}()
return lm
}
func (lm *logMatcher) stop() {
close(lm.done)
}
func (lm *logMatcher) missing() []string {
lm.l.RLock()
defer lm.l.RUnlock()
var ret []string
for target, found := range lm.targets {
if !found {
ret = append(ret, target)
}
}
return ret
}

View File

@ -115,7 +115,7 @@ func TestSysRekey_ConfigReload(t *testing.T) {
nodeConfig.EnableUnauthenticatedAccess = []string{"rekey"}
// Update the config and copy it to the container
err := node.UpdateConfig(t.Context(), nodeConfig)
err := node.UpdateConfig(t.Context(), opts)
require.NoError(t, err, "failed to update config")
// Send SIGHUP to reload the configuration
@ -154,7 +154,7 @@ func TestSysRekey_ConfigReload(t *testing.T) {
nodeConfig.EnableUnauthenticatedAccess = nil
// Update the config and copy it to the container
err := node.UpdateConfig(t.Context(), nodeConfig)
err := node.UpdateConfig(t.Context(), opts)
require.NoError(t, err, "failed to update config")
// Send SIGHUP to reload the configuration