From a342dcbb29d65fef10eaa18af99d0fe1b4325b3f Mon Sep 17 00:00:00 2001
From: Jeff Mitchell
Date: Thu, 20 Jun 2019 20:55:10 -0400
Subject: [PATCH] Sync

---
 command/base_predict_test.go |  1 +
 physical/raft/raft.go        | 21 +++++++++++++++++++++
 vault/core.go                | 24 +++++++++++++-----------
 vault/ha.go                  |  7 ++++++-
 vault/mount.go               |  4 ++--
 vault/policy_store.go        |  2 +-
 vault/raft.go                |  8 ++++----
 7 files changed, 48 insertions(+), 19 deletions(-)

diff --git a/command/base_predict_test.go b/command/base_predict_test.go
index 9353768a7b..f677ad4aed 100644
--- a/command/base_predict_test.go
+++ b/command/base_predict_test.go
@@ -340,6 +340,7 @@ func TestPredict_Plugins(t *testing.T) {
             "hana-database-plugin",
             "influxdb-database-plugin",
             "jwt",
+            "kmip",
             "kubernetes",
             "kv",
             "ldap",
diff --git a/physical/raft/raft.go b/physical/raft/raft.go
index 6a58a25ed6..b47d3b5245 100644
--- a/physical/raft/raft.go
+++ b/physical/raft/raft.go
@@ -832,6 +832,27 @@ func (l *RaftLock) monitorLeadership(stopCh <-chan struct{}) <-chan struct{} {
 // Lock blocks until we become leader or are shutdown. It returns a channel that
 // is closed when we detect a loss of leadership.
 func (l *RaftLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
+    // Check to see if we are already leader.
+    l.b.l.RLock()
+    if l.b.raft.State() == raft.Leader {
+        err := l.b.applyLog(context.Background(), &LogData{
+            Operations: []*LogOperation{
+                &LogOperation{
+                    OpType: putOp,
+                    Key:    l.key,
+                    Value:  l.value,
+                },
+            },
+        })
+        l.b.l.RUnlock()
+        if err != nil {
+            return nil, err
+        }
+
+        return l.monitorLeadership(stopCh), nil
+    }
+    l.b.l.RUnlock()
+
     for {
         select {
         case isLeader := <-l.b.raftNotifyCh:
diff --git a/vault/core.go b/vault/core.go
index 2c402f0df5..fe594b0609 100644
--- a/vault/core.go
+++ b/vault/core.go
@@ -1420,10 +1420,10 @@ func (c *Core) UIHeaders() (http.Header, error) {
 // sealInternal is an internal method used to seal the vault. It does not do
 // any authorization checking.
 func (c *Core) sealInternal() error {
-    return c.sealInternalWithOptions(true, false)
+    return c.sealInternalWithOptions(true, false, true)
 }
 
-func (c *Core) sealInternalWithOptions(grabStateLock, keepHALock bool) error {
+func (c *Core) sealInternalWithOptions(grabStateLock, keepHALock, shutdownRaft bool) error {
     // Mark sealed, and if already marked return
     if swapped := atomic.CompareAndSwapUint32(c.sealed, 0, 1); !swapped {
         return nil
@@ -1503,15 +1503,17 @@ func (c *Core) sealInternalWithOptions(grabStateLock, keepHALock bool) error {
     }
 
     // If the storage backend needs to be sealed
-    if raftStorage, ok := c.underlyingPhysical.(*raft.RaftBackend); ok {
-        if err := raftStorage.TeardownCluster(c.clusterListener); err != nil {
-            c.logger.Error("error stopping storage cluster", "error", err)
-            return err
+    if shutdownRaft {
+        if raftStorage, ok := c.underlyingPhysical.(*raft.RaftBackend); ok {
+            if err := raftStorage.TeardownCluster(c.clusterListener); err != nil {
+                c.logger.Error("error stopping storage cluster", "error", err)
+                return err
+            }
         }
-    }
 
-    // Stop the cluster listener
-    c.stopClusterListener()
+        // Stop the cluster listener
+        c.stopClusterListener()
+    }
 
     c.logger.Debug("sealing barrier")
     if err := c.barrier.Seal(); err != nil {
@@ -1702,14 +1704,14 @@ func (c *Core) preSeal() error {
 
     c.stopPeriodicRaftTLSRotate()
 
+    c.stopForwarding()
+
     c.clusterParamsLock.Lock()
     if err := stopReplication(c); err != nil {
         result = multierror.Append(result, errwrap.Wrapf("error stopping replication: {{err}}", err))
     }
     c.clusterParamsLock.Unlock()
 
-    c.stopForwarding()
-
     if err := c.teardownAudits(); err != nil {
         result = multierror.Append(result, errwrap.Wrapf("error tearing down audits: {{err}}", err))
     }
diff --git a/vault/ha.go b/vault/ha.go
index d3b94cd3c5..73d0d94c1f 100644
--- a/vault/ha.go
+++ b/vault/ha.go
@@ -15,6 +15,7 @@ import (
     multierror "github.com/hashicorp/go-multierror"
     uuid "github.com/hashicorp/go-uuid"
     "github.com/hashicorp/vault/helper/namespace"
+    "github.com/hashicorp/vault/physical/raft"
     "github.com/hashicorp/vault/sdk/helper/certutil"
     "github.com/hashicorp/vault/sdk/helper/consts"
     "github.com/hashicorp/vault/sdk/helper/jsonutil"
@@ -678,6 +679,7 @@ func (c *Core) periodicLeaderRefresh(newLeaderCh chan func(), stopCh chan struct
 // periodicCheckKeyUpgrade is used to watch for key rotation events as a standby
 func (c *Core) periodicCheckKeyUpgrades(ctx context.Context, stopCh chan struct{}) {
     opCount := new(int32)
+    _, isRaft := c.underlyingPhysical.(*raft.RaftBackend)
     for {
         select {
         case <-time.After(keyRotateCheckInterval):
@@ -706,7 +708,10 @@ func (c *Core) periodicCheckKeyUpgrades(ctx context.Context, stopCh chan struct{
                 entry, _ := c.barrier.Get(ctx, poisonPillPath)
                 if entry != nil && len(entry.Value) > 0 {
                     c.logger.Warn("encryption keys have changed out from underneath us (possibly due to replication enabling), must be unsealed again")
-                    go c.Shutdown()
+                    // If we are using raft storage we do not want to shut down
+                    // raft during replication secondary enablement. This will
+                    // allow us to keep making progress on the raft log.
+                    go c.sealInternalWithOptions(true, false, !isRaft)
                     atomic.AddInt32(lopCount, -1)
                     return
                 }
diff --git a/vault/mount.go b/vault/mount.go
index f27d1a2330..052c4da892 100644
--- a/vault/mount.go
+++ b/vault/mount.go
@@ -483,8 +483,6 @@ func (c *Core) mountInternal(ctx context.Context, entry *MountEntry, updateStora
 
     addPathCheckers(c, entry, backend, viewPath)
 
-    addLicenseCallback(c, backend)
-
     c.setCoreBackend(entry, backend, view)
 
     // If the mount is filtered or we are on a DR secondary we don't want to
@@ -1218,6 +1216,8 @@ func (c *Core) newLogicalBackend(ctx context.Context, entry *MountEntry, sysView
     if b == nil {
         return nil, fmt.Errorf("nil backend of type %q returned from factory", t)
     }
+    addLicenseCallback(c, b)
+
     return b, nil
 }
 
diff --git a/vault/policy_store.go b/vault/policy_store.go
index 8fd2c49ea9..529218bc16 100644
--- a/vault/policy_store.go
+++ b/vault/policy_store.go
@@ -261,7 +261,7 @@ func (c *Core) setupPolicyStore(ctx context.Context) error {
         return err
     }
 
-    if c.ReplicationState().HasState(consts.ReplicationPerformanceSecondary) {
+    if c.ReplicationState().HasState(consts.ReplicationPerformanceSecondary | consts.ReplicationDRSecondary) {
         // Policies will sync from the primary
         return nil
     }
diff --git a/vault/raft.go b/vault/raft.go
index 719b2a0021..8c15cafc80 100644
--- a/vault/raft.go
+++ b/vault/raft.go
@@ -402,7 +402,7 @@ func (c *Core) raftSnapshotRestoreCallback(grabLock bool) func() error {
 
             // Seal ourselves
             c.logger.Info("failed to perform key upgrades, sealing", "error", err)
-            c.sealInternalWithOptions(false, false)
+            c.sealInternalWithOptions(false, false, true)
             return err
         default:
             // If we are using an auto-unseal we can try to use the seal to
@@ -412,17 +412,17 @@ func (c *Core) raftSnapshotRestoreCallback(grabLock bool) func() error {
             keys, err := c.seal.GetStoredKeys(ctx)
             if err != nil {
                 c.logger.Error("raft snapshot restore failed to get stored keys", "error", err)
-                c.sealInternalWithOptions(false, false)
+                c.sealInternalWithOptions(false, false, true)
                 return err
             }
             if err := c.barrier.Seal(); err != nil {
                 c.logger.Error("raft snapshot restore failed to seal barrier", "error", err)
-                c.sealInternalWithOptions(false, false)
+                c.sealInternalWithOptions(false, false, true)
                 return err
             }
             if err := c.barrier.Unseal(ctx, keys[0]); err != nil {
                 c.logger.Error("raft snapshot restore failed to unseal barrier", "error", err)
-                c.sealInternalWithOptions(false, false)
+                c.sealInternalWithOptions(false, false, true)
                 return err
             }
             c.logger.Info("done reloading master key using auto seal")