// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package raft

import (
	"bytes"
	"context"
	"fmt"
	"os"
	"testing"

	"github.com/golang/protobuf/proto"
	"github.com/hashicorp/go-raftchunking"
	raftchunkingtypes "github.com/hashicorp/go-raftchunking/types"
	"github.com/hashicorp/go-uuid"
	"github.com/hashicorp/raft"
	"github.com/hashicorp/raft-boltdb/v2"
	"github.com/hashicorp/vault/sdk/physical"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// This test chunks encoded data and then performs out-of-order applies of half
// the logs. It then snapshots, restores to a new FSM, and applies the rest.
// The goal is to verify that chunking and snapshotting work as expected.
func TestRaft_Chunking_Lifecycle(t *testing.T) {
	t.Parallel()
	require := require.New(t)
	assert := assert.New(t)

	b, dir := GetRaft(t, true, false)
	defer os.RemoveAll(dir)

	t.Log("applying configuration")

	b.applyConfigSettings(raft.DefaultConfig())

	t.Log("chunking")

	buf := []byte("let's see how this goes, shall we?")
	logData := &LogData{
		Operations: []*LogOperation{
			{
				OpType: putOp,
				Key:    "foobar",
				Value:  buf,
			},
		},
	}
	cmdBytes, err := proto.Marshal(logData)
	require.NoError(err)

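	// Split the marshaled command one byte per chunk; each raft.Log carries its
	// ChunkInfo (op number, sequence number, total chunk count) in Extensions so
	// the chunker can reassemble the original command.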
	var logs []*raft.Log
	for i, b := range cmdBytes {
		// Stage multiple operations so we can test restoring across multiple opnums
		for j := 0; j < 10; j++ {
			chunkInfo := &raftchunkingtypes.ChunkInfo{
				OpNum:       uint64(32 + j),
				SequenceNum: uint32(i),
				NumChunks:   uint32(len(cmdBytes)),
			}
			chunkBytes, err := proto.Marshal(chunkInfo)
			require.NoError(err)

			logs = append(logs, &raft.Log{
				Data:       []byte{b},
				Extensions: chunkBytes,
			})
		}
	}

t.Log("applying half of the logs")
|
|
|
|
// The reason for the skipping is to test out-of-order applies which are
|
|
// theoretically possible. Some of these will actually finish though!
|
|
for i := 0; i < len(logs); i += 2 {
|
|
resp := b.fsm.chunker.Apply(logs[i])
|
|
if resp != nil {
|
|
_, ok := resp.(raftchunking.ChunkingSuccess)
|
|
assert.True(ok)
|
|
}
|
|
}
|
|
|
	t.Log("tearing down cluster")
	require.NoError(b.TeardownCluster(nil))
	require.NoError(b.fsm.getDB().Close())
	require.NoError(b.stableStore.(*raftboltdb.BoltStore).Close())

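	// Reopening the backend below means the remaining chunks are applied against
	// a freshly constructed FSM, so the chunker's in-progress state has to
	// survive the teardown and restore before the rest of the logs land.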
	t.Log("starting new backend")
	backendRaw, err := NewRaftBackend(b.conf, b.logger)
	require.NoError(err)
	b = backendRaw.(*RaftBackend)

	t.Log("applying rest of the logs")

	// Apply the rest of the logs
	var resp interface{}
	for i := 1; i < len(logs); i += 2 {
		resp = b.fsm.chunker.Apply(logs[i])
		if resp != nil {
			_, ok := resp.(raftchunking.ChunkingSuccess)
			assert.True(ok)
		}
	}

	assert.NotNil(resp)
	_, ok := resp.(raftchunking.ChunkingSuccess)
	assert.True(ok)
}

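// TestFSM_Chunking_TermChange verifies that a term change in the middle of a
// chunk sequence prevents the operation from completing, and that the same
// sequence completes once both chunks share a term.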
func TestFSM_Chunking_TermChange(t *testing.T) {
	t.Parallel()
	require := require.New(t)
	assert := assert.New(t)

	b, dir := GetRaft(t, true, false)
	defer os.RemoveAll(dir)

	t.Log("applying configuration")

	b.applyConfigSettings(raft.DefaultConfig())

	t.Log("chunking")

	buf := []byte("let's see how this goes, shall we?")
	logData := &LogData{
		Operations: []*LogOperation{
			{
				OpType: putOp,
				Key:    "foobar",
				Value:  buf,
			},
		},
	}
	cmdBytes, err := proto.Marshal(logData)
	require.NoError(err)

	// Only need two chunks to test this
	chunks := [][]byte{
		cmdBytes[0:2],
		cmdBytes[2:],
	}
	var logs []*raft.Log
	for i, b := range chunks {
		chunkInfo := &raftchunkingtypes.ChunkInfo{
			OpNum:       uint64(32),
			SequenceNum: uint32(i),
			NumChunks:   uint32(len(chunks)),
		}
		chunkBytes, err := proto.Marshal(chunkInfo)
		if err != nil {
			t.Fatal(err)
		}
		logs = append(logs, &raft.Log{
			Term:       uint64(i),
			Data:       b,
			Extensions: chunkBytes,
		})
	}

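	// The two chunks were staged under different terms, so the chunker should
	// treat the sequence as interrupted rather than completing the operation.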
	// We should see nil for both
	for _, log := range logs {
		resp := b.fsm.chunker.Apply(log)
		assert.Nil(resp)
	}

	// Now verify the other baseline, that when the term doesn't change we see
	// non-nil. First make the chunker have a clean state, then set the terms
	// to be the same.
	b.fsm.chunker.RestoreState(nil)
	logs[1].Term = uint64(0)

	// We should see nil only for the first one
	for i, log := range logs {
		resp := b.fsm.chunker.Apply(log)
		if i == 0 {
			assert.Nil(resp)
		}
		if i == 1 {
			assert.NotNil(resp)
			_, ok := resp.(raftchunking.ChunkingSuccess)
			assert.True(ok)
		}
	}
}

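// TestRaft_Chunking_AppliedIndex checks that each chunked Put advances the
// applied index by one log entry per chunk and that the stored values read
// back intact.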
func TestRaft_Chunking_AppliedIndex(t *testing.T) {
	t.Parallel()

	raft, dir := GetRaft(t, true, false)
	defer os.RemoveAll(dir)

	// Lower the size for tests
	raftchunking.ChunkSize = 1024
	val, err := uuid.GenerateRandomBytes(3 * raftchunking.ChunkSize)
	if err != nil {
		t.Fatal(err)
	}

	// Write a value to fastforward the index
	err = raft.Put(context.Background(), &physical.Entry{
		Key:   "key",
		Value: []byte("test"),
	})
	if err != nil {
		t.Fatal(err)
	}

	currentIndex := raft.AppliedIndex()
	// Write some data
	for i := 0; i < 10; i++ {
		err := raft.Put(context.Background(), &physical.Entry{
			Key:   fmt.Sprintf("key-%d", i),
			Value: val,
		})
		if err != nil {
			t.Fatal(err)
		}
	}

	newIndex := raft.AppliedIndex()

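	// val is 3*ChunkSize of random bytes; once wrapped in the encoded log
	// command the payload exceeds three chunks, so each Put is expected to
	// split into four chunk entries.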
	// Each put should generate 4 chunks
	if newIndex-currentIndex != 10*4 {
		t.Fatalf("Did not apply chunks as expected, applied index = %d - %d = %d", newIndex, currentIndex, newIndex-currentIndex)
	}

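	// Read each value back and verify it was reassembled from its chunks intact.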
for i := 0; i < 10; i++ {
|
|
entry, err := raft.Get(context.Background(), fmt.Sprintf("key-%d", i))
|
|
if err != nil {
|
|
t.Fatal(err)
|
|
}
|
|
if !bytes.Equal(entry.Value, val) {
|
|
t.Fatal("value is corrupt")
|
|
}
|
|
}
|
|
}
|