Backport some OSS changes (#10267)

* Backport some OSS changes

* go mod vendor
Brian Kassouf 2020-10-29 16:47:34 -07:00 committed by GitHub
parent ece971b648
commit 3bc7d15e6b
36 changed files with 5350 additions and 64 deletions

View File

@ -362,6 +362,7 @@ func TestPredict_Plugins(t *testing.T) {
"influxdb-database-plugin", "influxdb-database-plugin",
"jwt", "jwt",
"kerberos", "kerberos",
"keymgmt",
"kmip", "kmip",
"kubernetes", "kubernetes",
"kv", "kv",
@ -409,6 +410,14 @@ func TestPredict_Plugins(t *testing.T) {
act := p.plugins()
if !strutil.StrListContains(act, "keymgmt") {
for i, v := range tc.exp {
if v == "keymgmt" {
tc.exp = append(tc.exp[:i], tc.exp[i+1:]...)
break
}
}
}
if !strutil.StrListContains(act, "kmip") { if !strutil.StrListContains(act, "kmip") {
for i, v := range tc.exp { for i, v := range tc.exp {
if v == "kmip" { if v == "kmip" {

View File

@ -270,6 +270,8 @@ func (c *Config) Merge(c2 *Config) *Config {
}
}
result.entConfig = c.entConfig.Merge(c2.entConfig)
return result
}
@ -775,5 +777,10 @@ func (c *Config) Sanitized() map[string]interface{} {
result["service_registration"] = sanitizedServiceRegistration result["service_registration"] = sanitizedServiceRegistration
} }
entConfigResult := c.entConfig.Sanitized()
for k, v := range entConfigResult {
result[k] = v
}
return result
}

View File

@ -142,6 +142,8 @@ func testLoadConfigFile_topLevel(t *testing.T, entropy *configutil.Entropy) {
APIAddr: "top_level_api_addr",
ClusterAddr: "top_level_cluster_addr",
}
addExpectedEntConfig(expected, []string{})
if entropy != nil {
expected.Entropy = entropy
}
@ -226,6 +228,8 @@ func testLoadConfigFile_json2(t *testing.T, entropy *configutil.Entropy) {
DisableSealWrap: true,
DisableSealWrapRaw: true,
}
addExpectedEntConfig(expected, []string{"http"})
if entropy != nil {
expected.Entropy = entropy
}
@ -429,6 +433,9 @@ func testLoadConfigFile(t *testing.T) {
DefaultLeaseTTL: 10 * time.Hour,
DefaultLeaseTTLRaw: "10h",
}
addExpectedEntConfig(expected, []string{})
config.Listeners[0].RawConfig = nil
if diff := deep.Equal(config, expected); diff != nil {
t.Fatal(diff)
@ -506,6 +513,9 @@ func testLoadConfigFile_json(t *testing.T) {
DisableSealWrap: true,
DisableSealWrapRaw: true,
}
addExpectedEntConfig(expected, []string{})
config.Listeners[0].RawConfig = nil
if diff := deep.Equal(config, expected); diff != nil {
t.Fatal(diff)
@ -564,6 +574,9 @@ func testLoadConfigDir(t *testing.T) {
MaxLeaseTTL: 10 * time.Hour,
DefaultLeaseTTL: 10 * time.Hour,
}
addExpectedEntConfig(expected, []string{"http"})
config.Listeners[0].RawConfig = nil
if diff := deep.Equal(config, expected); diff != nil {
t.Fatal(diff)
@ -654,9 +667,12 @@ func testConfig_Sanitized(t *testing.T) {
"stackdriver_project_id": "", "stackdriver_project_id": "",
"stackdriver_debug_logs": false, "stackdriver_debug_logs": false,
"statsd_address": "bar", "statsd_address": "bar",
"statsite_address": ""}, "statsite_address": "",
},
} }
addExpectedEntSanitizedConfig(expected, []string{"http"})
config.Listeners[0].RawConfig = nil
if diff := deep.Equal(sanitizedConfig, expected); len(diff) > 0 {
t.Fatalf("bad, diff: %#v", diff)

View File

@ -0,0 +1,6 @@
// +build !enterprise
package server
func addExpectedEntConfig(c *Config, sentinelModules []string) {}
func addExpectedEntSanitizedConfig(c map[string]interface{}, sentinelModules []string) {}

View File

@ -12,3 +12,12 @@ type entConfig struct {
func (ec *entConfig) parseConfig(list *ast.ObjectList) error {
return nil
}
func (ec entConfig) Merge(ec2 entConfig) entConfig {
result := entConfig{}
return result
}
func (ec entConfig) Sanitized() map[string]interface{} {
return nil
}

View File

@ -5,6 +5,9 @@ telemetry {
usage_gauge_period = "5m"
maximum_gauge_cardinality = 100
}
sentinel {
additional_enabled_modules = ["http"]
}
ui=true
raw_storage_endpoint=true
default_lease_ttl = "10h"

View File

@ -34,6 +34,10 @@ telemetry {
metrics_prefix = "myprefix"
}
sentinel {
additional_enabled_modules = []
}
max_lease_ttl = "10h"
default_lease_ttl = "10h"
cluster_name = "testcluster"

View File

@ -21,6 +21,9 @@
"usage_gauge_period": "5m", "usage_gauge_period": "5m",
"maximum_gauge_cardinality": 100 "maximum_gauge_cardinality": 100
}, },
"sentinel": {
"additional_enabled_modules": []
},
"max_lease_ttl": "10h", "max_lease_ttl": "10h",
"default_lease_ttl": "10h", "default_lease_ttl": "10h",
"cluster_name":"testcluster", "cluster_name":"testcluster",

View File

@ -39,6 +39,9 @@ entropy "seal" {
mode = "augmentation" mode = "augmentation"
} }
sentinel {
additional_enabled_modules = []
}
kms "commastringpurpose" { kms "commastringpurpose" {
purpose = "foo,bar" purpose = "foo,bar"
} }

View File

@ -53,6 +53,9 @@
"circonus_broker_select_tag": "dc:sfo", "circonus_broker_select_tag": "dc:sfo",
"prometheus_retention_time": "30s" "prometheus_retention_time": "30s"
}, },
"sentinel": {
"additional_enabled_modules": ["http"]
},
"entropy": { "entropy": {
"seal": { "seal": {
"mode": "augmentation" "mode": "augmentation"

View File

@ -34,6 +34,10 @@ telemetry {
maximum_gauge_cardinality = 100
}
sentinel {
additional_enabled_modules = ["http"]
}
seal "awskms" { seal "awskms" {
region = "us-east-1" region = "us-east-1"
access_key = "AKIAIOSFODNN7EXAMPLE" access_key = "AKIAIOSFODNN7EXAMPLE"

View File

@ -28,10 +28,10 @@ var (
func (n *Namespace) HasParent(possibleParent *Namespace) bool {
switch {
case n.Path == "":
return false
case possibleParent.Path == "":
return true
case n.Path == "":
return false
default:
return strings.HasPrefix(n.Path, possibleParent.Path)
}
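The reordered switch changes behavior for the root namespace: its Path is the empty string, so checking n.Path == "" first guarantees the root is never reported as having a parent, even though every path has "" as a prefix. A minimal standalone sketch of the new ordering (an illustration, not the vault package itself):

package main

import (
	"fmt"
	"strings"
)

// hasParent mirrors the reordered switch: the empty-path (root) check
// now runs before the empty-parent check.
func hasParent(nPath, parentPath string) bool {
	switch {
	case nPath == "":
		return false // the root namespace has no parent
	case parentPath == "":
		return true // the root namespace is a parent of everything else
	default:
		return strings.HasPrefix(nPath, parentPath)
	}
}

func main() {
	fmt.Println(hasParent("", ""))             // false; the old ordering returned true here
	fmt.Println(hasParent("ns1/", ""))         // true
	fmt.Println(hasParent("ns1/ns2/", "ns1/")) // true
}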

helper/timeutil/timeutil.go (new file, 124 lines)
View File

@ -0,0 +1,124 @@
package timeutil
import (
"errors"
"fmt"
"strconv"
"strings"
"time"
)
func StartOfMonth(t time.Time) time.Time {
year, month, _ := t.Date()
return time.Date(year, month, 1, 0, 0, 0, 0, t.Location())
}
func StartOfNextMonth(t time.Time) time.Time {
year, month, _ := t.Date()
return time.Date(year, month, 1, 0, 0, 0, 0, t.Location()).AddDate(0, 1, 0)
}
// IsMonthStart checks if :t: is the start of the month
func IsMonthStart(t time.Time) bool {
return t.Equal(StartOfMonth(t))
}
func EndOfMonth(t time.Time) time.Time {
year, month, _ := t.Date()
if month == time.December {
return time.Date(year, time.December, 31, 23, 59, 59, 0, t.Location())
} else {
eom := time.Date(year, month+1, 1, 23, 59, 59, 0, t.Location())
return eom.AddDate(0, 0, -1)
}
}
// IsPreviousMonth checks if :t: is in the month directly before :toCompare:
func IsPreviousMonth(t, toCompare time.Time) bool {
thisMonthStart := StartOfMonth(toCompare)
previousMonthStart := StartOfMonth(thisMonthStart.AddDate(0, 0, -1))
if t.Equal(previousMonthStart) {
return true
}
return t.After(previousMonthStart) && t.Before(thisMonthStart)
}
// IsCurrentMonth checks if :t: is in the current month, as defined by :compare:
// generally, pass in time.Now().UTC() as :compare:
func IsCurrentMonth(t, compare time.Time) bool {
thisMonthStart := StartOfMonth(compare)
queryMonthStart := StartOfMonth(t)
return queryMonthStart.Equal(thisMonthStart)
}
// GetMostRecentContiguousMonths finds the start time of the most
// recent set of contiguous months.
//
// For example, if the most recent start time is Aug 15, then that range is just 1 month
// If the recent start times are Aug 1 and July 1 and June 15, then that range is
// three months and we return June 15.
//
// note: return slice will be nil if :startTimes: is nil
// :startTimes: must be sorted in decreasing order (see unit test for examples)
func GetMostRecentContiguousMonths(startTimes []time.Time) []time.Time {
if len(startTimes) < 2 {
// no processing needed if there are 0 or 1 months' worth of logs
return startTimes
}
out := []time.Time{startTimes[0]}
if !IsMonthStart(out[0]) {
// there is less than one contiguous month (most recent start time is after the start of this month)
return out
}
i := 1
for ; i < len(startTimes); i++ {
if !IsMonthStart(startTimes[i]) || !IsPreviousMonth(startTimes[i], startTimes[i-1]) {
break
}
out = append(out, startTimes[i])
}
// handle mid-month log starts
if i < len(startTimes) {
if IsPreviousMonth(StartOfMonth(startTimes[i]), startTimes[i-1]) {
// the earliest part of the segment is mid-month, but still valid for this segment
out = append(out, startTimes[i])
}
}
return out
}
func InRange(t, start, end time.Time) bool {
return (t.Equal(start) || t.After(start)) &&
(t.Equal(end) || t.Before(end))
}
// Used when a storage path has the form <timestamp>/,
// where timestamp is a Unix timestamp.
func ParseTimeFromPath(path string) (time.Time, error) {
elems := strings.Split(path, "/")
if len(elems) == 1 {
// :path: is a directory that must have children
return time.Time{}, errors.New("Invalid path provided")
}
unixSeconds, err := strconv.ParseInt(elems[0], 10, 64)
if err != nil {
return time.Time{}, fmt.Errorf("could not convert time from path segment %q. error: %w", elems[0], err)
}
return time.Unix(unixSeconds, 0).UTC(), nil
}
// Compute the N-month period before the given date.
// For example, if it is currently April 2020, then 12 months is April 2019 through March 2020.
func MonthsPreviousTo(months int, now time.Time) time.Time {
firstOfMonth := StartOfMonth(now.UTC())
return firstOfMonth.AddDate(0, -months, 0)
}
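Taken together, these helpers cover the month arithmetic the activity log needs. A small usage sketch; the expected values in the comments follow from the definitions above:

package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/vault/helper/timeutil"
)

func main() {
	t := time.Date(2020, 4, 15, 10, 30, 0, 0, time.UTC)

	fmt.Println(timeutil.StartOfMonth(t)) // 2020-04-01 00:00:00 +0000 UTC
	fmt.Println(timeutil.EndOfMonth(t))   // 2020-04-30 23:59:59 +0000 UTC
	fmt.Println(timeutil.IsMonthStart(t)) // false

	// Twelve months before April 2020 is April 2019.
	fmt.Println(timeutil.MonthsPreviousTo(12, t)) // 2019-04-01 00:00:00 +0000 UTC

	// "1601415205/3" is a <unix-seconds>/<segment> storage path.
	ts, err := timeutil.ParseTimeFromPath("1601415205/3")
	fmt.Println(ts, err) // 2020-09-29 21:33:25 +0000 UTC <nil>
}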

View File

@ -0,0 +1,298 @@
package timeutil
import (
"reflect"
"testing"
"time"
)
func TestTimeutil_StartOfMonth(t *testing.T) {
testCases := []struct {
Input time.Time
Expected time.Time
}{
{
Input: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC),
Expected: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC),
},
{
Input: time.Date(2020, 1, 1, 1, 0, 0, 0, time.UTC),
Expected: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC),
},
{
Input: time.Date(2020, 1, 1, 0, 0, 0, 1, time.UTC),
Expected: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC),
},
{
Input: time.Date(2020, 1, 31, 23, 59, 59, 999999999, time.UTC),
Expected: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC),
},
{
Input: time.Date(2020, 2, 28, 1, 2, 3, 4, time.UTC),
Expected: time.Date(2020, 2, 1, 0, 0, 0, 0, time.UTC),
},
}
for _, tc := range testCases {
result := StartOfMonth(tc.Input)
if !result.Equal(tc.Expected) {
t.Errorf("start of %v is %v, expected %v", tc.Input, result, tc.Expected)
}
}
}
func TestTimeutil_IsMonthStart(t *testing.T) {
testCases := []struct {
input time.Time
expected bool
}{
{
input: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC),
expected: true,
},
{
input: time.Date(2020, 1, 1, 0, 0, 0, 1, time.UTC),
expected: false,
},
{
input: time.Date(2020, 4, 5, 0, 0, 0, 0, time.UTC),
expected: false,
},
{
input: time.Date(2020, 1, 31, 23, 59, 59, 999999999, time.UTC),
expected: false,
},
}
for _, tc := range testCases {
result := IsMonthStart(tc.input)
if result != tc.expected {
t.Errorf("is %v the start of the month? expected %t, got %t", tc.input, tc.expected, result)
}
}
}
func TestTimeutil_EndOfMonth(t *testing.T) {
testCases := []struct {
Input time.Time
Expected time.Time
}{
{
// The current behavior does not use the nanoseconds
// because we didn't want to clutter the result of end-of-month reporting.
Input: time.Date(2020, 1, 31, 23, 59, 59, 0, time.UTC),
Expected: time.Date(2020, 1, 31, 23, 59, 59, 0, time.UTC),
},
{
Input: time.Date(2020, 1, 31, 23, 59, 59, 999999999, time.UTC),
Expected: time.Date(2020, 1, 31, 23, 59, 59, 0, time.UTC),
},
{
Input: time.Date(2020, 1, 15, 1, 2, 3, 4, time.UTC),
Expected: time.Date(2020, 1, 31, 23, 59, 59, 0, time.UTC),
},
{
// Leap year
Input: time.Date(2020, 2, 1, 0, 0, 0, 0, time.UTC),
Expected: time.Date(2020, 2, 29, 23, 59, 59, 0, time.UTC),
},
{
// non-leap year
Input: time.Date(2100, 2, 1, 0, 0, 0, 0, time.UTC),
Expected: time.Date(2100, 2, 28, 23, 59, 59, 0, time.UTC),
},
}
for _, tc := range testCases {
result := EndOfMonth(tc.Input)
if !result.Equal(tc.Expected) {
t.Errorf("end of %v is %v, expected %v", tc.Input, result, tc.Expected)
}
}
}
func TestTimeutil_IsPreviousMonth(t *testing.T) {
testCases := []struct {
tInput time.Time
compareInput time.Time
expected bool
}{
{
tInput: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC),
compareInput: time.Date(2020, 1, 31, 0, 0, 0, 0, time.UTC),
expected: false,
},
{
tInput: time.Date(2019, 12, 31, 0, 0, 0, 0, time.UTC),
compareInput: time.Date(2020, 1, 31, 0, 0, 0, 0, time.UTC),
expected: true,
},
{
// leap year (false)
tInput: time.Date(2019, 12, 29, 10, 10, 10, 0, time.UTC),
compareInput: time.Date(2020, 2, 29, 10, 10, 10, 0, time.UTC),
expected: false,
},
{
// leap year (true)
tInput: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC),
compareInput: time.Date(2020, 2, 29, 10, 10, 10, 0, time.UTC),
expected: true,
},
{
tInput: time.Date(2018, 5, 5, 5, 0, 0, 0, time.UTC),
compareInput: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC),
expected: false,
},
{
// test normalization. want to make sure subtracting 1 month from 3/30/2020 doesn't yield 2/30/2020, which
// would be normalized to 3/1/2020
tInput: time.Date(2020, 2, 1, 0, 0, 0, 0, time.UTC),
compareInput: time.Date(2020, 3, 30, 0, 0, 0, 0, time.UTC),
expected: true,
},
}
for _, tc := range testCases {
result := IsPreviousMonth(tc.tInput, tc.compareInput)
if result != tc.expected {
t.Errorf("%v in previous month to %v? expected %t, got %t", tc.tInput, tc.compareInput, tc.expected, result)
}
}
}
func TestTimeutil_IsCurrentMonth(t *testing.T) {
now := time.Now()
testCases := []struct {
input time.Time
expected bool
}{
{
input: now,
expected: true,
},
{
input: StartOfMonth(now).AddDate(0, 0, -1),
expected: false,
},
{
input: EndOfMonth(now).AddDate(0, 0, -1),
expected: true,
},
{
input: StartOfMonth(now).AddDate(-1, 0, 0),
expected: false,
},
}
for _, tc := range testCases {
result := IsCurrentMonth(tc.input, now)
if result != tc.expected {
t.Errorf("invalid result. expected %t for %v", tc.expected, tc.input)
}
}
}
func TestTimeUtil_ContiguousMonths(t *testing.T) {
testCases := []struct {
input []time.Time
expected []time.Time
}{
{
input: []time.Time{
time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC),
time.Date(2020, 3, 1, 0, 0, 0, 0, time.UTC),
time.Date(2020, 2, 5, 0, 0, 0, 0, time.UTC),
time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC),
},
expected: []time.Time{
time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC),
time.Date(2020, 3, 1, 0, 0, 0, 0, time.UTC),
time.Date(2020, 2, 5, 0, 0, 0, 0, time.UTC),
},
},
{
input: []time.Time{
time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC),
time.Date(2020, 3, 1, 0, 0, 0, 0, time.UTC),
time.Date(2020, 2, 1, 0, 0, 0, 0, time.UTC),
time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC),
},
expected: []time.Time{
time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC),
time.Date(2020, 3, 1, 0, 0, 0, 0, time.UTC),
time.Date(2020, 2, 1, 0, 0, 0, 0, time.UTC),
time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC),
},
},
{
input: []time.Time{
time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC),
},
expected: []time.Time{
time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC),
},
},
{
input: []time.Time{},
expected: []time.Time{},
},
{
input: nil,
expected: nil,
},
{
input: []time.Time{
time.Date(2020, 2, 2, 0, 0, 0, 0, time.UTC),
time.Date(2020, 1, 15, 0, 0, 0, 0, time.UTC),
},
expected: []time.Time{
time.Date(2020, 2, 2, 0, 0, 0, 0, time.UTC),
},
},
}
for _, tc := range testCases {
result := GetMostRecentContiguousMonths(tc.input)
if !reflect.DeepEqual(tc.expected, result) {
t.Errorf("invalid contiguous segment returned. expected %v, got %v", tc.expected, result)
}
}
}
func TestTimeUtil_ParseTimeFromPath(t *testing.T) {
testCases := []struct {
input string
expectedOut time.Time
expectError bool
}{
{
input: "719020800/1",
expectedOut: time.Unix(719020800, 0).UTC(),
expectError: false,
},
{
input: "1601415205/3",
expectedOut: time.Unix(1601415205, 0).UTC(),
expectError: false,
},
{
input: "baddata/3",
expectedOut: time.Time{},
expectError: true,
},
}
for _, tc := range testCases {
result, err := ParseTimeFromPath(tc.input)
gotError := err != nil
if result != tc.expectedOut {
t.Errorf("bad timestamp on input %q. expected: %v got: %v", tc.input, tc.expectedOut, result)
}
if gotError != tc.expectError {
t.Errorf("bad error status on input %q. expected error: %t, got error: %t", tc.input, tc.expectError, gotError)
}
}
}

View File

@ -180,6 +180,9 @@ func TestConnectionURL(t *testing.T) {
const maxTries = 3
func testPostgresSQLLockTTL(t *testing.T, ha physical.HABackend) {
t.Log("Skipping testPostgresSQLLockTTL portion of test.")
return
for tries := 1; tries <= maxTries; tries++ {
// Try this several times. If the test environment is too slow the lock can naturally lapse
if attemptLockTTLTest(t, ha, tries) {

View File

@ -69,5 +69,6 @@ func GetRegion(configuredRegion string) (string, error) {
if err != nil {
return "", errwrap.Wrapf("unable to retrieve region from instance metadata: {{err}}", err)
}
return region, nil
}

View File

@ -160,6 +160,138 @@ func (x *LogFragment) GetNonEntityTokens() map[string]uint64 {
return nil
}
type EntityActivityLog struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Entities []*EntityRecord `sentinel:"" protobuf:"bytes,1,rep,name=entities,proto3" json:"entities,omitempty"`
}
func (x *EntityActivityLog) Reset() {
*x = EntityActivityLog{}
if protoimpl.UnsafeEnabled {
mi := &file_vault_activity_activity_log_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *EntityActivityLog) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*EntityActivityLog) ProtoMessage() {}
func (x *EntityActivityLog) ProtoReflect() protoreflect.Message {
mi := &file_vault_activity_activity_log_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use EntityActivityLog.ProtoReflect.Descriptor instead.
func (*EntityActivityLog) Descriptor() ([]byte, []int) {
return file_vault_activity_activity_log_proto_rawDescGZIP(), []int{2}
}
func (x *EntityActivityLog) GetEntities() []*EntityRecord {
if x != nil {
return x.Entities
}
return nil
}
type TokenCount struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
CountByNamespaceID map[string]uint64 `sentinel:"" protobuf:"bytes,1,rep,name=count_by_namespace_id,json=countByNamespaceId,proto3" json:"count_by_namespace_id,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
}
func (x *TokenCount) Reset() {
*x = TokenCount{}
if protoimpl.UnsafeEnabled {
mi := &file_vault_activity_activity_log_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *TokenCount) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TokenCount) ProtoMessage() {}
func (x *TokenCount) ProtoReflect() protoreflect.Message {
mi := &file_vault_activity_activity_log_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TokenCount.ProtoReflect.Descriptor instead.
func (*TokenCount) Descriptor() ([]byte, []int) {
return file_vault_activity_activity_log_proto_rawDescGZIP(), []int{3}
}
func (x *TokenCount) GetCountByNamespaceID() map[string]uint64 {
if x != nil {
return x.CountByNamespaceID
}
return nil
}
type LogFragmentResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *LogFragmentResponse) Reset() {
*x = LogFragmentResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_vault_activity_activity_log_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *LogFragmentResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*LogFragmentResponse) ProtoMessage() {}
func (x *LogFragmentResponse) ProtoReflect() protoreflect.Message {
mi := &file_vault_activity_activity_log_proto_msgTypes[4]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use LogFragmentResponse.ProtoReflect.Descriptor instead.
func (*LogFragmentResponse) Descriptor() ([]byte, []int) {
return file_vault_activity_activity_log_proto_rawDescGZIP(), []int{4}
}
var File_vault_activity_activity_log_proto protoreflect.FileDescriptor
var file_vault_activity_activity_log_proto_rawDesc = []byte{
@ -189,10 +321,28 @@ var file_vault_activity_activity_log_proto_rawDesc = []byte{
0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05,
0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c,
0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x2b, 0x5a, 0x29, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76,
0x61, 0x75, 0x6c, 0x74, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x61, 0x63, 0x74, 0x69, 0x76,
0x69, 0x74, 0x79, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x47, 0x0a, 0x11, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79,
0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x67, 0x12, 0x32, 0x0a, 0x08, 0x65,
0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e,
0x61, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52,
0xb4, 0x01, 0x0a, 0x0a, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x5f,
0x0a, 0x15, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x62, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73,
0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e,
0x61, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x2e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x43, 0x6f,
0x75, 0x6e, 0x74, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73,
0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x12, 0x63, 0x6f, 0x75,
0x6e, 0x74, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x1a,
0x45, 0x0a, 0x17, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70,
0x61, 0x63, 0x65, 0x49, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05,
0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c,
0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x15, 0x0a, 0x13, 0x4c, 0x6f, 0x67, 0x46, 0x72, 0x61,
0x67, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x2b, 0x5a,
0x29, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68,
0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x76, 0x61, 0x75, 0x6c,
0x74, 0x2f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x33,
}
var (
@ -207,20 +357,26 @@ func file_vault_activity_activity_log_proto_rawDescGZIP() []byte {
return file_vault_activity_activity_log_proto_rawDescData
}
var file_vault_activity_activity_log_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
var file_vault_activity_activity_log_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
var file_vault_activity_activity_log_proto_goTypes = []interface{}{
(*EntityRecord)(nil), // 0: activity.EntityRecord
(*LogFragment)(nil), // 1: activity.LogFragment
nil, // 2: activity.LogFragment.NonEntityTokensEntry
(*EntityActivityLog)(nil), // 2: activity.EntityActivityLog
(*TokenCount)(nil), // 3: activity.TokenCount
(*LogFragmentResponse)(nil), // 4: activity.LogFragmentResponse
nil, // 5: activity.LogFragment.NonEntityTokensEntry
nil, // 6: activity.TokenCount.CountByNamespaceIDEntry
}
var file_vault_activity_activity_log_proto_depIDxs = []int32{
0, // 0: activity.LogFragment.entities:type_name -> activity.EntityRecord
2, // 1: activity.LogFragment.non_entity_tokens:type_name -> activity.LogFragment.NonEntityTokensEntry
2, // [2:2] is the sub-list for method output_type
2, // [2:2] is the sub-list for method input_type
2, // [2:2] is the sub-list for extension type_name
2, // [2:2] is the sub-list for extension extendee
0, // [0:2] is the sub-list for field type_name
5, // 1: activity.LogFragment.non_entity_tokens:type_name -> activity.LogFragment.NonEntityTokensEntry
0, // 2: activity.EntityActivityLog.entities:type_name -> activity.EntityRecord
6, // 3: activity.TokenCount.count_by_namespace_id:type_name -> activity.TokenCount.CountByNamespaceIDEntry
4, // [4:4] is the sub-list for method output_type
4, // [4:4] is the sub-list for method input_type
4, // [4:4] is the sub-list for extension type_name
4, // [4:4] is the sub-list for extension extendee
0, // [0:4] is the sub-list for field type_name
}
func init() { file_vault_activity_activity_log_proto_init() }
@ -253,6 +409,42 @@ func file_vault_activity_activity_log_proto_init() {
return nil
}
}
file_vault_activity_activity_log_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*EntityActivityLog); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_vault_activity_activity_log_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*TokenCount); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_vault_activity_activity_log_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*LogFragmentResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
@ -260,7 +452,7 @@ func file_vault_activity_activity_log_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_vault_activity_activity_log_proto_rawDesc,
NumEnums: 0,
NumMessages: 3,
NumMessages: 7,
NumExtensions: 0,
NumServices: 0,
},

View File

@ -26,3 +26,14 @@ message LogFragment {
// indexed by namespace ID
map<string,uint64> non_entity_tokens = 3;
}
message EntityActivityLog {
repeated EntityRecord entities = 1;
}
message TokenCount {
map<string,uint64> count_by_namespace_id = 1;
}
message LogFragmentResponse {
}
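The new messages behave like any other generated protobuf type. A minimal round-trip sketch using the generated TokenCount (assumes the standard google.golang.org/protobuf module):

package main

import (
	"fmt"

	"github.com/hashicorp/vault/vault/activity"
	"google.golang.org/protobuf/proto"
)

func main() {
	tc := &activity.TokenCount{
		CountByNamespaceID: map[string]uint64{"root": 42},
	}
	raw, err := proto.Marshal(tc)
	if err != nil {
		panic(err)
	}
	var decoded activity.TokenCount
	if err := proto.Unmarshal(raw, &decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded.GetCountByNamespaceID()["root"]) // 42
}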

vault/activity/query.go (new file, 233 lines)
View File

@ -0,0 +1,233 @@
package activity
import (
"context"
"encoding/json"
"errors"
"fmt"
"sort"
"strconv"
"time"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/helper/timeutil"
"github.com/hashicorp/vault/sdk/logical"
)
// About 66 bytes per record:
//{"namespace_id":"xxxxx","entities":1234,"non_entity_tokens":1234},
// = approx 7900 namespaces in 512KiB
// So one storage entry is fine (for now).
type NamespaceRecord struct {
NamespaceID string `json:"namespace_id"`
Entities uint64 `json:"entities"`
NonEntityTokens uint64 `json:"non_entity_tokens"`
}
type PrecomputedQuery struct {
StartTime time.Time
EndTime time.Time
Namespaces []*NamespaceRecord `json:"namespaces"`
}
type PrecomputedQueryStore struct {
logger log.Logger
view logical.Storage
}
// The query store should be initialized with a view to the subdirectory
// it should use, like "queries".
func NewPrecomputedQueryStore(logger log.Logger, view logical.Storage, retentionMonths int) *PrecomputedQueryStore {
return &PrecomputedQueryStore{
logger: logger,
view: view,
}
}
func (s *PrecomputedQueryStore) Put(ctx context.Context, p *PrecomputedQuery) error {
path := fmt.Sprintf("%v/%v", p.StartTime.Unix(), p.EndTime.Unix())
asJson, err := json.Marshal(p)
if err != nil {
return err
}
err = s.view.Put(ctx, &logical.StorageEntry{
Key: path,
Value: asJson,
})
if err != nil {
return err
}
return nil
}
func (s *PrecomputedQueryStore) listStartTimes(ctx context.Context) ([]time.Time, error) {
// We could cache this to save a storage operation on each query,
// but that seems like a marginal improvement.
rawStartTimes, err := s.view.List(ctx, "")
if err != nil {
return nil, err
}
startTimes := make([]time.Time, 0, len(rawStartTimes))
for _, raw := range rawStartTimes {
t, err := timeutil.ParseTimeFromPath(raw)
if err != nil {
s.logger.Warn("could not parse precomputed query subdirectory", "key", raw)
continue
}
startTimes = append(startTimes, t)
}
return startTimes, nil
}
func (s *PrecomputedQueryStore) listEndTimes(ctx context.Context, startTime time.Time) ([]time.Time, error) {
rawEndTimes, err := s.view.List(ctx, fmt.Sprintf("%v/", startTime.Unix()))
if err != nil {
return nil, err
}
endTimes := make([]time.Time, 0, len(rawEndTimes))
for _, raw := range rawEndTimes {
val, err := strconv.ParseInt(raw, 10, 64)
if err != nil {
s.logger.Warn("could not parse precomputed query end time", "key", raw)
continue
}
endTimes = append(endTimes, time.Unix(val, 0).UTC())
}
return endTimes, nil
}
func (s *PrecomputedQueryStore) QueriesAvailable(ctx context.Context) (bool, error) {
startTimes, err := s.listStartTimes(ctx)
if err != nil {
return false, err
}
return len(startTimes) > 0, nil
}
func (s *PrecomputedQueryStore) Get(ctx context.Context, startTime, endTime time.Time) (*PrecomputedQuery, error) {
if startTime.After(endTime) {
return nil, errors.New("start time is after end time")
}
startTime = timeutil.StartOfMonth(startTime)
endTime = timeutil.EndOfMonth(endTime)
s.logger.Trace("searching for matching queries", "startTime", startTime, "endTime", endTime)
// Find the oldest continuous region which overlaps with the given range.
// We only have to handle some collection of lower triangles like this,
// not arbitrary sets of endpoints (except in the middle of writes or GC):
//
// start ->
// end #
// | ##
// V ###
//
// #
// ##
// ###
//
// (1) find all saved start times T that are
// in [startTime,endTime]
// (if there is some report that overlaps, it will
// have a start time in the range-- an overlap
// only at the end is impossible.)
// (2) take the latest continguous region within
// that set
// i.e., walk up the diagonal as far as we can in a single
// triangle.
// (These could be combined into a single pass, but
// that seems more complicated to understand.)
startTimes, err := s.listStartTimes(ctx)
if err != nil {
return nil, err
}
s.logger.Trace("retrieved start times from storage", "startTimes", startTimes)
filteredList := make([]time.Time, 0, len(startTimes))
for _, t := range startTimes {
if timeutil.InRange(t, startTime, endTime) {
filteredList = append(filteredList, t)
}
}
s.logger.Trace("filtered to range", "startTimes", filteredList)
if len(filteredList) == 0 {
return nil, nil
}
// Descending order, as required by the timeutil function
sort.Slice(filteredList, func(i, j int) bool {
return filteredList[i].After(filteredList[j])
})
contiguous := timeutil.GetMostRecentContiguousMonths(filteredList)
actualStartTime := contiguous[len(contiguous)-1]
s.logger.Trace("chose start time", "actualStartTime", actualStartTime, "contiguous", contiguous)
endTimes, err := s.listEndTimes(ctx, actualStartTime)
if err != nil {
return nil, err
}
s.logger.Trace("retrieved end times from storage", "endTimes", endTimes)
// Might happen if there's a race with GC
if len(endTimes) == 0 {
s.logger.Warn("missing end times", "start time", actualStartTime)
return nil, nil
}
var actualEndTime time.Time
for _, t := range endTimes {
if timeutil.InRange(t, startTime, endTime) {
if actualEndTime.IsZero() || t.After(actualEndTime) {
actualEndTime = t
}
}
}
if actualEndTime.IsZero() {
s.logger.Warn("no end time in range", "start time", actualStartTime)
return nil, nil
}
path := fmt.Sprintf("%v/%v", actualStartTime.Unix(), actualEndTime.Unix())
entry, err := s.view.Get(ctx, path)
if err != nil {
return nil, err
}
p := &PrecomputedQuery{}
err = json.Unmarshal(entry.Value, p)
if err != nil {
s.logger.Warn("failed query lookup at", "path", path)
return nil, err
}
return p, nil
}
func (s *PrecomputedQueryStore) DeleteQueriesBefore(ctx context.Context, retentionThreshold time.Time) error {
startTimes, err := s.listStartTimes(ctx)
if err != nil {
return err
}
for _, t := range startTimes {
path := fmt.Sprintf("%v/", t.Unix())
if t.Before(retentionThreshold) {
rawEndTimes, err := s.view.List(ctx, path)
if err != nil {
return err
}
s.logger.Trace("deleting queries", "startTime", t)
// Don't care about what the end time is,
// the start time alone determines deletion.
for _, end := range rawEndTimes {
err = s.view.Delete(ctx, path+end)
if err != nil {
return err
}
}
}
}
return nil
}

View File

@ -0,0 +1,289 @@
package activity
import (
"context"
"reflect"
"sort"
"testing"
"time"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/helper/timeutil"
"github.com/hashicorp/vault/sdk/helper/logging"
"github.com/hashicorp/vault/sdk/logical"
)
func NewTestQueryStore(t *testing.T) *PrecomputedQueryStore {
t.Helper()
logger := logging.NewVaultLogger(log.Trace)
view := &logical.InmemStorage{}
return NewPrecomputedQueryStore(logger, view, 12)
}
func TestQueryStore_Inventory(t *testing.T) {
startTimes := []time.Time{
time.Date(2020, 1, 15, 0, 0, 0, 0, time.UTC),
time.Date(2020, 2, 1, 0, 0, 0, 0, time.UTC),
time.Date(2020, 3, 1, 0, 0, 0, 0, time.UTC),
}
endTimes := []time.Time{
timeutil.EndOfMonth(time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC)),
timeutil.EndOfMonth(time.Date(2020, 2, 1, 0, 0, 0, 0, time.UTC)),
timeutil.EndOfMonth(time.Date(2020, 3, 1, 0, 0, 0, 0, time.UTC)),
timeutil.EndOfMonth(time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC)),
timeutil.EndOfMonth(time.Date(2020, 5, 1, 0, 0, 0, 0, time.UTC)),
}
qs := NewTestQueryStore(t)
ctx := context.Background()
for _, s := range startTimes {
for _, e := range endTimes {
if e.Before(s) {
continue
}
qs.Put(ctx, &PrecomputedQuery{
StartTime: s,
EndTime: e,
Namespaces: []*NamespaceRecord{},
})
}
}
storedStartTimes, err := qs.listStartTimes(ctx)
if err != nil {
t.Fatal(err)
}
if len(storedStartTimes) != len(startTimes) {
t.Fatalf("bad length, expected %v got %v", len(startTimes), storedStartTimes)
}
sort.Slice(storedStartTimes, func(i, j int) bool {
return storedStartTimes[i].Before(storedStartTimes[j])
})
if !reflect.DeepEqual(storedStartTimes, startTimes) {
t.Fatalf("start time mismatch, expected %v got %v", startTimes, storedStartTimes)
}
storedEndTimes, err := qs.listEndTimes(ctx, startTimes[1])
expected := endTimes[1:]
if len(storedEndTimes) != len(expected) {
t.Fatalf("bad length, expected %v got %v", len(expected), storedEndTimes)
}
sort.Slice(storedEndTimes, func(i, j int) bool {
return storedEndTimes[i].Before(storedEndTimes[j])
})
if !reflect.DeepEqual(storedEndTimes, expected) {
t.Fatalf("end time mismatch, expected %v got %v", expected, storedEndTimes)
}
}
func TestQueryStore_MarshalDemarshal(t *testing.T) {
tsStart := time.Date(2020, 1, 15, 0, 0, 0, 0, time.UTC)
tsEnd := timeutil.EndOfMonth(tsStart)
p := &PrecomputedQuery{
StartTime: tsStart,
EndTime: tsEnd,
Namespaces: []*NamespaceRecord{
&NamespaceRecord{
NamespaceID: "root",
Entities: 20,
NonEntityTokens: 42,
},
&NamespaceRecord{
NamespaceID: "yzABC",
Entities: 15,
NonEntityTokens: 31,
},
},
}
qs := NewTestQueryStore(t)
ctx := context.Background()
qs.Put(ctx, p)
result, err := qs.Get(ctx, tsStart, tsEnd)
if err != nil {
t.Fatal(err)
}
if result == nil {
t.Fatal("nil response from Get")
}
if !reflect.DeepEqual(result, p) {
t.Fatalf("unequal query objects, expected %v got %v", p, result)
}
}
func TestQueryStore_TimeRanges(t *testing.T) {
qs := NewTestQueryStore(t)
ctx := context.Background()
// Scenario ranges: Jan 15 - Jan 31 (one month)
// Feb 2 - Mar 31 (two months, but not contiguous)
// April and May are skipped
// June 1 - September 30 (4 months)
periods := []struct {
Begin time.Time
Ends []time.Time
}{
{
time.Date(2020, 1, 15, 12, 45, 53, 0, time.UTC),
[]time.Time{
timeutil.EndOfMonth(time.Date(2020, 1, 1, 1, 0, 0, 0, time.UTC)),
},
},
{
time.Date(2020, 2, 2, 0, 0, 0, 0, time.UTC),
[]time.Time{
timeutil.EndOfMonth(time.Date(2020, 2, 1, 0, 0, 0, 0, time.UTC)),
timeutil.EndOfMonth(time.Date(2020, 3, 1, 0, 0, 0, 0, time.UTC)),
},
},
{
time.Date(2020, 6, 1, 0, 0, 0, 0, time.UTC),
[]time.Time{
timeutil.EndOfMonth(time.Date(2020, 6, 1, 0, 0, 0, 0, time.UTC)),
timeutil.EndOfMonth(time.Date(2020, 7, 1, 0, 0, 0, 0, time.UTC)),
timeutil.EndOfMonth(time.Date(2020, 8, 1, 0, 0, 0, 0, time.UTC)),
timeutil.EndOfMonth(time.Date(2020, 9, 1, 0, 0, 0, 0, time.UTC)),
},
},
}
for _, period := range periods {
for _, e := range period.Ends {
qs.Put(ctx, &PrecomputedQuery{
StartTime: period.Begin,
EndTime: e,
Namespaces: []*NamespaceRecord{
&NamespaceRecord{
NamespaceID: "root",
Entities: 17,
NonEntityTokens: 31,
},
},
})
}
}
testCases := []struct {
Name string
StartTime time.Time
EndTime time.Time
Empty bool
ExpectedStart time.Time
ExpectedEnd time.Time
}{
{
"year query in October",
time.Date(2019, 10, 12, 0, 0, 0, 0, time.UTC),
time.Date(2020, 10, 12, 0, 0, 0, 0, time.UTC),
false,
// June - Sept
periods[2].Begin,
periods[2].Ends[3],
},
{
"one day in January",
time.Date(2020, 1, 4, 0, 0, 0, 0, time.UTC),
time.Date(2020, 1, 5, 0, 0, 0, 0, time.UTC),
false,
// January, even though this is outside the range specified
periods[0].Begin,
periods[0].Ends[0],
},
{
"one day in February",
time.Date(2020, 2, 4, 0, 0, 0, 0, time.UTC),
time.Date(2020, 2, 5, 0, 0, 0, 0, time.UTC),
false,
// February only
periods[1].Begin,
periods[1].Ends[0],
},
{
"January through March",
time.Date(2020, 1, 4, 0, 0, 0, 0, time.UTC),
time.Date(2020, 3, 5, 0, 0, 0, 0, time.UTC),
false,
// February and March only
// Fails due to bug in library function, TODO
periods[1].Begin,
periods[1].Ends[1],
},
{
"the month of May",
time.Date(2020, 5, 1, 0, 0, 0, 0, time.UTC),
time.Date(2020, 5, 31, 0, 0, 0, 0, time.UTC),
true, // no data
time.Time{},
time.Time{},
},
{
"May through June",
time.Date(2020, 5, 1, 0, 0, 0, 0, time.UTC),
time.Date(2020, 6, 1, 0, 0, 0, 0, time.UTC),
false,
// June only
periods[2].Begin,
periods[2].Ends[0],
},
{
"September",
time.Date(2020, 9, 1, 0, 0, 0, 0, time.UTC),
time.Date(2020, 9, 1, 0, 0, 0, 0, time.UTC),
true, // We have June through September,
// but not anything starting in September
// (which does not match a real scenario)
time.Time{},
time.Time{},
},
{
"December",
time.Date(2020, 12, 1, 0, 0, 0, 0, time.UTC),
time.Date(2020, 12, 1, 0, 0, 0, 0, time.UTC),
true, // no data
time.Time{},
time.Time{},
},
{
"June through December",
time.Date(2020, 6, 1, 12, 0, 0, 0, time.UTC),
time.Date(2020, 12, 31, 12, 0, 0, 0, time.UTC),
false,
// June through September
periods[2].Begin,
periods[2].Ends[3],
},
}
for _, tc := range testCases {
tc := tc // capture range variable
t.Run(tc.Name, func(t *testing.T) {
t.Parallel()
result, err := qs.Get(ctx, tc.StartTime, tc.EndTime)
if err != nil {
t.Fatal(err)
}
if result == nil {
if tc.Empty {
return
} else {
t.Fatal("unexpected empty result")
}
} else {
if tc.Empty {
t.Fatal("expected empty result")
}
}
if !result.StartTime.Equal(tc.ExpectedStart) {
t.Errorf("start time mismatch: %v, expected %v", result.StartTime, tc.ExpectedStart)
}
if !result.EndTime.Equal(tc.ExpectedEnd) {
t.Errorf("end time mismatch: %v, expected %v", result.EndTime, tc.ExpectedEnd)
}
})
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,10 @@
// +build !enterprise
package vault
import "context"
// sendCurrentFragment is a no-op on OSS
func (a *ActivityLog) sendCurrentFragment(ctx context.Context) error {
return nil
}

View File

@ -425,7 +425,7 @@ func (c *Core) taintCredEntry(ctx context.Context, path string, updateStorage bo
// Taint the entry from the auth table
// We do this on the original since setting the taint operates
// on the entries which a shallow clone shares anyways
entry, err := c.auth.setTaint(ctx, strings.TrimPrefix(path, credentialRoutePrefix), true)
entry, err := c.auth.setTaint(ctx, strings.TrimPrefix(path, credentialRoutePrefix), true, mountStateUnmounting)
if err != nil {
return err
}

View File

@ -532,6 +532,8 @@ type Core struct {
quotaManager *quotas.Manager
clusterHeartbeatInterval time.Duration
activityLogConfig ActivityLogCoreConfig
}
// CoreConfig is used to parameterize a core
@ -631,6 +633,9 @@ type CoreConfig struct {
ClusterNetworkLayer cluster.NetworkLayer
ClusterHeartbeatInterval time.Duration
// Activity log controls
ActivityLogConfig ActivityLogCoreConfig
}
// GetServiceRegistration returns the config's ServiceRegistration, or nil if it does
@ -770,9 +775,9 @@ func NewCore(conf *CoreConfig) (*Core, error) {
postUnsealStarted: new(uint32),
raftJoinDoneCh: make(chan struct{}),
clusterHeartbeatInterval: clusterHeartbeatInterval,
activityLogConfig: conf.ActivityLogConfig,
}
c.standbyStopCh.Store(make(chan struct{}))
atomic.StoreUint32(c.sealed, 1)
c.metricSink.SetGaugeWithLabels([]string{"core", "unsealed"}, 0, nil)
@ -2061,6 +2066,9 @@ func (c *Core) preSeal() error {
if err := c.stopExpiration(); err != nil {
result = multierror.Append(result, errwrap.Wrapf("error stopping expiration: {{err}}", err))
}
if err := c.stopActivityLog(); err != nil {
result = multierror.Append(result, errwrap.Wrapf("error stopping activity log: {{err}}", err))
}
if err := c.teardownCredentials(context.Background()); err != nil {
result = multierror.Append(result, errwrap.Wrapf("error tearing down credentials: {{err}}", err))
}

View File

@ -68,8 +68,8 @@ func (c *Core) metricsLoop(stopCh chan struct{}) {
}
c.stateLock.RUnlock()
case <-identityCountTimer:
// Only emit on active node
// Only emit on active node of cluster that is not a DR secondary.
if c.PerfStandby() {
if standby, _ := c.Standby(); standby || c.IsDRSecondary() {
break
}
@ -196,10 +196,11 @@ func (c *Core) emitMetrics(stopCh chan struct{}) {
},
}
// Disable collection if configured, or if we're a performance standby.
// Disable collection if configured, or if we're a performance standby
// node or DR secondary cluster.
if c.MetricSink().GaugeInterval == time.Duration(0) {
c.logger.Info("usage gauge collection is disabled")
} else if !c.PerfStandby() {
} else if standby, _ := c.Standby(); !standby && !c.IsDRSecondary() {
for _, init := range metricsInit {
if init.DisableEnvVar != "" {
if os.Getenv(init.DisableEnvVar) != "" {

View File

@ -13,6 +13,11 @@ import (
"github.com/hashicorp/vault/vault/replication" "github.com/hashicorp/vault/vault/replication"
) )
const (
activityLogEnabledDefault = false
activityLogEnabledDefaultValue = "default-disabled"
)
type entCore struct{}
type entCoreConfig struct{}

View File

@ -127,7 +127,7 @@ func waitForRemovalOrTimeout(c *api.Client, path string, tick, to time.Duration)
}
}
func TestQuotas_RateLimitQuota_DupName(t *testing.T) {
func TestQuotas_RateLimit_DupName(t *testing.T) {
conf, opts := teststorage.ClusterSetup(coreConfig, nil, nil)
cluster := vault.NewTestCluster(t, conf, opts)
cluster.Start()

View File

@ -167,6 +167,7 @@ func NewSystemBackend(core *Core, logger log.Logger) *SystemBackend {
b.Backend.Paths = append(b.Backend.Paths, b.monitorPath())
b.Backend.Paths = append(b.Backend.Paths, b.hostInfoPath())
b.Backend.Paths = append(b.Backend.Paths, b.quotasPaths()...)
b.Backend.Paths = append(b.Backend.Paths, b.rootActivityPaths()...)
if core.rawEnabled {
b.Backend.Paths = append(b.Backend.Paths, b.rawPaths()...)
@ -4391,4 +4392,12 @@ This path responds to the following HTTP methods.
The information that gets collected includes host hardware information, and CPU,
disk, and memory utilization`,
},
"activity-query": {
"Query the historical count of clients.",
"Query the historical count of clients.",
},
"activity-config": {
"Control the collection and reporting of client counts.",
"Control the collection and reporting of client counts.",
},
}

View File

@ -0,0 +1,220 @@
package vault
import (
"context"
"net/http"
"path"
"strings"
"time"
"github.com/hashicorp/vault/helper/timeutil"
"github.com/hashicorp/vault/sdk/framework"
"github.com/hashicorp/vault/sdk/logical"
)
// activityQueryPath is available in every namespace
func (b *SystemBackend) activityQueryPath() *framework.Path {
return &framework.Path{
Pattern: "internal/counters/activity$",
Fields: map[string]*framework.FieldSchema{
"start_time": &framework.FieldSchema{
Type: framework.TypeTime,
Description: "Start of query interval",
},
"end_time": &framework.FieldSchema{
Type: framework.TypeTime,
Description: "End of query interval",
},
},
HelpSynopsis: strings.TrimSpace(sysHelp["activity-query"][0]),
HelpDescription: strings.TrimSpace(sysHelp["activity-query"][1]),
Operations: map[logical.Operation]framework.OperationHandler{
logical.ReadOperation: &framework.PathOperation{
Callback: b.handleClientMetricQuery,
Summary: "Report the client count metrics, for this namespace and all child namespaces.",
},
},
}
}
// rootActivityPaths are available only in the root namespace
func (b *SystemBackend) rootActivityPaths() []*framework.Path {
return []*framework.Path{
b.activityQueryPath(),
{
Pattern: "internal/counters/config$",
Fields: map[string]*framework.FieldSchema{
"default_report_months": &framework.FieldSchema{
Type: framework.TypeInt,
Default: 12,
Description: "Number of months to report if no start date specified.",
},
"retention_months": &framework.FieldSchema{
Type: framework.TypeInt,
Default: 24,
Description: "Number of months of client data to retain. Setting to 0 will clear all existing data.",
},
"enabled": &framework.FieldSchema{
Type: framework.TypeString,
Default: "default",
Description: "Enable or disable collection of client count: enable, disable, or default.",
},
},
HelpSynopsis: strings.TrimSpace(sysHelp["activity-config"][0]),
HelpDescription: strings.TrimSpace(sysHelp["activity-config"][1]),
Operations: map[logical.Operation]framework.OperationHandler{
logical.ReadOperation: &framework.PathOperation{
Callback: b.handleActivityConfigRead,
Summary: "Read the client count tracking configuration.",
},
logical.UpdateOperation: &framework.PathOperation{
Callback: b.handleActivityConfigUpdate,
Summary: "Enable or disable collection of client count, set retention period, or set default reporting period.",
},
},
},
}
}
func (b *SystemBackend) handleClientMetricQuery(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
a := b.Core.activityLog
if a == nil {
return logical.ErrorResponse("no activity log present"), nil
}
startTime := d.Get("start_time").(time.Time)
endTime := d.Get("end_time").(time.Time)
// If a specific endTime is used, then respect that
// otherwise we want to give the latest N months, so go back to the start
// of the previous month
//
// Also convert any user inputs to UTC to avoid
// problems later.
if endTime.IsZero() {
endTime = timeutil.EndOfMonth(time.Now().UTC().AddDate(0, -1, 0))
} else {
endTime = endTime.UTC()
}
if startTime.IsZero() {
startTime = a.DefaultStartTime(endTime)
} else {
startTime = startTime.UTC()
}
if startTime.After(endTime) {
return logical.ErrorResponse("start_time is later than end_time"), nil
}
results, err := a.handleQuery(ctx, startTime, endTime)
if err != nil {
return nil, err
}
if results == nil {
resp204, err := logical.RespondWithStatusCode(nil, req, http.StatusNoContent)
return resp204, err
}
return &logical.Response{
Data: results,
}, nil
}
func (b *SystemBackend) handleActivityConfigRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
a := b.Core.activityLog
if a == nil {
return logical.ErrorResponse("no activity log present"), nil
}
config, err := a.loadConfigOrDefault(ctx)
if err != nil {
return nil, err
}
qa, err := a.queriesAvailable(ctx)
if err != nil {
return nil, err
}
if config.Enabled == "default" {
config.Enabled = activityLogEnabledDefaultValue
}
return &logical.Response{
Data: map[string]interface{}{
"default_report_months": config.DefaultReportMonths,
"retention_months": config.RetentionMonths,
"enabled": config.Enabled,
"queries_available": qa,
},
}, nil
}
func (b *SystemBackend) handleActivityConfigUpdate(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
a := b.Core.activityLog
if a == nil {
return logical.ErrorResponse("no activity log present"), nil
}
config, err := a.loadConfigOrDefault(ctx)
if err != nil {
return nil, err
}
{
// Parse the default report months
if defaultReportMonthsRaw, ok := d.GetOk("default_report_months"); ok {
config.DefaultReportMonths = defaultReportMonthsRaw.(int)
}
if config.DefaultReportMonths <= 0 {
return logical.ErrorResponse("default_report_months must be greater than 0"), logical.ErrInvalidRequest
}
}
{
// Parse the retention months
if retentionMonthsRaw, ok := d.GetOk("retention_months"); ok {
config.RetentionMonths = retentionMonthsRaw.(int)
}
if config.RetentionMonths < 0 {
return logical.ErrorResponse("retention_months must be greater than or equal to 0"), logical.ErrInvalidRequest
}
}
{
// Parse the enabled setting
if enabledRaw, ok := d.GetOk("enabled"); ok {
config.Enabled = enabledRaw.(string)
}
switch config.Enabled {
case "default", "enable", "disable":
default:
return logical.ErrorResponse("enabled must be one of \"default\", \"enable\", \"disable\""), logical.ErrInvalidRequest
}
}
enabled := config.Enabled == "enable"
if !enabled && config.Enabled == "default" {
enabled = activityLogEnabledDefault
}
if enabled && config.RetentionMonths == 0 {
return logical.ErrorResponse("retention_months cannot be 0 while enabled"), logical.ErrInvalidRequest
}
// Store the config
entry, err := logical.StorageEntryJSON(path.Join(activitySubPath, activityConfigKey), config)
if err != nil {
return nil, err
}
if err := req.Storage.Put(ctx, entry); err != nil {
return nil, err
}
// Set the new config on the activity log
a.SetConfig(ctx, config)
return nil, nil
}
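Both handlers sit behind ordinary sys paths, so they can be exercised with the official Go API client. A sketch assuming VAULT_ADDR and VAULT_TOKEN are set in the environment; the sys/internal/counters paths follow from the Patterns above:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig()) // reads VAULT_ADDR/VAULT_TOKEN from the environment
	if err != nil {
		log.Fatal(err)
	}

	// Enable collection with a two-year retention window.
	_, err = client.Logical().Write("sys/internal/counters/config", map[string]interface{}{
		"enabled":          "enable",
		"retention_months": 24,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Query the default reporting period.
	secret, err := client.Logical().Read("sys/internal/counters/activity")
	if err != nil {
		log.Fatal(err)
	}
	if secret == nil {
		// handleClientMetricQuery responds 204 No Content when no
		// precomputed queries are available yet.
		fmt.Println("no activity data yet")
		return
	}
	fmt.Println(secret.Data)
}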

View File

@ -186,7 +186,7 @@ func (t *MountTable) shallowClone() *MountTable {
// setTaint is used to set the taint on a given entry. Accepts either the mount
// entry's path or namespace + path, i.e. <ns-path>/secret/ or <ns-path>/token/
func (t *MountTable) setTaint(ctx context.Context, path string, value bool) (*MountEntry, error) {
func (t *MountTable) setTaint(ctx context.Context, path string, tainted bool, mountState string) (*MountEntry, error) {
n := len(t.Entries)
ns, err := namespace.FromContext(ctx)
if err != nil {
@ -194,7 +194,8 @@ func (t *MountTable) setTaint(ctx context.Context, path string, value bool) (*Mo
}
for i := 0; i < n; i++ {
if entry := t.Entries[i]; entry.Path == path && entry.Namespace().ID == ns.ID {
t.Entries[i].Tainted = value
t.Entries[i].Tainted = tainted
t.Entries[i].MountState = mountState
return t.Entries[i], nil
}
}
@ -253,6 +254,8 @@ func (t *MountTable) sortEntriesByPathDepth() *MountTable {
return t
}
const mountStateUnmounting = "unmounting"
// MountEntry is used to represent a mount table entry
type MountEntry struct {
Table string `json:"table"` // The table it belongs to
@ -268,6 +271,7 @@ type MountEntry struct {
SealWrap bool `json:"seal_wrap"` // Whether to wrap CSPs
ExternalEntropyAccess bool `json:"external_entropy_access,omitempty"` // Whether to allow external entropy source access
Tainted bool `json:"tainted,omitempty"` // Set as a Write-Ahead flag for unmount/remount
MountState string `json:"mount_state,omitempty"` // The current mount state. The only non-empty mount state right now is "unmounting"
NamespaceID string `json:"namespace_id"` NamespaceID string `json:"namespace_id"`
// namespace contains the populated namespace // namespace contains the populated namespace
@@ -641,7 +645,7 @@ func (c *Core) unmountInternal(ctx context.Context, path string, updateStorage b
 	entry := c.router.MatchingMountEntry(ctx, path)

 	// Mark the entry as tainted
-	if err := c.taintMountEntry(ctx, path, updateStorage); err != nil {
+	if err := c.taintMountEntry(ctx, path, updateStorage, true); err != nil {
 		c.logger.Error("failed to taint mount entry for path being unmounted", "error", err, "path", path)
 		return err
 	}
@@ -759,13 +763,18 @@ func (c *Core) removeMountEntry(ctx context.Context, path string, updateStorage
 }

 // taintMountEntry is used to mark an entry in the mount table as tainted
-func (c *Core) taintMountEntry(ctx context.Context, path string, updateStorage bool) error {
+func (c *Core) taintMountEntry(ctx context.Context, path string, updateStorage, unmounting bool) error {
 	c.mountsLock.Lock()
 	defer c.mountsLock.Unlock()

+	mountState := ""
+	if unmounting {
+		mountState = mountStateUnmounting
+	}
+
 	// As modifying the taint of an entry affects shallow clones,
 	// we simply use the original
-	entry, err := c.mounts.setTaint(ctx, path, true)
+	entry, err := c.mounts.setTaint(ctx, path, true, mountState)
 	if err != nil {
 		return err
 	}
@@ -860,7 +869,7 @@ func (c *Core) remount(ctx context.Context, src, dst string, updateStorage bool)
 	}

 	// Mark the entry as tainted
-	if err := c.taintMountEntry(ctx, src, updateStorage); err != nil {
+	if err := c.taintMountEntry(ctx, src, updateStorage, false); err != nil {
 		return err
 	}
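To make the unmount/remount distinction concrete: unmountInternal taints with unmounting=true, which records the new state, while remount taints with unmounting=false, which leaves it empty. A self-contained sketch of that behavior follows; `MountEntry` and `setTaint` here are simplified stand-ins for the Vault internals above, not the real types:

```go
package main

import "fmt"

const mountStateUnmounting = "unmounting"

// MountEntry is a stand-in for Vault's mount table entry, reduced
// to the fields touched by the change above.
type MountEntry struct {
	Path       string
	Tainted    bool
	MountState string
}

// setTaint mirrors the new signature: callers now supply the mount
// state alongside the taint flag.
func setTaint(e *MountEntry, tainted bool, mountState string) {
	e.Tainted = tainted
	e.MountState = mountState
}

func main() {
	// Unmount path: taint the entry and mark it as unmounting.
	unmounted := &MountEntry{Path: "secret/"}
	setTaint(unmounted, true, mountStateUnmounting)
	fmt.Println(unmounted.Tainted, unmounted.MountState) // true unmounting

	// Remount path: taint only; the mount state stays empty.
	remounted := &MountEntry{Path: "kv/"}
	setTaint(remounted, true, "")
	fmt.Println(remounted.Tainted, remounted.MountState) // true ""
}
```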
@@ -988,9 +997,38 @@ func (c *Core) loadMounts(ctx context.Context) error {
 		}
 	}

-	// Note that this is only designed to work with singletons, as it checks by
-	// type only.
+	// If this node is a performance standby we do not want to attempt to
+	// upgrade the mount table, this will be the active node's responsibility.
+	if !c.perfStandby {
+		err := c.runMountUpdates(ctx, needPersist)
+		if err != nil {
+			c.logger.Error("failed to run mount table upgrades", "error", err)
+			return err
+		}
+	}
+
+	for _, entry := range c.mounts.Entries {
+		if entry.NamespaceID == "" {
+			entry.NamespaceID = namespace.RootNamespaceID
+		}
+		ns, err := NamespaceByID(ctx, entry.NamespaceID, c)
+		if err != nil {
+			return err
+		}
+		if ns == nil {
+			return namespace.ErrNoNamespace
+		}
+		entry.namespace = ns
+
+		// Sync values to the cache
+		entry.SyncCache()
+	}
+
+	return nil
+}
+
+// Note that this is only designed to work with singletons, as it checks by
+// type only.
+func (c *Core) runMountUpdates(ctx context.Context, needPersist bool) error {
 	// Upgrade to typed mount table
 	if c.mounts.Type == "" {
 		c.mounts.Type = mountTableType
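The perf-standby gate above follows a common split-role pattern: persist-heavy maintenance runs only on a node that is allowed to write, and standbys defer to the active node. A generic, self-contained sketch of that pattern (names are illustrative, not Vault's):

```go
package main

import "fmt"

// runIfWriter runs a persist-heavy upgrade only on nodes that may
// write; a performance standby returns immediately and leaves the
// work to the active node.
func runIfWriter(perfStandby bool, upgrade func() error) error {
	if perfStandby {
		return nil
	}
	return upgrade()
}

func main() {
	upgrade := func() error {
		fmt.Println("running mount table upgrades")
		return nil
	}
	_ = runIfWriter(false, upgrade) // active node: upgrade runs
	_ = runIfWriter(true, upgrade)  // performance standby: no-op
}
```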
@@ -1022,6 +1060,10 @@ func (c *Core) loadMounts(ctx context.Context) error {
 	// Upgrade to table-scoped entries
 	for _, entry := range c.mounts.Entries {
+		if !c.PR1103disabled && entry.Type == mountTypeNSCubbyhole && !entry.Local && !c.ReplicationState().HasState(consts.ReplicationPerformanceSecondary|consts.ReplicationDRSecondary) {
+			entry.Local = true
+			needPersist = true
+		}
 		if entry.Type == cubbyholeMountType && !entry.Local {
 			entry.Local = true
 			needPersist = true
@@ -1051,17 +1093,6 @@ func (c *Core) loadMounts(ctx context.Context) error {
 			entry.NamespaceID = namespace.RootNamespaceID
 			needPersist = true
 		}
-		ns, err := NamespaceByID(ctx, entry.NamespaceID, c)
-		if err != nil {
-			return err
-		}
-		if ns == nil {
-			return namespace.ErrNoNamespace
-		}
-		entry.namespace = ns
-
-		// Sync values to the cache
-		entry.SyncCache()
 	}

 	// Done if we have restored the mount table and we don't need

View File

@@ -10,6 +10,10 @@ var (
 	NamespaceByID func(context.Context, string, *Core) (*namespace.Namespace, error) = namespaceByID
 )

+const (
+	mountTypeNSCubbyhole = "ns_cubbyhole"
+)
+
 func namespaceByID(ctx context.Context, nsID string, c *Core) (*namespace.Namespace, error) {
 	if nsID == namespace.RootNamespaceID {
 		return namespace.RootNamespace, nil

View File

@@ -1468,12 +1468,18 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te
 		coreConfig.CounterSyncInterval = base.CounterSyncInterval
 		coreConfig.RecoveryMode = base.RecoveryMode

+		coreConfig.ActivityLogConfig = base.ActivityLogConfig
+
 		testApplyEntBaseConfig(coreConfig, base)
 	}
 	if coreConfig.ClusterName == "" {
 		coreConfig.ClusterName = t.Name()
 	}

+	if coreConfig.ClusterName == "" {
+		coreConfig.ClusterName = t.Name()
+	}
+
 	if coreConfig.ClusterHeartbeatInterval == 0 {
 		// Set this lower so that state populates quickly to standby nodes
 		coreConfig.ClusterHeartbeatInterval = 2 * time.Second
@@ -1768,6 +1774,10 @@ func (testCluster *TestCluster) newCore(t testing.T, idx int, coreConfig *CoreCo
 		localConfig.MetricSink, localConfig.MetricsHelper = opts.CoreMetricSinkProvider(localConfig.ClusterName)
 	}

+	if opts != nil && opts.CoreMetricSinkProvider != nil {
+		localConfig.MetricSink, localConfig.MetricsHelper = opts.CoreMetricSinkProvider(localConfig.ClusterName)
+	}
+
 	c, err := NewCore(&localConfig)
 	if err != nil {
 		t.Fatalf("err: %v", err)

View File

@@ -485,7 +485,8 @@ type TokenStore struct {
 	parentBarrierView *BarrierView
 	rolesBarrierView *BarrierView

 	expiration *ExpirationManager
+	activityLog *ActivityLog

 	cubbyholeBackend *CubbyholeBackend
@@ -657,6 +658,12 @@ func (ts *TokenStore) SetExpirationManager(exp *ExpirationManager) {
 	ts.expiration = exp
 }

+// SetActivityLog injects the activity log to which all new
+// token creation events are reported.
+func (ts *TokenStore) SetActivityLog(a *ActivityLog) {
+	ts.activityLog = a
+}
+
 // SaltID is used to apply a salt and hash to an ID to make sure its not reversible
 func (ts *TokenStore) SaltID(ctx context.Context, id string) (string, error) {
 	ns, err := namespace.FromContext(ctx)
@@ -862,6 +869,11 @@ func (ts *TokenStore) create(ctx context.Context, entry *logical.TokenEntry) err
 		return err
 	}

+	// Update the activity log
+	if ts.activityLog != nil {
+		ts.activityLog.HandleTokenCreation(entry)
+	}
+
 	return ts.storeCommon(ctx, entry, true)

 case logical.TokenTypeBatch:
@@ -905,6 +917,11 @@ func (ts *TokenStore) create(ctx context.Context, entry *logical.TokenEntry) err
 			entry.ID = fmt.Sprintf("%s.%s", entry.ID, tokenNS.ID)
 		}

+		// Update the activity log
+		if ts.activityLog != nil {
+			ts.activityLog.HandleTokenCreation(entry)
+		}
+
 		return nil

 	default:
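The setter plus the nil guards in create() form a simple late-injection pattern: the dependency is wired after construction, so every call site must tolerate its absence. A self-contained sketch of that pattern (stand-in types, not the real TokenStore):

```go
package main

import "fmt"

// ActivityLog is a stand-in for the real activity log.
type ActivityLog struct{}

func (a *ActivityLog) HandleTokenCreation(token string) {
	fmt.Println("recorded token creation:", token)
}

// TokenStore is a stand-in holding only the injected dependency.
type TokenStore struct {
	activityLog *ActivityLog
}

// SetActivityLog mirrors the new setter: the dependency arrives
// after construction, so it may be nil until wiring completes.
func (ts *TokenStore) SetActivityLog(a *ActivityLog) {
	ts.activityLog = a
}

func (ts *TokenStore) create(token string) {
	// Guard against the window before the activity log is injected,
	// as the create() hunks above do.
	if ts.activityLog != nil {
		ts.activityLog.HandleTokenCreation(token)
	}
}

func main() {
	ts := &TokenStore{}
	ts.create("t1") // before wiring: nothing reported

	ts.SetActivityLog(&ActivityLog{})
	ts.create("t2") // after wiring: reported
}
```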

View File

@@ -69,5 +69,6 @@ func GetRegion(configuredRegion string) (string, error) {
 	if err != nil {
 		return "", errwrap.Wrapf("unable to retrieve region from instance metadata: {{err}}", err)
 	}
+
 	return region, nil
 }
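For reference, a short usage sketch of this helper; the import path is an assumption, since the diff shows only the function's tail:

```go
package main

import (
	"fmt"
	"log"

	// Import path is an assumption; the diff above shows only GetRegion's body.
	"github.com/hashicorp/vault/helper/awsutil"
)

func main() {
	// An empty configuredRegion lets the helper fall back to its other
	// sources; the visible hunk shows the instance-metadata fallback.
	region, err := awsutil.GetRegion("")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("resolved region:", region)
}
```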

View File

@@ -90,6 +90,12 @@ This endpoint lists all existing roles in the secrets engine.
 | :----- | :---------------- |
 | `LIST` | `/transform/role` |

+### Parameters
+
+- `filter` `(string: "*")`
+  If provided, only returns role names that match the given glob.
+
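A hedged Go sketch of calling this endpoint with the new `filter` parameter via the client's raw request API; passing the filter as a query parameter is an assumption, since the docs above only name the parameter:

```go
package main

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// LIST /transform/role with a glob filter; the query-parameter
	// transport is an assumption based on the docs above.
	r := client.NewRequest("LIST", "/v1/transform/role")
	r.Params.Set("filter", "payments-*")

	resp, err := client.RawRequest(r)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	secret, err := vault.ParseSecret(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(secret.Data["keys"])
}
```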
 ### Sample Request

 ```shell-session