diff --git a/CHANGELOG.md b/CHANGELOG.md
index 036f8c9436..97a44ced44 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,4 +1,24 @@
-## 0.7.0 (Unreleased)
+## 0.7.1 (Unreleased)
+
+IMPROVEMENTS:
+
+ * storage/s3: Support `max_parallel` option to limit concurrent outstanding
+ requests [GH-2466]
+
+BUG FIXES:
+
+ * storage/etcd3: Ensure locks are released if client is improperly shut down
+ [GH-2526]
+
+## 0.7.0 (March 21st, 2017)
+
+SECURITY:
+
+ * Common name not being validated when `exclude_cn_from_sans` option used in
+ `pki` backend: When using a role in the `pki` backend that specified the
+ `exclude_cn_from_sans` option, the common name would not then be properly
+   validated against the role's constraints. This has been fixed. We recommend
+   that any users of this feature upgrade to 0.7 as soon as feasible.
DEPRECATIONS/CHANGES:
@@ -56,6 +76,10 @@ FEATURES:
IMPROVEMENTS:
+ * api/request: Passing username and password information in API request
+ [GH-2469]
+ * audit: Logging the token's use count with authentication response and
+ logging the remaining uses of the client token with request [GH-2437]
* auth/approle: Support for restricting the number of uses on the tokens
issued [GH-2435]
* auth/aws-ec2: AWS EC2 auth backend now supports constraints for VPC ID,
@@ -66,16 +90,23 @@ IMPROVEMENTS:
* audit: Support adding a configurable prefix (such as `@cee`) before each
line [GH-2359]
* core: Canonicalize list operations to use a trailing slash [GH-2390]
+ * core: Add option to disable caching on a per-mount level [GH-2455]
+ * core: Add ability to require valid client certs in listener config [GH-2457]
* physical/dynamodb: Implement a session timeout to avoid having to use
recovery mode in the case of an unclean shutdown, which makes HA much safer
[GH-2141]
* secret/pki: O (Organization) values can now be set to role-defined values
for issued/signed certificates [GH-2369]
- * secret/pki: Certificates issued/signed from PKI backend does not generate
+ * secret/pki: Certificates issued/signed from PKI backend do not generate
leases by default [GH-2403]
* secret/pki: When using DER format, still return the private key type
[GH-2405]
+ * secret/pki: Add an intermediate to the CA chain even if it lacks an
+ authority key ID [GH-2465]
+ * secret/pki: Add role option to use CSR SANs [GH-2489]
* secret/ssh: SSH backend as CA to sign user and host certificates [GH-2208]
+ * secret/ssh: Support reading of SSH CA public key from `config/ca` endpoint
+ and also return it when CA key pair is generated [GH-2483]
BUG FIXES:
diff --git a/Makefile b/Makefile
index 732ba93a61..52ab43c42f 100644
--- a/Makefile
+++ b/Makefile
@@ -22,7 +22,7 @@ dev-dynamic: generate
# test runs the unit tests and vets the code
test: generate
- CGO_ENABLED=0 VAULT_TOKEN= VAULT_ACC= go test -tags='$(BUILD_TAGS)' $(TEST) $(TESTARGS) -timeout=10m -parallel=4
+ CGO_ENABLED=0 VAULT_TOKEN= VAULT_ACC= go test -tags='$(BUILD_TAGS)' $(TEST) $(TESTARGS) -timeout=20m -parallel=4
testcompile: generate
@for pkg in $(TEST) ; do \
diff --git a/README.md b/README.md
index bf40cb75f2..61b2bb429e 100644
--- a/README.md
+++ b/README.md
@@ -9,7 +9,7 @@ Vault [](https://travi
- Announcement list: [Google Groups](https://groups.google.com/group/hashicorp-announce)
- Discussion list: [Google Groups](https://groups.google.com/group/vault-tool)
-
+
Vault is a tool for securely accessing secrets. A secret is anything that you want to tightly control access to, such as API keys, passwords, certificates, and more. Vault provides a unified interface to any secret, while providing tight access control and recording a detailed audit log.
diff --git a/api/client.go b/api/client.go
index f23e1009d3..5f8a6f61db 100644
--- a/api/client.go
+++ b/api/client.go
@@ -333,6 +333,7 @@ func (c *Client) NewRequest(method, path string) *Request {
req := &Request{
Method: method,
URL: &url.URL{
+ User: c.addr.User,
Scheme: c.addr.Scheme,
Host: c.addr.Host,
Path: path,
diff --git a/api/request.go b/api/request.go
index 8f22dd5725..685e2d7e44 100644
--- a/api/request.go
+++ b/api/request.go
@@ -55,6 +55,7 @@ func (r *Request) ToHTTP() (*http.Request, error) {
return nil, err
}
+ req.URL.User = r.URL.User
req.URL.Scheme = r.URL.Scheme
req.URL.Host = r.URL.Host
req.Host = r.URL.Host
diff --git a/api/sys_mounts.go b/api/sys_mounts.go
index 768e09fd61..907fddb704 100644
--- a/api/sys_mounts.go
+++ b/api/sys_mounts.go
@@ -129,6 +129,7 @@ type MountInput struct {
type MountConfigInput struct {
DefaultLeaseTTL string `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
MaxLeaseTTL string `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
+ ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"`
}
type MountOutput struct {
@@ -139,6 +140,7 @@ type MountOutput struct {
}
type MountConfigOutput struct {
- DefaultLeaseTTL int `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
- MaxLeaseTTL int `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
+ DefaultLeaseTTL int `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
+ MaxLeaseTTL int `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
+ ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"`
}
diff --git a/audit/format.go b/audit/format.go
index 86b00f8a25..919da125e4 100644
--- a/audit/format.go
+++ b/audit/format.go
@@ -102,9 +102,10 @@ func (f *AuditFormatter) FormatRequest(
Error: errString,
Auth: AuditAuth{
- DisplayName: auth.DisplayName,
- Policies: auth.Policies,
- Metadata: auth.Metadata,
+ DisplayName: auth.DisplayName,
+ Policies: auth.Policies,
+ Metadata: auth.Metadata,
+ RemainingUses: req.ClientTokenRemainingUses,
},
Request: AuditRequest{
@@ -255,6 +256,7 @@ func (f *AuditFormatter) FormatResponse(
DisplayName: resp.Auth.DisplayName,
Policies: resp.Auth.Policies,
Metadata: resp.Auth.Metadata,
+ NumUses: resp.Auth.NumUses,
}
}
@@ -362,11 +364,13 @@ type AuditResponse struct {
}
type AuditAuth struct {
- ClientToken string `json:"client_token"`
- Accessor string `json:"accessor"`
- DisplayName string `json:"display_name"`
- Policies []string `json:"policies"`
- Metadata map[string]string `json:"metadata"`
+ ClientToken string `json:"client_token"`
+ Accessor string `json:"accessor"`
+ DisplayName string `json:"display_name"`
+ Policies []string `json:"policies"`
+ Metadata map[string]string `json:"metadata"`
+ NumUses int `json:"num_uses,omitempty"`
+ RemainingUses int `json:"remaining_uses,omitempty"`
}
type AuditSecret struct {
diff --git a/builtin/audit/socket/backend.go b/builtin/audit/socket/backend.go
index e987d33a94..91e701ed03 100644
--- a/builtin/audit/socket/backend.go
+++ b/builtin/audit/socket/backend.go
@@ -10,7 +10,7 @@ import (
multierror "github.com/hashicorp/go-multierror"
"github.com/hashicorp/vault/audit"
- "github.com/hashicorp/vault/helper/duration"
+ "github.com/hashicorp/vault/helper/parseutil"
"github.com/hashicorp/vault/logical"
)
@@ -33,7 +33,7 @@ func Factory(conf *audit.BackendConfig) (audit.Backend, error) {
if !ok {
writeDeadline = "2s"
}
- writeDuration, err := duration.ParseDurationSecond(writeDeadline)
+ writeDuration, err := parseutil.ParseDurationSecond(writeDeadline)
if err != nil {
return nil, err
}
diff --git a/builtin/credential/approle/backend.go b/builtin/credential/approle/backend.go
index d40b75ebff..cd5d97b69d 100644
--- a/builtin/credential/approle/backend.go
+++ b/builtin/credential/approle/backend.go
@@ -1,7 +1,6 @@
package approle
import (
- "fmt"
"sync"
"github.com/hashicorp/vault/helper/locksutil"
@@ -23,28 +22,26 @@ type backend struct {
// Guard to clean-up the expired SecretID entries
tidySecretIDCASGuard uint32
- // Map of locks to make changes to role entries. These will be
- // initialized to a predefined number of locks when the backend is
- // created, and will be indexed based on salted role names.
- roleLocksMap map[string]*sync.RWMutex
-
- // Map of locks to make changes to the storage entries of RoleIDs
- // generated. These will be initialized to a predefined number of locks
- // when the backend is created, and will be indexed based on the salted
- // RoleIDs.
- roleIDLocksMap map[string]*sync.RWMutex
-
- // Map of locks to make changes to the storage entries of SecretIDs
- // generated. These will be initialized to a predefined number of locks
- // when the backend is created, and will be indexed based on the HMAC-ed
- // SecretIDs.
- secretIDLocksMap map[string]*sync.RWMutex
-
- // Map of locks to make changes to the storage entries of
- // SecretIDAccessors generated. These will be initialized to a
+ // Locks to make changes to role entries. These will be initialized to a
// predefined number of locks when the backend is created, and will be
- // indexed based on the SecretIDAccessors itself.
- secretIDAccessorLocksMap map[string]*sync.RWMutex
+ // indexed based on salted role names.
+ roleLocks []*locksutil.LockEntry
+
+ // Locks to make changes to the storage entries of RoleIDs generated. These
+ // will be initialized to a predefined number of locks when the backend is
+ // created, and will be indexed based on the salted RoleIDs.
+ roleIDLocks []*locksutil.LockEntry
+
+ // Locks to make changes to the storage entries of SecretIDs generated.
+ // These will be initialized to a predefined number of locks when the
+ // backend is created, and will be indexed based on the HMAC-ed SecretIDs.
+ secretIDLocks []*locksutil.LockEntry
+
+ // Locks to make changes to the storage entries of SecretIDAccessors
+ // generated. These will be initialized to a predefined number of locks
+ // when the backend is created, and will be indexed based on the
+ // SecretIDAccessors itself.
+ secretIDAccessorLocks []*locksutil.LockEntry
// secretIDListingLock is a dedicated lock for listing SecretIDAccessors
// for all the SecretIDs issued against an approle
@@ -64,49 +61,19 @@ func Backend(conf *logical.BackendConfig) (*backend, error) {
b := &backend{
view: conf.StorageView,
- // Create the map of locks to modify the registered roles
- roleLocksMap: make(map[string]*sync.RWMutex, 257),
+ // Create locks to modify the registered roles
+ roleLocks: locksutil.CreateLocks(),
- // Create the map of locks to modify the generated RoleIDs
- roleIDLocksMap: make(map[string]*sync.RWMutex, 257),
+ // Create locks to modify the generated RoleIDs
+ roleIDLocks: locksutil.CreateLocks(),
- // Create the map of locks to modify the generated SecretIDs
- secretIDLocksMap: make(map[string]*sync.RWMutex, 257),
+ // Create locks to modify the generated SecretIDs
+ secretIDLocks: locksutil.CreateLocks(),
- // Create the map of locks to modify the generated SecretIDAccessors
- secretIDAccessorLocksMap: make(map[string]*sync.RWMutex, 257),
+ // Create locks to modify the generated SecretIDAccessors
+ secretIDAccessorLocks: locksutil.CreateLocks(),
}
- var err error
-
- // Create 256 locks each for managing RoleID and SecretIDs. This will avoid
- // a superfluous number of locks directly proportional to the number of RoleID
- // and SecretIDs. These locks can be accessed by indexing based on the first two
- // characters of a randomly generated UUID.
- if err = locksutil.CreateLocks(b.roleLocksMap, 256); err != nil {
- return nil, fmt.Errorf("failed to create role locks: %v", err)
- }
-
- if err = locksutil.CreateLocks(b.roleIDLocksMap, 256); err != nil {
- return nil, fmt.Errorf("failed to create role ID locks: %v", err)
- }
-
- if err = locksutil.CreateLocks(b.secretIDLocksMap, 256); err != nil {
- return nil, fmt.Errorf("failed to create secret ID locks: %v", err)
- }
-
- if err = locksutil.CreateLocks(b.secretIDAccessorLocksMap, 256); err != nil {
- return nil, fmt.Errorf("failed to create secret ID accessor locks: %v", err)
- }
-
- // Have an extra lock to use in case the indexing does not result in a lock.
- // This happens if the indexing value is not beginning with hex characters.
- // These locks can be used for listing purposes as well.
- b.roleLocksMap["custom"] = &sync.RWMutex{}
- b.roleIDLocksMap["custom"] = &sync.RWMutex{}
- b.secretIDLocksMap["custom"] = &sync.RWMutex{}
- b.secretIDAccessorLocksMap["custom"] = &sync.RWMutex{}
-
// Attach the paths and secrets that are to be handled by the backend
b.Backend = &framework.Backend{
// Register a periodic function that deletes the expired SecretID entries
diff --git a/builtin/credential/approle/path_role.go b/builtin/credential/approle/path_role.go
index 20b9a13d43..e615234d94 100644
--- a/builtin/credential/approle/path_role.go
+++ b/builtin/credential/approle/path_role.go
@@ -3,12 +3,12 @@ package approle
import (
"fmt"
"strings"
- "sync"
"time"
"github.com/fatih/structs"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/helper/cidrutil"
+ "github.com/hashicorp/vault/helper/locksutil"
"github.com/hashicorp/vault/helper/policyutil"
"github.com/hashicorp/vault/helper/strutil"
"github.com/hashicorp/vault/logical"
@@ -518,7 +518,6 @@ func (b *backend) pathRoleExistenceCheck(req *logical.Request, data *framework.F
// pathRoleList is used to list all the Roles registered with the backend.
func (b *backend) pathRoleList(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- // This will return the "custom" lock
lock := b.roleLock("")
lock.RLock()
@@ -1926,39 +1925,12 @@ func (b *backend) handleRoleSecretIDCommon(req *logical.Request, data *framework
}, nil
}
-// roleIDLock is used to get a lock from the pre-initialized map
-// of locks. Map is indexed based on the first 2 characters of the
-// RoleID, which is a random UUID. If the input is not hex encoded
-// or if it is empty a "custom" lock will be returned.
-func (b *backend) roleIDLock(roleID string) *sync.RWMutex {
- var lock *sync.RWMutex
- var ok bool
- if len(roleID) >= 2 {
- lock, ok = b.roleIDLocksMap[roleID[0:2]]
- }
- if !ok || lock == nil {
- lock = b.roleIDLocksMap["custom"]
- }
- return lock
+func (b *backend) roleIDLock(roleID string) *locksutil.LockEntry {
+ return locksutil.LockForKey(b.roleIDLocks, roleID)
}
-// roleLock is used to get a lock from the pre-initialized map of locks. Map is
-// indexed based on the first 2 characters of the salted role name, which is a
-// random UUID. If the input is empty, a "custom" lock will be returned.
-func (b *backend) roleLock(roleName string) *sync.RWMutex {
- var lock *sync.RWMutex
- var ok bool
-
- // Salting is used to induce randomness so that roles starting with
- // similar characters will likely end up having different locks
- index := b.salt.SaltID(roleName)
- if len(index) >= 2 {
- lock, ok = b.roleLocksMap[index[0:2]]
- }
- if !ok || lock == nil {
- lock = b.roleLocksMap["custom"]
- }
- return lock
+func (b *backend) roleLock(roleName string) *locksutil.LockEntry {
+ return locksutil.LockForKey(b.roleLocks, roleName)
}
// setRoleIDEntry creates a storage entry that maps RoleID to Role
diff --git a/builtin/credential/approle/validation.go b/builtin/credential/approle/validation.go
index b4d3af154f..87528fb3da 100644
--- a/builtin/credential/approle/validation.go
+++ b/builtin/credential/approle/validation.go
@@ -6,11 +6,11 @@ import (
"encoding/hex"
"fmt"
"strings"
- "sync"
"time"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/helper/cidrutil"
+ "github.com/hashicorp/vault/helper/locksutil"
"github.com/hashicorp/vault/helper/strutil"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
@@ -299,39 +299,12 @@ func createHMAC(key, value string) (string, error) {
return hex.EncodeToString(hm.Sum(nil)), nil
}
-// secretIDLock is used to get a lock from the pre-initialized map of locks.
-// Map is indexed based on the first 2 characters of the secretIDHMAC. If the
-// input is not hex encoded or if empty, a "custom" lock will be returned.
-func (b *backend) secretIDLock(secretIDHMAC string) *sync.RWMutex {
- var lock *sync.RWMutex
- var ok bool
- if len(secretIDHMAC) >= 2 {
- lock, ok = b.secretIDLocksMap[secretIDHMAC[0:2]]
- }
- if !ok || lock == nil {
- // Fall back for custom lock to make sure that this method
- // never returns nil
- lock = b.secretIDLocksMap["custom"]
- }
- return lock
+func (b *backend) secretIDLock(secretIDHMAC string) *locksutil.LockEntry {
+ return locksutil.LockForKey(b.secretIDLocks, secretIDHMAC)
}
-// secretIDAccessorLock is used to get a lock from the pre-initialized map
-// of locks. Map is indexed based on the first 2 characters of the
-// secretIDAccessor. If the input is not hex encoded or if empty, a "custom"
-// lock will be returned.
-func (b *backend) secretIDAccessorLock(secretIDAccessor string) *sync.RWMutex {
- var lock *sync.RWMutex
- var ok bool
- if len(secretIDAccessor) >= 2 {
- lock, ok = b.secretIDAccessorLocksMap[secretIDAccessor[0:2]]
- }
- if !ok || lock == nil {
- // Fall back for custom lock to make sure that this method
- // never returns nil
- lock = b.secretIDAccessorLocksMap["custom"]
- }
- return lock
+func (b *backend) secretIDAccessorLock(secretIDAccessor string) *locksutil.LockEntry {
+ return locksutil.LockForKey(b.secretIDAccessorLocks, secretIDAccessor)
}
// nonLockedSecretIDStorageEntry fetches the secret ID properties from physical
diff --git a/builtin/logical/pki/cert_util.go b/builtin/logical/pki/cert_util.go
index 212cc57555..36d284c48c 100644
--- a/builtin/logical/pki/cert_util.go
+++ b/builtin/logical/pki/cert_util.go
@@ -66,8 +66,10 @@ func (b *caInfoBundle) GetCAChain() []*certutil.CertBlock {
chain := []*certutil.CertBlock{}
// Include issuing CA in Chain, not including Root Authority
- if len(b.Certificate.AuthorityKeyId) > 0 &&
- !bytes.Equal(b.Certificate.AuthorityKeyId, b.Certificate.SubjectKeyId) {
+ if (len(b.Certificate.AuthorityKeyId) > 0 &&
+ !bytes.Equal(b.Certificate.AuthorityKeyId, b.Certificate.SubjectKeyId)) ||
+ (len(b.Certificate.AuthorityKeyId) == 0 &&
+ !bytes.Equal(b.Certificate.RawIssuer, b.Certificate.RawSubject)) {
chain = append(chain, &certutil.CertBlock{
Certificate: b.Certificate,
@@ -215,7 +217,7 @@ func fetchCertBySerial(req *logical.Request, prefix, serial string) (*logical.St
// Given a set of requested names for a certificate, verifies that all of them
// match the various toggles set in the role for controlling issuance.
// If one does not pass, it is returned in the string argument.
-func validateNames(req *logical.Request, names []string, role *roleEntry) (string, error) {
+func validateNames(req *logical.Request, names []string, role *roleEntry) string {
for _, name := range names {
sanitizedName := name
emailDomain := name
@@ -231,7 +233,7 @@ func validateNames(req *logical.Request, names []string, role *roleEntry) (strin
if strings.Contains(name, "@") {
splitEmail := strings.Split(name, "@")
if len(splitEmail) != 2 {
- return name, nil
+ return name
}
sanitizedName = splitEmail[1]
emailDomain = splitEmail[1]
@@ -248,7 +250,7 @@ func validateNames(req *logical.Request, names []string, role *roleEntry) (strin
// Email addresses using wildcard domain names do not make sense
if isEmail && isWildcard {
- return name, nil
+ return name
}
// AllowAnyName is checked after this because EnforceHostnames still
@@ -257,7 +259,7 @@ func validateNames(req *logical.Request, names []string, role *roleEntry) (strin
// wildcard prefix.
if role.EnforceHostnames {
if !hostnameRegex.MatchString(sanitizedName) {
- return name, nil
+ return name
}
}
@@ -366,10 +368,10 @@ func validateNames(req *logical.Request, names []string, role *roleEntry) (strin
}
//panic(fmt.Sprintf("\nName is %s\nRole is\n%#v\n", name, role))
- return name, nil
+ return name
}
- return "", nil
+ return ""
}
func generateCert(b *backend,
@@ -558,13 +560,13 @@ func generateCreationBundle(b *backend,
var err error
var ok bool
- // Get the common name
+ // Read in names -- CN, DNS and email addresses
var cn string
+ dnsNames := []string{}
+ emailAddresses := []string{}
{
- if csr != nil {
- if role.UseCSRCommonName {
- cn = csr.Subject.CommonName
- }
+ if csr != nil && role.UseCSRCommonName {
+ cn = csr.Subject.CommonName
}
if cn == "" {
cn = data.Get("common_name").(string)
@@ -572,6 +574,91 @@ func generateCreationBundle(b *backend,
return nil, errutil.UserError{Err: `the common_name field is required, or must be provided in a CSR with "use_csr_common_name" set to true`}
}
}
+
+ if csr != nil && role.UseCSRSANs {
+ dnsNames = csr.DNSNames
+ emailAddresses = csr.EmailAddresses
+ }
+
+ if !data.Get("exclude_cn_from_sans").(bool) {
+ if strings.Contains(cn, "@") {
+ // Note: emails are not disallowed if the role's email protection
+ // flag is false, because they may well be included for
+ // informational purposes; it is up to the verifying party to
+ // ensure that email addresses in a subject alternate name can be
+ // used for the purpose for which they are presented
+ emailAddresses = append(emailAddresses, cn)
+ } else {
+ dnsNames = append(dnsNames, cn)
+ }
+ }
+
+ if csr == nil || !role.UseCSRSANs {
+ cnAltRaw, ok := data.GetOk("alt_names")
+ if ok {
+ cnAlt := strutil.ParseDedupAndSortStrings(cnAltRaw.(string), ",")
+ for _, v := range cnAlt {
+ if strings.Contains(v, "@") {
+ emailAddresses = append(emailAddresses, v)
+ } else {
+ dnsNames = append(dnsNames, v)
+ }
+ }
+ }
+ }
+
+ // Check the CN. This ensures that the CN is checked even if it's
+ // excluded from SANs.
+ badName := validateNames(req, []string{cn}, role)
+ if len(badName) != 0 {
+ return nil, errutil.UserError{Err: fmt.Sprintf(
+ "common name %s not allowed by this role", badName)}
+ }
+
+ // Check for bad email and/or DNS names
+ badName = validateNames(req, dnsNames, role)
+ if len(badName) != 0 {
+ return nil, errutil.UserError{Err: fmt.Sprintf(
+ "subject alternate name %s not allowed by this role", badName)}
+ }
+
+ badName = validateNames(req, emailAddresses, role)
+ if len(badName) != 0 {
+ return nil, errutil.UserError{Err: fmt.Sprintf(
+ "email address %s not allowed by this role", badName)}
+ }
+ }
+
+ // Get and verify any IP SANs
+ ipAddresses := []net.IP{}
+ var ipAltInt interface{}
+ {
+ if csr != nil && role.UseCSRSANs {
+ if !role.AllowIPSANs {
+ return nil, errutil.UserError{Err: fmt.Sprintf(
+ "IP Subject Alternative Names are not allowed in this role, but was provided some via CSR")}
+ }
+ ipAddresses = csr.IPAddresses
+ } else {
+ ipAltInt, ok = data.GetOk("ip_sans")
+ if ok {
+ ipAlt := ipAltInt.(string)
+ if len(ipAlt) != 0 {
+ if !role.AllowIPSANs {
+ return nil, errutil.UserError{Err: fmt.Sprintf(
+ "IP Subject Alternative Names are not allowed in this role, but was provided %s", ipAlt)}
+ }
+ for _, v := range strings.Split(ipAlt, ",") {
+ parsedIP := net.ParseIP(v)
+ if parsedIP == nil {
+ return nil, errutil.UserError{Err: fmt.Sprintf(
+ "the value '%s' is not a valid IP address", v)}
+ }
+ ipAddresses = append(ipAddresses, parsedIP)
+ }
+ }
+ }
+ }
}
// Set OU (organizationalUnit) values if specified in the role
@@ -590,80 +677,6 @@ func generateCreationBundle(b *backend,
}
}
- // Read in alternate names -- DNS and email addresses
- dnsNames := []string{}
- emailAddresses := []string{}
- {
- if !data.Get("exclude_cn_from_sans").(bool) {
- if strings.Contains(cn, "@") {
- // Note: emails are not disallowed if the role's email protection
- // flag is false, because they may well be included for
- // informational purposes; it is up to the verifying party to
- // ensure that email addresses in a subject alternate name can be
- // used for the purpose for which they are presented
- emailAddresses = append(emailAddresses, cn)
- } else {
- dnsNames = append(dnsNames, cn)
- }
- }
- cnAltInt, ok := data.GetOk("alt_names")
- if ok {
- cnAlt := cnAltInt.(string)
- if len(cnAlt) != 0 {
- for _, v := range strings.Split(cnAlt, ",") {
- if strings.Contains(v, "@") {
- emailAddresses = append(emailAddresses, v)
- } else {
- dnsNames = append(dnsNames, v)
- }
- }
- }
- }
-
- // Check for bad email and/or DNS names
- badName, err := validateNames(req, dnsNames, role)
- if len(badName) != 0 {
- return nil, errutil.UserError{Err: fmt.Sprintf(
- "name %s not allowed by this role", badName)}
- } else if err != nil {
- return nil, errutil.InternalError{Err: fmt.Sprintf(
- "error validating name %s: %s", badName, err)}
- }
-
- badName, err = validateNames(req, emailAddresses, role)
- if len(badName) != 0 {
- return nil, errutil.UserError{Err: fmt.Sprintf(
- "email %s not allowed by this role", badName)}
- } else if err != nil {
- return nil, errutil.InternalError{Err: fmt.Sprintf(
- "error validating name %s: %s", badName, err)}
- }
- }
-
- // Get and verify any IP SANs
- ipAddresses := []net.IP{}
- var ipAltInt interface{}
- {
- ipAltInt, ok = data.GetOk("ip_sans")
- if ok {
- ipAlt := ipAltInt.(string)
- if len(ipAlt) != 0 {
- if !role.AllowIPSANs {
- return nil, errutil.UserError{Err: fmt.Sprintf(
- "IP Subject Alternative Names are not allowed in this role, but was provided %s", ipAlt)}
- }
- for _, v := range strings.Split(ipAlt, ",") {
- parsedIP := net.ParseIP(v)
- if parsedIP == nil {
- return nil, errutil.UserError{Err: fmt.Sprintf(
- "the value '%s' is not a valid IP address", v)}
- }
- ipAddresses = append(ipAddresses, parsedIP)
- }
- }
- }
- }
-
// Get the TTL and very it against the max allowed
var ttlField string
var ttl time.Duration
diff --git a/builtin/logical/pki/path_issue_sign.go b/builtin/logical/pki/path_issue_sign.go
index fc96997f25..803ef7f385 100644
--- a/builtin/logical/pki/path_issue_sign.go
+++ b/builtin/logical/pki/path_issue_sign.go
@@ -125,6 +125,7 @@ func (b *backend) pathSignVerbatim(
EnforceHostnames: false,
KeyType: "any",
UseCSRCommonName: true,
+ UseCSRSANs: true,
}
return b.pathIssueSignCert(req, data, role, true, true)
diff --git a/builtin/logical/pki/path_roles.go b/builtin/logical/pki/path_roles.go
index 1b5fde60db..0eadc6ff29 100644
--- a/builtin/logical/pki/path_roles.go
+++ b/builtin/logical/pki/path_roles.go
@@ -7,7 +7,7 @@ import (
"time"
"github.com/fatih/structs"
- "github.com/hashicorp/vault/helper/duration"
+ "github.com/hashicorp/vault/helper/parseutil"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
)
@@ -169,6 +169,14 @@ does *not* include any requested Subject Alternative
Names. Defaults to true.`,
},
+ "use_csr_sans": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Default: true,
+ Description: `If set, when used with a signing profile,
+the SANs in the CSR will be used. This does *not*
+include the Common Name (cn). Defaults to true.`,
+ },
+
"ou": &framework.FieldSchema{
Type: framework.TypeString,
Default: "",
@@ -371,6 +379,7 @@ func (b *backend) pathRoleCreate(
KeyType: data.Get("key_type").(string),
KeyBits: data.Get("key_bits").(int),
UseCSRCommonName: data.Get("use_csr_common_name").(bool),
+ UseCSRSANs: data.Get("use_csr_sans").(bool),
KeyUsage: data.Get("key_usage").(string),
OU: data.Get("ou").(string),
Organization: data.Get("organization").(string),
@@ -388,7 +397,7 @@ func (b *backend) pathRoleCreate(
if len(entry.MaxTTL) == 0 {
maxTTL = maxSystemTTL
} else {
- maxTTL, err = duration.ParseDurationSecond(entry.MaxTTL)
+ maxTTL, err = parseutil.ParseDurationSecond(entry.MaxTTL)
if err != nil {
return logical.ErrorResponse(fmt.Sprintf(
"Invalid max ttl: %s", err)), nil
@@ -400,7 +409,7 @@ func (b *backend) pathRoleCreate(
ttl := b.System().DefaultLeaseTTL()
if len(entry.TTL) != 0 {
- ttl, err = duration.ParseDurationSecond(entry.TTL)
+ ttl, err = parseutil.ParseDurationSecond(entry.TTL)
if err != nil {
return logical.ErrorResponse(fmt.Sprintf(
"Invalid ttl: %s", err)), nil
@@ -487,6 +496,7 @@ type roleEntry struct {
CodeSigningFlag bool `json:"code_signing_flag" structs:"code_signing_flag" mapstructure:"code_signing_flag"`
EmailProtectionFlag bool `json:"email_protection_flag" structs:"email_protection_flag" mapstructure:"email_protection_flag"`
UseCSRCommonName bool `json:"use_csr_common_name" structs:"use_csr_common_name" mapstructure:"use_csr_common_name"`
+ UseCSRSANs bool `json:"use_csr_sans" structs:"use_csr_sans" mapstructure:"use_csr_sans"`
KeyType string `json:"key_type" structs:"key_type" mapstructure:"key_type"`
KeyBits int `json:"key_bits" structs:"key_bits" mapstructure:"key_bits"`
MaxPathLength *int `json:",omitempty" structs:"max_path_length,omitempty" mapstructure:"max_path_length"`
diff --git a/builtin/logical/ssh/backend_test.go b/builtin/logical/ssh/backend_test.go
index 37a348489e..538455c728 100644
--- a/builtin/logical/ssh/backend_test.go
+++ b/builtin/logical/ssh/backend_test.go
@@ -587,7 +587,7 @@ func TestBackend_ValidPrincipalsValidatedForHostCertificates(t *testing.T) {
},
}),
- signCertificateStep("testing", "root", ssh.HostCert, []string{"dummy.example.org", "second.example.com"}, map[string]string{
+ signCertificateStep("testing", "vault-root-22608f5ef173aabf700797cb95c5641e792698ec6380e8e1eb55523e39aa5e51", ssh.HostCert, []string{"dummy.example.org", "second.example.com"}, map[string]string{
"option": "value",
}, map[string]string{
"extension": "extended",
@@ -632,7 +632,7 @@ func TestBackend_OptionsOverrideDefaults(t *testing.T) {
},
}),
- signCertificateStep("testing", "root", ssh.UserCert, []string{"tuber"}, map[string]string{
+ signCertificateStep("testing", "vault-root-22608f5ef173aabf700797cb95c5641e792698ec6380e8e1eb55523e39aa5e51", ssh.UserCert, []string{"tuber"}, map[string]string{
"secondary": "value",
}, map[string]string{
"additional": "value",
@@ -709,7 +709,7 @@ func validateSSHCertificate(cert *ssh.Certificate, keyId string, certType int, v
ttl time.Duration) error {
if cert.KeyId != keyId {
- return fmt.Errorf("Incorrect KeyId: %v", cert.KeyId)
+ return fmt.Errorf("Incorrect KeyId: %v, wanted %v", cert.KeyId, keyId)
}
if cert.CertType != uint32(certType) {
diff --git a/builtin/logical/ssh/path_config_ca.go b/builtin/logical/ssh/path_config_ca.go
index 64712d94d6..37300b77d2 100644
--- a/builtin/logical/ssh/path_config_ca.go
+++ b/builtin/logical/ssh/path_config_ca.go
@@ -7,11 +7,25 @@ import (
"encoding/pem"
"fmt"
+ multierror "github.com/hashicorp/go-multierror"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
"golang.org/x/crypto/ssh"
)
+const (
+ caPublicKey = "ca_public_key"
+ caPrivateKey = "ca_private_key"
+ caPublicKeyStoragePath = "config/ca_public_key"
+ caPublicKeyStoragePathDeprecated = "public_key"
+ caPrivateKeyStoragePath = "config/ca_private_key"
+ caPrivateKeyStoragePathDeprecated = "config/ca_bundle"
+)
+
+type keyStorageEntry struct {
+ Key string `json:"key" structs:"key" mapstructure:"key"`
+}
+
func pathConfigCA(b *backend) *framework.Path {
return &framework.Path{
Pattern: "config/ca",
@@ -34,27 +48,102 @@ func pathConfigCA(b *backend) *framework.Path {
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.UpdateOperation: b.pathConfigCAUpdate,
logical.DeleteOperation: b.pathConfigCADelete,
+ logical.ReadOperation: b.pathConfigCARead,
},
HelpSynopsis: `Set the SSH private key used for signing certificates.`,
HelpDescription: `This sets the CA information used for certificates generated by this
by this mount. The fields must be in the standard private and public SSH format.
-For security reasons, the private key cannot be retrieved later.`,
+For security reasons, the private key cannot be retrieved later.
+
+Read operations will return the public key, if already stored/generated.`,
}
}
+func (b *backend) pathConfigCARead(
+ req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ publicKeyEntry, err := caKey(req.Storage, caPublicKey)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read CA public key: %v", err)
+ }
+
+ if publicKeyEntry == nil {
+ return logical.ErrorResponse("keys haven't been configured yet"), nil
+ }
+
+ response := &logical.Response{
+ Data: map[string]interface{}{
+ "public_key": publicKeyEntry.Key,
+ },
+ }
+
+ return response, nil
+}
+
func (b *backend) pathConfigCADelete(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- if err := req.Storage.Delete("config/ca_bundle"); err != nil {
+ if err := req.Storage.Delete(caPrivateKeyStoragePath); err != nil {
return nil, err
}
- if err := req.Storage.Delete("config/ca_public_key"); err != nil {
+ if err := req.Storage.Delete(caPublicKeyStoragePath); err != nil {
return nil, err
}
return nil, nil
}
+func caKey(storage logical.Storage, keyType string) (*keyStorageEntry, error) {
+ var path, deprecatedPath string
+ switch keyType {
+ case caPrivateKey:
+ path = caPrivateKeyStoragePath
+ deprecatedPath = caPrivateKeyStoragePathDeprecated
+ case caPublicKey:
+ path = caPublicKeyStoragePath
+ deprecatedPath = caPublicKeyStoragePathDeprecated
+ default:
+ return nil, fmt.Errorf("unrecognized key type %q", keyType)
+ }
+
+ entry, err := storage.Get(path)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read CA key of type %q: %v", keyType, err)
+ }
+
+ if entry == nil {
+ // If the entry is not found, look at an older path. If found, upgrade
+ // it.
+ entry, err = storage.Get(deprecatedPath)
+ if err != nil {
+ return nil, err
+ }
+ if entry != nil {
+ entry, err = logical.StorageEntryJSON(path, keyStorageEntry{
+ Key: string(entry.Value),
+ })
+ if err != nil {
+ return nil, err
+ }
+ if err := storage.Put(entry); err != nil {
+ return nil, err
+ }
+ if err = storage.Delete(deprecatedPath); err != nil {
+ return nil, err
+ }
+ }
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var keyEntry keyStorageEntry
+ if err := entry.DecodeJSON(&keyEntry); err != nil {
+ return nil, err
+ }
+
+ return &keyEntry, nil
+}
+
func (b *backend) pathConfigCAUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
var err error
publicKey := data.Get("public_key").(string)
@@ -112,39 +201,68 @@ func (b *backend) pathConfigCAUpdate(req *logical.Request, data *framework.Field
return nil, fmt.Errorf("failed to generate or parse the keys")
}
- publicKeyEntry, err := req.Storage.Get("config/ca_public_key")
+ publicKeyEntry, err := caKey(req.Storage, caPublicKey)
if err != nil {
- return nil, fmt.Errorf("failed while reading ca_public_key: %v", err)
+ return nil, fmt.Errorf("failed to read CA public key: %v", err)
}
- privateKeyEntry, err := req.Storage.Get("config/ca_bundle")
+ privateKeyEntry, err := caKey(req.Storage, caPrivateKey)
if err != nil {
- return nil, fmt.Errorf("failed while reading ca_bundle: %v", err)
+ return nil, fmt.Errorf("failed to read CA private key: %v", err)
}
- if publicKeyEntry != nil || privateKeyEntry != nil {
+ if (publicKeyEntry != nil && publicKeyEntry.Key != "") || (privateKeyEntry != nil && privateKeyEntry.Key != "") {
return nil, fmt.Errorf("keys are already configured; delete them before reconfiguring")
}
- err = req.Storage.Put(&logical.StorageEntry{
- Key: "config/ca_public_key",
- Value: []byte(publicKey),
+ entry, err := logical.StorageEntryJSON(caPublicKeyStoragePath, &keyStorageEntry{
+ Key: publicKey,
})
if err != nil {
return nil, err
}
- bundle := signingBundle{
- Certificate: privateKey,
- }
-
- entry, err := logical.StorageEntryJSON("config/ca_bundle", bundle)
+ // Save the public key
+ err = req.Storage.Put(entry)
if err != nil {
return nil, err
}
+ entry, err = logical.StorageEntryJSON(caPrivateKeyStoragePath, &keyStorageEntry{
+ Key: privateKey,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Save the private key
err = req.Storage.Put(entry)
- return nil, err
+ if err != nil {
+ var mErr *multierror.Error
+
+ mErr = multierror.Append(mErr, fmt.Errorf("failed to store CA private key: %v", err))
+
+ // If storing private key fails, the corresponding public key should be
+ // removed
+ if delErr := req.Storage.Delete(caPublicKeyStoragePath); delErr != nil {
+ mErr = multierror.Append(mErr, fmt.Errorf("failed to cleanup CA public key: %v", delErr))
+ return nil, mErr
+ }
+
+ return nil, err
+ }
+
+ if generateSigningKey {
+ response := &logical.Response{
+ Data: map[string]interface{}{
+ "public_key": publicKey,
+ },
+ }
+
+ return response, nil
+ }
+
+ return nil, nil
}
func generateSSHKeyPair() (string, string, error) {
diff --git a/builtin/logical/ssh/path_config_ca_test.go b/builtin/logical/ssh/path_config_ca_test.go
index a8089b1785..cc0b17b7de 100644
--- a/builtin/logical/ssh/path_config_ca_test.go
+++ b/builtin/logical/ssh/path_config_ca_test.go
@@ -6,6 +6,91 @@ import (
"github.com/hashicorp/vault/logical"
)
+func TestSSH_ConfigCAStorageUpgrade(t *testing.T) {
+ var err error
+
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+
+ b, err := Backend(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = b.Setup(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Store at an older path
+ err = config.StorageView.Put(&logical.StorageEntry{
+ Key: caPrivateKeyStoragePathDeprecated,
+ Value: []byte(privateKey),
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Reading it should return the key as well as upgrade the storage path
+ privateKeyEntry, err := caKey(config.StorageView, caPrivateKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if privateKeyEntry == nil || privateKeyEntry.Key == "" {
+ t.Fatalf("failed to read the stored private key")
+ }
+
+ entry, err := config.StorageView.Get(caPrivateKeyStoragePathDeprecated)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if entry != nil {
+ t.Fatalf("bad: expected a nil entry after upgrade")
+ }
+
+ entry, err = config.StorageView.Get(caPrivateKeyStoragePath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if entry == nil {
+ t.Fatalf("bad: expected a non-nil entry after upgrade")
+ }
+
+ // Store at an older path
+ err = config.StorageView.Put(&logical.StorageEntry{
+ Key: caPublicKeyStoragePathDeprecated,
+ Value: []byte(publicKey),
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Reading it should return the key as well as upgrade the storage path
+ publicKeyEntry, err := caKey(config.StorageView, caPublicKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if publicKeyEntry == nil || publicKeyEntry.Key == "" {
+ t.Fatalf("failed to read the stored public key")
+ }
+
+ entry, err = config.StorageView.Get(caPublicKeyStoragePathDeprecated)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if entry != nil {
+ t.Fatalf("bad: expected a nil entry after upgrade")
+ }
+
+ entry, err = config.StorageView.Get(caPublicKeyStoragePath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if entry == nil {
+ t.Fatalf("bad: expected a non-nil entry after upgrade")
+ }
+}
+
func TestSSH_ConfigCAUpdateDelete(t *testing.T) {
var resp *logical.Response
var err error
diff --git a/builtin/logical/ssh/path_fetch.go b/builtin/logical/ssh/path_fetch.go
index 21235ebd8c..1b59794899 100644
--- a/builtin/logical/ssh/path_fetch.go
+++ b/builtin/logical/ssh/path_fetch.go
@@ -19,19 +19,18 @@ func pathFetchPublicKey(b *backend) *framework.Path {
}
func (b *backend) pathFetchPublicKey(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- entry, err := req.Storage.Get("config/ca_public_key")
+ publicKeyEntry, err := caKey(req.Storage, caPublicKey)
if err != nil {
return nil, err
}
-
- if entry == nil {
+ if publicKeyEntry == nil || publicKeyEntry.Key == "" {
return nil, nil
}
response := &logical.Response{
Data: map[string]interface{}{
logical.HTTPContentType: "text/plain",
- logical.HTTPRawBody: entry.Value,
+ logical.HTTPRawBody: []byte(publicKeyEntry.Key),
logical.HTTPStatusCode: 200,
},
}
diff --git a/builtin/logical/ssh/path_roles.go b/builtin/logical/ssh/path_roles.go
index 1591bf52e1..dfc2ffdb16 100644
--- a/builtin/logical/ssh/path_roles.go
+++ b/builtin/logical/ssh/path_roles.go
@@ -7,7 +7,7 @@ import (
"time"
"github.com/hashicorp/vault/helper/cidrutil"
- "github.com/hashicorp/vault/helper/duration"
+ "github.com/hashicorp/vault/helper/parseutil"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
)
@@ -44,6 +44,7 @@ type sshRole struct {
AllowHostCertificates bool `mapstructure:"allow_host_certificates" json:"allow_host_certificates"`
AllowBareDomains bool `mapstructure:"allow_bare_domains" json:"allow_bare_domains"`
AllowSubdomains bool `mapstructure:"allow_subdomains" json:"allow_subdomains"`
+ AllowUserKeyIDs bool `mapstructure:"allow_user_key_ids" json:"allow_user_key_ids"`
}
func pathListRoles(b *backend) *framework.Path {
@@ -143,11 +144,14 @@ func pathRoles(b *backend) *framework.Path {
Type: framework.TypeString,
Description: `
[Optional for all types]
- If this option is not specified, client can request for a credential for
- any valid user at the remote host, including the admin user. If only certain
- usernames are to be allowed, then this list enforces it. If this field is
- set, then credentials can only be created for default_user and usernames
- present in this list.
+ If this option is not specified, client can request for a
+ credential for any valid user at the remote host, including the
+ admin user. If only certain usernames are to be allowed, then
+ this list enforces it. If this field is set, then credentials
+ can only be created for default_user and usernames present in
+ this list. Setting this option will enable all the users with
+			access to this role to fetch credentials for all other usernames
+ in this list. Use with caution.
`,
},
"allowed_domains": &framework.FieldSchema{
@@ -251,6 +255,15 @@ func pathRoles(b *backend) *framework.Path {
If set, host certificates that are requested are allowed to use subdomains of those listed in "allowed_domains".
`,
},
+ "allow_user_key_ids": &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Description: `
+ [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type]
+ If true, users can override the key ID for a signed certificate with the "key_id" field.
+ When false, the key ID will always be the token display name.
+ The key ID is logged by the SSH server and can be useful for auditing.
+ `,
+ },
},
Callbacks: map[logical.Operation]framework.OperationFunc{
@@ -407,7 +420,6 @@ func (b *backend) pathRoleWrite(req *logical.Request, d *framework.FieldData) (*
}
func (b *backend) createCARole(allowedUsers, defaultUser string, data *framework.FieldData) (*sshRole, *logical.Response) {
-
role := &sshRole{
MaxTTL: data.Get("max_ttl").(string),
TTL: data.Get("ttl").(string),
@@ -420,9 +432,14 @@ func (b *backend) createCARole(allowedUsers, defaultUser string, data *framework
DefaultUser: defaultUser,
AllowBareDomains: data.Get("allow_bare_domains").(bool),
AllowSubdomains: data.Get("allow_subdomains").(bool),
+ AllowUserKeyIDs: data.Get("allow_user_key_ids").(bool),
KeyType: KeyTypeCA,
}
+ if !role.AllowUserCertificates && !role.AllowHostCertificates {
+ return nil, logical.ErrorResponse("Either 'allow_user_certificates' or 'allow_host_certificates' must be set to 'true'")
+ }
+
defaultCriticalOptions := convertMapToStringValue(data.Get("default_critical_options").(map[string]interface{}))
defaultExtensions := convertMapToStringValue(data.Get("default_extensions").(map[string]interface{}))
@@ -432,7 +449,7 @@ func (b *backend) createCARole(allowedUsers, defaultUser string, data *framework
maxTTL = maxSystemTTL
} else {
var err error
- maxTTL, err = duration.ParseDurationSecond(role.MaxTTL)
+ maxTTL, err = parseutil.ParseDurationSecond(role.MaxTTL)
if err != nil {
return nil, logical.ErrorResponse(fmt.Sprintf(
"Invalid max ttl: %s", err))
@@ -445,7 +462,7 @@ func (b *backend) createCARole(allowedUsers, defaultUser string, data *framework
ttl := b.System().DefaultLeaseTTL()
if len(role.TTL) != 0 {
var err error
- ttl, err = duration.ParseDurationSecond(role.TTL)
+ ttl, err = parseutil.ParseDurationSecond(role.TTL)
if err != nil {
return nil, logical.ErrorResponse(fmt.Sprintf(
"Invalid ttl: %s", err))
@@ -533,6 +550,7 @@ func (b *backend) pathRoleRead(req *logical.Request, d *framework.FieldData) (*l
"allow_host_certificates": role.AllowHostCertificates,
"allow_bare_domains": role.AllowBareDomains,
"allow_subdomains": role.AllowSubdomains,
+ "allow_user_key_ids": role.AllowUserKeyIDs,
"key_type": role.KeyType,
"default_critical_options": role.DefaultCriticalOptions,
"default_extensions": role.DefaultExtensions,
diff --git a/builtin/logical/ssh/path_sign.go b/builtin/logical/ssh/path_sign.go
index 69833b2434..22e2513162 100644
--- a/builtin/logical/ssh/path_sign.go
+++ b/builtin/logical/ssh/path_sign.go
@@ -2,6 +2,8 @@ package ssh
import (
"crypto/rand"
+ "crypto/sha256"
+ "encoding/hex"
"errors"
"fmt"
"strconv"
@@ -9,28 +11,23 @@ import (
"time"
"github.com/hashicorp/vault/helper/certutil"
- "github.com/hashicorp/vault/helper/duration"
- "github.com/hashicorp/vault/helper/errutil"
+ "github.com/hashicorp/vault/helper/parseutil"
"github.com/hashicorp/vault/helper/strutil"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
"golang.org/x/crypto/ssh"
)
-type signingBundle struct {
- Certificate string `json:"certificate" structs:"certificate" mapstructure:"certificate"`
-}
-
type creationBundle struct {
KeyId string
ValidPrincipals []string
PublicKey ssh.PublicKey
CertificateType uint32
TTL time.Duration
- SigningBundle signingBundle
+ Signer ssh.Signer
Role *sshRole
- criticalOptions map[string]string
- extensions map[string]string
+ CriticalOptions map[string]string
+ Extensions map[string]string
}
func pathSign(b *backend) *framework.Path {
@@ -109,16 +106,16 @@ func (b *backend) pathSignCertificate(req *logical.Request, data *framework.Fiel
userPublicKey, err := parsePublicSSHKey(publicKey)
if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("unable to decode \"public_key\" as SSH key: %s", err)), nil
- }
-
- keyId := data.Get("key_id").(string)
- if keyId == "" {
- keyId = req.DisplayName
+ return logical.ErrorResponse(fmt.Sprintf("failed to parse public_key as SSH key: %s", err)), nil
}
// Note that these various functions always return "user errors" so we pass
// them as 4xx values
+ keyId, err := b.calculateKeyId(data, req, role, userPublicKey)
+ if err != nil {
+ return logical.ErrorResponse(err.Error()), nil
+ }
+
certificateType, err := b.calculateCertificateType(data, role)
if err != nil {
return logical.ErrorResponse(err.Error()), nil
@@ -152,32 +149,32 @@ func (b *backend) pathSignCertificate(req *logical.Request, data *framework.Fiel
return logical.ErrorResponse(err.Error()), nil
}
- storedBundle, err := req.Storage.Get("config/ca_bundle")
+ privateKeyEntry, err := caKey(req.Storage, caPrivateKey)
if err != nil {
- return nil, fmt.Errorf("unable to fetch local CA certificate/key: %v", err)
+ return nil, fmt.Errorf("failed to read CA private key: %v", err)
}
- if storedBundle == nil {
- return logical.ErrorResponse("backend must be configured with a CA certificate/key"), nil
+ if privateKeyEntry == nil || privateKeyEntry.Key == "" {
+ return nil, fmt.Errorf("failed to read CA private key")
}
- var bundle signingBundle
- if err := storedBundle.DecodeJSON(&bundle); err != nil {
- return nil, fmt.Errorf("unable to decode local CA certificate/key: %v", err)
+ signer, err := ssh.ParsePrivateKey([]byte(privateKeyEntry.Key))
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse stored CA private key: %v", err)
}
- signingBundle := creationBundle{
+ cBundle := creationBundle{
KeyId: keyId,
PublicKey: userPublicKey,
- SigningBundle: bundle,
+ Signer: signer,
ValidPrincipals: parsedPrincipals,
TTL: ttl,
CertificateType: certificateType,
Role: role,
- criticalOptions: criticalOptions,
- extensions: extensions,
+ CriticalOptions: criticalOptions,
+ Extensions: extensions,
}
- certificate, err := signingBundle.sign()
+ certificate, err := cBundle.sign()
if err != nil {
return nil, err
}
@@ -198,34 +195,37 @@ func (b *backend) pathSignCertificate(req *logical.Request, data *framework.Fiel
}
func (b *backend) calculateValidPrincipals(data *framework.FieldData, defaultPrincipal, principalsAllowedByRole string, validatePrincipal func([]string, string) bool) ([]string, error) {
- if principalsAllowedByRole == "" {
- return nil, fmt.Errorf(`"role is not configured to allow any principles`)
+ validPrincipals := ""
+ validPrincipalsRaw, ok := data.GetOk("valid_principals")
+ if ok {
+ validPrincipals = validPrincipalsRaw.(string)
+ } else {
+ validPrincipals = defaultPrincipal
}
- validPrincipals := data.Get("valid_principals").(string)
- if validPrincipals == "" {
- if defaultPrincipal != "" {
- return []string{defaultPrincipal}, nil
+ parsedPrincipals := strutil.ParseDedupAndSortStrings(validPrincipals, ",")
+ allowedPrincipals := strutil.ParseDedupAndSortStrings(principalsAllowedByRole, ",")
+ switch {
+ case len(parsedPrincipals) == 0:
+ // There is nothing to process
+ return nil, nil
+ case len(allowedPrincipals) == 0:
+ // User has requested principals to be set, but role is not configured
+ // with any principals
+		return nil, fmt.Errorf("role is not configured to allow any principals")
+ default:
+ // Role was explicitly configured to allow any principal.
+ if principalsAllowedByRole == "*" {
+ return parsedPrincipals, nil
}
- return nil, fmt.Errorf(`"valid_principals" not supplied and no default set in the role`)
- }
-
- parsedPrincipals := strings.Split(validPrincipals, ",")
-
- // Role was explicitly configured to allow any principal.
- if principalsAllowedByRole == "*" {
+ for _, principal := range parsedPrincipals {
+ if !validatePrincipal(allowedPrincipals, principal) {
+ return nil, fmt.Errorf("%v is not a valid value for valid_principals", principal)
+ }
+ }
return parsedPrincipals, nil
}
-
- allowedPrincipals := strings.Split(principalsAllowedByRole, ",")
- for _, principal := range parsedPrincipals {
- if !validatePrincipal(allowedPrincipals, principal) {
- return nil, fmt.Errorf(`%v is not a valid value for "valid_principals"`, principal)
- }
- }
-
- return parsedPrincipals, nil
}
func validateValidPrincipalForHosts(role *sshRole) func([]string, string) bool {
@@ -250,21 +250,43 @@ func (b *backend) calculateCertificateType(data *framework.FieldData, role *sshR
switch requestedCertificateType {
case "user":
if !role.AllowUserCertificates {
- return 0, errors.New(`"cert_type" 'user' is not allowed by role`)
+ return 0, errors.New("cert_type 'user' is not allowed by role")
}
certificateType = ssh.UserCert
case "host":
if !role.AllowHostCertificates {
- return 0, errors.New(`"cert_type" 'host' is not allowed by role`)
+ return 0, errors.New("cert_type 'host' is not allowed by role")
}
certificateType = ssh.HostCert
default:
- return 0, errors.New(`"cert_type" must be either 'user' or 'host'`)
+ return 0, errors.New("cert_type must be either 'user' or 'host'")
}
return certificateType, nil
}
+func (b *backend) calculateKeyId(data *framework.FieldData, req *logical.Request, role *sshRole, pubKey ssh.PublicKey) (string, error) {
+ reqId := data.Get("key_id").(string)
+
+ if reqId != "" {
+ if !role.AllowUserKeyIDs {
+ return "", fmt.Errorf("setting key_id is not allowed by role")
+ }
+ return reqId, nil
+ }
+
+ keyHash := sha256.Sum256(pubKey.Marshal())
+ keyId := hex.EncodeToString(keyHash[:])
+
+ if req.DisplayName != "" {
+ keyId = fmt.Sprintf("%s-%s", req.DisplayName, keyId)
+ }
+
+ keyId = fmt.Sprintf("vault-%s", keyId)
+
+ return keyId, nil
+}
+
func (b *backend) calculateCriticalOptions(data *framework.FieldData, role *sshRole) (map[string]string, error) {
unparsedCriticalOptions := data.Get("critical_options").(map[string]interface{})
if len(unparsedCriticalOptions) == 0 {
@@ -310,7 +332,7 @@ func (b *backend) calculateExtensions(data *framework.FieldData, role *sshRole)
}
if len(notAllowed) != 0 {
- return nil, fmt.Errorf("Extensions not on allowed list: %v", notAllowed)
+ return nil, fmt.Errorf("extensions %v are not on allowed list", notAllowed)
}
}
@@ -332,7 +354,7 @@ func (b *backend) calculateTTL(data *framework.FieldData, role *sshRole) (time.D
ttl = b.System().DefaultLeaseTTL()
} else {
var err error
- ttl, err = duration.ParseDurationSecond(ttlField)
+ ttl, err = parseutil.ParseDurationSecond(ttlField)
if err != nil {
return 0, fmt.Errorf("invalid requested ttl: %s", err)
}
@@ -342,7 +364,7 @@ func (b *backend) calculateTTL(data *framework.FieldData, role *sshRole) (time.D
maxTTL = b.System().MaxLeaseTTL()
} else {
var err error
- maxTTL, err = duration.ParseDurationSecond(role.MaxTTL)
+ maxTTL, err = parseutil.ParseDurationSecond(role.MaxTTL)
if err != nil {
return 0, fmt.Errorf("invalid requested max ttl: %s", err)
}
@@ -362,11 +384,6 @@ func (b *backend) calculateTTL(data *framework.FieldData, role *sshRole) (time.D
}
func (b *creationBundle) sign() (*ssh.Certificate, error) {
- signingKey, err := ssh.ParsePrivateKey([]byte(b.SigningBundle.Certificate))
- if err != nil {
- return nil, errutil.InternalError{Err: fmt.Sprintf("stored SSH signing key cannot be parsed: %v", err)}
- }
-
serialNumber, err := certutil.GenerateSerialNumber()
if err != nil {
return nil, err
@@ -383,14 +400,14 @@ func (b *creationBundle) sign() (*ssh.Certificate, error) {
ValidBefore: uint64(now.Add(b.TTL).In(time.UTC).Unix()),
CertType: b.CertificateType,
Permissions: ssh.Permissions{
- CriticalOptions: b.criticalOptions,
- Extensions: b.extensions,
+ CriticalOptions: b.CriticalOptions,
+ Extensions: b.Extensions,
},
}
- err = certificate.SignCert(rand.Reader, signingKey)
+ err = certificate.SignCert(rand.Reader, b.Signer)
if err != nil {
- return nil, errutil.InternalError{Err: "Failed to generate signed SSH key"}
+ return nil, fmt.Errorf("failed to generate signed SSH key")
}
return certificate, nil
diff --git a/command/mount.go b/command/mount.go
index 9700dccdbf..e6267233f4 100644
--- a/command/mount.go
+++ b/command/mount.go
@@ -15,12 +15,13 @@ type MountCommand struct {
func (c *MountCommand) Run(args []string) int {
var description, path, defaultLeaseTTL, maxLeaseTTL string
- var local bool
+ var local, forceNoCache bool
flags := c.Meta.FlagSet("mount", meta.FlagSetDefault)
flags.StringVar(&description, "description", "", "")
flags.StringVar(&path, "path", "", "")
flags.StringVar(&defaultLeaseTTL, "default-lease-ttl", "", "")
flags.StringVar(&maxLeaseTTL, "max-lease-ttl", "", "")
+ flags.BoolVar(&forceNoCache, "force-no-cache", false, "")
flags.BoolVar(&local, "local", false, "")
flags.Usage = func() { c.Ui.Error(c.Help()) }
if err := flags.Parse(args); err != nil {
@@ -55,6 +56,7 @@ func (c *MountCommand) Run(args []string) int {
Config: api.MountConfigInput{
DefaultLeaseTTL: defaultLeaseTTL,
MaxLeaseTTL: maxLeaseTTL,
+ ForceNoCache: forceNoCache,
},
Local: local,
}
@@ -105,6 +107,11 @@ Mount Options:
the previously set value. Set to '0' to
explicitly set it to use the global default.
+ -force-no-cache Forces the backend to disable caching. If not
+ specified, uses the global default. This does
+ not affect caching of the underlying encrypted
+ data storage.
+
-local Mark the mount as a local mount. Local mounts
are not replicated nor (if a secondary)
removed by replication.
diff --git a/command/mounts.go b/command/mounts.go
index d57837dd2c..d918d67124 100644
--- a/command/mounts.go
+++ b/command/mounts.go
@@ -42,7 +42,7 @@ func (c *MountsCommand) Run(args []string) int {
}
sort.Strings(paths)
- columns := []string{"Path | Type | Default TTL | Max TTL | Replication Behavior | Description"}
+ columns := []string{"Path | Type | Default TTL | Max TTL | Force No Cache | Replication Behavior | Description"}
for _, path := range paths {
mount := mounts[path]
defTTL := "system"
@@ -68,7 +68,8 @@ func (c *MountsCommand) Run(args []string) int {
replicatedBehavior = "local"
}
columns = append(columns, fmt.Sprintf(
- "%s | %s | %s | %s | %s | %s", path, mount.Type, defTTL, maxTTL, replicatedBehavior, mount.Description))
+ "%s | %s | %s | %s | %v | %s | %s", path, mount.Type, defTTL, maxTTL,
+ mount.Config.ForceNoCache, replicatedBehavior, mount.Description))
}
c.Ui.Output(columnize.SimpleFormat(columns))
diff --git a/command/server.go b/command/server.go
index 3b1c771bbe..17f40d1655 100644
--- a/command/server.go
+++ b/command/server.go
@@ -204,8 +204,8 @@ func (c *ServerCommand) Run(args []string) int {
}
// Ensure that a backend is provided
- if config.Backend == nil {
- c.Ui.Output("A physical backend must be specified")
+ if config.Storage == nil {
+ c.Ui.Output("A storage backend must be specified")
return 1
}
@@ -225,11 +225,11 @@ func (c *ServerCommand) Run(args []string) int {
// Initialize the backend
backend, err := physical.NewBackend(
- config.Backend.Type, c.logger, config.Backend.Config)
+ config.Storage.Type, c.logger, config.Storage.Config)
if err != nil {
c.Ui.Output(fmt.Sprintf(
- "Error initializing backend of type %s: %s",
- config.Backend.Type, err))
+ "Error initializing storage of type %s: %s",
+ config.Storage.Type, err))
return 1
}
@@ -255,7 +255,7 @@ func (c *ServerCommand) Run(args []string) int {
coreConfig := &vault.CoreConfig{
Physical: backend,
- RedirectAddr: config.Backend.RedirectAddr,
+ RedirectAddr: config.Storage.RedirectAddr,
HAPhysical: nil,
Seal: seal,
AuditBackends: c.AuditBackends,
@@ -288,39 +288,39 @@ func (c *ServerCommand) Run(args []string) int {
var disableClustering bool
- // Initialize the separate HA physical backend, if it exists
+ // Initialize the separate HA storage backend, if it exists
var ok bool
- if config.HABackend != nil {
+ if config.HAStorage != nil {
habackend, err := physical.NewBackend(
- config.HABackend.Type, c.logger, config.HABackend.Config)
+ config.HAStorage.Type, c.logger, config.HAStorage.Config)
if err != nil {
c.Ui.Output(fmt.Sprintf(
- "Error initializing backend of type %s: %s",
- config.HABackend.Type, err))
+ "Error initializing HA storage of type %s: %s",
+ config.HAStorage.Type, err))
return 1
}
if coreConfig.HAPhysical, ok = habackend.(physical.HABackend); !ok {
- c.Ui.Output("Specified HA backend does not support HA")
+ c.Ui.Output("Specified HA storage does not support HA")
return 1
}
if !coreConfig.HAPhysical.HAEnabled() {
- c.Ui.Output("Specified HA backend has HA support disabled; please consult documentation")
+ c.Ui.Output("Specified HA storage has HA support disabled; please consult documentation")
return 1
}
- coreConfig.RedirectAddr = config.HABackend.RedirectAddr
- disableClustering = config.HABackend.DisableClustering
+ coreConfig.RedirectAddr = config.HAStorage.RedirectAddr
+ disableClustering = config.HAStorage.DisableClustering
if !disableClustering {
- coreConfig.ClusterAddr = config.HABackend.ClusterAddr
+ coreConfig.ClusterAddr = config.HAStorage.ClusterAddr
}
} else {
if coreConfig.HAPhysical, ok = backend.(physical.HABackend); ok {
- coreConfig.RedirectAddr = config.Backend.RedirectAddr
- disableClustering = config.Backend.DisableClustering
+ coreConfig.RedirectAddr = config.Storage.RedirectAddr
+ disableClustering = config.Storage.DisableClustering
if !disableClustering {
- coreConfig.ClusterAddr = config.Backend.ClusterAddr
+ coreConfig.ClusterAddr = config.Storage.ClusterAddr
}
}
}
@@ -422,12 +422,12 @@ CLUSTER_SYNTHESIS_COMPLETE:
c.reloadFuncsLock = coreConfig.ReloadFuncsLock
// Compile server information for output later
- info["backend"] = config.Backend.Type
+ info["storage"] = config.Storage.Type
info["log level"] = logLevel
info["mlock"] = fmt.Sprintf(
"supported: %v, enabled: %v",
mlock.Supported(), !config.DisableMlock && mlock.Supported())
- infoKeys = append(infoKeys, "log level", "mlock", "backend")
+ infoKeys = append(infoKeys, "log level", "mlock", "storage")
if coreConfig.ClusterAddr != "" {
info["cluster address"] = coreConfig.ClusterAddr
@@ -438,16 +438,16 @@ CLUSTER_SYNTHESIS_COMPLETE:
infoKeys = append(infoKeys, "redirect address")
}
- if config.HABackend != nil {
- info["HA backend"] = config.HABackend.Type
- infoKeys = append(infoKeys, "HA backend")
+ if config.HAStorage != nil {
+ info["HA storage"] = config.HAStorage.Type
+ infoKeys = append(infoKeys, "HA storage")
} else {
- // If the backend supports HA, then note it
+ // If the storage supports HA, then note it
if coreConfig.HAPhysical != nil {
if coreConfig.HAPhysical.HAEnabled() {
- info["backend"] += " (HA available)"
+ info["storage"] += " (HA available)"
} else {
- info["backend"] += " (HA disabled)"
+ info["storage"] += " (HA disabled)"
}
}
}
diff --git a/command/server/config.go b/command/server/config.go
index a57fdad13b..4821a29ba8 100644
--- a/command/server/config.go
+++ b/command/server/config.go
@@ -15,28 +15,32 @@ import (
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/hcl"
"github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/vault/helper/parseutil"
)
// Config is the configuration for the vault server.
type Config struct {
Listeners []*Listener `hcl:"-"`
- Backend *Backend `hcl:"-"`
- HABackend *Backend `hcl:"-"`
+ Storage *Storage `hcl:"-"`
+ HAStorage *Storage `hcl:"-"`
HSM *HSM `hcl:"-"`
- CacheSize int `hcl:"cache_size"`
- DisableCache bool `hcl:"disable_cache"`
- DisableMlock bool `hcl:"disable_mlock"`
+ CacheSize int `hcl:"cache_size"`
+ DisableCache bool `hcl:"-"`
+ DisableCacheRaw interface{} `hcl:"disable_cache"`
+ DisableMlock bool `hcl:"-"`
+ DisableMlockRaw interface{} `hcl:"disable_mlock"`
- EnableUI bool `hcl:"ui"`
+ EnableUI bool `hcl:"-"`
+ EnableUIRaw interface{} `hcl:"ui"`
Telemetry *Telemetry `hcl:"telemetry"`
MaxLeaseTTL time.Duration `hcl:"-"`
- MaxLeaseTTLRaw string `hcl:"max_lease_ttl"`
+ MaxLeaseTTLRaw interface{} `hcl:"max_lease_ttl"`
DefaultLeaseTTL time.Duration `hcl:"-"`
- DefaultLeaseTTLRaw string `hcl:"default_lease_ttl"`
+ DefaultLeaseTTLRaw interface{} `hcl:"default_lease_ttl"`
ClusterName string `hcl:"cluster_name"`
PluginDirectory string `hcl:"plugin_directory"`
@@ -48,7 +52,7 @@ func DevConfig(ha, transactional bool) *Config {
DisableCache: false,
DisableMlock: true,
- Backend: &Backend{
+ Storage: &Storage{
Type: "inmem",
},
@@ -72,11 +76,11 @@ func DevConfig(ha, transactional bool) *Config {
switch {
case ha && transactional:
- ret.Backend.Type = "inmem_transactional_ha"
+ ret.Storage.Type = "inmem_transactional_ha"
case !ha && transactional:
- ret.Backend.Type = "inmem_transactional"
+ ret.Storage.Type = "inmem_transactional"
case ha && !transactional:
- ret.Backend.Type = "inmem_ha"
+ ret.Storage.Type = "inmem_ha"
}
return ret
@@ -92,8 +96,8 @@ func (l *Listener) GoString() string {
return fmt.Sprintf("*%#v", *l)
}
-// Backend is the backend configuration for the server.
-type Backend struct {
+// Storage is the underlying storage configuration for the server.
+type Storage struct {
Type string
RedirectAddr string
ClusterAddr string
@@ -101,7 +105,7 @@ type Backend struct {
Config map[string]string
}
-func (b *Backend) GoString() string {
+func (b *Storage) GoString() string {
return fmt.Sprintf("*%#v", *b)
}
@@ -212,14 +216,14 @@ func (c *Config) Merge(c2 *Config) *Config {
result.Listeners = append(result.Listeners, l)
}
- result.Backend = c.Backend
- if c2.Backend != nil {
- result.Backend = c2.Backend
+ result.Storage = c.Storage
+ if c2.Storage != nil {
+ result.Storage = c2.Storage
}
- result.HABackend = c.HABackend
- if c2.HABackend != nil {
- result.HABackend = c2.HABackend
+ result.HAStorage = c.HAStorage
+ if c2.HAStorage != nil {
+ result.HAStorage = c2.HAStorage
}
result.HSM = c.HSM
@@ -310,13 +314,31 @@ func ParseConfig(d string, logger log.Logger) (*Config, error) {
return nil, err
}
- if result.MaxLeaseTTLRaw != "" {
- if result.MaxLeaseTTL, err = time.ParseDuration(result.MaxLeaseTTLRaw); err != nil {
+ if result.MaxLeaseTTLRaw != nil {
+ if result.MaxLeaseTTL, err = parseutil.ParseDurationSecond(result.MaxLeaseTTLRaw); err != nil {
return nil, err
}
}
- if result.DefaultLeaseTTLRaw != "" {
- if result.DefaultLeaseTTL, err = time.ParseDuration(result.DefaultLeaseTTLRaw); err != nil {
+ if result.DefaultLeaseTTLRaw != nil {
+ if result.DefaultLeaseTTL, err = parseutil.ParseDurationSecond(result.DefaultLeaseTTLRaw); err != nil {
+ return nil, err
+ }
+ }
+
+ if result.EnableUIRaw != nil {
+ if result.EnableUI, err = parseutil.ParseBool(result.EnableUIRaw); err != nil {
+ return nil, err
+ }
+ }
+
+ if result.DisableCacheRaw != nil {
+ if result.DisableCache, err = parseutil.ParseBool(result.DisableCacheRaw); err != nil {
+ return nil, err
+ }
+ }
+
+ if result.DisableMlockRaw != nil {
+ if result.DisableMlock, err = parseutil.ParseBool(result.DisableMlockRaw); err != nil {
return nil, err
}
}
@@ -328,6 +350,8 @@ func ParseConfig(d string, logger log.Logger) (*Config, error) {
valid := []string{
"atlas",
+ "storage",
+ "ha_storage",
"backend",
"ha_backend",
"hsm",
@@ -346,15 +370,28 @@ func ParseConfig(d string, logger log.Logger) (*Config, error) {
return nil, err
}
- if o := list.Filter("backend"); len(o.Items) > 0 {
- if err := parseBackends(&result, o); err != nil {
- return nil, fmt.Errorf("error parsing 'backend': %s", err)
+ // Look for storage but still support old backend
+ if o := list.Filter("storage"); len(o.Items) > 0 {
+ if err := parseStorage(&result, o, "storage"); err != nil {
+ return nil, fmt.Errorf("error parsing 'storage': %s", err)
+ }
+ } else {
+ if o := list.Filter("backend"); len(o.Items) > 0 {
+ if err := parseStorage(&result, o, "backend"); err != nil {
+ return nil, fmt.Errorf("error parsing 'backend': %s", err)
+ }
}
}
- if o := list.Filter("ha_backend"); len(o.Items) > 0 {
- if err := parseHABackends(&result, o); err != nil {
- return nil, fmt.Errorf("error parsing 'ha_backend': %s", err)
+ if o := list.Filter("ha_storage"); len(o.Items) > 0 {
+ if err := parseHAStorage(&result, o, "ha_storage"); err != nil {
+ return nil, fmt.Errorf("error parsing 'ha_storage': %s", err)
+ }
+ } else {
+ if o := list.Filter("ha_backend"); len(o.Items) > 0 {
+ if err := parseHAStorage(&result, o, "ha_backend"); err != nil {
+ return nil, fmt.Errorf("error parsing 'ha_backend': %s", err)
+ }
}
}
@@ -456,22 +493,22 @@ func isTemporaryFile(name string) bool {
(strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#")) // emacs
}
-func parseBackends(result *Config, list *ast.ObjectList) error {
+func parseStorage(result *Config, list *ast.ObjectList, name string) error {
if len(list.Items) > 1 {
- return fmt.Errorf("only one 'backend' block is permitted")
+ return fmt.Errorf("only one %q block is permitted", name)
}
// Get our item
item := list.Items[0]
- key := "backend"
+ key := name
if len(item.Keys) > 0 {
key = item.Keys[0].Token.Value().(string)
}
var m map[string]string
if err := hcl.DecodeObject(&m, item.Val); err != nil {
- return multierror.Prefix(err, fmt.Sprintf("backend.%s:", key))
+ return multierror.Prefix(err, fmt.Sprintf("%s.%s:", name, key))
}
// Pull out the redirect address since it's common to all backends
@@ -496,12 +533,12 @@ func parseBackends(result *Config, list *ast.ObjectList) error {
if v, ok := m["disable_clustering"]; ok {
disableClustering, err = strconv.ParseBool(v)
if err != nil {
- return multierror.Prefix(err, fmt.Sprintf("backend.%s:", key))
+ return multierror.Prefix(err, fmt.Sprintf("%s.%s:", name, key))
}
delete(m, "disable_clustering")
}
- result.Backend = &Backend{
+ result.Storage = &Storage{
RedirectAddr: redirectAddr,
ClusterAddr: clusterAddr,
DisableClustering: disableClustering,
@@ -511,22 +548,22 @@ func parseBackends(result *Config, list *ast.ObjectList) error {
return nil
}
-func parseHABackends(result *Config, list *ast.ObjectList) error {
+func parseHAStorage(result *Config, list *ast.ObjectList, name string) error {
if len(list.Items) > 1 {
- return fmt.Errorf("only one 'ha_backend' block is permitted")
+ return fmt.Errorf("only one %q block is permitted", name)
}
// Get our item
item := list.Items[0]
- key := "backend"
+ key := name
if len(item.Keys) > 0 {
key = item.Keys[0].Token.Value().(string)
}
var m map[string]string
if err := hcl.DecodeObject(&m, item.Val); err != nil {
- return multierror.Prefix(err, fmt.Sprintf("ha_backend.%s:", key))
+ return multierror.Prefix(err, fmt.Sprintf("%s.%s:", name, key))
}
// Pull out the redirect address since it's common to all backends
@@ -551,12 +588,12 @@ func parseHABackends(result *Config, list *ast.ObjectList) error {
if v, ok := m["disable_clustering"]; ok {
disableClustering, err = strconv.ParseBool(v)
if err != nil {
- return multierror.Prefix(err, fmt.Sprintf("backend.%s:", key))
+ return multierror.Prefix(err, fmt.Sprintf("%s.%s:", name, key))
}
delete(m, "disable_clustering")
}
- result.HABackend = &Backend{
+ result.HAStorage = &Storage{
RedirectAddr: redirectAddr,
ClusterAddr: clusterAddr,
DisableClustering: disableClustering,
@@ -627,6 +664,7 @@ func parseListeners(result *Config, list *ast.ObjectList) error {
"tls_min_version",
"tls_cipher_suites",
"tls_prefer_server_cipher_suites",
+ "tls_require_and_verify_client_cert",
"token",
}
if err := checkHCLKeys(item.Val, valid); err != nil {
diff --git a/command/server/config_test.go b/command/server/config_test.go
index a0b4794366..789be400f4 100644
--- a/command/server/config_test.go
+++ b/command/server/config_test.go
@@ -37,7 +37,7 @@ func TestLoadConfigFile(t *testing.T) {
},
},
- Backend: &Backend{
+ Storage: &Storage{
Type: "consul",
RedirectAddr: "foo",
Config: map[string]string{
@@ -45,7 +45,7 @@ func TestLoadConfigFile(t *testing.T) {
},
},
- HABackend: &Backend{
+ HAStorage: &Storage{
Type: "consul",
RedirectAddr: "snafu",
Config: map[string]string{
@@ -60,9 +60,12 @@ func TestLoadConfigFile(t *testing.T) {
DisableHostname: false,
},
- DisableCache: true,
- DisableMlock: true,
- EnableUI: true,
+ DisableCache: true,
+ DisableCacheRaw: true,
+ DisableMlock: true,
+ DisableMlockRaw: true,
+ EnableUI: true,
+ EnableUIRaw: true,
MaxLeaseTTL: 10 * time.Hour,
MaxLeaseTTLRaw: "10h",
@@ -102,7 +105,7 @@ func TestLoadConfigFile_json(t *testing.T) {
},
},
- Backend: &Backend{
+ Storage: &Storage{
Type: "consul",
Config: map[string]string{
"foo": "bar",
@@ -134,7 +137,10 @@ func TestLoadConfigFile_json(t *testing.T) {
DefaultLeaseTTL: 10 * time.Hour,
DefaultLeaseTTLRaw: "10h",
ClusterName: "testcluster",
+ DisableCacheRaw: interface{}(nil),
+ DisableMlockRaw: interface{}(nil),
EnableUI: true,
+ EnableUIRaw: true,
}
if !reflect.DeepEqual(config, expected) {
t.Fatalf("expected \n\n%#v\n\n to be \n\n%#v\n\n", config, expected)
@@ -165,7 +171,7 @@ func TestLoadConfigFile_json2(t *testing.T) {
},
},
- Backend: &Backend{
+ Storage: &Storage{
Type: "consul",
Config: map[string]string{
"foo": "bar",
@@ -173,7 +179,7 @@ func TestLoadConfigFile_json2(t *testing.T) {
DisableClustering: true,
},
- HABackend: &Backend{
+ HAStorage: &Storage{
Type: "consul",
Config: map[string]string{
"bar": "baz",
@@ -228,7 +234,7 @@ func TestLoadConfigDir(t *testing.T) {
},
},
- Backend: &Backend{
+ Storage: &Storage{
Type: "consul",
Config: map[string]string{
"foo": "bar",
diff --git a/command/server/listener.go b/command/server/listener.go
index 3f5a4ed65a..999966e059 100644
--- a/command/server/listener.go
+++ b/command/server/listener.go
@@ -97,6 +97,15 @@ func listenerWrapTLS(
}
tlsConf.PreferServerCipherSuites = preferServer
}
+ if v, ok := config["tls_require_and_verify_client_cert"]; ok {
+ requireClient, err := strconv.ParseBool(v)
+ if err != nil {
+ return nil, nil, nil, fmt.Errorf("invalid value for 'tls_require_and_verify_client_cert': %v", err)
+ }
+ if requireClient {
+ tlsConf.ClientAuth = tls.RequireAndVerifyClientCert
+ }
+ }
ln = tls.NewListener(ln, tlsConf)
props["tls"] = "enabled"
diff --git a/command/server/test-fixtures/config.hcl.json b/command/server/test-fixtures/config.hcl.json
index 6e37c9a3a1..70e7e149cc 100644
--- a/command/server/test-fixtures/config.hcl.json
+++ b/command/server/test-fixtures/config.hcl.json
@@ -11,7 +11,7 @@
"node_id": "foo_node"
}
}],
- "backend": {
+ "storage": {
"consul": {
"foo": "bar",
"disable_clustering": "true"
diff --git a/command/server/test-fixtures/config2.hcl.json b/command/server/test-fixtures/config2.hcl.json
index fd3ab6e622..5279d63795 100644
--- a/command/server/test-fixtures/config2.hcl.json
+++ b/command/server/test-fixtures/config2.hcl.json
@@ -12,12 +12,12 @@
}
}
],
- "backend":{
+ "storage":{
"consul":{
"foo":"bar"
}
},
- "ha_backend":{
+ "ha_storage":{
"consul":{
"bar":"baz",
"disable_clustering": "true"
diff --git a/command/server_ha_test.go b/command/server_ha_test.go
index 26dc00878f..5562191eb5 100644
--- a/command/server_ha_test.go
+++ b/command/server_ha_test.go
@@ -64,8 +64,8 @@ func TestServer_GoodSeparateHA(t *testing.T) {
t.Fatalf("bad: %d\n\n%s\n\n%s", code, ui.ErrorWriter.String(), ui.OutputWriter.String())
}
- if !strings.Contains(ui.OutputWriter.String(), "HA Backend:") {
- t.Fatalf("did not find HA Backend: %s", ui.OutputWriter.String())
+ if !strings.Contains(ui.OutputWriter.String(), "HA Storage:") {
+ t.Fatalf("did not find HA Storage: %s", ui.OutputWriter.String())
}
}
diff --git a/command/token_renew.go b/command/token_renew.go
index 9f64263a89..8ec1a550bb 100644
--- a/command/token_renew.go
+++ b/command/token_renew.go
@@ -6,7 +6,7 @@ import (
"time"
"github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/helper/duration"
+ "github.com/hashicorp/vault/helper/parseutil"
"github.com/hashicorp/vault/meta"
)
@@ -44,7 +44,7 @@ func (c *TokenRenewCommand) Run(args []string) int {
increment = args[1]
}
if increment != "" {
- dur, err := duration.ParseDurationSecond(increment)
+ dur, err := parseutil.ParseDurationSecond(increment)
if err != nil {
c.Ui.Error(fmt.Sprintf("Invalid increment: %s", err))
return 1
diff --git a/helper/locksutil/locks.go b/helper/locksutil/locks.go
index e1cdce0b7c..dcf1b4b82d 100644
--- a/helper/locksutil/locks.go
+++ b/helper/locksutil/locks.go
@@ -1,28 +1,32 @@
package locksutil
import (
- "fmt"
+ "crypto/md5"
"sync"
)
-// Takes in a map, indexed by string and creates new 'sync.RWMutex' items.
-// This utility creates 'count' number of mutexes (with a cap of 256) and
-// places them in the map. The indices will be 2 character hexadecimal
-// string values from 0 to count.
-func CreateLocks(p map[string]*sync.RWMutex, count int64) error {
- // Since the indices of the map entries are based on 2 character
- // hex values, this utility can only create upto 256 locks.
- if count <= 0 || count > 256 {
- return fmt.Errorf("invalid count: %d", count)
- }
+const (
+ LockCount = 256
+)
- if p == nil {
- return fmt.Errorf("map of locks is not initialized")
- }
-
- for i := int64(0); i < count; i++ {
- p[fmt.Sprintf("%02x", i)] = &sync.RWMutex{}
- }
-
- return nil
+type LockEntry struct {
+ sync.RWMutex
+}
+
+func CreateLocks() []*LockEntry {
+ ret := make([]*LockEntry, LockCount)
+ for i := range ret {
+ ret[i] = new(LockEntry)
+ }
+ return ret
+}
+
+func LockIndexForKey(key string) uint8 {
+ hf := md5.New()
+ hf.Write([]byte(key))
+ return uint8(hf.Sum(nil)[0])
+}
+
+func LockForKey(locks []*LockEntry, key string) *LockEntry {
+ return locks[LockIndexForKey(key)]
}
diff --git a/helper/locksutil/locks_test.go b/helper/locksutil/locks_test.go
index 13a775bb15..9916644637 100644
--- a/helper/locksutil/locks_test.go
+++ b/helper/locksutil/locks_test.go
@@ -1,47 +1,10 @@
package locksutil
-import (
- "sync"
- "testing"
-)
+import "testing"
func Test_CreateLocks(t *testing.T) {
- locks := map[string]*sync.RWMutex{}
-
- // Invalid argument
- if err := CreateLocks(locks, -1); err == nil {
- t.Fatal("expected an error")
- }
-
- // Invalid argument
- if err := CreateLocks(locks, 0); err == nil {
- t.Fatal("expected an error")
- }
-
- // Invalid argument
- if err := CreateLocks(locks, 300); err == nil {
- t.Fatal("expected an error")
- }
-
- // Maximum number of locks
- if err := CreateLocks(locks, 256); err != nil {
- t.Fatalf("err: %v", err)
- }
+ locks := CreateLocks()
if len(locks) != 256 {
t.Fatalf("bad: len(locks): expected:256 actual:%d", len(locks))
}
-
- // Clear out the locks for testing the next case
- for k, _ := range locks {
- delete(locks, k)
- }
-
- // General case
- if err := CreateLocks(locks, 10); err != nil {
- t.Fatalf("err: %v", err)
- }
- if len(locks) != 10 {
- t.Fatalf("bad: len(locks): expected:10 actual:%d", len(locks))
- }
-
}
diff --git a/helper/duration/duration.go b/helper/parseutil/parseutil.go
similarity index 83%
rename from helper/duration/duration.go
rename to helper/parseutil/parseutil.go
index dabf714800..9ba2bf78f4 100644
--- a/helper/duration/duration.go
+++ b/helper/parseutil/parseutil.go
@@ -1,4 +1,4 @@
-package duration
+package parseutil
import (
"encoding/json"
@@ -6,6 +6,8 @@ import (
"strconv"
"strings"
"time"
+
+ "github.com/mitchellh/mapstructure"
)
func ParseDurationSecond(in interface{}) (time.Duration, error) {
@@ -50,3 +52,11 @@ func ParseDurationSecond(in interface{}) (time.Duration, error) {
return dur, nil
}
+
+func ParseBool(in interface{}) (bool, error) {
+ var result bool
+ if err := mapstructure.WeakDecode(in, &result); err != nil {
+ return false, err
+ }
+ return result, nil
+}
diff --git a/helper/duration/duration_test.go b/helper/parseutil/parseutil_test.go
similarity index 60%
rename from helper/duration/duration_test.go
rename to helper/parseutil/parseutil_test.go
index 6ff9646e0a..7168a45820 100644
--- a/helper/duration/duration_test.go
+++ b/helper/parseutil/parseutil_test.go
@@ -1,4 +1,4 @@
-package duration
+package parseutil
import (
"encoding/json"
@@ -29,3 +29,27 @@ func Test_ParseDurationSecond(t *testing.T) {
t.Fatal("not equivalent")
}
}
+
+func Test_ParseBool(t *testing.T) {
+ outp, err := ParseBool("true")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !outp {
+ t.Fatal("wrong output")
+ }
+ outp, err = ParseBool(1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !outp {
+ t.Fatal("wrong output")
+ }
+ outp, err = ParseBool(true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !outp {
+ t.Fatal("wrong output")
+ }
+}
diff --git a/helper/strutil/strutil.go b/helper/strutil/strutil.go
index d079b9cb92..0d418016a1 100644
--- a/helper/strutil/strutil.go
+++ b/helper/strutil/strutil.go
@@ -255,3 +255,22 @@ func StrListDelete(s []string, d string) []string {
return s
}
+
+func GlobbedStringsMatch(item, val string) bool {
+ if len(item) < 2 {
+ return val == item
+ }
+
+ hasPrefix := strings.HasPrefix(item, "*")
+ hasSuffix := strings.HasSuffix(item, "*")
+
+ if hasPrefix && hasSuffix {
+ return strings.Contains(val, item[1:len(item)-1])
+ } else if hasPrefix {
+ return strings.HasSuffix(val, item[1:])
+ } else if hasSuffix {
+ return strings.HasPrefix(val, item[:len(item)-1])
+ }
+
+ return val == item
+}
diff --git a/helper/strutil/strutil_test.go b/helper/strutil/strutil_test.go
index f818a26723..85ccd8b4c0 100644
--- a/helper/strutil/strutil_test.go
+++ b/helper/strutil/strutil_test.go
@@ -279,3 +279,39 @@ $$`,
t.Fatalf("bad: expected:\n%#v\nactual:\n%#v", jsonExpected, actual)
}
}
+
+func TestGlobbedStringsMatch(t *testing.T) {
+ type tCase struct {
+ item string
+ val string
+ expect bool
+ }
+
+ tCases := []tCase{
+ tCase{"", "", true},
+ tCase{"*", "*", true},
+ tCase{"**", "**", true},
+ tCase{"*t", "t", true},
+ tCase{"*t", "test", true},
+ tCase{"t*", "test", true},
+ tCase{"*test", "test", true},
+ tCase{"*test", "a test", true},
+ tCase{"test", "a test", false},
+ tCase{"*test", "tests", false},
+ tCase{"test*", "test", true},
+ tCase{"test*", "testsss", true},
+ tCase{"test**", "testsss", false},
+ tCase{"test**", "test*", true},
+ tCase{"**test", "*test", true},
+ tCase{"TEST", "test", false},
+ tCase{"test", "test", true},
+ }
+
+ for _, tc := range tCases {
+ actual := GlobbedStringsMatch(tc.item, tc.val)
+
+ if actual != tc.expect {
+ t.Fatalf("Bad testcase %#v, expected %v, got %v", tc, tc.expect, actual)
+ }
+ }
+}
diff --git a/http/handler.go b/http/handler.go
index 09533b539b..fb9b7a8a1e 100644
--- a/http/handler.go
+++ b/http/handler.go
@@ -10,7 +10,7 @@ import (
"github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/duration"
+ "github.com/hashicorp/vault/helper/parseutil"
"github.com/hashicorp/vault/helper/jsonutil"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/vault"
@@ -274,6 +274,7 @@ func requestAuth(core *vault.Core, r *http.Request, req *logical.Request) *logic
te, err := core.LookupToken(v)
if err == nil && te != nil {
req.ClientTokenAccessor = te.Accessor
+ req.ClientTokenRemainingUses = te.NumUses
}
}
@@ -289,7 +290,7 @@ func requestWrapInfo(r *http.Request, req *logical.Request) (*logical.Request, e
}
// If it has an allowed suffix parse as a duration string
- dur, err := duration.ParseDurationSecond(wrapTTL)
+ dur, err := parseutil.ParseDurationSecond(wrapTTL)
if err != nil {
return req, err
}
diff --git a/http/handler_test.go b/http/handler_test.go
index 0cda837281..149e603731 100644
--- a/http/handler_test.go
+++ b/http/handler_test.go
@@ -80,6 +80,7 @@ func TestSysMounts_headerAuth(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": false,
},
@@ -89,6 +90,7 @@ func TestSysMounts_headerAuth(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": false,
},
@@ -98,6 +100,7 @@ func TestSysMounts_headerAuth(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": true,
},
@@ -108,6 +111,7 @@ func TestSysMounts_headerAuth(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": false,
},
@@ -117,6 +121,7 @@ func TestSysMounts_headerAuth(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": false,
},
@@ -126,6 +131,7 @@ func TestSysMounts_headerAuth(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": true,
},
diff --git a/http/sys_generate_root.go b/http/sys_generate_root.go
index 418b1a4fd1..3697f80358 100644
--- a/http/sys_generate_root.go
+++ b/http/sys_generate_root.go
@@ -120,7 +120,7 @@ func handleSysGenerateRootUpdate(core *vault.Core) http.Handler {
if req.Key == "" {
respondError(
w, http.StatusBadRequest,
- errors.New("'key' must specified in request body as JSON"))
+ errors.New("'key' must be specified in request body as JSON"))
return
}
diff --git a/http/sys_mount_test.go b/http/sys_mount_test.go
index e84ee67914..2e12f0f798 100644
--- a/http/sys_mount_test.go
+++ b/http/sys_mount_test.go
@@ -32,6 +32,7 @@ func TestSysMounts(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": false,
},
@@ -41,6 +42,7 @@ func TestSysMounts(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": false,
},
@@ -50,6 +52,7 @@ func TestSysMounts(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": true,
},
@@ -60,6 +63,7 @@ func TestSysMounts(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": false,
},
@@ -69,6 +73,7 @@ func TestSysMounts(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": false,
},
@@ -78,6 +83,7 @@ func TestSysMounts(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": true,
},
@@ -119,6 +125,7 @@ func TestSysMount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": false,
},
@@ -128,6 +135,7 @@ func TestSysMount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": false,
},
@@ -137,6 +145,7 @@ func TestSysMount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": false,
},
@@ -146,6 +155,7 @@ func TestSysMount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": true,
},
@@ -156,6 +166,7 @@ func TestSysMount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": false,
},
@@ -165,6 +176,7 @@ func TestSysMount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": false,
},
@@ -174,6 +186,7 @@ func TestSysMount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": false,
},
@@ -183,6 +196,7 @@ func TestSysMount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": true,
},
@@ -246,6 +260,7 @@ func TestSysRemount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": false,
},
@@ -255,6 +270,7 @@ func TestSysRemount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": false,
},
@@ -264,6 +280,7 @@ func TestSysRemount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": false,
},
@@ -273,6 +290,7 @@ func TestSysRemount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": true,
},
@@ -283,6 +301,7 @@ func TestSysRemount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": false,
},
@@ -292,6 +311,7 @@ func TestSysRemount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": false,
},
@@ -301,6 +321,7 @@ func TestSysRemount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": false,
},
@@ -310,6 +331,7 @@ func TestSysRemount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": true,
},
@@ -354,6 +376,7 @@ func TestSysUnmount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": false,
},
@@ -363,6 +386,7 @@ func TestSysUnmount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": false,
},
@@ -372,6 +396,7 @@ func TestSysUnmount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": true,
},
@@ -382,6 +407,7 @@ func TestSysUnmount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": false,
},
@@ -391,6 +417,7 @@ func TestSysUnmount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": false,
},
@@ -400,6 +427,7 @@ func TestSysUnmount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": true,
},
@@ -441,6 +469,7 @@ func TestSysTuneMount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": false,
},
@@ -450,6 +479,7 @@ func TestSysTuneMount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": false,
},
@@ -459,6 +489,7 @@ func TestSysTuneMount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": false,
},
@@ -468,6 +499,7 @@ func TestSysTuneMount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": true,
},
@@ -478,6 +510,7 @@ func TestSysTuneMount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": false,
},
@@ -487,6 +520,7 @@ func TestSysTuneMount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": false,
},
@@ -496,6 +530,7 @@ func TestSysTuneMount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": false,
},
@@ -505,6 +540,7 @@ func TestSysTuneMount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": true,
},
@@ -567,6 +603,7 @@ func TestSysTuneMount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("259196400"),
"max_lease_ttl": json.Number("259200000"),
+ "force_no_cache": false,
},
"local": false,
},
@@ -576,6 +613,7 @@ func TestSysTuneMount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": false,
},
@@ -585,6 +623,7 @@ func TestSysTuneMount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": false,
},
@@ -594,6 +633,7 @@ func TestSysTuneMount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": true,
},
@@ -604,6 +644,7 @@ func TestSysTuneMount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("259196400"),
"max_lease_ttl": json.Number("259200000"),
+ "force_no_cache": false,
},
"local": false,
},
@@ -613,6 +654,7 @@ func TestSysTuneMount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": false,
},
@@ -622,6 +664,7 @@ func TestSysTuneMount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": false,
},
@@ -631,6 +674,7 @@ func TestSysTuneMount(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
+ "force_no_cache": false,
},
"local": true,
},
@@ -656,9 +700,11 @@ func TestSysTuneMount(t *testing.T) {
"data": map[string]interface{}{
"default_lease_ttl": json.Number("259196400"),
"max_lease_ttl": json.Number("259200000"),
+ "force_no_cache": false,
},
"default_lease_ttl": json.Number("259196400"),
"max_lease_ttl": json.Number("259200000"),
+ "force_no_cache": false,
}
testResponseStatus(t, resp, 200)
@@ -687,9 +733,11 @@ func TestSysTuneMount(t *testing.T) {
"data": map[string]interface{}{
"default_lease_ttl": json.Number("40"),
"max_lease_ttl": json.Number("80"),
+ "force_no_cache": false,
},
"default_lease_ttl": json.Number("40"),
"max_lease_ttl": json.Number("80"),
+ "force_no_cache": false,
}
testResponseStatus(t, resp, 200)
diff --git a/http/sys_mounts_test.go b/http/sys_mounts_test.go
index 26e61acae5..5dc0bf9702 100644
--- a/http/sys_mounts_test.go
+++ b/http/sys_mounts_test.go
@@ -48,6 +48,10 @@ func TestSysMountConfig(t *testing.T) {
t.Fatalf("Expected default lease TTL: %d, got %d",
expectedMaxTTL, mountConfig.MaxLeaseTTL)
}
+
+ if mountConfig.ForceNoCache == true {
+ t.Fatalf("did not expect force cache")
+ }
}
// testMount sets up a test mount of a generic backend w/ a random path; caller
diff --git a/http/sys_rekey.go b/http/sys_rekey.go
index 023452c185..bd597b61b9 100644
--- a/http/sys_rekey.go
+++ b/http/sys_rekey.go
@@ -168,7 +168,7 @@ func handleSysRekeyUpdate(core *vault.Core, recovery bool) http.Handler {
if req.Key == "" {
respondError(
w, http.StatusBadRequest,
- errors.New("'key' must specified in request body as JSON"))
+ errors.New("'key' must be specified in request body as JSON"))
return
}
diff --git a/http/sys_seal.go b/http/sys_seal.go
index 07ffbcd5ba..ef2430495a 100644
--- a/http/sys_seal.go
+++ b/http/sys_seal.go
@@ -88,7 +88,7 @@ func handleSysUnseal(core *vault.Core) http.Handler {
if !req.Reset && req.Key == "" {
respondError(
w, http.StatusBadRequest,
- errors.New("'key' must specified in request body as JSON, or 'reset' set to true"))
+ errors.New("'key' must be specified in request body as JSON, or 'reset' set to true"))
return
}
diff --git a/logical/framework/backend.go b/logical/framework/backend.go
index eddace22e4..91ff3358d9 100644
--- a/logical/framework/backend.go
+++ b/logical/framework/backend.go
@@ -13,7 +13,7 @@ import (
log "github.com/mgutz/logxi/v1"
"github.com/hashicorp/go-multierror"
- "github.com/hashicorp/vault/helper/duration"
+ "github.com/hashicorp/vault/helper/parseutil"
"github.com/hashicorp/vault/helper/errutil"
"github.com/hashicorp/vault/helper/logformat"
"github.com/hashicorp/vault/logical"
@@ -551,7 +551,7 @@ func (s *FieldSchema) DefaultOrZero() interface{} {
case float64:
result = int(inp)
case string:
- dur, err := duration.ParseDurationSecond(inp)
+ dur, err := parseutil.ParseDurationSecond(inp)
if err != nil {
return s.Type.Zero()
}
diff --git a/logical/framework/field_data.go b/logical/framework/field_data.go
index b82f9f63db..9a62b0b1ab 100644
--- a/logical/framework/field_data.go
+++ b/logical/framework/field_data.go
@@ -4,7 +4,7 @@ import (
"encoding/json"
"fmt"
- "github.com/hashicorp/vault/helper/duration"
+ "github.com/hashicorp/vault/helper/parseutil"
"github.com/mitchellh/mapstructure"
)
@@ -161,7 +161,7 @@ func (d *FieldData) getPrimitive(
case float64:
result = int(inp)
case string:
- dur, err := duration.ParseDurationSecond(inp)
+ dur, err := parseutil.ParseDurationSecond(inp)
if err != nil {
return nil, true, err
}
diff --git a/logical/request.go b/logical/request.go
index 4a3f85bf7e..a3f6715189 100644
--- a/logical/request.go
+++ b/logical/request.go
@@ -85,6 +85,10 @@ type Request struct {
// WrapInfo contains requested response wrapping parameters
WrapInfo *RequestWrapInfo `json:"wrap_info" structs:"wrap_info" mapstructure:"wrap_info"`
+ // ClientTokenRemainingUses represents the allowed number of uses left on the
+ // token supplied
+ ClientTokenRemainingUses int `json:"client_token_remaining_uses" structs:"client_token_remaining_uses" mapstructure:"client_token_remaining_uses"`
+
// For replication, contains the last WAL on the remote side after handling
// the request, used for best-effort avoidance of stale read-after-write
lastRemoteWAL uint64
diff --git a/physical/cache.go b/physical/cache.go
index 96f2586f31..f1b1365a3b 100644
--- a/physical/cache.go
+++ b/physical/cache.go
@@ -1,15 +1,11 @@
package physical
import (
- "crypto/sha1"
- "encoding/hex"
"fmt"
"strings"
- "sync"
"github.com/hashicorp/golang-lru"
"github.com/hashicorp/vault/helper/locksutil"
- "github.com/hashicorp/vault/helper/strutil"
log "github.com/mgutz/logxi/v1"
)
@@ -26,7 +22,7 @@ type Cache struct {
backend Backend
transactional Transactional
lru *lru.TwoQueueCache
- locks map[string]*sync.RWMutex
+ locks []*locksutil.LockEntry
logger log.Logger
}
@@ -43,13 +39,9 @@ func NewCache(b Backend, size int, logger log.Logger) *Cache {
c := &Cache{
backend: b,
lru: cache,
- locks: make(map[string]*sync.RWMutex, 256),
+ locks: locksutil.CreateLocks(),
logger: logger,
}
- if err := locksutil.CreateLocks(c.locks, 256); err != nil {
- logger.Error("physical/cache: error creating locks", "error", err)
- return nil
- }
if txnl, ok := c.backend.(Transactional); ok {
c.transactional = txnl
@@ -58,31 +50,10 @@ func NewCache(b Backend, size int, logger log.Logger) *Cache {
return c
}
-func (c *Cache) lockHashForKey(key string) string {
- hf := sha1.New()
- hf.Write([]byte(key))
- return strings.ToLower(hex.EncodeToString(hf.Sum(nil))[:2])
-}
-
-func (c *Cache) lockForKey(key string) *sync.RWMutex {
- return c.locks[c.lockHashForKey(key)]
-}
-
// Purge is used to clear the cache
func (c *Cache) Purge() {
// Lock the world
- lockHashes := make([]string, 0, len(c.locks))
- for hash := range c.locks {
- lockHashes = append(lockHashes, hash)
- }
-
- // Sort and deduplicate. This ensures we don't try to grab the same lock
- // twice, and enforcing a sort means we'll not have multiple goroutines
- // deadlock by acquiring in different orders.
- lockHashes = strutil.RemoveDuplicates(lockHashes)
-
- for _, lockHash := range lockHashes {
- lock := c.locks[lockHash]
+ for _, lock := range c.locks {
lock.Lock()
defer lock.Unlock()
}
@@ -91,7 +62,7 @@ func (c *Cache) Purge() {
}
func (c *Cache) Put(entry *Entry) error {
- lock := c.lockForKey(entry.Key)
+ lock := locksutil.LockForKey(c.locks, entry.Key)
lock.Lock()
defer lock.Unlock()
@@ -103,7 +74,7 @@ func (c *Cache) Put(entry *Entry) error {
}
func (c *Cache) Get(key string) (*Entry, error) {
- lock := c.lockForKey(key)
+ lock := locksutil.LockForKey(c.locks, key)
lock.RLock()
defer lock.RUnlock()
@@ -139,7 +110,7 @@ func (c *Cache) Get(key string) (*Entry, error) {
}
func (c *Cache) Delete(key string) error {
- lock := c.lockForKey(key)
+ lock := locksutil.LockForKey(c.locks, key)
lock.Lock()
defer lock.Unlock()
@@ -162,18 +133,8 @@ func (c *Cache) Transaction(txns []TxnEntry) error {
return fmt.Errorf("physical/cache: underlying backend does not support transactions")
}
- var lockHashes []string
- for _, txn := range txns {
- lockHashes = append(lockHashes, c.lockHashForKey(txn.Entry.Key))
- }
-
- // Sort and deduplicate. This ensures we don't try to grab the same lock
- // twice, and enforcing a sort means we'll not have multiple goroutines
- // deadlock by acquiring in different orders.
- lockHashes = strutil.RemoveDuplicates(lockHashes)
-
- for _, lockHash := range lockHashes {
- lock := c.locks[lockHash]
+ // Lock the world
+ for _, lock := range c.locks {
lock.Lock()
defer lock.Unlock()
}
diff --git a/physical/consul.go b/physical/consul.go
index 2d55a2ec7c..f0dc5ac52d 100644
--- a/physical/consul.go
+++ b/physical/consul.go
@@ -321,6 +321,9 @@ func (c *ConsulBackend) Transaction(txns []TxnEntry) error {
ops = append(ops, cop)
}
+ c.permitPool.Acquire()
+ defer c.permitPool.Release()
+
ok, resp, _, err := c.kv.Txn(ops, nil)
if err != nil {
return err
diff --git a/physical/dynamodb_test.go b/physical/dynamodb_test.go
index d4efcef4c9..daac8c873f 100644
--- a/physical/dynamodb_test.go
+++ b/physical/dynamodb_test.go
@@ -198,13 +198,12 @@ func testDynamoDBLockTTL(t *testing.T, ha HABackend) {
// The first lock should have lost the leader channel
leaderChClosed := false
blocking := make(chan struct{})
- time.AfterFunc(watchInterval*3, func() {
- close(blocking)
- })
// Attempt to read from the leader or the blocking channel, which ever one
// happens first.
go func() {
select {
+ case <-time.After(watchInterval * 3):
+ return
case <-leaderCh:
leaderChClosed = true
close(blocking)
diff --git a/physical/etcd3.go b/physical/etcd3.go
index 420d8028bc..8076826aec 100644
--- a/physical/etcd3.go
+++ b/physical/etcd3.go
@@ -32,6 +32,9 @@ type EtcdBackend struct {
etcd *clientv3.Client
}
+// Etcd's default lease duration is 60s; use 15s so held locks are released faster after an unclean shutdown.
+const etcd3LockTimeoutInSeconds = 15
+
// newEtcd3Backend constructs a etcd3 backend.
func newEtcd3Backend(conf map[string]string, logger log.Logger) (Backend, error) {
// Get the etcd path form the configuration.
@@ -228,7 +231,7 @@ type EtcdLock struct {
// Lock is used for mutual exclusion based on the given key.
func (c *EtcdBackend) LockWith(key, value string) (Lock, error) {
- session, err := concurrency.NewSession(c.etcd)
+ session, err := concurrency.NewSession(c.etcd, concurrency.WithTTL(etcd3LockTimeoutInSeconds))
if err != nil {
return nil, err
}
@@ -262,7 +265,7 @@ func (c *EtcdLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
}
return nil, err
}
- if _, err := c.etcd.Put(ctx, c.etcdMu.Key(), c.value); err != nil {
+ if _, err := c.etcd.Put(ctx, c.etcdMu.Key(), c.value, clientv3.WithLease(c.etcdSession.Lease())); err != nil {
return nil, err
}
diff --git a/physical/s3.go b/physical/s3.go
index 3e6809c96e..088ddb8dea 100644
--- a/physical/s3.go
+++ b/physical/s3.go
@@ -6,6 +6,7 @@ import (
"io"
"os"
"sort"
+ "strconv"
"strings"
"time"
@@ -16,6 +17,7 @@ import (
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
+ "github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/helper/awsutil"
)
@@ -25,6 +27,7 @@ type S3Backend struct {
bucket string
client *s3.S3
logger log.Logger
+ permitPool *PermitPool
}
// newS3Backend constructs a S3 backend using a pre-existing
@@ -85,10 +88,23 @@ func newS3Backend(conf map[string]string, logger log.Logger) (Backend, error) {
return nil, fmt.Errorf("unable to access bucket '%s': %v", bucket, err)
}
+ maxParStr, ok := conf["max_parallel"]
+ var maxParInt int
+ if ok {
+ maxParInt, err = strconv.Atoi(maxParStr)
+ if err != nil {
+ return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
+ }
+ if logger.IsDebug() {
+ logger.Debug("s3: max_parallel set", "max_parallel", maxParInt)
+ }
+ }
+
s := &S3Backend{
client: s3conn,
bucket: bucket,
logger: logger,
+ permitPool: NewPermitPool(maxParInt),
}
return s, nil
}
@@ -97,6 +113,9 @@ func newS3Backend(conf map[string]string, logger log.Logger) (Backend, error) {
func (s *S3Backend) Put(entry *Entry) error {
defer metrics.MeasureSince([]string{"s3", "put"}, time.Now())
+ s.permitPool.Acquire()
+ defer s.permitPool.Release()
+
_, err := s.client.PutObject(&s3.PutObjectInput{
Bucket: aws.String(s.bucket),
Key: aws.String(entry.Key),
@@ -114,6 +133,9 @@ func (s *S3Backend) Put(entry *Entry) error {
func (s *S3Backend) Get(key string) (*Entry, error) {
defer metrics.MeasureSince([]string{"s3", "get"}, time.Now())
+ s.permitPool.Acquire()
+ defer s.permitPool.Release()
+
resp, err := s.client.GetObject(&s3.GetObjectInput{
Bucket: aws.String(s.bucket),
Key: aws.String(key),
@@ -122,9 +144,8 @@ func (s *S3Backend) Get(key string) (*Entry, error) {
// Return nil on 404s, error on anything else
if awsErr.StatusCode() == 404 {
return nil, nil
- } else {
- return nil, err
}
+ return nil, err
}
if err != nil {
return nil, err
@@ -151,6 +172,9 @@ func (s *S3Backend) Get(key string) (*Entry, error) {
func (s *S3Backend) Delete(key string) error {
defer metrics.MeasureSince([]string{"s3", "delete"}, time.Now())
+ s.permitPool.Acquire()
+ defer s.permitPool.Release()
+
_, err := s.client.DeleteObject(&s3.DeleteObjectInput{
Bucket: aws.String(s.bucket),
Key: aws.String(key),
@@ -168,6 +192,9 @@ func (s *S3Backend) Delete(key string) error {
func (s *S3Backend) List(prefix string) ([]string, error) {
defer metrics.MeasureSince([]string{"s3", "list"}, time.Now())
+ s.permitPool.Acquire()
+ defer s.permitPool.Release()
+
params := &s3.ListObjectsV2Input{
Bucket: aws.String(s.bucket),
Prefix: aws.String(prefix),
diff --git a/terraform/aws/variables.tf b/terraform/aws/variables.tf
index 93a6cfcc03..ba0ee3a231 100644
--- a/terraform/aws/variables.tf
+++ b/terraform/aws/variables.tf
@@ -3,7 +3,7 @@
//-------------------------------------------------------------------
variable "download-url" {
- default = "https://releases.hashicorp.com/vault/0.6.5/vault_0.6.5_linux_amd64.zip"
+ default = "https://releases.hashicorp.com/vault/0.7.0/vault_0.7.0_linux_amd64.zip"
description = "URL to download Vault"
}
diff --git a/vault/acl.go b/vault/acl.go
index 81d6a978c8..550e0df029 100644
--- a/vault/acl.go
+++ b/vault/acl.go
@@ -5,6 +5,7 @@ import (
"strings"
"github.com/armon/go-radix"
+ "github.com/hashicorp/vault/helper/strutil"
"github.com/hashicorp/vault/logical"
)
@@ -348,7 +349,14 @@ func valueInParameterList(v interface{}, list []interface{}) bool {
func valueInSlice(v interface{}, list []interface{}) bool {
for _, el := range list {
- if reflect.DeepEqual(el, v) {
+ if reflect.TypeOf(el).String() == "string" && reflect.TypeOf(v).String() == "string" {
+ item := el.(string)
+ val := v.(string)
+
+ if strutil.GlobbedStringsMatch(item, val) {
+ return true
+ }
+ } else if reflect.DeepEqual(el, v) {
return true
}
}
diff --git a/vault/acl_test.go b/vault/acl_test.go
index 61ce4590cd..7eb45b8b43 100644
--- a/vault/acl_test.go
+++ b/vault/acl_test.go
@@ -366,6 +366,7 @@ func TestACL_ValuePermissions(t *testing.T) {
{"dev/ops", []string{"allow"}, []interface{}{"good"}, true},
{"dev/ops", []string{"allow"}, []interface{}{"bad"}, false},
{"foo/bar", []string{"deny"}, []interface{}{"bad"}, false},
+ {"foo/bar", []string{"deny"}, []interface{}{"bad glob"}, false},
{"foo/bar", []string{"deny"}, []interface{}{"good"}, true},
{"foo/bar", []string{"allow"}, []interface{}{"good"}, true},
{"foo/baz", []string{"aLLow"}, []interface{}{"good"}, true},
@@ -379,6 +380,9 @@ func TestACL_ValuePermissions(t *testing.T) {
{"fizz/buzz", []string{"allow_multi"}, []interface{}{"good"}, true},
{"fizz/buzz", []string{"allow_multi"}, []interface{}{"good1"}, true},
{"fizz/buzz", []string{"allow_multi"}, []interface{}{"good2"}, true},
+ {"fizz/buzz", []string{"allow_multi"}, []interface{}{"glob good2"}, false},
+ {"fizz/buzz", []string{"allow_multi"}, []interface{}{"glob good3"}, true},
+ {"fizz/buzz", []string{"allow_multi"}, []interface{}{"bad"}, false},
{"fizz/buzz", []string{"allow_multi"}, []interface{}{"bad"}, false},
{"fizz/buzz", []string{"allow_multi", "allow"}, []interface{}{"good1", "good"}, true},
{"fizz/buzz", []string{"deny_multi"}, []interface{}{"bad2"}, false},
@@ -686,7 +690,7 @@ path "dev/*" {
path "foo/bar" {
policy = "write"
denied_parameters = {
- "deny" = ["bad"]
+ "deny" = ["bad*"]
}
}
path "foo/baz" {
@@ -701,7 +705,7 @@ path "foo/baz" {
path "fizz/buzz" {
policy = "write"
allowed_parameters = {
- "allow_multi" = ["good", "good1", "good2"]
+ "allow_multi" = ["good", "good1", "good2", "*good3"]
"allow" = ["good"]
}
denied_parameters = {
diff --git a/vault/audit.go b/vault/audit.go
index 46e789dfb3..939184369c 100644
--- a/vault/audit.go
+++ b/vault/audit.go
@@ -87,6 +87,9 @@ func (c *Core) enableAudit(entry *MountEntry) error {
if err != nil {
return err
}
+ if backend == nil {
+ return fmt.Errorf("nil audit backend of type %q returned from factory", entry.Type)
+ }
newTable := c.audit.shallowClone()
newTable.Entries = append(newTable.Entries, entry)
@@ -300,14 +303,18 @@ func (c *Core) setupAudits() error {
view := NewBarrierView(c.barrier, viewPath)
// Initialize the backend
- audit, err := c.newAuditBackend(entry, view, entry.Options)
+ backend, err := c.newAuditBackend(entry, view, entry.Options)
if err != nil {
c.logger.Error("core: failed to create audit entry", "path", entry.Path, "error", err)
continue
}
+ if backend == nil {
+ c.logger.Error("core: created audit entry was nil", "path", entry.Path, "type", entry.Type)
+ continue
+ }
// Mount the backend
- broker.Register(entry.Path, audit, view)
+ broker.Register(entry.Path, backend, view)
successCount += 1
}
@@ -376,6 +383,9 @@ func (c *Core) newAuditBackend(entry *MountEntry, view logical.Storage, conf map
if err != nil {
return nil, err
}
+ if be == nil {
+ return nil, fmt.Errorf("nil backend returned from %q factory function", entry.Type)
+ }
switch entry.Type {
case "file":
diff --git a/vault/audit_test.go b/vault/audit_test.go
index 344605bc0a..5e97da86f4 100644
--- a/vault/audit_test.go
+++ b/vault/audit_test.go
@@ -444,6 +444,7 @@ func TestAuditBroker_LogResponse(t *testing.T) {
b.Register("bar", a2, nil)
auth := &logical.Auth{
+ NumUses: 10,
ClientToken: "foo",
Policies: []string{"dev", "ops"},
Metadata: map[string]string{
diff --git a/vault/auth.go b/vault/auth.go
index 8380dbbe83..c3197bce2c 100644
--- a/vault/auth.go
+++ b/vault/auth.go
@@ -91,6 +91,9 @@ func (c *Core) enableCredential(entry *MountEntry) error {
if err != nil {
return err
}
+ if backend == nil {
+ return fmt.Errorf("nil backend returned from %q factory", entry.Type)
+ }
if err := backend.Initialize(); err != nil {
return err
@@ -151,6 +154,12 @@ func (c *Core) disableCredential(path string) (bool, error) {
return true, err
}
+ // Call cleanup function if it exists
+ backend := c.router.MatchingBackend(fullPath)
+ if backend != nil {
+ backend.Cleanup()
+ }
+
// Unmount the backend
if err := c.router.Unmount(fullPath); err != nil {
return true, err
@@ -386,6 +395,9 @@ func (c *Core) setupCredentials() error {
c.logger.Error("core: failed to create credential entry", "path", entry.Path, "error", err)
return errLoadAuthFailed
}
+ if backend == nil {
+ return fmt.Errorf("nil backend returned from %q factory", entry.Type)
+ }
if err := backend.Initialize(); err != nil {
return err
@@ -430,10 +442,9 @@ func (c *Core) teardownCredentials() error {
if c.auth != nil {
authTable := c.auth.shallowClone()
for _, e := range authTable.Entries {
- prefix := e.Path
- b, ok := c.router.root.Get(prefix)
- if ok {
- b.(*routeEntry).backend.Cleanup()
+ backend := c.router.MatchingBackend(credentialRoutePrefix + e.Path)
+ if backend != nil {
+ backend.Cleanup()
}
}
}
diff --git a/vault/core.go b/vault/core.go
index 9a2f1900ef..ef99741bfd 100644
--- a/vault/core.go
+++ b/vault/core.go
@@ -1565,6 +1565,16 @@ func (c *Core) periodicCheckKeyUpgrade(doneCh, stopCh chan struct{}) {
continue
}
+ // Check for a poison pill. If we can read it, it means we have stale
+ // keys (e.g. from replication being activated) and we need to seal to
+ // be unsealed again.
+ entry, _ := c.barrier.Get(poisonPillPath)
+ if entry != nil && len(entry.Value) > 0 {
+ c.logger.Warn("core: encryption keys have changed out from underneath us (possibly due to replication enabling), must be unsealed again")
+ go c.Shutdown()
+ continue
+ }
+
if err := c.checkKeyUpgrades(); err != nil {
c.logger.Error("core: key rotation periodic upgrade check failed", "error", err)
}
@@ -1581,16 +1591,6 @@ func (c *Core) checkKeyUpgrades() error {
// Check for an upgrade
didUpgrade, newTerm, err := c.barrier.CheckUpgrade()
if err != nil {
- // The problem might be that we can't decrypt the value, e.g. if
- // replication has been turned on, so check to see if a poison pill
- // was written. If we can read it, it means we have stale keys and
- // we need to seal to be unsealed again.
- entry, _ := c.barrier.Get(poisonPillPath)
- if entry != nil && len(entry.Value) > 0 {
- c.logger.Warn("core: encryption keys have changed out from underneath us (possibly due to replication enabling), must be unsealed again")
- go c.Shutdown()
- return nil
- }
return err
}
diff --git a/vault/dynamic_system_view.go b/vault/dynamic_system_view.go
index ca2b89d6c8..eb99f29c62 100644
--- a/vault/dynamic_system_view.go
+++ b/vault/dynamic_system_view.go
@@ -77,7 +77,7 @@ func (d dynamicSystemView) Tainted() bool {
// CachingDisabled indicates whether to use caching behavior
func (d dynamicSystemView) CachingDisabled() bool {
- return d.core.cachingDisabled
+ return d.core.cachingDisabled || (d.mountEntry != nil && d.mountEntry.Config.ForceNoCache)
}
// Checks if this is a primary Vault instance.
diff --git a/vault/logical_passthrough.go b/vault/logical_passthrough.go
index d779673f88..eb52a3f62c 100644
--- a/vault/logical_passthrough.go
+++ b/vault/logical_passthrough.go
@@ -5,7 +5,7 @@ import (
"fmt"
"strings"
- "github.com/hashicorp/vault/helper/duration"
+ "github.com/hashicorp/vault/helper/parseutil"
"github.com/hashicorp/vault/helper/jsonutil"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
@@ -133,7 +133,7 @@ func (b *PassthroughBackend) handleRead(
}
ttlDuration := b.System().DefaultLeaseTTL()
if len(ttl) != 0 {
- dur, err := duration.ParseDurationSecond(ttl)
+ dur, err := parseutil.ParseDurationSecond(ttl)
if err == nil {
ttlDuration = dur
}
diff --git a/vault/logical_system.go b/vault/logical_system.go
index 2aff5d0e1b..f43de9ef67 100644
--- a/vault/logical_system.go
+++ b/vault/logical_system.go
@@ -10,7 +10,7 @@ import (
"time"
"github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/duration"
+ "github.com/hashicorp/vault/helper/parseutil"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
"github.com/mitchellh/mapstructure"
@@ -1030,6 +1030,7 @@ func (b *SystemBackend) handleMountTable(
"config": map[string]interface{}{
"default_lease_ttl": int64(entry.Config.DefaultLeaseTTL.Seconds()),
"max_lease_ttl": int64(entry.Config.MaxLeaseTTL.Seconds()),
+ "force_no_cache": entry.Config.ForceNoCache,
},
"local": entry.Local,
}
@@ -1064,6 +1065,7 @@ func (b *SystemBackend) handleMount(
var apiConfig struct {
DefaultLeaseTTL string `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
MaxLeaseTTL string `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
+ ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"`
}
configMap := data.Get("config").(map[string]interface{})
if configMap != nil && len(configMap) != 0 {
@@ -1079,7 +1081,7 @@ func (b *SystemBackend) handleMount(
case "":
case "system":
default:
- tmpDef, err := duration.ParseDurationSecond(apiConfig.DefaultLeaseTTL)
+ tmpDef, err := parseutil.ParseDurationSecond(apiConfig.DefaultLeaseTTL)
if err != nil {
return logical.ErrorResponse(fmt.Sprintf(
"unable to parse default TTL of %s: %s", apiConfig.DefaultLeaseTTL, err)),
@@ -1092,7 +1094,7 @@ func (b *SystemBackend) handleMount(
case "":
case "system":
default:
- tmpMax, err := duration.ParseDurationSecond(apiConfig.MaxLeaseTTL)
+ tmpMax, err := parseutil.ParseDurationSecond(apiConfig.MaxLeaseTTL)
if err != nil {
return logical.ErrorResponse(fmt.Sprintf(
"unable to parse max TTL of %s: %s", apiConfig.MaxLeaseTTL, err)),
@@ -1113,6 +1115,11 @@ func (b *SystemBackend) handleMount(
logical.ErrInvalidRequest
}
+ // Copy over the force no cache if set
+ if apiConfig.ForceNoCache {
+ config.ForceNoCache = true
+ }
+
if logicalType == "" {
return logical.ErrorResponse(
"backend type must be specified as a string"),
@@ -1248,10 +1255,17 @@ func (b *SystemBackend) handleTuneReadCommon(path string) (*logical.Response, er
return handleError(fmt.Errorf("sys: cannot fetch sysview for path %s", path))
}
+ mountEntry := b.Core.router.MatchingMountEntry(path)
+ if mountEntry == nil {
+ b.Backend.Logger().Error("sys: cannot fetch mount entry", "path", path)
+ return handleError(fmt.Errorf("sys: cannot fetch mount entry for path %s", path))
+ }
+
resp := &logical.Response{
Data: map[string]interface{}{
"default_lease_ttl": int(sysView.DefaultLeaseTTL().Seconds()),
"max_lease_ttl": int(sysView.MaxLeaseTTL().Seconds()),
+ "force_no_cache": mountEntry.Config.ForceNoCache,
},
}
@@ -1327,7 +1341,7 @@ func (b *SystemBackend) handleTuneWriteCommon(
tmpDef := time.Duration(0)
newDefault = &tmpDef
default:
- tmpDef, err := duration.ParseDurationSecond(defTTL)
+ tmpDef, err := parseutil.ParseDurationSecond(defTTL)
if err != nil {
return handleError(err)
}
@@ -1341,7 +1355,7 @@ func (b *SystemBackend) handleTuneWriteCommon(
tmpMax := time.Duration(0)
newMax = &tmpMax
default:
- tmpMax, err := duration.ParseDurationSecond(maxTTL)
+ tmpMax, err := parseutil.ParseDurationSecond(maxTTL)
if err != nil {
return handleError(err)
}
diff --git a/vault/logical_system_test.go b/vault/logical_system_test.go
index c608b3b867..0785e07a1e 100644
--- a/vault/logical_system_test.go
+++ b/vault/logical_system_test.go
@@ -59,6 +59,7 @@ func TestSystemBackend_mounts(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": resp.Data["secret/"].(map[string]interface{})["config"].(map[string]interface{})["default_lease_ttl"].(int64),
"max_lease_ttl": resp.Data["secret/"].(map[string]interface{})["config"].(map[string]interface{})["max_lease_ttl"].(int64),
+ "force_no_cache": false,
},
"local": false,
},
@@ -68,6 +69,7 @@ func TestSystemBackend_mounts(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": resp.Data["sys/"].(map[string]interface{})["config"].(map[string]interface{})["default_lease_ttl"].(int64),
"max_lease_ttl": resp.Data["sys/"].(map[string]interface{})["config"].(map[string]interface{})["max_lease_ttl"].(int64),
+ "force_no_cache": false,
},
"local": false,
},
@@ -77,6 +79,7 @@ func TestSystemBackend_mounts(t *testing.T) {
"config": map[string]interface{}{
"default_lease_ttl": resp.Data["cubbyhole/"].(map[string]interface{})["config"].(map[string]interface{})["default_lease_ttl"].(int64),
"max_lease_ttl": resp.Data["cubbyhole/"].(map[string]interface{})["config"].(map[string]interface{})["max_lease_ttl"].(int64),
+ "force_no_cache": false,
},
"local": true,
},
@@ -101,6 +104,32 @@ func TestSystemBackend_mount(t *testing.T) {
}
}
+func TestSystemBackend_mount_force_no_cache(t *testing.T) {
+ core, b, _ := testCoreSystemBackend(t)
+
+ req := logical.TestRequest(t, logical.UpdateOperation, "mounts/prod/secret/")
+ req.Data["type"] = "generic"
+ req.Data["config"] = map[string]interface{}{
+ "force_no_cache": true,
+ }
+
+ resp, err := b.HandleRequest(req)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: %v", resp)
+ }
+
+ mountEntry := core.router.MatchingMountEntry("prod/secret/")
+ if mountEntry == nil {
+ t.Fatalf("missing mount entry")
+ }
+ if !mountEntry.Config.ForceNoCache {
+ t.Fatalf("bad config %#v", mountEntry)
+ }
+}
+
func TestSystemBackend_mount_invalid(t *testing.T) {
b := testSystemBackend(t)
diff --git a/vault/mount.go b/vault/mount.go
index c03c560e38..d428eee8b1 100644
--- a/vault/mount.go
+++ b/vault/mount.go
@@ -146,6 +146,7 @@ type MountEntry struct {
type MountConfig struct {
DefaultLeaseTTL time.Duration `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"` // Override for global default
MaxLeaseTTL time.Duration `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"` // Override for global default
+ ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"` // Override for global default
}
// Returns a deep copy of the mount entry
@@ -212,6 +213,9 @@ func (c *Core) mount(entry *MountEntry) error {
if err != nil {
return err
}
+ if backend == nil {
+ return fmt.Errorf("nil backend of type %q returned from creation function", entry.Type)
+ }
// Call initialize; this takes care of init tasks that must be run after
// the ignore paths are collected
@@ -283,9 +287,9 @@ func (c *Core) unmount(path string) (bool, error) {
}
// Call cleanup function if it exists
- b, ok := c.router.root.Get(path)
- if ok {
- b.(*routeEntry).backend.Cleanup()
+ backend := c.router.MatchingBackend(path)
+ if backend != nil {
+ backend.Cleanup()
}
// Unmount the backend entirely
@@ -638,6 +642,9 @@ func (c *Core) setupMounts() error {
c.logger.Error("core: failed to create mount entry", "path", entry.Path, "error", err)
return errLoadMountsFailed
}
+ if backend == nil {
+ return fmt.Errorf("created mount entry of type %q is nil", entry.Type)
+ }
if err := backend.Initialize(); err != nil {
return err
@@ -680,10 +687,9 @@ func (c *Core) unloadMounts() error {
if c.mounts != nil {
mountTable := c.mounts.shallowClone()
for _, e := range mountTable.Entries {
- prefix := e.Path
- b, ok := c.router.root.Get(prefix)
- if ok {
- b.(*routeEntry).backend.Cleanup()
+ backend := c.router.MatchingBackend(e.Path)
+ if backend != nil {
+ backend.Cleanup()
}
}
}
@@ -712,6 +718,9 @@ func (c *Core) newLogicalBackend(t string, sysView logical.SystemView, view logi
if err != nil {
return nil, err
}
+ if b == nil {
+ return nil, fmt.Errorf("nil backend of type %q returned from factory", t)
+ }
return b, nil
}
diff --git a/vault/policy.go b/vault/policy.go
index 4ced40e710..c808c2a866 100644
--- a/vault/policy.go
+++ b/vault/policy.go
@@ -10,7 +10,7 @@ import (
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/hcl"
"github.com/hashicorp/hcl/hcl/ast"
- "github.com/hashicorp/vault/helper/duration"
+ "github.com/hashicorp/vault/helper/parseutil"
)
const (
@@ -211,14 +211,14 @@ func parsePaths(result *Policy, list *ast.ObjectList) error {
}
}
if pc.MinWrappingTTLHCL != nil {
- dur, err := duration.ParseDurationSecond(pc.MinWrappingTTLHCL)
+ dur, err := parseutil.ParseDurationSecond(pc.MinWrappingTTLHCL)
if err != nil {
return errwrap.Wrapf("error parsing min_wrapping_ttl: {{err}}", err)
}
pc.Permissions.MinWrappingTTL = dur
}
if pc.MaxWrappingTTLHCL != nil {
- dur, err := duration.ParseDurationSecond(pc.MaxWrappingTTLHCL)
+ dur, err := parseutil.ParseDurationSecond(pc.MaxWrappingTTLHCL)
if err != nil {
return errwrap.Wrapf("error parsing max_wrapping_ttl: {{err}}", err)
}
diff --git a/vault/router.go b/vault/router.go
index c691c745a7..dd2d23220b 100644
--- a/vault/router.go
+++ b/vault/router.go
@@ -283,6 +283,10 @@ func (r *Router) routeCommon(req *logical.Request, existenceCheck bool) (*logica
// Cache the identifier of the request
originalReqID := req.ID
+ // Cache the client token's number of uses in the request
+ originalClientTokenRemainingUses := req.ClientTokenRemainingUses
+ req.ClientTokenRemainingUses = 0
+
// Cache the headers and hide them from backends
headers := req.Headers
req.Headers = nil
@@ -304,6 +308,7 @@ func (r *Router) routeCommon(req *logical.Request, existenceCheck bool) (*logica
req.ID = originalReqID
req.Storage = nil
req.ClientToken = clientToken
+ req.ClientTokenRemainingUses = originalClientTokenRemainingUses
req.WrapInfo = wrapInfo
req.Headers = headers
// This is only set in one place, after routing, so should never be set
diff --git a/vault/token_store.go b/vault/token_store.go
index 48a1d18524..1cd611ca0a 100644
--- a/vault/token_store.go
+++ b/vault/token_store.go
@@ -5,13 +5,12 @@ import (
"fmt"
"regexp"
"strings"
- "sync"
"time"
"github.com/armon/go-metrics"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/helper/duration"
+ "github.com/hashicorp/vault/helper/parseutil"
"github.com/hashicorp/vault/helper/jsonutil"
"github.com/hashicorp/vault/helper/locksutil"
"github.com/hashicorp/vault/helper/policyutil"
@@ -88,7 +87,7 @@ type TokenStore struct {
policyLookupFunc func(string) (*Policy, error)
- tokenLocks map[string]*sync.RWMutex
+ tokenLocks []*locksutil.LockEntry
cubbyholeDestroyer func(*TokenStore, string) error
}
@@ -109,14 +108,7 @@ func NewTokenStore(c *Core, config *logical.BackendConfig) (*TokenStore, error)
t.policyLookupFunc = c.policyStore.GetPolicy
}
- t.tokenLocks = map[string]*sync.RWMutex{}
-
- // Create 256 locks
- if err := locksutil.CreateLocks(t.tokenLocks, 256); err != nil {
- return nil, fmt.Errorf("failed to create locks: %v", err)
- }
-
- t.tokenLocks["custom"] = &sync.RWMutex{}
+ t.tokenLocks = locksutil.CreateLocks()
// Setup the framework endpoints
t.Backend = &framework.Backend{
@@ -741,21 +733,6 @@ func (ts *TokenStore) storeCommon(entry *TokenEntry, writeSecondary bool) error
return nil
}
-func (ts *TokenStore) getTokenLock(id string) *sync.RWMutex {
- // Find our multilevel lock, or fall back to global
- var lock *sync.RWMutex
- var ok bool
- if len(id) >= 2 {
- lock, ok = ts.tokenLocks[id[0:2]]
- }
- if !ok || lock == nil {
- // Fall back for custom token IDs
- lock = ts.tokenLocks["custom"]
- }
-
- return lock
-}
-
// UseToken is used to manage restricted use tokens and decrement their
// available uses. Returns two values: a potentially updated entry or, if the
// token has been revoked, nil; and whether an error was encountered. The
@@ -774,8 +751,7 @@ func (ts *TokenStore) UseToken(te *TokenEntry) (*TokenEntry, error) {
return te, nil
}
- lock := ts.getTokenLock(te.ID)
-
+ lock := locksutil.LockForKey(ts.tokenLocks, te.ID)
lock.Lock()
defer lock.Unlock()
@@ -828,7 +804,7 @@ func (ts *TokenStore) Lookup(id string) (*TokenEntry, error) {
return nil, fmt.Errorf("cannot lookup blank token")
}
- lock := ts.getTokenLock(id)
+ lock := locksutil.LockForKey(ts.tokenLocks, id)
lock.RLock()
defer lock.RUnlock()
@@ -932,7 +908,7 @@ func (ts *TokenStore) revokeSalted(saltedId string) (ret error) {
return nil
}
- lock := ts.getTokenLock(entry.ID)
+ lock := locksutil.LockForKey(ts.tokenLocks, entry.ID)
lock.Lock()
// Lookup the token first
@@ -1582,7 +1558,7 @@ func (ts *TokenStore) handleCreateCommon(
}
if data.ExplicitMaxTTL != "" {
- dur, err := duration.ParseDurationSecond(data.ExplicitMaxTTL)
+ dur, err := parseutil.ParseDurationSecond(data.ExplicitMaxTTL)
if err != nil {
return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
}
@@ -1598,7 +1574,7 @@ func (ts *TokenStore) handleCreateCommon(
return logical.ErrorResponse("root or sudo privileges required to create periodic token"),
logical.ErrInvalidRequest
}
- dur, err := duration.ParseDurationSecond(data.Period)
+ dur, err := parseutil.ParseDurationSecond(data.Period)
if err != nil {
return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
}
@@ -1611,7 +1587,7 @@ func (ts *TokenStore) handleCreateCommon(
// Parse the TTL/lease if any
if data.TTL != "" {
- dur, err := duration.ParseDurationSecond(data.TTL)
+ dur, err := parseutil.ParseDurationSecond(data.TTL)
if err != nil {
return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
}
@@ -1713,6 +1689,7 @@ func (ts *TokenStore) handleCreateCommon(
// Generate the response
resp.Auth = &logical.Auth{
+ NumUses: te.NumUses,
DisplayName: te.DisplayName,
Policies: te.Policies,
Metadata: te.Meta,
@@ -1851,7 +1828,7 @@ func (ts *TokenStore) handleLookup(
return logical.ErrorResponse("missing token ID"), logical.ErrInvalidRequest
}
- lock := ts.getTokenLock(id)
+ lock := locksutil.LockForKey(ts.tokenLocks, id)
lock.RLock()
defer lock.RUnlock()
diff --git a/version/version_base.go b/version/version_base.go
index 57e14a5d88..bd1d2ca8fc 100644
--- a/version/version_base.go
+++ b/version/version_base.go
@@ -9,5 +9,5 @@ func init() {
// A pre-release marker for the version. If this is "" (empty string)
// then it means that it is a final release. Otherwise, this is a pre-release
// such as "dev" (in development), "beta", "rc1", etc.
- VersionPrerelease = "beta1"
+ VersionPrerelease = ""
}
diff --git a/website/Gemfile b/website/Gemfile
index 500612a3b8..24926e6fd1 100644
--- a/website/Gemfile
+++ b/website/Gemfile
@@ -1,3 +1,3 @@
source "https://rubygems.org"
-gem "middleman-hashicorp", "0.3.6"
+gem "middleman-hashicorp", "0.3.18"
diff --git a/website/Gemfile.lock b/website/Gemfile.lock
index ca1ef347c8..502823760a 100644
--- a/website/Gemfile.lock
+++ b/website/Gemfile.lock
@@ -1,18 +1,17 @@
GEM
remote: https://rubygems.org/
specs:
- activesupport (4.2.7.1)
+ activesupport (4.2.8)
i18n (~> 0.7)
- json (~> 1.7, >= 1.7.7)
minitest (~> 5.1)
thread_safe (~> 0.3, >= 0.3.4)
tzinfo (~> 1.1)
- autoprefixer-rails (6.5.3)
+ autoprefixer-rails (6.7.7.1)
execjs
bootstrap-sass (3.3.7)
autoprefixer-rails (>= 5.2.1)
sass (>= 3.3.4)
- builder (3.2.2)
+ builder (3.2.3)
capybara (2.4.4)
mime-types (>= 1.16)
nokogiri (>= 1.3.3)
@@ -23,7 +22,7 @@ GEM
coffee-script (2.4.1)
coffee-script-source
execjs
- coffee-script-source (1.10.0)
+ coffee-script-source (1.12.2)
compass (1.0.3)
chunky_png (~> 1.2)
compass-core (~> 1.0.2)
@@ -40,9 +39,9 @@ GEM
eventmachine (>= 0.12.9)
http_parser.rb (~> 0.6.0)
erubis (2.7.0)
- eventmachine (1.2.1)
+ eventmachine (1.2.3)
execjs (2.7.0)
- ffi (1.9.14)
+ ffi (1.9.18)
haml (4.0.7)
tilt
hike (1.2.3)
@@ -50,8 +49,8 @@ GEM
uber (~> 0.0.14)
http_parser.rb (0.6.0)
i18n (0.7.0)
- json (1.8.3)
- kramdown (1.12.0)
+ json (2.0.3)
+ kramdown (1.13.2)
listen (3.0.8)
rb-fsevent (~> 0.9, >= 0.9.4)
rb-inotify (~> 0.9, >= 0.9.7)
@@ -78,13 +77,14 @@ GEM
rack (>= 1.4.5, < 2.0)
thor (>= 0.15.2, < 2.0)
tilt (~> 1.4.1, < 2.0)
- middleman-hashicorp (0.3.6)
+ middleman-hashicorp (0.3.18)
bootstrap-sass (~> 3.3)
builder (~> 3.2)
middleman (~> 3.4)
middleman-livereload (~> 3.4)
middleman-syntax (~> 3.0)
redcarpet (~> 3.3)
+ turbolinks (~> 5.0)
middleman-livereload (3.4.6)
em-websocket (~> 0.5.1)
middleman-core (>= 3.3)
@@ -101,9 +101,9 @@ GEM
mime-types-data (~> 3.2015)
mime-types-data (3.2016.0521)
mini_portile2 (2.1.0)
- minitest (5.9.1)
+ minitest (5.10.1)
multi_json (1.12.1)
- nokogiri (1.6.8.1)
+ nokogiri (1.7.1)
mini_portile2 (~> 2.1.0)
padrino-helpers (0.12.8.1)
i18n (~> 0.6, >= 0.6.7)
@@ -117,11 +117,11 @@ GEM
rack-test (0.6.3)
rack (>= 1.0)
rb-fsevent (0.9.8)
- rb-inotify (0.9.7)
+ rb-inotify (0.9.8)
ffi (>= 0.5.0)
- redcarpet (3.3.4)
- rouge (2.0.6)
- sass (3.4.22)
+ redcarpet (3.4.0)
+ rouge (2.0.7)
+ sass (3.4.23)
sprockets (2.12.4)
hike (~> 1.2)
multi_json (~> 1.0)
@@ -132,10 +132,13 @@ GEM
sprockets-sass (1.3.1)
sprockets (~> 2.0)
tilt (~> 1.1)
- thor (0.19.1)
- thread_safe (0.3.5)
+ thor (0.19.4)
+ thread_safe (0.3.6)
tilt (1.4.1)
- tzinfo (1.2.2)
+ turbolinks (5.0.1)
+ turbolinks-source (~> 5)
+ turbolinks-source (5.0.0)
+ tzinfo (1.2.3)
thread_safe (~> 0.1)
uber (0.0.15)
uglifier (2.7.2)
@@ -148,7 +151,7 @@ PLATFORMS
ruby
DEPENDENCIES
- middleman-hashicorp (= 0.3.6)
+ middleman-hashicorp (= 0.3.18)
BUNDLED WITH
- 1.13.6
+ 1.14.6
diff --git a/website/LICENSE.md b/website/LICENSE.md
index f8bf349a84..3189f43a65 100644
--- a/website/LICENSE.md
+++ b/website/LICENSE.md
@@ -3,8 +3,8 @@
This license is temporary while a more official one is drafted. However,
this should make it clear:
-* The text contents of this website are MPL 2.0 licensed.
+The text contents of this website are MPL 2.0 licensed.
-* The design contents of this website are proprietary and may not be reproduced
- or reused in any way other than to run the Vault website locally. The license
- for the design is owned solely by HashiCorp, Inc.
+The design contents of this website are proprietary and may not be reproduced
+or reused in any way other than to run the website locally. The license for
+the design is owned solely by HashiCorp, Inc.
diff --git a/website/Makefile b/website/Makefile
index 0aec0e3aeb..0a80966c76 100644
--- a/website/Makefile
+++ b/website/Makefile
@@ -1,4 +1,4 @@
-VERSION?="0.3.6"
+VERSION?="0.3.18"
website:
@echo "==> Starting website in Docker..."
diff --git a/website/README.md b/website/README.md
index 0e43e29edd..8a6f2cdf3d 100644
--- a/website/README.md
+++ b/website/README.md
@@ -1,18 +1,21 @@
# Vault Website
-This subdirectory contains the entire source for the [Vault Website](https://www.vaultproject.io/).
-This is a [Middleman](http://middlemanapp.com) project, which builds a static
-site from these source files.
+This subdirectory contains the entire source for the [Vault Website][vault].
+This is a [Middleman][middleman] project, which builds a static site from these
+source files.
## Contributions Welcome!
If you find a typo or you feel like you can improve the HTML, CSS, or
-JavaScript, we welcome contributions. Feel free to open issues or pull
-requests like any normal GitHub project, and we'll merge it in.
+JavaScript, we welcome contributions. Feel free to open issues or pull requests
+like any normal GitHub project, and we'll merge it in.
## Running the Site Locally
-Running the site locally is simple. Clone this repo and run `make dev`.
+Running the site locally is simple. Clone this repo and run `make website`.
Then open up `http://localhost:4567`. Note that some URLs you may need to append
".html" to make them work (in the navigation).
+
+[middleman]: https://www.middlemanapp.com
+[vault]: https://www.vaultproject.io
diff --git a/website/Vagrantfile b/website/Vagrantfile
deleted file mode 100644
index 9b1ad05452..0000000000
--- a/website/Vagrantfile
+++ /dev/null
@@ -1,30 +0,0 @@
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-
-# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
-VAGRANTFILE_API_VERSION = "2"
-
-$script = <
-
-
-
-<%= javascript_include_tag "application" %>
-
-
-
-