diff --git a/api/api_integration_test.go b/api/api_integration_test.go index a9e4409ae1..77683c49ac 100644 --- a/api/api_integration_test.go +++ b/api/api_integration_test.go @@ -18,10 +18,11 @@ import ( "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/vault" + log "github.com/hashicorp/go-hclog" + auditFile "github.com/hashicorp/vault/builtin/audit/file" credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" vaulthttp "github.com/hashicorp/vault/http" - logxi "github.com/mgutz/logxi/v1" dockertest "gopkg.in/ory-am/dockertest.v3" ) @@ -42,7 +43,7 @@ func testVaultServerUnseal(t testing.TB) (*api.Client, []string, func()) { return testVaultServerCoreConfig(t, &vault.CoreConfig{ DisableMlock: true, DisableCache: true, - Logger: logxi.NullLog, + Logger: log.NewNullLogger(), CredentialBackends: map[string]logical.Factory{ "userpass": credUserpass.Factory, }, diff --git a/builtin/credential/cert/backend_test.go b/builtin/credential/cert/backend_test.go index e12fca70ba..2f9b5c379e 100644 --- a/builtin/credential/cert/backend_test.go +++ b/builtin/credential/cert/backend_test.go @@ -21,9 +21,8 @@ import ( "testing" "time" - logxi "github.com/mgutz/logxi/v1" - cleanhttp "github.com/hashicorp/go-cleanhttp" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" vaulthttp "github.com/hashicorp/vault/http" @@ -159,7 +158,7 @@ func TestBackend_PermittedDNSDomainsIntermediateCA(t *testing.T) { coreConfig := &vault.CoreConfig{ DisableMlock: true, DisableCache: true, - Logger: logxi.NullLog, + Logger: log.NewNullLogger(), CredentialBackends: map[string]logical.Factory{ "cert": Factory, }, diff --git a/builtin/credential/cert/path_login.go b/builtin/credential/cert/path_login.go index fce012bced..912332f07f 100644 --- a/builtin/credential/cert/path_login.go +++ b/builtin/credential/cert/path_login.go @@ -337,7 +337,7 @@ func (b *backend) loadTrustedCerts(ctx context.Context, storage logical.Storage, trustedNonCAs = make([]*ParsedCert, 
0) names, err := storage.List(ctx, "cert/") if err != nil { - b.Logger().Error("cert: failed to list trusted certs", "error", err) + b.Logger().Error("failed to list trusted certs", "error", err) return } for _, name := range names { @@ -347,12 +347,12 @@ func (b *backend) loadTrustedCerts(ctx context.Context, storage logical.Storage, } entry, err := b.Cert(ctx, storage, strings.TrimPrefix(name, "cert/")) if err != nil { - b.Logger().Error("cert: failed to load trusted cert", "name", name, "error", err) + b.Logger().Error("failed to load trusted cert", "name", name, "error", err) continue } parsed := parsePEM([]byte(entry.Certificate)) if len(parsed) == 0 { - b.Logger().Error("cert: failed to parse certificate", "name", name) + b.Logger().Error("failed to parse certificate", "name", name) continue } if !parsed[0].IsCA { diff --git a/builtin/credential/ldap/backend.go b/builtin/credential/ldap/backend.go index defa899dbc..61ef01d46f 100644 --- a/builtin/credential/ldap/backend.go +++ b/builtin/credential/ldap/backend.go @@ -121,7 +121,7 @@ func (b *backend) Login(ctx context.Context, req *logical.Request, username stri } if b.Logger().IsDebug() { - b.Logger().Debug("auth/ldap: User BindDN fetched", "username", username, "binddn", userBindDN) + b.Logger().Debug("user binddn fetched", "username", username, "binddn", userBindDN) } if cfg.DenyNullBind && len(password) == 0 { @@ -145,7 +145,7 @@ func (b *backend) Login(ctx context.Context, req *logical.Request, username stri return nil, logical.ErrorResponse(fmt.Sprintf("Encountered an error while attempting to re-bind with the BindDN User: %s", err.Error())), nil, nil } if b.Logger().IsDebug() { - b.Logger().Debug("auth/ldap: Re-Bound to original BindDN") + b.Logger().Debug("re-bound to original binddn") } } @@ -159,7 +159,7 @@ func (b *backend) Login(ctx context.Context, req *logical.Request, username stri return nil, logical.ErrorResponse(err.Error()), nil, nil } if b.Logger().IsDebug() { - 
b.Logger().Debug("auth/ldap: Groups fetched from server", "num_server_groups", len(ldapGroups), "server_groups", ldapGroups) + b.Logger().Debug("groups fetched from server", "num_server_groups", len(ldapGroups), "server_groups", ldapGroups) } ldapResponse := &logical.Response{ @@ -177,7 +177,7 @@ func (b *backend) Login(ctx context.Context, req *logical.Request, username stri user, err := b.User(ctx, req.Storage, username) if err == nil && user != nil && user.Groups != nil { if b.Logger().IsDebug() { - b.Logger().Debug("auth/ldap: adding local groups", "num_local_groups", len(user.Groups), "local_groups", user.Groups) + b.Logger().Debug("adding local groups", "num_local_groups", len(user.Groups), "local_groups", user.Groups) } allGroups = append(allGroups, user.Groups...) } @@ -260,7 +260,7 @@ func (b *backend) getUserBindDN(cfg *ConfigEntry, c *ldap.Conn, username string) filter := fmt.Sprintf("(%s=%s)", cfg.UserAttr, ldap.EscapeFilter(username)) if b.Logger().IsDebug() { - b.Logger().Debug("auth/ldap: Discovering user", "userdn", cfg.UserDN, "filter", filter) + b.Logger().Debug("discovering user", "userdn", cfg.UserDN, "filter", filter) } result, err := c.Search(&ldap.SearchRequest{ BaseDN: cfg.UserDN, @@ -295,7 +295,7 @@ func (b *backend) getUserDN(cfg *ConfigEntry, c *ldap.Conn, bindDN string) (stri // Find the distinguished name for the user if userPrincipalName used for login filter := fmt.Sprintf("(userPrincipalName=%s)", ldap.EscapeFilter(bindDN)) if b.Logger().IsDebug() { - b.Logger().Debug("auth/ldap: Searching UPN", "userdn", cfg.UserDN, "filter", filter) + b.Logger().Debug("searching upn", "userdn", cfg.UserDN, "filter", filter) } result, err := c.Search(&ldap.SearchRequest{ BaseDN: cfg.UserDN, @@ -339,19 +339,19 @@ func (b *backend) getLdapGroups(cfg *ConfigEntry, c *ldap.Conn, userDN string, u ldapMap := make(map[string]bool) if cfg.GroupFilter == "" { - b.Logger().Warn("auth/ldap: GroupFilter is empty, will not query server") + 
b.Logger().Warn("groupfilter is empty, will not query server") return make([]string, 0), nil } if cfg.GroupDN == "" { - b.Logger().Warn("auth/ldap: GroupDN is empty, will not query server") + b.Logger().Warn("groupdn is empty, will not query server") return make([]string, 0), nil } // If groupfilter was defined, resolve it as a Go template and use the query for // returning the user's groups if b.Logger().IsDebug() { - b.Logger().Debug("auth/ldap: Compiling group filter", "group_filter", cfg.GroupFilter) + b.Logger().Debug("compiling group filter", "group_filter", cfg.GroupFilter) } // Parse the configuration as a template. @@ -374,7 +374,7 @@ func (b *backend) getLdapGroups(cfg *ConfigEntry, c *ldap.Conn, userDN string, u t.Execute(&renderedQuery, context) if b.Logger().IsDebug() { - b.Logger().Debug("auth/ldap: Searching", "groupdn", cfg.GroupDN, "rendered_query", renderedQuery.String()) + b.Logger().Debug("searching", "groupdn", cfg.GroupDN, "rendered_query", renderedQuery.String()) } result, err := c.Search(&ldap.SearchRequest{ diff --git a/builtin/credential/ldap/path_config.go b/builtin/credential/ldap/path_config.go index 306fb2eee6..6cad1e55b0 100644 --- a/builtin/credential/ldap/path_config.go +++ b/builtin/credential/ldap/path_config.go @@ -12,11 +12,11 @@ import ( "text/template" "github.com/go-ldap/ldap" + log "github.com/hashicorp/go-hclog" multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/vault/helper/tlsutil" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/framework" - log "github.com/mgutz/logxi/v1" ) func pathConfig(b *backend) *framework.Path { @@ -427,7 +427,7 @@ func (c *ConfigEntry) DialLDAP() (*ldap.Conn, error) { if err == nil { if retErr != nil { if c.logger.IsDebug() { - c.logger.Debug("ldap: errors connecting to some hosts: %s", retErr.Error()) + c.logger.Debug("errors connecting to some hosts", "error", retErr.Error()) } } retErr = nil diff --git a/builtin/credential/okta/backend.go
b/builtin/credential/okta/backend.go index d68586d7ea..d571dcc25f 100644 --- a/builtin/credential/okta/backend.go +++ b/builtin/credential/okta/backend.go @@ -115,13 +115,13 @@ func (b *backend) Login(ctx context.Context, req *logical.Request, username stri switch result.Status { case "LOCKED_OUT": if b.Logger().IsDebug() { - b.Logger().Debug("auth/okta: user is locked out", "user", username) + b.Logger().Debug("user is locked out", "user", username) } return nil, logical.ErrorResponse("okta authentication failed"), nil, nil case "PASSWORD_EXPIRED": if b.Logger().IsDebug() { - b.Logger().Debug("auth/okta: password is expired", "user", username) + b.Logger().Debug("password is expired", "user", username) } return nil, logical.ErrorResponse("okta authentication failed"), nil, nil @@ -131,7 +131,7 @@ func (b *backend) Login(ctx context.Context, req *logical.Request, username stri case "MFA_ENROLL", "MFA_ENROLL_ACTIVATE": if !cfg.BypassOktaMFA { if b.Logger().IsDebug() { - b.Logger().Debug("auth/okta: user must enroll or complete mfa enrollment", "user", username) + b.Logger().Debug("user must enroll or complete mfa enrollment", "user", username) } return nil, logical.ErrorResponse("okta authentication failed: you must complete MFA enrollment to continue"), nil, nil } @@ -204,7 +204,7 @@ func (b *backend) Login(ctx context.Context, req *logical.Request, username stri // Allowed default: if b.Logger().IsDebug() { - b.Logger().Debug("auth/okta: unhandled result status", "status", result.Status, "factorstatus", result.FactorResult) + b.Logger().Debug("unhandled result status", "status", result.Status, "factorstatus", result.FactorResult) } return nil, logical.ErrorResponse("okta authentication failed"), nil, nil } @@ -215,7 +215,7 @@ func (b *backend) Login(ctx context.Context, req *logical.Request, username stri default: if b.Logger().IsDebug() { - b.Logger().Debug("auth/okta: unhandled result status", "status", result.Status) + b.Logger().Debug("unhandled result 
status", "status", result.Status) } return nil, logical.ErrorResponse("okta authentication failed"), nil, nil } @@ -230,7 +230,7 @@ func (b *backend) Login(ctx context.Context, req *logical.Request, username stri // Allowed default: if b.Logger().IsDebug() { - b.Logger().Debug("auth/okta: authentication returned a non-success status", "status", result.Status) + b.Logger().Debug("authentication returned a non-success status", "status", result.Status) } return nil, logical.ErrorResponse("okta authentication failed"), nil, nil } @@ -254,12 +254,12 @@ func (b *backend) Login(ctx context.Context, req *logical.Request, username stri user, err := b.User(ctx, req.Storage, username) if err != nil { if b.Logger().IsDebug() { - b.Logger().Debug("auth/okta: error looking up user", "error", err) + b.Logger().Debug("error looking up user", "error", err) } } if err == nil && user != nil && user.Groups != nil { if b.Logger().IsDebug() { - b.Logger().Debug("auth/okta: adding local groups", "num_local_groups", len(user.Groups), "local_groups", user.Groups) + b.Logger().Debug("adding local groups", "num_local_groups", len(user.Groups), "local_groups", user.Groups) } allGroups = append(allGroups, user.Groups...) 
} @@ -270,7 +270,7 @@ func (b *backend) Login(ctx context.Context, req *logical.Request, username stri entry, _, err := b.Group(ctx, req.Storage, groupName) if err != nil { if b.Logger().IsDebug() { - b.Logger().Debug("auth/okta: error looking up group policies", "error", err) + b.Logger().Debug("error looking up group policies", "error", err) } } if err == nil && entry != nil && entry.Policies != nil { @@ -309,7 +309,7 @@ func (b *backend) getOktaGroups(client *okta.Client, user *okta.User) ([]string, oktaGroups = append(oktaGroups, group.Profile.Name) } if b.Logger().IsDebug() { - b.Logger().Debug("auth/okta: Groups fetched from Okta", "num_groups", len(oktaGroups), "groups", fmt.Sprintf("%#v", oktaGroups)) + b.Logger().Debug("groups fetched from Okta", "num_groups", len(oktaGroups), "groups", fmt.Sprintf("%#v", oktaGroups)) } return oktaGroups, nil } diff --git a/builtin/credential/okta/backend_test.go b/builtin/credential/okta/backend_test.go index 08768887e2..acb264f2e6 100644 --- a/builtin/credential/okta/backend_test.go +++ b/builtin/credential/okta/backend_test.go @@ -7,9 +7,9 @@ import ( "strings" "testing" - "github.com/hashicorp/vault/helper/logformat" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/helper/policyutil" - log "github.com/mgutz/logxi/v1" "time" @@ -21,7 +21,7 @@ func TestBackend_Config(t *testing.T) { defaultLeaseTTLVal := time.Hour * 12 maxLeaseTTLVal := time.Hour * 24 b, err := Factory(context.Background(), &logical.BackendConfig{ - Logger: logformat.NewVaultLogger(log.LevelTrace), + Logger: logging.NewVaultLogger(log.Trace), System: &logical.StaticSystemView{ DefaultLeaseTTLVal: defaultLeaseTTLVal, MaxLeaseTTLVal: maxLeaseTTLVal, diff --git a/builtin/logical/database/backend.go b/builtin/logical/database/backend.go index bcede8e29d..db5fb4e8c4 100644 --- a/builtin/logical/database/backend.go +++ b/builtin/logical/database/backend.go @@ -7,7 +7,7 @@ import ( "strings" "sync"
- log "github.com/mgutz/logxi/v1" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/errwrap" uuid "github.com/hashicorp/go-uuid" diff --git a/builtin/logical/database/dbplugin/client.go b/builtin/logical/database/dbplugin/client.go index 31c5efbc0f..37cb629c4c 100644 --- a/builtin/logical/database/dbplugin/client.go +++ b/builtin/logical/database/dbplugin/client.go @@ -5,9 +5,9 @@ import ( "errors" "sync" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin" "github.com/hashicorp/vault/helper/pluginutil" - log "github.com/mgutz/logxi/v1" ) // DatabasePluginClient embeds a databasePluginRPCClient and wraps it's Close @@ -61,7 +61,7 @@ func newPluginClient(ctx context.Context, sys pluginutil.RunnerUtil, pluginRunne case *gRPCClient: db = raw.(*gRPCClient) case *databasePluginRPCClient: - logger.Warn("database: plugin is using deprecated net RPC transport, recompile plugin to upgrade to gRPC", "plugin", pluginRunner.Name) + logger.Warn("plugin is using deprecated net RPC transport, recompile plugin to upgrade to gRPC", "plugin", pluginRunner.Name) db = raw.(*databasePluginRPCClient) default: return nil, errors.New("unsupported client type") diff --git a/builtin/logical/database/dbplugin/databasemiddleware.go b/builtin/logical/database/dbplugin/databasemiddleware.go index 36a7558e5b..ba2dd4e5c4 100644 --- a/builtin/logical/database/dbplugin/databasemiddleware.go +++ b/builtin/logical/database/dbplugin/databasemiddleware.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/errwrap" metrics "github.com/armon/go-metrics" - log "github.com/mgutz/logxi/v1" + log "github.com/hashicorp/go-hclog" ) // ---- Tracing Middleware Domain ---- @@ -21,9 +21,6 @@ import ( type databaseTracingMiddleware struct { next Database logger log.Logger - - typeStr string - transport string } func (mw *databaseTracingMiddleware) Type() (string, error) { @@ -32,37 +29,37 @@ func (mw *databaseTracingMiddleware) Type() (string, error) { func (mw *databaseTracingMiddleware) 
CreateUser(ctx context.Context, statements Statements, usernameConfig UsernameConfig, expiration time.Time) (username string, password string, err error) { defer func(then time.Time) { - mw.logger.Trace("database", "operation", "CreateUser", "status", "finished", "type", mw.typeStr, "transport", mw.transport, "err", err, "took", time.Since(then)) + mw.logger.Trace("create user", "status", "finished", "err", err, "took", time.Since(then)) }(time.Now()) - mw.logger.Trace("database", "operation", "CreateUser", "status", "started", "type", mw.typeStr, "transport", mw.transport) + mw.logger.Trace("create user", "status", "started") return mw.next.CreateUser(ctx, statements, usernameConfig, expiration) } func (mw *databaseTracingMiddleware) RenewUser(ctx context.Context, statements Statements, username string, expiration time.Time) (err error) { defer func(then time.Time) { - mw.logger.Trace("database", "operation", "RenewUser", "status", "finished", "type", mw.typeStr, "transport", mw.transport, "err", err, "took", time.Since(then)) + mw.logger.Trace("renew user", "status", "finished", "err", err, "took", time.Since(then)) }(time.Now()) - mw.logger.Trace("database", "operation", "RenewUser", "status", "started", mw.typeStr, "transport", mw.transport) + mw.logger.Trace("renew user", "status", "started") return mw.next.RenewUser(ctx, statements, username, expiration) } func (mw *databaseTracingMiddleware) RevokeUser(ctx context.Context, statements Statements, username string) (err error) { defer func(then time.Time) { - mw.logger.Trace("database", "operation", "RevokeUser", "status", "finished", "type", mw.typeStr, "transport", mw.transport, "err", err, "took", time.Since(then)) + mw.logger.Trace("revoke user", "status", "finished", "err", err, "took", time.Since(then)) }(time.Now()) - mw.logger.Trace("database", "operation", "RevokeUser", "status", "started", "type", mw.typeStr, "transport", mw.transport) + mw.logger.Trace("revoke user", "status", "started") return 
mw.next.RevokeUser(ctx, statements, username) } func (mw *databaseTracingMiddleware) RotateRootCredentials(ctx context.Context, statements []string) (conf map[string]interface{}, err error) { defer func(then time.Time) { - mw.logger.Trace("database", "operation", "RotateRootCredentials", "status", "finished", "type", mw.typeStr, "transport", mw.transport, "err", err, "took", time.Since(then)) + mw.logger.Trace("rotate root credentials", "status", "finished", "err", err, "took", time.Since(then)) }(time.Now()) - mw.logger.Trace("database", "operation", "RotateRootCredentials", "status", "started", "type", mw.typeStr, "transport", mw.transport) + mw.logger.Trace("rotate root credentials", "status", "started") return mw.next.RotateRootCredentials(ctx, statements) } @@ -73,19 +70,19 @@ func (mw *databaseTracingMiddleware) Initialize(ctx context.Context, conf map[st func (mw *databaseTracingMiddleware) Init(ctx context.Context, conf map[string]interface{}, verifyConnection bool) (saveConf map[string]interface{}, err error) { defer func(then time.Time) { - mw.logger.Trace("database", "operation", "Initialize", "status", "finished", "type", mw.typeStr, "transport", mw.transport, "verify", verifyConnection, "err", err, "took", time.Since(then)) + mw.logger.Trace("initialize", "status", "finished", "verify", verifyConnection, "err", err, "took", time.Since(then)) }(time.Now()) - mw.logger.Trace("database", "operation", "Initialize", "status", "started", "type", mw.typeStr, "transport", mw.transport) + mw.logger.Trace("initialize", "status", "started") return mw.next.Init(ctx, conf, verifyConnection) } func (mw *databaseTracingMiddleware) Close() (err error) { defer func(then time.Time) { - mw.logger.Trace("database", "operation", "Close", "status", "finished", "type", mw.typeStr, "transport", mw.transport, "err", err, "took", time.Since(then)) + mw.logger.Trace("close", "status", "finished", "err", err, "took", time.Since(then)) }(time.Now()) - mw.logger.Trace("database", 
"operation", "Close", "status", "started", "type", mw.typeStr, "transport", mw.transport) + mw.logger.Trace("close", "status", "started") return mw.next.Close() } diff --git a/builtin/logical/database/dbplugin/plugin.go b/builtin/logical/database/dbplugin/plugin.go index a447e24262..4cb8d9b7b4 100644 --- a/builtin/logical/database/dbplugin/plugin.go +++ b/builtin/logical/database/dbplugin/plugin.go @@ -9,9 +9,9 @@ import ( "google.golang.org/grpc" "github.com/hashicorp/errwrap" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin" "github.com/hashicorp/vault/helper/pluginutil" - log "github.com/mgutz/logxi/v1" ) // Database is the interface that all database objects must implement. @@ -39,6 +39,8 @@ func PluginFactory(ctx context.Context, pluginName string, sys pluginutil.LookRu return nil, err } + namedLogger := logger.Named(pluginName) + var transport string var db Database if pluginRunner.Builtin { @@ -59,7 +61,7 @@ func PluginFactory(ctx context.Context, pluginName string, sys pluginutil.LookRu } else { // create a DatabasePluginClient instance - db, err = newPluginClient(ctx, sys, pluginRunner, logger) + db, err = newPluginClient(ctx, sys, pluginRunner, namedLogger) if err != nil { return nil, err } @@ -87,12 +89,10 @@ func PluginFactory(ctx context.Context, pluginName string, sys pluginutil.LookRu } // Wrap with tracing middleware - if logger.IsTrace() { + if namedLogger.IsTrace() { db = &databaseTracingMiddleware{ - transport: transport, - next: db, - typeStr: typeStr, - logger: logger, + next: db, + logger: namedLogger.With("transport", transport), } } diff --git a/builtin/logical/database/dbplugin/plugin_test.go b/builtin/logical/database/dbplugin/plugin_test.go index f33f553e30..f4f8379336 100644 --- a/builtin/logical/database/dbplugin/plugin_test.go +++ b/builtin/logical/database/dbplugin/plugin_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + log "github.com/hashicorp/go-hclog" plugin "github.com/hashicorp/go-plugin" 
"github.com/hashicorp/vault/builtin/logical/database/dbplugin" "github.com/hashicorp/vault/helper/pluginutil" @@ -14,7 +15,6 @@ import ( "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/plugins" "github.com/hashicorp/vault/vault" - log "github.com/mgutz/logxi/v1" ) type mockPlugin struct { @@ -147,7 +147,7 @@ func TestPlugin_Init(t *testing.T) { cluster, sys := getCluster(t) defer cluster.Cleanup() - dbRaw, err := dbplugin.PluginFactory(context.Background(), "test-plugin", sys, &log.NullLogger{}) + dbRaw, err := dbplugin.PluginFactory(context.Background(), "test-plugin", sys, log.NewNullLogger()) if err != nil { t.Fatalf("err: %s", err) } @@ -171,7 +171,7 @@ func TestPlugin_CreateUser(t *testing.T) { cluster, sys := getCluster(t) defer cluster.Cleanup() - db, err := dbplugin.PluginFactory(context.Background(), "test-plugin", sys, &log.NullLogger{}) + db, err := dbplugin.PluginFactory(context.Background(), "test-plugin", sys, log.NewNullLogger()) if err != nil { t.Fatalf("err: %s", err) } @@ -211,7 +211,7 @@ func TestPlugin_RenewUser(t *testing.T) { cluster, sys := getCluster(t) defer cluster.Cleanup() - db, err := dbplugin.PluginFactory(context.Background(), "test-plugin", sys, &log.NullLogger{}) + db, err := dbplugin.PluginFactory(context.Background(), "test-plugin", sys, log.NewNullLogger()) if err != nil { t.Fatalf("err: %s", err) } @@ -245,7 +245,7 @@ func TestPlugin_RevokeUser(t *testing.T) { cluster, sys := getCluster(t) defer cluster.Cleanup() - db, err := dbplugin.PluginFactory(context.Background(), "test-plugin", sys, &log.NullLogger{}) + db, err := dbplugin.PluginFactory(context.Background(), "test-plugin", sys, log.NewNullLogger()) if err != nil { t.Fatalf("err: %s", err) } @@ -287,7 +287,7 @@ func TestPlugin_NetRPC_Init(t *testing.T) { cluster, sys := getCluster(t) defer cluster.Cleanup() - dbRaw, err := dbplugin.PluginFactory(context.Background(), "test-plugin-netRPC", sys, &log.NullLogger{}) + dbRaw, err := 
dbplugin.PluginFactory(context.Background(), "test-plugin-netRPC", sys, log.NewNullLogger()) if err != nil { t.Fatalf("err: %s", err) } @@ -311,7 +311,7 @@ func TestPlugin_NetRPC_CreateUser(t *testing.T) { cluster, sys := getCluster(t) defer cluster.Cleanup() - db, err := dbplugin.PluginFactory(context.Background(), "test-plugin-netRPC", sys, &log.NullLogger{}) + db, err := dbplugin.PluginFactory(context.Background(), "test-plugin-netRPC", sys, log.NewNullLogger()) if err != nil { t.Fatalf("err: %s", err) } @@ -351,7 +351,7 @@ func TestPlugin_NetRPC_RenewUser(t *testing.T) { cluster, sys := getCluster(t) defer cluster.Cleanup() - db, err := dbplugin.PluginFactory(context.Background(), "test-plugin-netRPC", sys, &log.NullLogger{}) + db, err := dbplugin.PluginFactory(context.Background(), "test-plugin-netRPC", sys, log.NewNullLogger()) if err != nil { t.Fatalf("err: %s", err) } @@ -385,7 +385,7 @@ func TestPlugin_NetRPC_RevokeUser(t *testing.T) { cluster, sys := getCluster(t) defer cluster.Cleanup() - db, err := dbplugin.PluginFactory(context.Background(), "test-plugin-netRPC", sys, &log.NullLogger{}) + db, err := dbplugin.PluginFactory(context.Background(), "test-plugin-netRPC", sys, log.NewNullLogger()) if err != nil { t.Fatalf("err: %s", err) } diff --git a/builtin/logical/postgresql/backend.go b/builtin/logical/postgresql/backend.go index 225a4921a1..ad7c333284 100644 --- a/builtin/logical/postgresql/backend.go +++ b/builtin/logical/postgresql/backend.go @@ -7,7 +7,7 @@ import ( "strings" "sync" - log "github.com/mgutz/logxi/v1" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/framework" @@ -64,8 +64,8 @@ type backend struct { // DB returns the database connection. 
func (b *backend) DB(ctx context.Context, s logical.Storage) (*sql.DB, error) { - b.logger.Trace("postgres/db: enter") - defer b.logger.Trace("postgres/db: exit") + b.logger.Debug("postgres/db: enter") + defer b.logger.Debug("postgres/db: exit") b.lock.Lock() defer b.lock.Unlock() @@ -126,8 +126,8 @@ func (b *backend) DB(ctx context.Context, s logical.Storage) (*sql.DB, error) { // ResetDB forces a connection next time DB() is called. func (b *backend) ResetDB(_ context.Context) { - b.logger.Trace("postgres/resetdb: enter") - defer b.logger.Trace("postgres/resetdb: exit") + b.logger.Debug("postgres/resetdb: enter") + defer b.logger.Debug("postgres/resetdb: exit") b.lock.Lock() defer b.lock.Unlock() diff --git a/builtin/logical/postgresql/path_role_create.go b/builtin/logical/postgresql/path_role_create.go index 832e9bfad0..3d7a7312c3 100644 --- a/builtin/logical/postgresql/path_role_create.go +++ b/builtin/logical/postgresql/path_role_create.go @@ -33,13 +33,9 @@ func pathRoleCreate(b *backend) *framework.Path { } func (b *backend) pathRoleCreateRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - b.logger.Trace("postgres/pathRoleCreateRead: enter") - defer b.logger.Trace("postgres/pathRoleCreateRead: exit") - name := data.Get("name").(string) // Get the role - b.logger.Trace("postgres/pathRoleCreateRead: getting role") role, err := b.Role(ctx, req.Storage, name) if err != nil { return nil, err @@ -49,7 +45,6 @@ func (b *backend) pathRoleCreateRead(ctx context.Context, req *logical.Request, } // Determine if we have a lease - b.logger.Trace("postgres/pathRoleCreateRead: getting lease") lease, err := b.Lease(ctx, req.Storage) if err != nil { return nil, err @@ -90,20 +85,17 @@ func (b *backend) pathRoleCreateRead(ctx context.Context, req *logical.Request, Format("2006-01-02 15:04:05-0700") // Get our handle - b.logger.Trace("postgres/pathRoleCreateRead: getting database handle") db, err := b.DB(ctx, req.Storage) if err !=
nil { return nil, err } // Start a transaction - b.logger.Trace("postgres/pathRoleCreateRead: starting transaction") tx, err := db.Begin() if err != nil { return nil, err } defer func() { - b.logger.Trace("postgres/pathRoleCreateRead: rolling back transaction") tx.Rollback() }() @@ -114,7 +106,6 @@ func (b *backend) pathRoleCreateRead(ctx context.Context, req *logical.Request, continue } - b.logger.Trace("postgres/pathRoleCreateRead: preparing statement") stmt, err := tx.Prepare(Query(query, map[string]string{ "name": username, "password": password, @@ -124,7 +115,6 @@ func (b *backend) pathRoleCreateRead(ctx context.Context, req *logical.Request, return nil, err } defer stmt.Close() - b.logger.Trace("postgres/pathRoleCreateRead: executing statement") if _, err := stmt.Exec(); err != nil { return nil, err } @@ -132,14 +122,12 @@ func (b *backend) pathRoleCreateRead(ctx context.Context, req *logical.Request, // Commit the transaction - b.logger.Trace("postgres/pathRoleCreateRead: committing transaction") if err := tx.Commit(); err != nil { return nil, err } // Return the secret - b.logger.Trace("postgres/pathRoleCreateRead: generating secret") resp := b.Secret(SecretCredsType).Response(map[string]interface{}{ "username": username, "password": password, diff --git a/builtin/logical/ssh/communicator.go b/builtin/logical/ssh/communicator.go index 3ab86fa114..47775b36ba 100644 --- a/builtin/logical/ssh/communicator.go +++ b/builtin/logical/ssh/communicator.go @@ -11,7 +11,7 @@ import ( "os" "path/filepath" - log "github.com/mgutz/logxi/v1" + log "github.com/hashicorp/go-hclog" "golang.org/x/crypto/ssh" "golang.org/x/crypto/ssh/agent" diff --git a/builtin/logical/ssh/path_roles.go b/builtin/logical/ssh/path_roles.go index 6ab5c337ee..9af1aa535d 100644 --- a/builtin/logical/ssh/path_roles.go +++ b/builtin/logical/ssh/path_roles.go @@ -571,14 +571,14 @@ func (b *backend) pathRoleList(ctx context.Context, req *logical.Request, d *fra if err != nil { // On error, log warning 
and continue if b.Logger().IsWarn() { - b.Logger().Warn("ssh: error getting role info", "role", entry, "error", err) + b.Logger().Warn("error getting role info", "role", entry, "error", err) } continue } if role == nil { // On empty role, log warning and continue if b.Logger().IsWarn() { - b.Logger().Warn("ssh: no role info found", "role", entry) + b.Logger().Warn("no role info found", "role", entry) } continue } @@ -586,7 +586,7 @@ func (b *backend) pathRoleList(ctx context.Context, req *logical.Request, d *fra roleInfo, err := b.parseRole(role) if err != nil { if b.Logger().IsWarn() { - b.Logger().Warn("ssh: error parsing role info", "role", entry, "error", err) + b.Logger().Warn("error parsing role info", "role", entry, "error", err) } continue } diff --git a/builtin/logical/ssh/util.go b/builtin/logical/ssh/util.go index 62eaf19e49..a455b06da1 100644 --- a/builtin/logical/ssh/util.go +++ b/builtin/logical/ssh/util.go @@ -15,7 +15,7 @@ import ( "github.com/hashicorp/vault/logical" - log "github.com/mgutz/logxi/v1" + log "github.com/hashicorp/go-hclog" "golang.org/x/crypto/ssh" ) diff --git a/builtin/logical/transit/backend.go b/builtin/logical/transit/backend.go index b7aa3cc792..1bb7f4c927 100644 --- a/builtin/logical/transit/backend.go +++ b/builtin/logical/transit/backend.go @@ -64,8 +64,8 @@ type backend struct { } func (b *backend) invalidate(_ context.Context, key string) { - if b.Logger().IsTrace() { - b.Logger().Trace("transit: invalidating key", "key", key) + if b.Logger().IsDebug() { + b.Logger().Debug("invalidating key", "key", key) } switch { case strings.HasPrefix(key, "policy/"): diff --git a/builtin/plugin/backend.go b/builtin/plugin/backend.go index 6693f23e02..c448bb89fa 100644 --- a/builtin/plugin/backend.go +++ b/builtin/plugin/backend.go @@ -86,7 +86,7 @@ type backend struct { } func (b *backend) reloadBackend(ctx context.Context) error { - b.Logger().Trace("plugin: reloading plugin backend", "plugin", b.config.Config["plugin_name"]) + 
b.Logger().Debug("reloading plugin backend", "plugin", b.config.Config["plugin_name"]) return b.startBackend(ctx) } @@ -111,12 +111,12 @@ func (b *backend) startBackend(ctx context.Context) error { if !b.loaded { if b.Backend.Type() != nb.Type() { nb.Cleanup(ctx) - b.Logger().Warn("plugin: failed to start plugin process", "plugin", b.config.Config["plugin_name"], "error", ErrMismatchType) + b.Logger().Warn("failed to start plugin process", "plugin", b.config.Config["plugin_name"], "error", ErrMismatchType) return ErrMismatchType } if !reflect.DeepEqual(b.Backend.SpecialPaths(), nb.SpecialPaths()) { nb.Cleanup(ctx) - b.Logger().Warn("plugin: failed to start plugin process", "plugin", b.config.Config["plugin_name"], "error", ErrMismatchPaths) + b.Logger().Warn("failed to start plugin process", "plugin", b.config.Config["plugin_name"], "error", ErrMismatchPaths) return ErrMismatchPaths } } diff --git a/builtin/plugin/backend_test.go b/builtin/plugin/backend_test.go index ee761fc19d..4667717ae8 100644 --- a/builtin/plugin/backend_test.go +++ b/builtin/plugin/backend_test.go @@ -6,14 +6,14 @@ import ( "os" "testing" - "github.com/hashicorp/vault/helper/logformat" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/helper/pluginutil" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/plugin" "github.com/hashicorp/vault/logical/plugin/mock" "github.com/hashicorp/vault/vault" - log "github.com/mgutz/logxi/v1" ) func TestBackend_impl(t *testing.T) { @@ -80,7 +80,7 @@ func testConfig(t *testing.T) (*logical.BackendConfig, func()) { sys := vault.TestDynamicSystemView(core.Core) config := &logical.BackendConfig{ - Logger: logformat.NewVaultLogger(log.LevelTrace), + Logger: logging.NewVaultLogger(log.Debug), System: sys, Config: map[string]string{ "plugin_name": "mock-plugin", diff --git a/command/approle_concurrency_integ_test.go 
b/command/approle_concurrency_integ_test.go index 8237c3fcc2..4e940d6a23 100644 --- a/command/approle_concurrency_integ_test.go +++ b/command/approle_concurrency_integ_test.go @@ -4,12 +4,12 @@ import ( "sync" "testing" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" credAppRole "github.com/hashicorp/vault/builtin/credential/approle" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/vault" - logxi "github.com/mgutz/logxi/v1" ) func TestAppRole_Integ_ConcurrentLogins(t *testing.T) { @@ -17,7 +17,7 @@ func TestAppRole_Integ_ConcurrentLogins(t *testing.T) { coreConfig := &vault.CoreConfig{ DisableMlock: true, DisableCache: true, - Logger: logxi.NullLog, + Logger: log.NewNullLogger(), CredentialBackends: map[string]logical.Factory{ "approle": credAppRole.Factory, }, diff --git a/command/command_test.go b/command/command_test.go index 5303935494..d91eb5c4e1 100644 --- a/command/command_test.go +++ b/command/command_test.go @@ -9,6 +9,7 @@ import ( "testing" "time" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/audit" "github.com/hashicorp/vault/builtin/logical/pki" @@ -22,11 +23,10 @@ import ( auditFile "github.com/hashicorp/vault/builtin/audit/file" credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" vaulthttp "github.com/hashicorp/vault/http" - logxi "github.com/mgutz/logxi/v1" ) var ( - defaultVaultLogger = logxi.NullLog + defaultVaultLogger = log.NewNullLogger() defaultVaultCredentialBackends = map[string]logical.Factory{ "userpass": credUserpass.Factory, diff --git a/command/identity_group_aliases_integ_test.go b/command/identity_group_aliases_integ_test.go index 5790ae2021..c9c60b1b54 100644 --- a/command/identity_group_aliases_integ_test.go +++ b/command/identity_group_aliases_integ_test.go @@ -3,12 +3,12 @@ package command import ( "testing" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" 
"github.com/hashicorp/vault/builtin/credential/ldap" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/vault" - logxi "github.com/mgutz/logxi/v1" ) func TestIdentityStore_Integ_GroupAliases(t *testing.T) { @@ -16,7 +16,7 @@ func TestIdentityStore_Integ_GroupAliases(t *testing.T) { coreConfig := &vault.CoreConfig{ DisableMlock: true, DisableCache: true, - Logger: logxi.NullLog, + Logger: log.NewNullLogger(), CredentialBackends: map[string]logical.Factory{ "ldap": ldap.Factory, }, diff --git a/command/path_map_upgrade_api_test.go b/command/path_map_upgrade_api_test.go index 1f408256f0..14c84bffcf 100644 --- a/command/path_map_upgrade_api_test.go +++ b/command/path_map_upgrade_api_test.go @@ -3,11 +3,11 @@ package command import ( "testing" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/vault" - logxi "github.com/mgutz/logxi/v1" credAppId "github.com/hashicorp/vault/builtin/credential/app-id" ) @@ -17,7 +17,7 @@ func TestPathMap_Upgrade_API(t *testing.T) { coreConfig := &vault.CoreConfig{ DisableMlock: true, DisableCache: true, - Logger: logxi.NullLog, + Logger: log.NewNullLogger(), CredentialBackends: map[string]logical.Factory{ "app-id": credAppId.Factory, }, diff --git a/command/server.go b/command/server.go index 415c883029..0241bd6b90 100644 --- a/command/server.go +++ b/command/server.go @@ -21,7 +21,6 @@ import ( "time" colorable "github.com/mattn/go-colorable" - log "github.com/mgutz/logxi/v1" "github.com/mitchellh/cli" testing "github.com/mitchellh/go-testing-interface" "github.com/posener/complete" @@ -32,13 +31,12 @@ import ( "github.com/armon/go-metrics/circonus" "github.com/armon/go-metrics/datadog" "github.com/hashicorp/errwrap" - hclog "github.com/hashicorp/go-hclog" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-multierror" 
"github.com/hashicorp/vault/audit" "github.com/hashicorp/vault/command/server" "github.com/hashicorp/vault/helper/gated-writer" - "github.com/hashicorp/vault/helper/logbridge" - "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/helper/mlock" "github.com/hashicorp/vault/helper/parseutil" "github.com/hashicorp/vault/helper/reload" @@ -288,21 +286,19 @@ func (c *ServerCommand) Run(args []string) int { // Create a logger. We wrap it in a gated writer so that it doesn't // start logging too early. c.logGate = &gatedwriter.Writer{Writer: colorable.NewColorable(os.Stderr)} - var level int + var level log.Level c.flagLogLevel = strings.ToLower(strings.TrimSpace(c.flagLogLevel)) switch c.flagLogLevel { case "trace": - level = log.LevelTrace + level = log.Trace case "debug": - level = log.LevelDebug - case "info", "": - level = log.LevelInfo - case "notice": - level = log.LevelNotice + level = log.Debug + case "notice", "info", "": + level = log.Info case "warn", "warning": - level = log.LevelWarn + level = log.Warn case "err", "error": - level = log.LevelError + level = log.Error default: c.UI.Error(fmt.Sprintf("Unknown log level: %s", c.flagLogLevel)) return 1 @@ -315,20 +311,20 @@ func (c *ServerCommand) Run(args []string) int { switch strings.ToLower(logFormat) { case "vault", "vault_json", "vault-json", "vaultjson", "json", "": if c.flagDevThreeNode || c.flagDevFourCluster { - c.logger = logbridge.NewLogger(hclog.New(&hclog.LoggerOptions{ + c.logger = log.New(&log.LoggerOptions{ Mutex: &sync.Mutex{}, Output: c.logGate, - Level: hclog.Trace, - })).LogxiLogger() + Level: log.Trace, + }) } else { - c.logger = logformat.NewVaultLoggerWithWriter(c.logGate, level) + c.logger = logging.NewVaultLoggerWithWriter(c.logGate, level) } default: - c.logger = log.NewLogger(c.logGate, "vault") - c.logger.SetLevel(level) + c.logger = logging.NewVaultLoggerWithWriter(c.logGate, level) } + 
grpclog.SetLogger(&grpclogFaker{ - logger: c.logger, + logger: c.logger.Named("grpclogfaker"), log: os.Getenv("VAULT_GRPC_LOGGING") != "", }) @@ -412,7 +408,7 @@ func (c *ServerCommand) Run(args []string) int { c.UI.Error(fmt.Sprintf("Unknown storage type %s", config.Storage.Type)) return 1 } - backend, err := factory(config.Storage.Config, c.logger) + backend, err := factory(config.Storage.Config, c.logger.ResetNamed("storage."+config.Storage.Type)) if err != nil { c.UI.Error(fmt.Sprintf("Error initializing storage of type %s: %s", config.Storage.Type, err)) return 1 @@ -718,8 +714,8 @@ CLUSTER_SYNTHESIS_COMPLETE: } c.reloadFuncsLock.Unlock() if !disableClustering { - if c.logger.IsTrace() { - c.logger.Trace("cluster listener addresses synthesized", "cluster_addresses", clusterAddrs) + if c.logger.IsDebug() { + c.logger.Debug("cluster listener addresses synthesized", "cluster_addresses", clusterAddrs) } } @@ -1095,7 +1091,7 @@ func (c *ServerCommand) enableThreeNodeDevCluster(base *vault.CoreConfig, info m testCluster := vault.NewTestCluster(&testing.RuntimeT{}, base, &vault.TestClusterOptions{ HandlerFunc: vaulthttp.Handler, BaseListenAddress: c.flagDevListenAddr, - RawLogger: c.logger, + Logger: c.logger, TempDir: tempDir, }) defer c.cleanupGuard.Do(testCluster.Cleanup) @@ -1577,19 +1573,19 @@ func (g *grpclogFaker) Fatalln(args ...interface{}) { } func (g *grpclogFaker) Print(args ...interface{}) { - if g.log && g.logger.IsTrace() { - g.logger.Trace(fmt.Sprint(args...)) + if g.log && g.logger.IsDebug() { + g.logger.Debug(fmt.Sprint(args...)) } } func (g *grpclogFaker) Printf(format string, args ...interface{}) { - if g.log && g.logger.IsTrace() { - g.logger.Trace(fmt.Sprintf(format, args...)) + if g.log && g.logger.IsDebug() { + g.logger.Debug(fmt.Sprintf(format, args...)) } } func (g *grpclogFaker) Println(args ...interface{}) { - if g.log && g.logger.IsTrace() { - g.logger.Trace(fmt.Sprintln(args...)) + if g.log && g.logger.IsDebug() { + 
g.logger.Debug(fmt.Sprintln(args...)) } } diff --git a/command/server/config.go b/command/server/config.go index 2bb1ddbfa3..b24a5e0f90 100644 --- a/command/server/config.go +++ b/command/server/config.go @@ -10,7 +10,7 @@ import ( "strings" "time" - log "github.com/mgutz/logxi/v1" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-multierror" "github.com/hashicorp/hcl" diff --git a/command/server/config_test.go b/command/server/config_test.go index 95a4032f1d..e1bc2442be 100644 --- a/command/server/config_test.go +++ b/command/server/config_test.go @@ -6,14 +6,14 @@ import ( "testing" "time" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/hcl" "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/vault/helper/logformat" - log "github.com/mgutz/logxi/v1" + "github.com/hashicorp/vault/helper/logging" ) func TestLoadConfigFile(t *testing.T) { - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) config, err := LoadConfigFile("./test-fixtures/config.hcl", logger) if err != nil { @@ -79,7 +79,7 @@ func TestLoadConfigFile(t *testing.T) { } func TestLoadConfigFile_topLevel(t *testing.T) { - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) config, err := LoadConfigFile("./test-fixtures/config2.hcl", logger) if err != nil { @@ -150,7 +150,7 @@ func TestLoadConfigFile_topLevel(t *testing.T) { } func TestLoadConfigFile_json(t *testing.T) { - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) config, err := LoadConfigFile("./test-fixtures/config.hcl.json", logger) if err != nil { @@ -215,7 +215,7 @@ func TestLoadConfigFile_json(t *testing.T) { } func TestLoadConfigFile_json2(t *testing.T) { - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) config, err := LoadConfigFile("./test-fixtures/config2.hcl.json", logger) if err != nil { @@ -283,7 +283,7 @@ func 
TestLoadConfigFile_json2(t *testing.T) { } func TestLoadConfigDir(t *testing.T) { - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) config, err := LoadConfigDir("./test-fixtures/config-dir", logger) if err != nil { @@ -383,7 +383,7 @@ listener "tcp" { } func TestParseConfig_badTopLevel(t *testing.T) { - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) _, err := ParseConfig(strings.TrimSpace(` backend {} @@ -405,7 +405,7 @@ nope = "yes" } func TestParseConfig_badListener(t *testing.T) { - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) _, err := ParseConfig(strings.TrimSpace(` listener "tcp" { @@ -429,7 +429,7 @@ listener "tcp" { } func TestParseConfig_badTelemetry(t *testing.T) { - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) _, err := ParseConfig(strings.TrimSpace(` telemetry { diff --git a/helper/logbridge/logger.go b/helper/logbridge/logger.go deleted file mode 100644 index 2626f1e65c..0000000000 --- a/helper/logbridge/logger.go +++ /dev/null @@ -1,122 +0,0 @@ -package logbridge - -import ( - "log" - - hclog "github.com/hashicorp/go-hclog" -) - -type Logger struct { - hclogger hclog.Logger -} - -func NewLogger(hclogger hclog.Logger) *Logger { - return &Logger{hclogger: hclogger} -} -func (l *Logger) Trace(msg string, args ...interface{}) { - l.hclogger.Trace(msg, args...) -} -func (l *Logger) Debug(msg string, args ...interface{}) { - l.hclogger.Debug(msg, args...) -} -func (l *Logger) Info(msg string, args ...interface{}) { - l.hclogger.Info(msg, args...) -} -func (l *Logger) Warn(msg string, args ...interface{}) { - l.hclogger.Warn(msg, args...) -} -func (l *Logger) Error(msg string, args ...interface{}) { - l.hclogger.Error(msg, args...) 
-} -func (l *Logger) IsTrace() bool { - return l.hclogger.IsTrace() -} -func (l *Logger) IsDebug() bool { - return l.hclogger.IsDebug() -} -func (l *Logger) IsInfo() bool { - return l.hclogger.IsInfo() -} -func (l *Logger) IsWarn() bool { - return l.hclogger.IsWarn() -} -func (l *Logger) With(args ...interface{}) *Logger { - return &Logger{ - hclogger: l.hclogger.With(args...), - } -} -func (l *Logger) Named(name string) *Logger { - return &Logger{ - hclogger: l.hclogger.Named(name), - } -} -func (l *Logger) ResetNamed(name string) *Logger { - return &Logger{ - hclogger: l.hclogger.ResetNamed(name), - } -} -func (l *Logger) StandardLogger(opts *hclog.StandardLoggerOptions) *log.Logger { - return l.hclogger.StandardLogger(opts) -} -func (l *Logger) LogxiLogger() *LogxiLogger { - return &LogxiLogger{ - l: l, - } -} - -// This is only for compatibility with whatever the fuck is up with the errors -// coming back from warn/error in Logxi's API. Don't use this directly. -type LogxiLogger struct { - l *Logger -} - -func (l *LogxiLogger) Trace(msg string, args ...interface{}) { - l.l.Trace(msg, args...) -} -func (l *LogxiLogger) Debug(msg string, args ...interface{}) { - l.l.Debug(msg, args...) -} -func (l *LogxiLogger) Info(msg string, args ...interface{}) { - l.l.Info(msg, args...) -} -func (l *LogxiLogger) Warn(msg string, args ...interface{}) error { - l.l.Warn(msg, args...) - return nil -} -func (l *LogxiLogger) Error(msg string, args ...interface{}) error { - l.l.Error(msg, args...) 
- return nil -} -func (l *LogxiLogger) Fatal(msg string, args ...interface{}) { - panic(msg) -} -func (l *LogxiLogger) Log(level int, msg string, args []interface{}) { - panic(msg) -} -func (l *LogxiLogger) IsTrace() bool { - return l.l.IsTrace() -} -func (l *LogxiLogger) IsDebug() bool { - return l.l.IsDebug() -} -func (l *LogxiLogger) IsInfo() bool { - return l.l.IsInfo() -} -func (l *LogxiLogger) IsWarn() bool { - return l.l.IsWarn() -} -func (l *LogxiLogger) SetLevel(level int) { - panic("set level") -} -func (l *LogxiLogger) With(args ...interface{}) *LogxiLogger { - return l.l.With(args...).LogxiLogger() -} -func (l *LogxiLogger) Named(name string) *LogxiLogger { - return l.l.Named(name).LogxiLogger() -} -func (l *LogxiLogger) ResetNamed(name string) *LogxiLogger { - return l.l.ResetNamed(name).LogxiLogger() -} -func (l *LogxiLogger) StandardLogger(opts *hclog.StandardLoggerOptions) *log.Logger { - return l.l.StandardLogger(opts) -} diff --git a/helper/logformat/vault.go b/helper/logformat/vault.go deleted file mode 100644 index fa53a199f3..0000000000 --- a/helper/logformat/vault.go +++ /dev/null @@ -1,175 +0,0 @@ -package logformat - -import ( - "encoding/json" - "fmt" - "io" - "os" - "strings" - "sync" - "time" - - log "github.com/mgutz/logxi/v1" -) - -const ( - styledefault = iota - stylejson -) - -// NewVaultLogger creates a new logger with the specified level and a Vault -// formatter -func NewVaultLogger(level int) log.Logger { - logger := log.New("vault") - return setLevelFormatter(logger, level, createVaultFormatter()) -} - -// NewVaultLoggerWithWriter creates a new logger with the specified level and -// writer and a Vault formatter -func NewVaultLoggerWithWriter(w io.Writer, level int) log.Logger { - logger := log.NewLogger(w, "vault") - return setLevelFormatter(logger, level, createVaultFormatter()) -} - -// Sets the level and formatter on the log, which must be a DefaultLogger -func setLevelFormatter(logger log.Logger, level int, formatter 
log.Formatter) log.Logger { - logger.(*log.DefaultLogger).SetLevel(level) - logger.(*log.DefaultLogger).SetFormatter(formatter) - return logger -} - -// Creates a formatter, checking env vars for the style -func createVaultFormatter() log.Formatter { - ret := &vaultFormatter{ - Mutex: &sync.Mutex{}, - } - logFormat := os.Getenv("VAULT_LOG_FORMAT") - if logFormat == "" { - logFormat = os.Getenv("LOGXI_FORMAT") - } - switch strings.ToLower(logFormat) { - case "json", "vault_json", "vault-json", "vaultjson": - ret.style = stylejson - default: - ret.style = styledefault - } - return ret -} - -// Thread safe formatter -type vaultFormatter struct { - *sync.Mutex - style int - module string -} - -func (v *vaultFormatter) Format(writer io.Writer, level int, msg string, args []interface{}) { - currTime := time.Now() - v.Lock() - defer v.Unlock() - switch v.style { - case stylejson: - v.formatJSON(writer, currTime, level, msg, args) - default: - v.formatDefault(writer, currTime, level, msg, args) - } -} - -func (v *vaultFormatter) formatDefault(writer io.Writer, currTime time.Time, level int, msg string, args []interface{}) { - // Write a trailing newline - defer writer.Write([]byte("\n")) - - writer.Write([]byte(currTime.Local().Format("2006/01/02 15:04:05.000000"))) - - switch level { - case log.LevelCritical: - writer.Write([]byte(" [CRIT ] ")) - case log.LevelError: - writer.Write([]byte(" [ERROR] ")) - case log.LevelWarn: - writer.Write([]byte(" [WARN ] ")) - case log.LevelInfo: - writer.Write([]byte(" [INFO ] ")) - case log.LevelDebug: - writer.Write([]byte(" [DEBUG] ")) - case log.LevelTrace: - writer.Write([]byte(" [TRACE] ")) - default: - writer.Write([]byte(" [ALL ] ")) - } - - if v.module != "" { - writer.Write([]byte(fmt.Sprintf("(%s) ", v.module))) - } - - writer.Write([]byte(msg)) - - if args != nil && len(args) > 0 { - if len(args)%2 != 0 { - args = append(args, "[unknown!]") - } - - writer.Write([]byte(":")) - - for i := 0; i < len(args); i = i + 2 { - var 
quote string - switch args[i+1].(type) { - case string: - if strings.ContainsRune(args[i+1].(string), ' ') { - quote = `"` - } - } - writer.Write([]byte(fmt.Sprintf(" %s=%s%v%s", args[i], quote, args[i+1], quote))) - } - } -} - -func (v *vaultFormatter) formatJSON(writer io.Writer, currTime time.Time, level int, msg string, args []interface{}) { - vals := map[string]interface{}{ - "@message": msg, - "@timestamp": currTime.Format("2006-01-02T15:04:05.000000Z07:00"), - } - - var levelStr string - switch level { - case log.LevelCritical: - levelStr = "critical" - case log.LevelError: - levelStr = "error" - case log.LevelWarn: - levelStr = "warn" - case log.LevelInfo: - levelStr = "info" - case log.LevelDebug: - levelStr = "debug" - case log.LevelTrace: - levelStr = "trace" - default: - levelStr = "all" - } - - vals["@level"] = levelStr - - if v.module != "" { - vals["@module"] = v.module - } - - if args != nil && len(args) > 0 { - - if len(args)%2 != 0 { - args = append(args, "[unknown!]") - } - - for i := 0; i < len(args); i = i + 2 { - if _, ok := args[i].(string); !ok { - // As this is the logging function not much we can do here - // without injecting into logs... 
- continue - } - vals[args[i].(string)] = args[i+1] - } - } - - enc := json.NewEncoder(writer) - enc.Encode(vals) -} diff --git a/helper/logging/vault.go b/helper/logging/vault.go new file mode 100644 index 0000000000..3e7e4766da --- /dev/null +++ b/helper/logging/vault.go @@ -0,0 +1,39 @@ +package logging + +import ( + "io" + "os" + "strings" + + log "github.com/hashicorp/go-hclog" +) + +// NewVaultLogger creates a new logger with the specified level and a Vault +// formatter +func NewVaultLogger(level log.Level) log.Logger { + return NewVaultLoggerWithWriter(log.DefaultOutput, level) +} + +// NewVaultLoggerWithWriter creates a new logger with the specified level and +// writer and a Vault formatter +func NewVaultLoggerWithWriter(w io.Writer, level log.Level) log.Logger { + opts := &log.LoggerOptions{ + Level: level, + Output: w, + JSONFormat: useJson(), + } + return log.New(opts) +} + +func useJson() bool { + logFormat := os.Getenv("VAULT_LOG_FORMAT") + if logFormat == "" { + logFormat = os.Getenv("LOGXI_FORMAT") + } + switch strings.ToLower(logFormat) { + case "json", "vault_json", "vault-json", "vaultjson": + return true + default: + return false + } +} diff --git a/helper/pluginutil/logger.go b/helper/pluginutil/logger.go deleted file mode 100644 index fff8ff129e..0000000000 --- a/helper/pluginutil/logger.go +++ /dev/null @@ -1,158 +0,0 @@ -package pluginutil - -import ( - "bytes" - "fmt" - stdlog "log" - "strings" - - hclog "github.com/hashicorp/go-hclog" - log "github.com/mgutz/logxi/v1" -) - -// pluginLogFaker is a wrapper on logxi.Logger that -// implements hclog.Logger -type hclogFaker struct { - logger log.Logger - - name string - implied []interface{} -} - -func (f *hclogFaker) buildLog(msg string, args ...interface{}) (string, []interface{}) { - if f.name != "" { - msg = fmt.Sprintf("%s: %s", f.name, msg) - } - args = append(f.implied, args...) 
- - return msg, args -} - -func (f *hclogFaker) Trace(msg string, args ...interface{}) { - msg, args = f.buildLog(msg, args...) - f.logger.Trace(msg, args...) -} - -func (f *hclogFaker) Debug(msg string, args ...interface{}) { - msg, args = f.buildLog(msg, args...) - f.logger.Debug(msg, args...) -} - -func (f *hclogFaker) Info(msg string, args ...interface{}) { - msg, args = f.buildLog(msg, args...) - f.logger.Info(msg, args...) -} - -func (f *hclogFaker) Warn(msg string, args ...interface{}) { - msg, args = f.buildLog(msg, args...) - f.logger.Warn(msg, args...) -} - -func (f *hclogFaker) Error(msg string, args ...interface{}) { - msg, args = f.buildLog(msg, args...) - f.logger.Error(msg, args...) -} - -func (f *hclogFaker) IsTrace() bool { - return f.logger.IsTrace() -} - -func (f *hclogFaker) IsDebug() bool { - return f.logger.IsDebug() -} - -func (f *hclogFaker) IsInfo() bool { - return f.logger.IsInfo() -} - -func (f *hclogFaker) IsWarn() bool { - return f.logger.IsWarn() -} - -func (f *hclogFaker) IsError() bool { - return !f.logger.IsTrace() && !f.logger.IsDebug() && !f.logger.IsInfo() && !f.IsWarn() -} - -func (f *hclogFaker) With(args ...interface{}) hclog.Logger { - var nf = *f - nf.implied = append(nf.implied, args...) - return f -} - -func (f *hclogFaker) Named(name string) hclog.Logger { - var nf = *f - if nf.name != "" { - nf.name = nf.name + "." + name - } - return &nf -} - -func (f *hclogFaker) ResetNamed(name string) hclog.Logger { - var nf = *f - nf.name = name - return &nf -} - -func (f *hclogFaker) StandardLogger(opts *hclog.StandardLoggerOptions) *stdlog.Logger { - if opts == nil { - opts = &hclog.StandardLoggerOptions{} - } - - return stdlog.New(&stdlogAdapter{f, opts.InferLevels}, "", 0) -} - -// Provides a io.Writer to shim the data out of *log.Logger -// and back into our Logger. This is basically the only way to -// build upon *log.Logger. 
-type stdlogAdapter struct { - hl hclog.Logger - inferLevels bool -} - -// Take the data, infer the levels if configured, and send it through -// a regular Logger -func (s *stdlogAdapter) Write(data []byte) (int, error) { - str := string(bytes.TrimRight(data, " \t\n")) - - if s.inferLevels { - level, str := s.pickLevel(str) - switch level { - case hclog.Trace: - s.hl.Trace(str) - case hclog.Debug: - s.hl.Debug(str) - case hclog.Info: - s.hl.Info(str) - case hclog.Warn: - s.hl.Warn(str) - case hclog.Error: - s.hl.Error(str) - default: - s.hl.Info(str) - } - } else { - s.hl.Info(str) - } - - return len(data), nil -} - -// Detect, based on conventions, what log level this is -func (s *stdlogAdapter) pickLevel(str string) (hclog.Level, string) { - switch { - case strings.HasPrefix(str, "[DEBUG]"): - return hclog.Debug, strings.TrimSpace(str[7:]) - case strings.HasPrefix(str, "[TRACE]"): - return hclog.Trace, strings.TrimSpace(str[7:]) - case strings.HasPrefix(str, "[INFO]"): - return hclog.Info, strings.TrimSpace(str[6:]) - case strings.HasPrefix(str, "[WARN]"): - return hclog.Warn, strings.TrimSpace(str[7:]) - case strings.HasPrefix(str, "[ERROR]"): - return hclog.Error, strings.TrimSpace(str[7:]) - case strings.HasPrefix(str, "[ERR]"): - return hclog.Error, strings.TrimSpace(str[5:]) - default: - return hclog.Info, str - } -} diff --git a/helper/pluginutil/runner.go b/helper/pluginutil/runner.go index 7d76202797..436e169fe8 100644 --- a/helper/pluginutil/runner.go +++ b/helper/pluginutil/runner.go @@ -9,11 +9,11 @@ import ( "os/exec" "time" + log "github.com/hashicorp/go-hclog" plugin "github.com/hashicorp/go-plugin" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/helper/wrapping" "github.com/hashicorp/vault/version" - log "github.com/mgutz/logxi/v1" ) // Looker defines the plugin Lookup function that looks into the plugin catalog @@ -73,12 +73,6 @@ func (r *PluginRunner) runCommon(ctx context.Context, wrapper RunnerUtil, plugin } cmd.Env = 
append(cmd.Env, fmt.Sprintf("%s=%s", PluginVaultVersionEnv, version.GetVersion().Version)) - // Create logger for the plugin client - clogger := &hclogFaker{ - logger: logger, - } - namedLogger := clogger.ResetNamed("plugin") - var clientTLSConfig *tls.Config if !isMetadataMode { // Add the metadata mode ENV and set it to false @@ -106,7 +100,7 @@ func (r *PluginRunner) runCommon(ctx context.Context, wrapper RunnerUtil, plugin // Add the response wrap token to the ENV of the plugin cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginUnwrapTokenEnv, wrapToken)) } else { - namedLogger = clogger.ResetNamed("plugin.metadata") + logger = logger.With("metadata", "true") cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginMetadataModeEnv, "true")) } @@ -121,7 +115,7 @@ func (r *PluginRunner) runCommon(ctx context.Context, wrapper RunnerUtil, plugin Cmd: cmd, SecureConfig: secureConfig, TLSConfig: clientTLSConfig, - Logger: namedLogger, + Logger: logger, AllowedProtocols: []plugin.Protocol{ plugin.ProtocolNetRPC, plugin.ProtocolGRPC, diff --git a/helper/storagepacker/storagepacker.go b/helper/storagepacker/storagepacker.go index 62e0bced22..67c05b9b1b 100644 --- a/helper/storagepacker/storagepacker.go +++ b/helper/storagepacker/storagepacker.go @@ -10,10 +10,10 @@ import ( "github.com/golang/protobuf/proto" "github.com/hashicorp/errwrap" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/helper/compressutil" "github.com/hashicorp/vault/helper/locksutil" "github.com/hashicorp/vault/logical" - log "github.com/mgutz/logxi/v1" ) const ( @@ -347,7 +347,7 @@ func NewStoragePacker(view logical.Storage, logger log.Logger, viewPrefix string packer := &StoragePacker{ view: view, viewPrefix: viewPrefix, - logger: logger, + logger: logger.Named("storagepacker"), storageLocks: locksutil.CreateLocks(), } diff --git a/helper/storagepacker/storagepacker_test.go b/helper/storagepacker/storagepacker_test.go index 992658777f..405b118e66 100644 --- 
a/helper/storagepacker/storagepacker_test.go +++ b/helper/storagepacker/storagepacker_test.go @@ -5,14 +5,14 @@ import ( "testing" "github.com/golang/protobuf/ptypes" + log "github.com/hashicorp/go-hclog" uuid "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/helper/identity" "github.com/hashicorp/vault/logical" - log "github.com/mgutz/logxi/v1" ) func BenchmarkStoragePacker(b *testing.B) { - storagePacker, err := NewStoragePacker(&logical.InmemStorage{}, log.New("storagepackertest"), "") + storagePacker, err := NewStoragePacker(&logical.InmemStorage{}, log.New(&log.LoggerOptions{Name: "storagepackertest"}), "") if err != nil { b.Fatal(err) } @@ -61,7 +61,7 @@ func BenchmarkStoragePacker(b *testing.B) { } func TestStoragePacker(t *testing.T) { - storagePacker, err := NewStoragePacker(&logical.InmemStorage{}, log.New("storagepackertest"), "") + storagePacker, err := NewStoragePacker(&logical.InmemStorage{}, log.New(&log.LoggerOptions{Name: "storagepackertest"}), "") if err != nil { t.Fatal(err) } @@ -107,7 +107,7 @@ func TestStoragePacker(t *testing.T) { } func TestStoragePacker_SerializeDeserializeComplexItem(t *testing.T) { - storagePacker, err := NewStoragePacker(&logical.InmemStorage{}, log.New("storagepackertest"), "") + storagePacker, err := NewStoragePacker(&logical.InmemStorage{}, log.New(&log.LoggerOptions{Name: "storagepackertest"}), "") if err != nil { t.Fatal(err) } diff --git a/http/forwarding_test.go b/http/forwarding_test.go index 630b6af484..a55b4f66bd 100644 --- a/http/forwarding_test.go +++ b/http/forwarding_test.go @@ -290,7 +290,7 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint64) waitCond.L.Unlock() waitCond.Broadcast() - core.Logger().Trace("Starting goroutine", "id", id) + core.Logger().Debug("Starting goroutine", "id", id) startTime := time.Now() for { diff --git a/http/handler.go b/http/handler.go index 00d2994b05..55d1bd458e 100644 --- a/http/handler.go +++ b/http/handler.go @@ -167,7 +167,7 @@ func 
handleRequestForwarding(core *vault.Core, handler http.Handler) http.Handle if r.Header.Get(NoRequestForwardingHeaderName) != "" { // Forwarding explicitly disabled, fall back to previous behavior - core.Logger().Trace("http/handleRequestForwarding: forwarding disabled by client request") + core.Logger().Debug("handleRequestForwarding: forwarding disabled by client request") handler.ServeHTTP(w, r) return } @@ -202,9 +202,9 @@ func handleRequestForwarding(core *vault.Core, handler http.Handler) http.Handle statusCode, header, retBytes, err := core.ForwardRequest(r) if err != nil { if err == vault.ErrCannotForward { - core.Logger().Trace("http/handleRequestForwarding: cannot forward (possibly disabled on active node), falling back") + core.Logger().Debug("handleRequestForwarding: cannot forward (possibly disabled on active node), falling back") } else { - core.Logger().Error("http/handleRequestForwarding: error forwarding request", "error", err) + core.Logger().Error("handleRequestForwarding: error forwarding request", "error", err) } // Fall back to redirection diff --git a/http/logical_test.go b/http/logical_test.go index fee8fb07ee..4008668c8a 100644 --- a/http/logical_test.go +++ b/http/logical_test.go @@ -13,9 +13,9 @@ import ( "testing" "time" - log "github.com/mgutz/logxi/v1" + log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/physical" "github.com/hashicorp/vault/physical/inmem" @@ -85,7 +85,7 @@ func TestLogical_StandbyRedirect(t *testing.T) { defer ln2.Close() // Create an HA Vault - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) inmha, err := inmem.NewInmemHA(nil, logger) if err != nil { @@ -137,9 +137,9 @@ func TestLogical_StandbyRedirect(t *testing.T) { resp := testHttpPutDisableRedirect(t, root, addr2+"/v1/secret/foo", map[string]interface{}{ "data": "bar", }) 
- logger.Trace("307 test one starting") + logger.Debug("307 test one starting") testResponseStatus(t, resp, 307) - logger.Trace("307 test one stopping") + logger.Debug("307 test one stopping") //// READ to standby resp = testHttpGet(t, root, addr2+"/v1/auth/token/lookup-self") @@ -181,9 +181,9 @@ func TestLogical_StandbyRedirect(t *testing.T) { //// DELETE to standby resp = testHttpDeleteDisableRedirect(t, root, addr2+"/v1/secret/foo") - logger.Trace("307 test two starting") + logger.Debug("307 test two starting") testResponseStatus(t, resp, 307) - logger.Trace("307 test two stopping") + logger.Debug("307 test two stopping") } func TestLogical_CreateToken(t *testing.T) { diff --git a/http/plugin_test.go b/http/plugin_test.go index b5c978992a..5525eb483f 100644 --- a/http/plugin_test.go +++ b/http/plugin_test.go @@ -8,10 +8,9 @@ import ( "sync" "testing" - hclog "github.com/hashicorp/go-hclog" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" bplugin "github.com/hashicorp/vault/builtin/plugin" - "github.com/hashicorp/vault/helper/logbridge" "github.com/hashicorp/vault/helper/pluginutil" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/plugin" @@ -20,8 +19,8 @@ import ( "github.com/hashicorp/vault/vault" ) -func getPluginClusterAndCore(t testing.TB, logger *logbridge.Logger) (*vault.TestCluster, *vault.TestClusterCore) { - inmha, err := inmem.NewInmemHA(nil, logger.LogxiLogger()) +func getPluginClusterAndCore(t testing.TB, logger log.Logger) (*vault.TestCluster, *vault.TestClusterCore) { + inmha, err := inmem.NewInmemHA(nil, logger) if err != nil { t.Fatal(err) } @@ -35,7 +34,7 @@ func getPluginClusterAndCore(t testing.TB, logger *logbridge.Logger) (*vault.Tes cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ HandlerFunc: Handler, - RawLogger: logger, + Logger: logger.Named("testclusteroptions"), }) cluster.Start() @@ -91,9 +90,9 @@ func TestPlugin_PluginMain(t *testing.T) { } func 
TestPlugin_MockList(t *testing.T) { - logger := logbridge.NewLogger(hclog.New(&hclog.LoggerOptions{ + logger := log.New(&log.LoggerOptions{ Mutex: &sync.Mutex{}, - })) + }) cluster, core := getPluginClusterAndCore(t, logger) defer cluster.Cleanup() @@ -129,9 +128,9 @@ func TestPlugin_MockList(t *testing.T) { } func TestPlugin_MockRawResponse(t *testing.T) { - logger := logbridge.NewLogger(hclog.New(&hclog.LoggerOptions{ + logger := log.New(&log.LoggerOptions{ Mutex: &sync.Mutex{}, - })) + }) cluster, core := getPluginClusterAndCore(t, logger) defer cluster.Cleanup() @@ -155,9 +154,9 @@ func TestPlugin_MockRawResponse(t *testing.T) { } func TestPlugin_GetParams(t *testing.T) { - logger := logbridge.NewLogger(hclog.New(&hclog.LoggerOptions{ + logger := log.New(&log.LoggerOptions{ Mutex: &sync.Mutex{}, - })) + }) cluster, core := getPluginClusterAndCore(t, logger) defer cluster.Cleanup() diff --git a/logical/framework/backend.go b/logical/framework/backend.go index b9bf3eb87e..caf8b2cccf 100644 --- a/logical/framework/backend.go +++ b/logical/framework/backend.go @@ -11,11 +11,11 @@ import ( "sync" "time" - log "github.com/mgutz/logxi/v1" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-multierror" "github.com/hashicorp/vault/helper/errutil" - "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/helper/parseutil" "github.com/hashicorp/vault/logical" ) @@ -255,7 +255,7 @@ func (b *Backend) Logger() log.Logger { return b.logger } - return logformat.NewVaultLoggerWithWriter(ioutil.Discard, log.LevelOff) + return logging.NewVaultLoggerWithWriter(ioutil.Discard, log.NoLevel) } // System returns the backend's system view. 
diff --git a/logical/logical.go b/logical/logical.go index 6ac40ab7b9..a3456e9671 100644 --- a/logical/logical.go +++ b/logical/logical.go @@ -3,7 +3,7 @@ package logical import ( "context" - log "github.com/mgutz/logxi/v1" + log "github.com/hashicorp/go-hclog" ) // BackendType is the type of backend that is being implemented diff --git a/logical/plugin/backend.go b/logical/plugin/backend.go index 713f5d3c39..b55a0aaefd 100644 --- a/logical/plugin/backend.go +++ b/logical/plugin/backend.go @@ -7,9 +7,8 @@ import ( "google.golang.org/grpc" - hclog "github.com/hashicorp/go-hclog" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin" - "github.com/hashicorp/vault/helper/logbridge" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/plugin/pb" ) @@ -18,12 +17,18 @@ import ( type BackendPlugin struct { Factory logical.Factory metadataMode bool - Logger hclog.Logger + Logger log.Logger } // Server gets called when on plugin.Serve() func (b *BackendPlugin) Server(broker *plugin.MuxBroker) (interface{}, error) { - return &backendPluginServer{factory: b.Factory, broker: broker}, nil + return &backendPluginServer{ + factory: b.Factory, + broker: broker, + // We pass the logger down into the backend so go-plugin will forward + // logs for us. + logger: b.Logger, + }, nil } // Client gets called on plugin.NewClient() @@ -37,7 +42,7 @@ func (b BackendPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) err factory: b.Factory, // We pass the logger down into the backend so go-plugin will forward // logs for us. 
- logger: logbridge.NewLogger(b.Logger).LogxiLogger(), + logger: b.Logger, }) return nil } diff --git a/logical/plugin/backend_client.go b/logical/plugin/backend_client.go index 8f03e956e2..43a442f4ba 100644 --- a/logical/plugin/backend_client.go +++ b/logical/plugin/backend_client.go @@ -5,9 +5,9 @@ import ( "errors" "net/rpc" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin" "github.com/hashicorp/vault/logical" - log "github.com/mgutz/logxi/v1" ) var ( @@ -204,16 +204,6 @@ func (b *backendPluginClient) Setup(ctx context.Context, config *logical.Backend impl: storageImpl, }) - // Shim log.Logger - loggerImpl := config.Logger - if b.metadataMode { - loggerImpl = log.NullLog - } - loggerID := b.broker.NextId() - go b.broker.AcceptAndServe(loggerID, &LoggerServer{ - logger: loggerImpl, - }) - // Shim logical.SystemView sysViewImpl := config.System if b.metadataMode { @@ -226,7 +216,6 @@ func (b *backendPluginClient) Setup(ctx context.Context, config *logical.Backend args := &SetupArgs{ StorageID: storageID, - LoggerID: loggerID, SysViewID: sysViewID, Config: config.Config, BackendUUID: config.BackendUUID, diff --git a/logical/plugin/backend_server.go b/logical/plugin/backend_server.go index db88ea2080..338afb45dd 100644 --- a/logical/plugin/backend_server.go +++ b/logical/plugin/backend_server.go @@ -6,6 +6,8 @@ import ( "net/rpc" "os" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin" "github.com/hashicorp/vault/helper/pluginutil" "github.com/hashicorp/vault/logical" @@ -22,7 +24,7 @@ type backendPluginServer struct { backend logical.Backend factory logical.Factory - loggerClient *rpc.Client + logger hclog.Logger sysViewClient *rpc.Client storageClient *rpc.Client } @@ -77,7 +79,6 @@ func (b *backendPluginServer) Cleanup(_ interface{}, _ *struct{}) error { b.backend.Cleanup(context.Background()) // Close rpc clients - b.loggerClient.Close() b.sysViewClient.Close() b.storageClient.Close() return nil @@ -109,19 +110,6 @@ func (b 
*backendPluginServer) Setup(args *SetupArgs, reply *SetupReply) error { storage := &StorageClient{client: rawStorageClient} - // Dial for logger - loggerConn, err := b.broker.Dial(args.LoggerID) - if err != nil { - *reply = SetupReply{ - Error: wrapError(err), - } - return nil - } - rawLoggerClient := rpc.NewClient(loggerConn) - b.loggerClient = rawLoggerClient - - logger := &LoggerClient{client: rawLoggerClient} - // Dial for sys view sysViewConn, err := b.broker.Dial(args.SysViewID) if err != nil { @@ -137,7 +125,7 @@ func (b *backendPluginServer) Setup(args *SetupArgs, reply *SetupReply) error { config := &logical.BackendConfig{ StorageView: storage, - Logger: logger, + Logger: b.logger, System: sysView, Config: args.Config, BackendUUID: args.BackendUUID, diff --git a/logical/plugin/backend_test.go b/logical/plugin/backend_test.go index 18aab4eba4..3294506493 100644 --- a/logical/plugin/backend_test.go +++ b/logical/plugin/backend_test.go @@ -5,11 +5,11 @@ import ( "testing" "time" + log "github.com/hashicorp/go-hclog" gplugin "github.com/hashicorp/go-plugin" - "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/plugin/mock" - log "github.com/mgutz/logxi/v1" ) func TestBackendPlugin_impl(t *testing.T) { @@ -156,7 +156,7 @@ func testBackend(t *testing.T) (logical.Backend, func()) { b := raw.(logical.Backend) err = b.Setup(context.Background(), &logical.BackendConfig{ - Logger: logformat.NewVaultLogger(log.LevelTrace), + Logger: logging.NewVaultLogger(log.Debug), System: &logical.StaticSystemView{ DefaultLeaseTTLVal: 300 * time.Second, MaxLeaseTTLVal: 1800 * time.Second, diff --git a/logical/plugin/grpc_backend_client.go b/logical/plugin/grpc_backend_client.go index a248938783..305ee102e2 100644 --- a/logical/plugin/grpc_backend_client.go +++ b/logical/plugin/grpc_backend_client.go @@ -7,11 +7,11 @@ import ( "google.golang.org/grpc" + log 
"github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin" "github.com/hashicorp/vault/helper/pluginutil" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/plugin/pb" - log "github.com/mgutz/logxi/v1" ) var ErrPluginShutdown = errors.New("plugin is shut down") diff --git a/logical/plugin/grpc_backend_server.go b/logical/plugin/grpc_backend_server.go index bb264f015f..977738a0a9 100644 --- a/logical/plugin/grpc_backend_server.go +++ b/logical/plugin/grpc_backend_server.go @@ -3,10 +3,10 @@ package plugin import ( "context" + log "github.com/hashicorp/go-hclog" plugin "github.com/hashicorp/go-plugin" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/plugin/pb" - log "github.com/mgutz/logxi/v1" "google.golang.org/grpc" ) diff --git a/logical/plugin/grpc_backend_test.go b/logical/plugin/grpc_backend_test.go index 137ef055b0..3448152944 100644 --- a/logical/plugin/grpc_backend_test.go +++ b/logical/plugin/grpc_backend_test.go @@ -6,12 +6,11 @@ import ( "testing" "time" - hclog "github.com/hashicorp/go-hclog" + log "github.com/hashicorp/go-hclog" gplugin "github.com/hashicorp/go-plugin" - "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/plugin/mock" - log "github.com/mgutz/logxi/v1" ) func TestGRPCBackendPlugin_impl(t *testing.T) { @@ -143,8 +142,8 @@ func testGRPCBackend(t *testing.T) (logical.Backend, func()) { pluginMap := map[string]gplugin.Plugin{ "backend": &BackendPlugin{ Factory: mock.Factory, - Logger: hclog.New(&hclog.LoggerOptions{ - Level: hclog.Trace, + Logger: log.New(&log.LoggerOptions{ + Level: log.Debug, Output: os.Stderr, JSONFormat: true, }), @@ -163,7 +162,7 @@ func testGRPCBackend(t *testing.T) (logical.Backend, func()) { b := raw.(logical.Backend) err = b.Setup(context.Background(), &logical.BackendConfig{ - Logger: logformat.NewVaultLogger(log.LevelTrace), + Logger: 
logging.NewVaultLogger(log.Debug), System: &logical.StaticSystemView{ DefaultLeaseTTLVal: 300 * time.Second, MaxLeaseTTLVal: 1800 * time.Second, diff --git a/logical/plugin/logger.go b/logical/plugin/logger.go index e556260d9e..a59a8a3da2 100644 --- a/logical/plugin/logger.go +++ b/logical/plugin/logger.go @@ -1,114 +1,9 @@ package plugin -import ( - "net/rpc" - - log "github.com/mgutz/logxi/v1" -) - -type LoggerClient struct { - client *rpc.Client -} - -func (l *LoggerClient) Trace(msg string, args ...interface{}) { - cArgs := &LoggerArgs{ - Msg: msg, - Args: args, - } - l.client.Call("Plugin.Trace", cArgs, &struct{}{}) -} - -func (l *LoggerClient) Debug(msg string, args ...interface{}) { - cArgs := &LoggerArgs{ - Msg: msg, - Args: args, - } - l.client.Call("Plugin.Debug", cArgs, &struct{}{}) -} - -func (l *LoggerClient) Info(msg string, args ...interface{}) { - cArgs := &LoggerArgs{ - Msg: msg, - Args: args, - } - l.client.Call("Plugin.Info", cArgs, &struct{}{}) -} -func (l *LoggerClient) Warn(msg string, args ...interface{}) error { - var reply LoggerReply - cArgs := &LoggerArgs{ - Msg: msg, - Args: args, - } - err := l.client.Call("Plugin.Warn", cArgs, &reply) - if err != nil { - return err - } - if reply.Error != nil { - return reply.Error - } - - return nil -} -func (l *LoggerClient) Error(msg string, args ...interface{}) error { - var reply LoggerReply - cArgs := &LoggerArgs{ - Msg: msg, - Args: args, - } - err := l.client.Call("Plugin.Error", cArgs, &reply) - if err != nil { - return err - } - if reply.Error != nil { - return reply.Error - } - - return nil -} - -func (l *LoggerClient) Fatal(msg string, args ...interface{}) { - // NOOP since it's not actually used within vault - return -} - -func (l *LoggerClient) Log(level int, msg string, args []interface{}) { - cArgs := &LoggerArgs{ - Level: level, - Msg: msg, - Args: args, - } - l.client.Call("Plugin.Log", cArgs, &struct{}{}) -} - -func (l *LoggerClient) SetLevel(level int) { - 
l.client.Call("Plugin.SetLevel", level, &struct{}{}) -} - -func (l *LoggerClient) IsTrace() bool { - var reply LoggerReply - l.client.Call("Plugin.IsTrace", new(interface{}), &reply) - return reply.IsTrue -} -func (l *LoggerClient) IsDebug() bool { - var reply LoggerReply - l.client.Call("Plugin.IsDebug", new(interface{}), &reply) - return reply.IsTrue -} - -func (l *LoggerClient) IsInfo() bool { - var reply LoggerReply - l.client.Call("Plugin.IsInfo", new(interface{}), &reply) - return reply.IsTrue -} - -func (l *LoggerClient) IsWarn() bool { - var reply LoggerReply - l.client.Call("Plugin.IsWarn", new(interface{}), &reply) - return reply.IsTrue -} +import hclog "github.com/hashicorp/go-hclog" type LoggerServer struct { - logger log.Logger + logger hclog.Logger } func (l *LoggerServer) Trace(args *LoggerArgs, _ *struct{}) error { @@ -127,34 +22,42 @@ func (l *LoggerServer) Info(args *LoggerArgs, _ *struct{}) error { } func (l *LoggerServer) Warn(args *LoggerArgs, reply *LoggerReply) error { - err := l.logger.Warn(args.Msg, args.Args...) - if err != nil { - *reply = LoggerReply{ - Error: wrapError(err), - } - return nil - } + l.logger.Warn(args.Msg, args.Args...) return nil } func (l *LoggerServer) Error(args *LoggerArgs, reply *LoggerReply) error { - err := l.logger.Error(args.Msg, args.Args...) - if err != nil { - *reply = LoggerReply{ - Error: wrapError(err), - } - return nil - } + l.logger.Error(args.Msg, args.Args...) return nil } func (l *LoggerServer) Log(args *LoggerArgs, _ *struct{}) error { - l.logger.Log(args.Level, args.Msg, args.Args) + + switch translateLevel(args.Level) { + + case hclog.Trace: + l.logger.Trace(args.Msg, args.Args...) + + case hclog.Debug: + l.logger.Debug(args.Msg, args.Args...) + + case hclog.Info: + l.logger.Info(args.Msg, args.Args...) + + case hclog.Warn: + l.logger.Warn(args.Msg, args.Args...) + + case hclog.Error: + l.logger.Error(args.Msg, args.Args...) 
+ + case hclog.NoLevel: + } return nil } func (l *LoggerServer) SetLevel(args int, _ *struct{}) error { - l.logger.SetLevel(args) + level := translateLevel(args) + l.logger = hclog.New(&hclog.LoggerOptions{Level: level}) return nil } @@ -202,3 +105,30 @@ type LoggerReply struct { IsTrue bool Error error } + +func translateLevel(logxiLevel int) hclog.Level { + + switch logxiLevel { + + case 1000, 10: + // logxi.LevelAll, logxi.LevelTrace: + return hclog.Trace + + case 7: + // logxi.LevelDebug: + return hclog.Debug + + case 6, 5: + // logxi.LevelInfo, logxi.LevelNotice: + return hclog.Info + + case 4: + // logxi.LevelWarn: + return hclog.Warn + + case 3, 2, 1, -1: + // logxi.LevelError, logxi.LevelFatal, logxi.LevelAlert, logxi.LevelEmergency: + return hclog.Error + } + return hclog.NoLevel +} diff --git a/logical/plugin/logger_test.go b/logical/plugin/logger_test.go index 10b389c69a..9acc292413 100644 --- a/logical/plugin/logger_test.go +++ b/logical/plugin/logger_test.go @@ -4,17 +4,15 @@ import ( "bufio" "bytes" "io/ioutil" + "net/rpc" "strings" "testing" - plugin "github.com/hashicorp/go-plugin" - "github.com/hashicorp/vault/helper/logformat" - log "github.com/mgutz/logxi/v1" -) + "github.com/hashicorp/go-hclog" -func TestLogger_impl(t *testing.T) { - var _ log.Logger = new(LoggerClient) -} + plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/vault/helper/logging" +) func TestLogger_levels(t *testing.T) { client, server := plugin.TestRPCConn(t) @@ -23,14 +21,14 @@ func TestLogger_levels(t *testing.T) { var buf bytes.Buffer writer := bufio.NewWriter(&buf) - l := logformat.NewVaultLoggerWithWriter(writer, log.LevelTrace) + l := logging.NewVaultLoggerWithWriter(writer, hclog.Trace) server.RegisterName("Plugin", &LoggerServer{ logger: l, }) expected := "foobar" - testLogger := &LoggerClient{client: client} + testLogger := &deprecatedLoggerClient{client: client} // Test trace testLogger.Trace(expected) @@ -103,13 +101,13 @@ func TestLogger_isLevels(t 
*testing.T) { client, server := plugin.TestRPCConn(t) defer client.Close() - l := logformat.NewVaultLoggerWithWriter(ioutil.Discard, log.LevelAll) + l := logging.NewVaultLoggerWithWriter(ioutil.Discard, hclog.Trace) server.RegisterName("Plugin", &LoggerServer{ logger: l, }) - testLogger := &LoggerClient{client: client} + testLogger := &deprecatedLoggerClient{client: client} if !testLogger.IsDebug() || !testLogger.IsInfo() || !testLogger.IsTrace() || !testLogger.IsWarn() { t.Fatal("expected logger to return true for all logger level checks") @@ -123,17 +121,17 @@ func TestLogger_log(t *testing.T) { var buf bytes.Buffer writer := bufio.NewWriter(&buf) - l := logformat.NewVaultLoggerWithWriter(writer, log.LevelTrace) + l := logging.NewVaultLoggerWithWriter(writer, hclog.Trace) server.RegisterName("Plugin", &LoggerServer{ logger: l, }) expected := "foobar" - testLogger := &LoggerClient{client: client} + testLogger := &deprecatedLoggerClient{client: client} - // Test trace - testLogger.Log(log.LevelInfo, expected, nil) + // Test trace 6 = logxi.LevelInfo + testLogger.Log(6, expected, nil) if err := writer.Flush(); err != nil { t.Fatal(err) } @@ -148,16 +146,117 @@ func TestLogger_setLevel(t *testing.T) { client, server := plugin.TestRPCConn(t) defer client.Close() - l := log.NewLogger(ioutil.Discard, "test-logger") + l := hclog.New(&hclog.LoggerOptions{Output: ioutil.Discard}) server.RegisterName("Plugin", &LoggerServer{ logger: l, }) - testLogger := &LoggerClient{client: client} - testLogger.SetLevel(log.LevelWarn) + testLogger := &deprecatedLoggerClient{client: client} + testLogger.SetLevel(4) // 4 == logxi.LevelWarn if !testLogger.IsWarn() { t.Fatal("expected logger to support warn level") } } + +type deprecatedLoggerClient struct { + client *rpc.Client +} + +func (l *deprecatedLoggerClient) Trace(msg string, args ...interface{}) { + cArgs := &LoggerArgs{ + Msg: msg, + Args: args, + } + l.client.Call("Plugin.Trace", cArgs, &struct{}{}) +} + +func (l 
*deprecatedLoggerClient) Debug(msg string, args ...interface{}) { + cArgs := &LoggerArgs{ + Msg: msg, + Args: args, + } + l.client.Call("Plugin.Debug", cArgs, &struct{}{}) +} + +func (l *deprecatedLoggerClient) Info(msg string, args ...interface{}) { + cArgs := &LoggerArgs{ + Msg: msg, + Args: args, + } + l.client.Call("Plugin.Info", cArgs, &struct{}{}) +} +func (l *deprecatedLoggerClient) Warn(msg string, args ...interface{}) error { + var reply LoggerReply + cArgs := &LoggerArgs{ + Msg: msg, + Args: args, + } + err := l.client.Call("Plugin.Warn", cArgs, &reply) + if err != nil { + return err + } + if reply.Error != nil { + return reply.Error + } + + return nil +} +func (l *deprecatedLoggerClient) Error(msg string, args ...interface{}) error { + var reply LoggerReply + cArgs := &LoggerArgs{ + Msg: msg, + Args: args, + } + err := l.client.Call("Plugin.Error", cArgs, &reply) + if err != nil { + return err + } + if reply.Error != nil { + return reply.Error + } + + return nil +} + +func (l *deprecatedLoggerClient) Fatal(msg string, args ...interface{}) { + // NOOP since it's not actually used within vault + return +} + +func (l *deprecatedLoggerClient) Log(level int, msg string, args []interface{}) { + cArgs := &LoggerArgs{ + Level: level, + Msg: msg, + Args: args, + } + l.client.Call("Plugin.Log", cArgs, &struct{}{}) +} + +func (l *deprecatedLoggerClient) SetLevel(level int) { + l.client.Call("Plugin.SetLevel", level, &struct{}{}) +} + +func (l *deprecatedLoggerClient) IsTrace() bool { + var reply LoggerReply + l.client.Call("Plugin.IsTrace", new(interface{}), &reply) + return reply.IsTrue +} +func (l *deprecatedLoggerClient) IsDebug() bool { + var reply LoggerReply + l.client.Call("Plugin.IsDebug", new(interface{}), &reply) + return reply.IsTrue +} + +func (l *deprecatedLoggerClient) IsInfo() bool { + var reply LoggerReply + l.client.Call("Plugin.IsInfo", new(interface{}), &reply) + return reply.IsTrue +} + +func (l *deprecatedLoggerClient) IsWarn() bool { + var 
reply LoggerReply + l.client.Call("Plugin.IsWarn", new(interface{}), &reply) + return reply.IsTrue +} diff --git a/logical/plugin/middleware.go b/logical/plugin/middleware.go index dd17681ddc..d9aeed0f7a 100644 --- a/logical/plugin/middleware.go +++ b/logical/plugin/middleware.go @@ -4,16 +4,14 @@ import ( "context" "time" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/logical" - log "github.com/mgutz/logxi/v1" ) // backendPluginClient implements logical.Backend and is the // go-plugin client. type backendTracingMiddleware struct { - logger log.Logger - transport string - typeStr string + logger log.Logger next logical.Backend } @@ -23,19 +21,19 @@ var _ logical.Backend = &backendTracingMiddleware{} func (b *backendTracingMiddleware) HandleRequest(ctx context.Context, req *logical.Request) (resp *logical.Response, err error) { defer func(then time.Time) { - b.logger.Trace("plugin.HandleRequest", "path", req.Path, "status", "finished", "type", b.typeStr, "transport", b.transport, "err", err, "took", time.Since(then)) + b.logger.Trace("handle request", "path", req.Path, "status", "finished", "err", err, "took", time.Since(then)) }(time.Now()) - b.logger.Trace("plugin.HandleRequest", "path", req.Path, "status", "started", "type", b.typeStr, "transport", b.transport) + b.logger.Trace("handle request", "path", req.Path, "status", "started") return b.next.HandleRequest(ctx, req) } func (b *backendTracingMiddleware) SpecialPaths() *logical.Paths { defer func(then time.Time) { - b.logger.Trace("plugin.SpecialPaths", "status", "finished", "type", b.typeStr, "transport", b.transport, "took", time.Since(then)) + b.logger.Trace("special paths", "status", "finished", "took", time.Since(then)) }(time.Now()) - b.logger.Trace("plugin.SpecialPaths", "status", "started", "type", b.typeStr, "transport", b.transport) + b.logger.Trace("special paths", "status", "started") return b.next.SpecialPaths() } @@ -49,45 +47,45 @@ func (b *backendTracingMiddleware) Logger() 
log.Logger { func (b *backendTracingMiddleware) HandleExistenceCheck(ctx context.Context, req *logical.Request) (found bool, exists bool, err error) { defer func(then time.Time) { - b.logger.Trace("plugin.HandleExistenceCheck", "path", req.Path, "status", "finished", "type", b.typeStr, "transport", b.transport, "err", err, "took", time.Since(then)) + b.logger.Trace("handle existence check", "path", req.Path, "status", "finished", "err", err, "took", time.Since(then)) }(time.Now()) - b.logger.Trace("plugin.HandleExistenceCheck", "path", req.Path, "status", "started", "type", b.typeStr, "transport", b.transport) + b.logger.Trace("handle existence check", "path", req.Path, "status", "started") return b.next.HandleExistenceCheck(ctx, req) } func (b *backendTracingMiddleware) Cleanup(ctx context.Context) { defer func(then time.Time) { - b.logger.Trace("plugin.Cleanup", "status", "finished", "type", b.typeStr, "transport", b.transport, "took", time.Since(then)) + b.logger.Trace("cleanup", "status", "finished", "took", time.Since(then)) }(time.Now()) - b.logger.Trace("plugin.Cleanup", "status", "started", "type", b.typeStr, "transport", b.transport) + b.logger.Trace("cleanup", "status", "started") b.next.Cleanup(ctx) } func (b *backendTracingMiddleware) InvalidateKey(ctx context.Context, key string) { defer func(then time.Time) { - b.logger.Trace("plugin.InvalidateKey", "key", key, "status", "finished", "type", b.typeStr, "transport", b.transport, "took", time.Since(then)) + b.logger.Trace("invalidate key", "key", key, "status", "finished", "took", time.Since(then)) }(time.Now()) - b.logger.Trace("plugin.InvalidateKey", "key", key, "status", "started", "type", b.typeStr, "transport", b.transport) + b.logger.Trace("invalidate key", "key", key, "status", "started") b.next.InvalidateKey(ctx, key) } func (b *backendTracingMiddleware) Setup(ctx context.Context, config *logical.BackendConfig) (err error) { defer func(then time.Time) { - b.logger.Trace("plugin.Setup", "status", 
"finished", "type", b.typeStr, "transport", b.transport, "err", err, "took", time.Since(then)) + b.logger.Trace("setup", "status", "finished", "err", err, "took", time.Since(then)) }(time.Now()) - b.logger.Trace("plugin.Setup", "status", "started", "type", b.typeStr, "transport", b.transport) + b.logger.Trace("setup", "status", "started") return b.next.Setup(ctx, config) } func (b *backendTracingMiddleware) Type() logical.BackendType { defer func(then time.Time) { - b.logger.Trace("plugin.Type", "status", "finished", "type", b.typeStr, "transport", b.transport, "took", time.Since(then)) + b.logger.Trace("type", "status", "finished", "took", time.Since(then)) }(time.Now()) - b.logger.Trace("plugin.Type", "status", "started", "type", b.typeStr, "transport", b.transport) + b.logger.Trace("type", "status", "started") return b.next.Type() } diff --git a/logical/plugin/plugin.go b/logical/plugin/plugin.go index 7b4f957cb0..65b53a2992 100644 --- a/logical/plugin/plugin.go +++ b/logical/plugin/plugin.go @@ -11,10 +11,10 @@ import ( "sync" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin" "github.com/hashicorp/vault/helper/pluginutil" "github.com/hashicorp/vault/logical" - log "github.com/mgutz/logxi/v1" ) // init registers basic structs with gob which will be used to transport complex @@ -101,12 +101,14 @@ func newPluginClient(ctx context.Context, sys pluginutil.RunnerUtil, pluginRunne }, } + namedLogger := logger.Named(pluginRunner.Name) + var client *plugin.Client var err error if isMetadataMode { - client, err = pluginRunner.RunMetadataMode(ctx, sys, pluginMap, handshakeConfig, []string{}, logger) + client, err = pluginRunner.RunMetadataMode(ctx, sys, pluginMap, handshakeConfig, []string{}, namedLogger) } else { - client, err = pluginRunner.Run(ctx, sys, pluginMap, handshakeConfig, []string{}, logger) + client, err = pluginRunner.Run(ctx, sys, pluginMap, handshakeConfig, []string{}, namedLogger) } if err != nil { return nil, err @@ -140,12 +142,10 
@@ func newPluginClient(ctx context.Context, sys pluginutil.RunnerUtil, pluginRunne } // Wrap the backend in a tracing middleware - if logger.IsTrace() { + if namedLogger.IsTrace() { backend = &backendTracingMiddleware{ - logger: logger, - transport: transport, - typeStr: pluginRunner.Name, - next: backend, + logger: namedLogger.With("transport", transport), + next: backend, } } diff --git a/logical/plugin/serve.go b/logical/plugin/serve.go index 97afe0cd42..d7dc867b41 100644 --- a/logical/plugin/serve.go +++ b/logical/plugin/serve.go @@ -4,7 +4,7 @@ import ( "crypto/tls" "os" - hclog "github.com/hashicorp/go-hclog" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin" "github.com/hashicorp/vault/helper/pluginutil" "github.com/hashicorp/vault/logical" @@ -19,7 +19,7 @@ type TLSProviderFunc func() (*tls.Config, error) type ServeOpts struct { BackendFactoryFunc logical.Factory TLSProviderFunc TLSProviderFunc - Logger hclog.Logger + Logger log.Logger } // Serve is a helper function used to serve a backend plugin. This @@ -27,8 +27,8 @@ type ServeOpts struct { func Serve(opts *ServeOpts) error { logger := opts.Logger if logger == nil { - logger = hclog.New(&hclog.LoggerOptions{ - Level: hclog.Trace, + logger = log.New(&log.LoggerOptions{ + Level: log.Trace, Output: os.Stderr, JSONFormat: true, }) diff --git a/logical/testing.go b/logical/testing.go index 6b44123f02..f93f65b433 100644 --- a/logical/testing.go +++ b/logical/testing.go @@ -6,8 +6,8 @@ import ( "testing" "time" - "github.com/hashicorp/vault/helper/logformat" - log "github.com/mgutz/logxi/v1" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/logging" ) // TestRequest is a helper to create a purely in-memory Request struct. 
@@ -76,10 +76,9 @@ func TestSystemView() *StaticSystemView { func TestBackendConfig() *BackendConfig { bc := &BackendConfig{ - Logger: logformat.NewVaultLogger(log.LevelTrace), + Logger: logging.NewVaultLogger(log.Trace), System: TestSystemView(), } - bc.Logger.SetLevel(log.LevelTrace) return bc } diff --git a/logical/testing/testing.go b/logical/testing/testing.go index 2939069f94..c8ed22c794 100644 --- a/logical/testing/testing.go +++ b/logical/testing/testing.go @@ -9,11 +9,11 @@ import ( "sort" "testing" - log "github.com/mgutz/logxi/v1" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/physical/inmem" @@ -135,7 +135,7 @@ func Test(tt TestT, c TestCase) { } // Create an in-memory Vault core - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Trace) phys, err := inmem.NewInmem(nil, logger) if err != nil { @@ -210,8 +210,8 @@ func Test(tt TestT, c TestCase) { // Make requests var revoke []*logical.Request for i, s := range c.Steps { - if log.IsWarn() { - log.Warn("Executing test step", "step_number", i+1) + if logger.IsWarn() { + logger.Warn("Executing test step", "step_number", i+1) } // Create the request @@ -294,8 +294,8 @@ func Test(tt TestT, c TestCase) { // Revoke any secrets we might have. var failedRevokes []*logical.Secret for _, req := range revoke { - if log.IsWarn() { - log.Warn("Revoking secret", "secret", fmt.Sprintf("%#v", req)) + if logger.IsWarn() { + logger.Warn("Revoking secret", "secret", fmt.Sprintf("%#v", req)) } req.ClientToken = client.Token() resp, err := core.HandleRequest(req) @@ -311,7 +311,7 @@ func Test(tt TestT, c TestCase) { // Perform any rollbacks. This should no-op if there aren't any. 
// We set the "immediate" flag here that any backend can pick up on // to do all rollbacks immediately even if the WAL entries are new. - log.Warn("Requesting RollbackOperation") + logger.Warn("Requesting RollbackOperation") req := logical.RollbackRequest(prefix + "/") req.Data["immediate"] = true req.ClientToken = client.Token() diff --git a/physical/azure/azure.go b/physical/azure/azure.go index c0b67e9a38..3728edf778 100644 --- a/physical/azure/azure.go +++ b/physical/azure/azure.go @@ -12,7 +12,7 @@ import ( "time" storage "github.com/Azure/azure-sdk-for-go/storage" - log "github.com/mgutz/logxi/v1" + log "github.com/hashicorp/go-hclog" "github.com/armon/go-metrics" "github.com/hashicorp/errwrap" @@ -86,7 +86,7 @@ func NewAzureBackend(conf map[string]string, logger log.Logger) (physical.Backen return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err) } if logger.IsDebug() { - logger.Debug("azure: max_parallel set", "max_parallel", maxParInt) + logger.Debug("max_parallel set", "max_parallel", maxParInt) } } diff --git a/physical/azure/azure_test.go b/physical/azure/azure_test.go index eb0c510892..dfcd8b9413 100644 --- a/physical/azure/azure_test.go +++ b/physical/azure/azure_test.go @@ -7,9 +7,9 @@ import ( "time" cleanhttp "github.com/hashicorp/go-cleanhttp" - "github.com/hashicorp/vault/helper/logformat" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/physical" - log "github.com/mgutz/logxi/v1" storage "github.com/Azure/azure-sdk-for-go/storage" ) @@ -29,7 +29,7 @@ func TestAzureBackend(t *testing.T) { cleanupClient, _ := storage.NewBasicClient(accountName, accountKey) cleanupClient.HTTPClient = cleanhttp.DefaultPooledClient() - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) backend, err := NewAzureBackend(map[string]string{ "container": name, diff --git a/physical/cache.go b/physical/cache.go index d2e070ed8e..11e40fb9aa 
100644 --- a/physical/cache.go +++ b/physical/cache.go @@ -4,9 +4,9 @@ import ( "context" "sync/atomic" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/golang-lru" "github.com/hashicorp/vault/helper/locksutil" - log "github.com/mgutz/logxi/v1" ) const ( @@ -41,8 +41,8 @@ var _ Transactional = (*TransactionalCache)(nil) // NewCache returns a physical cache of the given size. // If no size is provided, the default size is used. func NewCache(b Backend, size int, logger log.Logger) *Cache { - if logger.IsTrace() { - logger.Trace("physical/cache: creating LRU cache", "size", size) + if logger.IsDebug() { + logger.Debug("creating LRU cache", "size", size) } if size <= 0 { size = DefaultCacheSize diff --git a/physical/cassandra/cassandra.go b/physical/cassandra/cassandra.go index 51b3d47475..47571a03ae 100644 --- a/physical/cassandra/cassandra.go +++ b/physical/cassandra/cassandra.go @@ -10,7 +10,7 @@ import ( "strings" "time" - log "github.com/mgutz/logxi/v1" + log "github.com/hashicorp/go-hclog" "github.com/armon/go-metrics" "github.com/gocql/gocql" @@ -145,7 +145,8 @@ func NewCassandraBackend(conf map[string]string, logger log.Logger) (physical.Ba impl := &CassandraBackend{ sess: sess, table: table, - logger: logger} + logger: logger, + } return impl, nil } diff --git a/physical/cassandra/cassandra_test.go b/physical/cassandra/cassandra_test.go index 1c9b1f1f27..93f77c42ff 100644 --- a/physical/cassandra/cassandra_test.go +++ b/physical/cassandra/cassandra_test.go @@ -9,9 +9,9 @@ import ( "time" "github.com/gocql/gocql" - "github.com/hashicorp/vault/helper/logformat" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/physical" - log "github.com/mgutz/logxi/v1" dockertest "gopkg.in/ory-am/dockertest.v3" ) @@ -24,7 +24,7 @@ func TestCassandraBackend(t *testing.T) { defer cleanup() // Run vault tests - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) 
b, err := NewCassandraBackend(map[string]string{ "hosts": hosts, "protocol_version": "3", diff --git a/physical/cockroachdb/cockroachdb.go b/physical/cockroachdb/cockroachdb.go index d99589d158..a03f24e4c8 100644 --- a/physical/cockroachdb/cockroachdb.go +++ b/physical/cockroachdb/cockroachdb.go @@ -12,9 +12,9 @@ import ( "github.com/armon/go-metrics" "github.com/cockroachdb/cockroach-go/crdb" "github.com/hashicorp/errwrap" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/helper/strutil" "github.com/hashicorp/vault/physical" - log "github.com/mgutz/logxi/v1" // CockroachDB uses the Postgres SQL driver _ "github.com/lib/pq" @@ -58,7 +58,7 @@ func NewCockroachDBBackend(conf map[string]string, logger log.Logger) (physical. return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err) } if logger.IsDebug() { - logger.Debug("cockroachdb: max_parallel set", "max_parallel", maxParInt) + logger.Debug("max_parallel set", "max_parallel", maxParInt) } } diff --git a/physical/cockroachdb/cockroachdb_test.go b/physical/cockroachdb/cockroachdb_test.go index 35bcecf746..043d60df85 100644 --- a/physical/cockroachdb/cockroachdb_test.go +++ b/physical/cockroachdb/cockroachdb_test.go @@ -8,9 +8,9 @@ import ( dockertest "gopkg.in/ory-am/dockertest.v3" - "github.com/hashicorp/vault/helper/logformat" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/physical" - log "github.com/mgutz/logxi/v1" _ "github.com/lib/pq" ) @@ -72,7 +72,7 @@ func TestCockroachDBBackend(t *testing.T) { defer cleanup() // Run vault tests - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) b, err := NewCockroachDBBackend(map[string]string{ "connection_url": connURL, diff --git a/physical/consul/consul.go b/physical/consul/consul.go index f479782baf..405d7197c5 100644 --- a/physical/consul/consul.go +++ b/physical/consul/consul.go @@ -17,7 +17,7 @@ import ( 
"golang.org/x/net/http2" - log "github.com/mgutz/logxi/v1" + log "github.com/hashicorp/go-hclog" "crypto/tls" "crypto/x509" @@ -110,16 +110,16 @@ func NewConsulBackend(conf map[string]string, logger log.Logger) (physical.Backe path = "vault/" } if logger.IsDebug() { - logger.Debug("physical/consul: config path set", "path", path) + logger.Debug("config path set", "path", path) } // Ensure path is suffixed but not prefixed if !strings.HasSuffix(path, "/") { - logger.Warn("physical/consul: appending trailing forward slash to path") + logger.Warn("appending trailing forward slash to path") path += "/" } if strings.HasPrefix(path, "/") { - logger.Warn("physical/consul: trimming path of its forward slash") + logger.Warn("trimming path of its forward slash") path = strings.TrimPrefix(path, "/") } @@ -134,7 +134,7 @@ func NewConsulBackend(conf map[string]string, logger log.Logger) (physical.Backe disableRegistration = b } if logger.IsDebug() { - logger.Debug("physical/consul: config disable_registration set", "disable_registration", disableRegistration) + logger.Debug("config disable_registration set", "disable_registration", disableRegistration) } // Get the service name to advertise in Consul @@ -146,13 +146,13 @@ func NewConsulBackend(conf map[string]string, logger log.Logger) (physical.Backe return nil, errors.New("service name must be valid per RFC 1123 and can contain only alphanumeric characters or dashes") } if logger.IsDebug() { - logger.Debug("physical/consul: config service set", "service", service) + logger.Debug("config service set", "service", service) } // Get the additional tags to attach to the registered service name tags := conf["service_tags"] if logger.IsDebug() { - logger.Debug("physical/consul: config service_tags set", "service_tags", tags) + logger.Debug("config service_tags set", "service_tags", tags) } // Get the service-specific address to override the use of the HA redirect address @@ -162,7 +162,7 @@ func NewConsulBackend(conf 
map[string]string, logger log.Logger) (physical.Backe serviceAddr = &serviceAddrStr } if logger.IsDebug() { - logger.Debug("physical/consul: config service_address set", "service_address", serviceAddr) + logger.Debug("config service_address set", "service_address", serviceAddr) } checkTimeout := defaultCheckTimeout @@ -180,7 +180,7 @@ func NewConsulBackend(conf map[string]string, logger log.Logger) (physical.Backe checkTimeout = d if logger.IsDebug() { - logger.Debug("physical/consul: config check_timeout set", "check_timeout", d) + logger.Debug("config check_timeout set", "check_timeout", d) } } @@ -192,18 +192,18 @@ func NewConsulBackend(conf map[string]string, logger log.Logger) (physical.Backe if addr, ok := conf["address"]; ok { consulConf.Address = addr if logger.IsDebug() { - logger.Debug("physical/consul: config address set", "address", addr) + logger.Debug("config address set", "address", addr) } } if scheme, ok := conf["scheme"]; ok { consulConf.Scheme = scheme if logger.IsDebug() { - logger.Debug("physical/consul: config scheme set", "scheme", scheme) + logger.Debug("config scheme set", "scheme", scheme) } } if token, ok := conf["token"]; ok { consulConf.Token = token - logger.Debug("physical/consul: config token set") + logger.Debug("config token set") } if consulConf.Scheme == "https" { @@ -216,7 +216,7 @@ func NewConsulBackend(conf map[string]string, logger log.Logger) (physical.Backe if err := http2.ConfigureTransport(consulConf.Transport); err != nil { return nil, err } - logger.Debug("physical/consul: configured TLS") + logger.Debug("configured TLS") } consulConf.HttpClient = &http.Client{Transport: consulConf.Transport} @@ -233,7 +233,7 @@ func NewConsulBackend(conf map[string]string, logger log.Logger) (physical.Backe return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err) } if logger.IsDebug() { - logger.Debug("physical/consul: max_parallel set", "max_parallel", maxParInt) + logger.Debug("max_parallel set", 
"max_parallel", maxParInt) } } @@ -544,7 +544,7 @@ func (c *ConsulBackend) NotifyActiveStateChange() error { default: // NOTE: If this occurs Vault's active status could be out of // sync with Consul until reconcileTimer expires. - c.logger.Warn("physical/consul: Concurrent state change notify dropped") + c.logger.Warn("concurrent state change notify dropped") } return nil @@ -556,7 +556,7 @@ func (c *ConsulBackend) NotifySealedStateChange() error { default: // NOTE: If this occurs Vault's sealed status could be out of // sync with Consul until checkTimer expires. - c.logger.Warn("physical/consul: Concurrent sealed state change notify dropped") + c.logger.Warn("concurrent sealed state change notify dropped") } return nil @@ -629,7 +629,7 @@ func (c *ConsulBackend) runEventDemuxer(waitGroup *sync.WaitGroup, shutdownCh ph serviceID, err := c.reconcileConsul(registeredServiceID, activeFunc, sealedFunc) if err != nil { if c.logger.IsWarn() { - c.logger.Warn("physical/consul: reconcile unable to talk with Consul backend", "error", err) + c.logger.Warn("reconcile unable to talk with Consul backend", "error", err) } time.Sleep(consulRetryInterval) continue @@ -655,7 +655,7 @@ func (c *ConsulBackend) runEventDemuxer(waitGroup *sync.WaitGroup, shutdownCh ph sealed := sealedFunc() if err := c.runCheck(sealed); err != nil { if c.logger.IsWarn() { - c.logger.Warn("physical/consul: check unable to talk with Consul backend", "error", err) + c.logger.Warn("check unable to talk with Consul backend", "error", err) } time.Sleep(consulRetryInterval) continue @@ -665,7 +665,7 @@ func (c *ConsulBackend) runEventDemuxer(waitGroup *sync.WaitGroup, shutdownCh ph }() } case <-shutdownCh: - c.logger.Info("physical/consul: Shutting down consul backend") + c.logger.Info("shutting down consul backend") shutdown = true } } @@ -674,7 +674,7 @@ func (c *ConsulBackend) runEventDemuxer(waitGroup *sync.WaitGroup, shutdownCh ph defer c.serviceLock.RUnlock() if err := 
c.client.Agent().ServiceDeregister(registeredServiceID); err != nil { if c.logger.IsWarn() { - c.logger.Warn("physical/consul: service deregistration failed", "error", err) + c.logger.Warn("service deregistration failed", "error", err) } } } diff --git a/physical/consul/consul_test.go b/physical/consul/consul_test.go index 7b1d1f4dc8..fd22b44e17 100644 --- a/physical/consul/consul_test.go +++ b/physical/consul/consul_test.go @@ -9,10 +9,10 @@ import ( "testing" "time" - log "github.com/mgutz/logxi/v1" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/consul/api" - "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/helper/strutil" "github.com/hashicorp/vault/physical" dockertest "gopkg.in/ory-am/dockertest.v2" @@ -36,7 +36,7 @@ func testConsulBackend(t *testing.T) *ConsulBackend { } func testConsulBackendConfig(t *testing.T, conf *consulConf) *ConsulBackend { - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) be, err := NewConsulBackend(*conf, logger) if err != nil { @@ -93,7 +93,7 @@ func TestConsul_ServiceTags(t *testing.T) { "max_parallel": "4", "disable_registration": "false", } - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) be, err := NewConsulBackend(consulConfig, logger) if err != nil { @@ -138,7 +138,7 @@ func TestConsul_ServiceAddress(t *testing.T) { } for _, test := range tests { - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) be, err := NewConsulBackend(test.consulConfig, logger) if err != nil { @@ -226,7 +226,7 @@ func TestConsul_newConsulBackend(t *testing.T) { } for _, test := range tests { - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) be, err := NewConsulBackend(test.consulConfig, logger) if test.fail { @@ -425,7 +425,7 @@ func TestConsul_serviceID(t *testing.T) { }, } - 
logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) for _, test := range tests { be, err := NewConsulBackend(consulConf{ @@ -482,7 +482,7 @@ func TestConsulBackend(t *testing.T) { client.KV().DeleteTree(randPath, nil) }() - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) b, err := NewConsulBackend(map[string]string{ "address": conf.Address, @@ -523,7 +523,7 @@ func TestConsulHABackend(t *testing.T) { client.KV().DeleteTree(randPath, nil) }() - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) b, err := NewConsulBackend(map[string]string{ "address": conf.Address, diff --git a/physical/couchdb/couchdb.go b/physical/couchdb/couchdb.go index 888ad3ed13..2289735009 100644 --- a/physical/couchdb/couchdb.go +++ b/physical/couchdb/couchdb.go @@ -16,8 +16,8 @@ import ( "github.com/armon/go-metrics" "github.com/hashicorp/errwrap" cleanhttp "github.com/hashicorp/go-cleanhttp" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/physical" - log "github.com/mgutz/logxi/v1" ) // CouchDBBackend allows the management of couchdb users @@ -177,7 +177,7 @@ func buildCouchDBBackend(conf map[string]string, logger log.Logger) (*CouchDBBac return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err) } if logger.IsDebug() { - logger.Debug("couchdb: max_parallel set", "max_parallel", maxParInt) + logger.Debug("max_parallel set", "max_parallel", maxParInt) } } diff --git a/physical/couchdb/couchdb_test.go b/physical/couchdb/couchdb_test.go index de4d05d501..ad4a8bf6ee 100644 --- a/physical/couchdb/couchdb_test.go +++ b/physical/couchdb/couchdb_test.go @@ -9,9 +9,9 @@ import ( "testing" "time" - "github.com/hashicorp/vault/helper/logformat" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/physical" - log "github.com/mgutz/logxi/v1" dockertest 
"gopkg.in/ory-am/dockertest.v3" ) @@ -19,7 +19,7 @@ func TestCouchDBBackend(t *testing.T) { cleanup, endpoint, username, password := prepareCouchdbDBTestContainer(t) defer cleanup() - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) b, err := NewCouchDBBackend(map[string]string{ "endpoint": endpoint, @@ -38,7 +38,7 @@ func TestTransactionalCouchDBBackend(t *testing.T) { cleanup, endpoint, username, password := prepareCouchdbDBTestContainer(t) defer cleanup() - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) b, err := NewTransactionalCouchDBBackend(map[string]string{ "endpoint": endpoint, diff --git a/physical/dynamodb/dynamodb.go b/physical/dynamodb/dynamodb.go index eaa36afb08..c229f9a5b0 100644 --- a/physical/dynamodb/dynamodb.go +++ b/physical/dynamodb/dynamodb.go @@ -13,7 +13,7 @@ import ( "sync" "time" - log "github.com/mgutz/logxi/v1" + log "github.com/hashicorp/go-hclog" "github.com/armon/go-metrics" "github.com/aws/aws-sdk-go/aws" diff --git a/physical/dynamodb/dynamodb_test.go b/physical/dynamodb/dynamodb_test.go index 426f23fcae..1a72e90536 100644 --- a/physical/dynamodb/dynamodb_test.go +++ b/physical/dynamodb/dynamodb_test.go @@ -8,9 +8,9 @@ import ( "testing" "time" - "github.com/hashicorp/vault/helper/logformat" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/physical" - log "github.com/mgutz/logxi/v1" dockertest "gopkg.in/ory-am/dockertest.v3" "github.com/aws/aws-sdk-go/aws" @@ -48,7 +48,7 @@ func TestDynamoDBBackend(t *testing.T) { }) }() - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) b, err := NewDynamoDBBackend(map[string]string{ "access_key": creds.AccessKeyID, @@ -95,7 +95,7 @@ func TestDynamoDBHABackend(t *testing.T) { }) }() - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) b, err := 
NewDynamoDBBackend(map[string]string{ "access_key": creds.AccessKeyID, "secret_key": creds.SecretAccessKey, diff --git a/physical/etcd/etcd.go b/physical/etcd/etcd.go index 04b28be6ef..48e47d31d1 100644 --- a/physical/etcd/etcd.go +++ b/physical/etcd/etcd.go @@ -10,8 +10,8 @@ import ( "github.com/coreos/etcd/client" "github.com/coreos/go-semver/semver" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/physical" - log "github.com/mgutz/logxi/v1" ) var ( diff --git a/physical/etcd/etcd2.go b/physical/etcd/etcd2.go index de3acd0f09..b67dfd06c6 100644 --- a/physical/etcd/etcd2.go +++ b/physical/etcd/etcd2.go @@ -14,8 +14,8 @@ import ( metrics "github.com/armon/go-metrics" "github.com/coreos/etcd/client" "github.com/coreos/etcd/pkg/transport" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/physical" - log "github.com/mgutz/logxi/v1" ) const ( diff --git a/physical/etcd/etcd3.go b/physical/etcd/etcd3.go index ad5edf90c0..0cd40217cd 100644 --- a/physical/etcd/etcd3.go +++ b/physical/etcd/etcd3.go @@ -14,9 +14,9 @@ import ( "github.com/coreos/etcd/clientv3" "github.com/coreos/etcd/clientv3/concurrency" "github.com/coreos/etcd/pkg/transport" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/helper/strutil" "github.com/hashicorp/vault/physical" - log "github.com/mgutz/logxi/v1" "golang.org/x/net/context" ) diff --git a/physical/etcd/etcd3_test.go b/physical/etcd/etcd3_test.go index fbd842da1e..81369edd45 100644 --- a/physical/etcd/etcd3_test.go +++ b/physical/etcd/etcd3_test.go @@ -6,9 +6,9 @@ import ( "testing" "time" - "github.com/hashicorp/vault/helper/logformat" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/physical" - log "github.com/mgutz/logxi/v1" ) func TestEtcd3Backend(t *testing.T) { @@ -17,7 +17,7 @@ func TestEtcd3Backend(t *testing.T) { t.Skipf("Skipped. 
No etcd3 server found") } - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) b, err := NewEtcdBackend(map[string]string{ "path": fmt.Sprintf("/vault-%d", time.Now().Unix()), diff --git a/physical/etcd/etcd_test.go b/physical/etcd/etcd_test.go index d5c30bb6fc..c8b28b97dd 100644 --- a/physical/etcd/etcd_test.go +++ b/physical/etcd/etcd_test.go @@ -6,9 +6,9 @@ import ( "testing" "time" - "github.com/hashicorp/vault/helper/logformat" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/physical" - log "github.com/mgutz/logxi/v1" "github.com/coreos/etcd/client" "golang.org/x/net/context" @@ -51,7 +51,7 @@ func TestEtcdBackend(t *testing.T) { // Generate new etcd backend. The etcd address is read from ETCD_ADDR. No // need to provide it explicitly. - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) b, err := NewEtcdBackend(map[string]string{ "path": randPath, diff --git a/physical/file/file.go b/physical/file/file.go index e30a4927b6..6028bad445 100644 --- a/physical/file/file.go +++ b/physical/file/file.go @@ -10,7 +10,7 @@ import ( "strings" "sync" - log "github.com/mgutz/logxi/v1" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/helper/consts" "github.com/hashicorp/vault/helper/jsonutil" diff --git a/physical/file/file_test.go b/physical/file/file_test.go index 63f9d6f517..6ac0450b25 100644 --- a/physical/file/file_test.go +++ b/physical/file/file_test.go @@ -9,9 +9,9 @@ import ( "reflect" "testing" - "github.com/hashicorp/vault/helper/logformat" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/physical" - log "github.com/mgutz/logxi/v1" ) func TestFileBackend_Base64URLEncoding(t *testing.T) { @@ -21,7 +21,7 @@ func TestFileBackend_Base64URLEncoding(t *testing.T) { } defer os.RemoveAll(backendPath) - logger := 
logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) b, err := NewFileBackend(map[string]string{ "path": backendPath, @@ -140,7 +140,7 @@ func TestFileBackend_ValidatePath(t *testing.T) { } defer os.RemoveAll(dir) - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) b, err := NewFileBackend(map[string]string{ "path": dir, @@ -164,7 +164,7 @@ func TestFileBackend(t *testing.T) { } defer os.RemoveAll(dir) - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) b, err := NewFileBackend(map[string]string{ "path": dir, diff --git a/physical/gcs/gcs.go b/physical/gcs/gcs.go index d7a6286066..1e641effe6 100644 --- a/physical/gcs/gcs.go +++ b/physical/gcs/gcs.go @@ -11,9 +11,9 @@ import ( "time" "github.com/hashicorp/errwrap" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/helper/useragent" "github.com/hashicorp/vault/physical" - log "github.com/mgutz/logxi/v1" "cloud.google.com/go/storage" "github.com/armon/go-metrics" @@ -108,6 +108,7 @@ func NewBackend(c map[string]string, logger log.Logger) (physical.Backend, error if err != nil { return nil, errwrap.Wrapf("failed to parse chunk_size: {{err}}", err) } + // Values are specified as kb, but the API expects them as bytes. 
chunkSize = chunkSize * 1024 diff --git a/physical/gcs/gcs_ha_test.go b/physical/gcs/gcs_ha_test.go index 60126821e6..4f8940cc95 100644 --- a/physical/gcs/gcs_ha_test.go +++ b/physical/gcs/gcs_ha_test.go @@ -8,9 +8,9 @@ import ( "time" "cloud.google.com/go/storage" - "github.com/hashicorp/vault/helper/logformat" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/physical" - log "github.com/mgutz/logxi/v1" "golang.org/x/net/context" ) @@ -40,7 +40,7 @@ func TestHABackend(t *testing.T) { backend, err := NewBackend(map[string]string{ "bucket": bucket, "ha_enabled": "true", - }, logformat.NewVaultLogger(log.LevelTrace)) + }, logging.NewVaultLogger(log.Trace)) if err != nil { t.Fatal(err) } diff --git a/physical/gcs/gcs_test.go b/physical/gcs/gcs_test.go index 930178a196..2a2027308d 100644 --- a/physical/gcs/gcs_test.go +++ b/physical/gcs/gcs_test.go @@ -8,9 +8,9 @@ import ( "time" "cloud.google.com/go/storage" - "github.com/hashicorp/vault/helper/logformat" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/physical" - log "github.com/mgutz/logxi/v1" "golang.org/x/net/context" "google.golang.org/api/googleapi" ) @@ -52,7 +52,7 @@ func TestBackend(t *testing.T) { backend, err := NewBackend(map[string]string{ "bucket": bucket, "ha_enabled": "false", - }, logformat.NewVaultLogger(log.LevelTrace)) + }, logging.NewVaultLogger(log.Trace)) if err != nil { t.Fatal(err) } diff --git a/physical/inmem/cache_test.go b/physical/inmem/cache_test.go index 4394b5d524..90561f3a37 100644 --- a/physical/inmem/cache_test.go +++ b/physical/inmem/cache_test.go @@ -4,13 +4,13 @@ import ( "context" "testing" - "github.com/hashicorp/vault/helper/logformat" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/physical" - log "github.com/mgutz/logxi/v1" ) func TestCache(t *testing.T) { - logger := 
logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) inm, err := NewInmem(nil, logger) if err != nil { @@ -22,7 +22,7 @@ func TestCache(t *testing.T) { } func TestCache_Purge(t *testing.T) { - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) inm, err := NewInmem(nil, logger) if err != nil { @@ -69,7 +69,7 @@ func TestCache_Purge(t *testing.T) { } func TestCache_Disable(t *testing.T) { - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) inm, err := NewInmem(nil, logger) if err != nil { diff --git a/physical/inmem/inmem.go b/physical/inmem/inmem.go index 54ce7bec4a..139671ce6a 100644 --- a/physical/inmem/inmem.go +++ b/physical/inmem/inmem.go @@ -7,8 +7,8 @@ import ( "sync" "sync/atomic" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/physical" - log "github.com/mgutz/logxi/v1" "github.com/armon/go-radix" ) diff --git a/physical/inmem/inmem_ha.go b/physical/inmem/inmem_ha.go index 5dcacb7cd2..6755100704 100644 --- a/physical/inmem/inmem_ha.go +++ b/physical/inmem/inmem_ha.go @@ -4,8 +4,8 @@ import ( "fmt" "sync" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/physical" - log "github.com/mgutz/logxi/v1" ) type InmemHABackend struct { diff --git a/physical/inmem/inmem_ha_test.go b/physical/inmem/inmem_ha_test.go index 8288595945..36c9dc1a97 100644 --- a/physical/inmem/inmem_ha_test.go +++ b/physical/inmem/inmem_ha_test.go @@ -3,13 +3,13 @@ package inmem import ( "testing" - "github.com/hashicorp/vault/helper/logformat" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/physical" - log "github.com/mgutz/logxi/v1" ) func TestInmemHA(t *testing.T) { - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) inm, err := NewInmemHA(nil, logger) if err != nil { diff --git a/physical/inmem/inmem_test.go 
b/physical/inmem/inmem_test.go index 998061ba92..61fcf04857 100644 --- a/physical/inmem/inmem_test.go +++ b/physical/inmem/inmem_test.go @@ -3,13 +3,13 @@ package inmem import ( "testing" - "github.com/hashicorp/vault/helper/logformat" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/physical" - log "github.com/mgutz/logxi/v1" ) func TestInmem(t *testing.T) { - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) inm, err := NewInmem(nil, logger) if err != nil { diff --git a/physical/inmem/physical_view_test.go b/physical/inmem/physical_view_test.go index 1c90f3a111..1a8f5e43b8 100644 --- a/physical/inmem/physical_view_test.go +++ b/physical/inmem/physical_view_test.go @@ -4,9 +4,9 @@ import ( "context" "testing" - "github.com/hashicorp/vault/helper/logformat" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/physical" - log "github.com/mgutz/logxi/v1" ) func TestPhysicalView_impl(t *testing.T) { @@ -14,7 +14,7 @@ func TestPhysicalView_impl(t *testing.T) { } func newInmemTestBackend() (physical.Backend, error) { - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) return NewInmem(nil, logger) } diff --git a/physical/inmem/transactions_test.go b/physical/inmem/transactions_test.go index bfa21b9def..fe123100c7 100644 --- a/physical/inmem/transactions_test.go +++ b/physical/inmem/transactions_test.go @@ -8,9 +8,9 @@ import ( "testing" radix "github.com/armon/go-radix" - "github.com/hashicorp/vault/helper/logformat" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/physical" - log "github.com/mgutz/logxi/v1" ) type faultyPseudo struct { @@ -70,7 +70,7 @@ func newFaultyPseudo(logger log.Logger, faultyPaths []string) *faultyPseudo { underlying: InmemBackend{ root: radix.New(), permitPool: 
physical.NewPermitPool(1), - logger: logger, + logger: logger.Named("storage.inmembackend"), }, faultyPaths: make(map[string]struct{}, len(faultyPaths)), } @@ -81,21 +81,21 @@ func newFaultyPseudo(logger log.Logger, faultyPaths []string) *faultyPseudo { } func TestPseudo_Basic(t *testing.T) { - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) p := newFaultyPseudo(logger, nil) physical.ExerciseBackend(t, p) physical.ExerciseBackend_ListPrefix(t, p) } func TestPseudo_SuccessfulTransaction(t *testing.T) { - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) p := newFaultyPseudo(logger, nil) physical.ExerciseTransactionalBackend(t, p) } func TestPseudo_FailedTransaction(t *testing.T) { - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) p := newFaultyPseudo(logger, []string{"zip"}) txns := physical.SetupTestingTransactions(t, p) diff --git a/physical/latency.go b/physical/latency.go index 61f9729520..7aa9fab985 100644 --- a/physical/latency.go +++ b/physical/latency.go @@ -5,7 +5,7 @@ import ( "math/rand" "time" - log "github.com/mgutz/logxi/v1" + log "github.com/hashicorp/go-hclog" ) const ( @@ -37,7 +37,7 @@ func NewLatencyInjector(b Backend, latency time.Duration, jitter int, logger log if jitter < 0 || jitter > 100 { jitter = DefaultJitterPercent } - logger.Info("physical/latency: creating latency injector") + logger.Info("creating latency injector") return &LatencyInjector{ backend: b, diff --git a/physical/manta/manta.go b/physical/manta/manta.go index c868ed6358..bb17e7499c 100644 --- a/physical/manta/manta.go +++ b/physical/manta/manta.go @@ -14,12 +14,12 @@ import ( "github.com/armon/go-metrics" "github.com/hashicorp/errwrap" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/physical" "github.com/joyent/triton-go" "github.com/joyent/triton-go/authentication" "github.com/joyent/triton-go/errors" 
"github.com/joyent/triton-go/storage" - log "github.com/mgutz/logxi/v1" ) const mantaDefaultRootStore = "/stor" @@ -74,7 +74,7 @@ func NewMantaBackend(conf map[string]string, logger log.Logger) (physical.Backen return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err) } if logger.IsDebug() { - logger.Debug("manta: max_parallel set", "max_parallel", maxParInt) + logger.Debug("max_parallel set", "max_parallel", maxParInt) } } diff --git a/physical/manta/manta_test.go b/physical/manta/manta_test.go index 2bf0759cf9..ba81bef2c9 100644 --- a/physical/manta/manta_test.go +++ b/physical/manta/manta_test.go @@ -9,13 +9,13 @@ import ( "testing" "time" - "github.com/hashicorp/vault/helper/logformat" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/physical" "github.com/joyent/triton-go" "github.com/joyent/triton-go/authentication" tt "github.com/joyent/triton-go/errors" "github.com/joyent/triton-go/storage" - log "github.com/mgutz/logxi/v1" ) func TestMantaBackend(t *testing.T) { @@ -49,11 +49,11 @@ func TestMantaBackend(t *testing.T) { t.Fatalf("failed initialising Storage client: %s", err.Error()) } - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) mb := &MantaBackend{ client: client, directory: testHarnessBucket, - logger: logger, + logger: logger.Named("storage.mantabackend"), permitPool: physical.NewPermitPool(128), } diff --git a/physical/mssql/mssql.go b/physical/mssql/mssql.go index a9af982456..908c8f90be 100644 --- a/physical/mssql/mssql.go +++ b/physical/mssql/mssql.go @@ -12,9 +12,9 @@ import ( "github.com/armon/go-metrics" _ "github.com/denisenkom/go-mssqldb" "github.com/hashicorp/errwrap" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/helper/strutil" "github.com/hashicorp/vault/physical" - log "github.com/mgutz/logxi/v1" ) // Verify MSSQLBackend satisfies the correct interfaces @@ -53,7 +53,7 @@ func 
NewMSSQLBackend(conf map[string]string, logger log.Logger) (physical.Backen return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err) } if logger.IsDebug() { - logger.Debug("mysql: max_parallel set", "max_parallel", maxParInt) + logger.Debug("max_parallel set", "max_parallel", maxParInt) } } else { maxParInt = physical.DefaultParallelOperations diff --git a/physical/mssql/mssql_test.go b/physical/mssql/mssql_test.go index 7e1446e94d..9c55228018 100644 --- a/physical/mssql/mssql_test.go +++ b/physical/mssql/mssql_test.go @@ -4,9 +4,9 @@ import ( "os" "testing" - "github.com/hashicorp/vault/helper/logformat" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/physical" - log "github.com/mgutz/logxi/v1" _ "github.com/denisenkom/go-mssqldb" ) @@ -31,7 +31,7 @@ func TestMSSQLBackend(t *testing.T) { password := os.Getenv("MSSQL_PASSWORD") // Run vault tests - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) b, err := NewMSSQLBackend(map[string]string{ "server": server, diff --git a/physical/mysql/mysql.go b/physical/mysql/mysql.go index 1bdfcb2b38..ac71f09d60 100644 --- a/physical/mysql/mysql.go +++ b/physical/mysql/mysql.go @@ -13,7 +13,7 @@ import ( "strings" "time" - log "github.com/mgutz/logxi/v1" + log "github.com/hashicorp/go-hclog" "github.com/armon/go-metrics" mysql "github.com/go-sql-driver/mysql" @@ -79,7 +79,7 @@ func NewMySQLBackend(conf map[string]string, logger log.Logger) (physical.Backen return nil, errwrap.Wrapf("failed parsing max_idle_connections parameter: {{err}}", err) } if logger.IsDebug() { - logger.Debug("mysql: max_idle_connections set", "max_idle_connections", maxIdleConnInt) + logger.Debug("max_idle_connections set", "max_idle_connections", maxIdleConnInt) } } @@ -91,7 +91,7 @@ func NewMySQLBackend(conf map[string]string, logger log.Logger) (physical.Backen return nil, errwrap.Wrapf("failed parsing 
max_connection_lifetime parameter: {{err}}", err) } if logger.IsDebug() { - logger.Debug("mysql: max_connection_lifetime set", "max_connection_lifetime", maxConnLifeInt) + logger.Debug("max_connection_lifetime set", "max_connection_lifetime", maxConnLifeInt) } } @@ -103,7 +103,7 @@ func NewMySQLBackend(conf map[string]string, logger log.Logger) (physical.Backen return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err) } if logger.IsDebug() { - logger.Debug("mysql: max_parallel set", "max_parallel", maxParInt) + logger.Debug("max_parallel set", "max_parallel", maxParInt) } } else { maxParInt = physical.DefaultParallelOperations diff --git a/physical/mysql/mysql_test.go b/physical/mysql/mysql_test.go index ecf8431416..a8f5d9e511 100644 --- a/physical/mysql/mysql_test.go +++ b/physical/mysql/mysql_test.go @@ -4,9 +4,9 @@ import ( "os" "testing" - "github.com/hashicorp/vault/helper/logformat" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/physical" - log "github.com/mgutz/logxi/v1" _ "github.com/go-sql-driver/mysql" ) @@ -31,7 +31,7 @@ func TestMySQLBackend(t *testing.T) { password := os.Getenv("MYSQL_PASSWORD") // Run vault tests - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) b, err := NewMySQLBackend(map[string]string{ "address": address, diff --git a/physical/physical.go b/physical/physical.go index f6677a5616..c7a37fc3ee 100644 --- a/physical/physical.go +++ b/physical/physical.go @@ -5,7 +5,7 @@ import ( "strings" "sync" - log "github.com/mgutz/logxi/v1" + log "github.com/hashicorp/go-hclog" ) const DefaultParallelOperations = 128 diff --git a/physical/postgresql/postgresql.go b/physical/postgresql/postgresql.go index cc75e8e305..04764f4f8b 100644 --- a/physical/postgresql/postgresql.go +++ b/physical/postgresql/postgresql.go @@ -10,7 +10,7 @@ import ( "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/physical" - log 
"github.com/mgutz/logxi/v1" + log "github.com/hashicorp/go-hclog" "github.com/armon/go-metrics" "github.com/lib/pq" @@ -56,7 +56,7 @@ func NewPostgreSQLBackend(conf map[string]string, logger log.Logger) (physical.B return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err) } if logger.IsDebug() { - logger.Debug("postgres: max_parallel set", "max_parallel", maxParInt) + logger.Debug("max_parallel set", "max_parallel", maxParInt) } } else { maxParInt = physical.DefaultParallelOperations diff --git a/physical/postgresql/postgresql_test.go b/physical/postgresql/postgresql_test.go index 940d0e253a..1341533d19 100644 --- a/physical/postgresql/postgresql_test.go +++ b/physical/postgresql/postgresql_test.go @@ -4,9 +4,9 @@ import ( "os" "testing" - "github.com/hashicorp/vault/helper/logformat" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/physical" - log "github.com/mgutz/logxi/v1" _ "github.com/lib/pq" ) @@ -23,7 +23,7 @@ func TestPostgreSQLBackend(t *testing.T) { } // Run vault tests - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) b, err := NewPostgreSQLBackend(map[string]string{ "connection_url": connURL, diff --git a/physical/s3/s3.go b/physical/s3/s3.go index fb8795dbbb..da14c1a4e8 100644 --- a/physical/s3/s3.go +++ b/physical/s3/s3.go @@ -12,7 +12,7 @@ import ( "strings" "time" - log "github.com/mgutz/logxi/v1" + log "github.com/hashicorp/go-hclog" "github.com/armon/go-metrics" "github.com/aws/aws-sdk-go/aws" @@ -131,7 +131,7 @@ func NewS3Backend(conf map[string]string, logger log.Logger) (physical.Backend, return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err) } if logger.IsDebug() { - logger.Debug("s3: max_parallel set", "max_parallel", maxParInt) + logger.Debug("max_parallel set", "max_parallel", maxParInt) } } diff --git a/physical/s3/s3_test.go 
b/physical/s3/s3_test.go index dbe4c93339..b1c195564f 100644 --- a/physical/s3/s3_test.go +++ b/physical/s3/s3_test.go @@ -7,10 +7,10 @@ import ( "testing" "time" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/helper/awsutil" - "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/physical" - log "github.com/mgutz/logxi/v1" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" @@ -79,7 +79,7 @@ func TestS3Backend(t *testing.T) { } }() - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) // This uses the same logic to find the AWS credentials as we did at the beginning of the test b, err := NewS3Backend(map[string]string{ diff --git a/physical/spanner/spanner.go b/physical/spanner/spanner.go index 48f654494d..59a43d5092 100644 --- a/physical/spanner/spanner.go +++ b/physical/spanner/spanner.go @@ -10,10 +10,10 @@ import ( metrics "github.com/armon/go-metrics" "github.com/hashicorp/errwrap" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/helper/strutil" "github.com/hashicorp/vault/helper/useragent" "github.com/hashicorp/vault/physical" - log "github.com/mgutz/logxi/v1" "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/grpc/codes" @@ -94,7 +94,7 @@ type Backend struct { // configuration. This uses the official Golang Cloud SDK and therefore supports // specifying credentials via envvars, credential files, etc. 
func NewBackend(c map[string]string, logger log.Logger) (physical.Backend, error) { - logger.Debug("physical/spanner: configuring backend") + logger.Debug("configuring backend") // Database name database := os.Getenv(envDatabase) @@ -143,14 +143,14 @@ func NewBackend(c map[string]string, logger log.Logger) (physical.Backend, error return nil, errwrap.Wrapf("failed to parse max_parallel: {{err}}", err) } - logger.Debug("physical/spanner: configuration", + logger.Debug("configuration", "database", database, "table", table, "haEnabled", haEnabled, "haTable", haTable, "maxParallel", maxParallel, ) - logger.Debug("physical/spanner: creating client") + logger.Debug("creating client") ctx := context.Background() client, err := spanner.NewClient(ctx, database, diff --git a/physical/spanner/spanner_ha_test.go b/physical/spanner/spanner_ha_test.go index 8c17c68016..2e36b0143d 100644 --- a/physical/spanner/spanner_ha_test.go +++ b/physical/spanner/spanner_ha_test.go @@ -5,9 +5,9 @@ import ( "testing" "cloud.google.com/go/spanner" - "github.com/hashicorp/vault/helper/logformat" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/physical" - log "github.com/mgutz/logxi/v1" "golang.org/x/net/context" ) @@ -43,7 +43,7 @@ func TestHABackend(t *testing.T) { "table": table, "ha_table": haTable, "ha_enabled": "true", - }, logformat.NewVaultLogger(log.LevelTrace)) + }, logging.NewVaultLogger(log.Debug)) if err != nil { t.Fatal(err) } diff --git a/physical/spanner/spanner_test.go b/physical/spanner/spanner_test.go index 59e784942b..18f7ccf13f 100644 --- a/physical/spanner/spanner_test.go +++ b/physical/spanner/spanner_test.go @@ -5,9 +5,9 @@ import ( "testing" "cloud.google.com/go/spanner" - "github.com/hashicorp/vault/helper/logformat" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/physical" - log "github.com/mgutz/logxi/v1" "golang.org/x/net/context" ) @@ -46,7 
+46,7 @@ func TestBackend(t *testing.T) { "database": database, "table": table, "ha_enabled": "false", - }, logformat.NewVaultLogger(log.LevelTrace)) + }, logging.NewVaultLogger(log.Debug)) if err != nil { t.Fatal(err) } diff --git a/physical/swift/swift.go b/physical/swift/swift.go index 91d6f1263a..6888eca73f 100644 --- a/physical/swift/swift.go +++ b/physical/swift/swift.go @@ -9,7 +9,7 @@ import ( "strings" "time" - log "github.com/mgutz/logxi/v1" + log "github.com/hashicorp/go-hclog" "github.com/armon/go-metrics" "github.com/hashicorp/errwrap" @@ -113,7 +113,7 @@ func NewSwiftBackend(conf map[string]string, logger log.Logger) (physical.Backen return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err) } if logger.IsDebug() { - logger.Debug("swift: max_parallel set", "max_parallel", maxParInt) + logger.Debug("max_parallel set", "max_parallel", maxParInt) } } diff --git a/physical/swift/swift_test.go b/physical/swift/swift_test.go index 5aa2ec9581..70980aed68 100644 --- a/physical/swift/swift_test.go +++ b/physical/swift/swift_test.go @@ -6,10 +6,10 @@ import ( "testing" "time" - log "github.com/mgutz/logxi/v1" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-cleanhttp" - "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/physical" "github.com/ncw/swift" ) @@ -65,7 +65,7 @@ func TestSwiftBackend(t *testing.T) { } }() - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) b, err := NewSwiftBackend(map[string]string{ "username": username, diff --git a/physical/zookeeper/zookeeper.go b/physical/zookeeper/zookeeper.go index f4cd8850af..d152091971 100644 --- a/physical/zookeeper/zookeeper.go +++ b/physical/zookeeper/zookeeper.go @@ -9,8 +9,8 @@ import ( "sync" "time" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/physical" - log "github.com/mgutz/logxi/v1" metrics "github.com/armon/go-metrics" 
"github.com/samuel/go-zookeeper/zk" diff --git a/physical/zookeeper/zookeeper_test.go b/physical/zookeeper/zookeeper_test.go index a85c27ccd8..7e70899eee 100644 --- a/physical/zookeeper/zookeeper_test.go +++ b/physical/zookeeper/zookeeper_test.go @@ -6,9 +6,9 @@ import ( "testing" "time" - "github.com/hashicorp/vault/helper/logformat" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/physical" - log "github.com/mgutz/logxi/v1" "github.com/samuel/go-zookeeper/zk" ) @@ -44,7 +44,7 @@ func TestZooKeeperBackend(t *testing.T) { client.Close() }() - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) b, err := NewZooKeeperBackend(map[string]string{ "address": addr + "," + addr, @@ -84,7 +84,7 @@ func TestZooKeeperHABackend(t *testing.T) { client.Close() }() - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Debug) b, err := NewZooKeeperBackend(map[string]string{ "address": addr + "," + addr, diff --git a/vault/audit.go b/vault/audit.go index 3bd9141e05..6ad3e44e68 100644 --- a/vault/audit.go +++ b/vault/audit.go @@ -110,7 +110,7 @@ func (c *Core) enableAudit(ctx context.Context, entry *MountEntry) error { // Register the backend c.auditBroker.Register(entry.Path, backend, view) if c.logger.IsInfo() { - c.logger.Info("core: enabled audit backend", "path", entry.Path, "type", entry.Type) + c.logger.Info("enabled audit backend", "path", entry.Path, "type", entry.Type) } return nil } @@ -152,7 +152,7 @@ func (c *Core) disableAudit(ctx context.Context, path string) (bool, error) { // Unmount the backend c.auditBroker.Deregister(path) if c.logger.IsInfo() { - c.logger.Info("core: disabled audit backend", "path", path) + c.logger.Info("disabled audit backend", "path", path) } return true, nil @@ -166,12 +166,12 @@ func (c *Core) loadAudits(ctx context.Context) error { // Load the existing audit table raw, err := c.barrier.Get(ctx, 
coreAuditConfigPath) if err != nil { - c.logger.Error("core: failed to read audit table", "error", err) + c.logger.Error("failed to read audit table", "error", err) return errLoadAuditFailed } rawLocal, err := c.barrier.Get(ctx, coreLocalAuditConfigPath) if err != nil { - c.logger.Error("core: failed to read local audit table", "error", err) + c.logger.Error("failed to read local audit table", "error", err) return errLoadAuditFailed } @@ -180,7 +180,7 @@ func (c *Core) loadAudits(ctx context.Context) error { if raw != nil { if err := jsonutil.DecodeJSON(raw.Value, auditTable); err != nil { - c.logger.Error("core: failed to decode audit table", "error", err) + c.logger.Error("failed to decode audit table", "error", err) return errLoadAuditFailed } c.audit = auditTable @@ -194,7 +194,7 @@ func (c *Core) loadAudits(ctx context.Context) error { if rawLocal != nil { if err := jsonutil.DecodeJSON(rawLocal.Value, localAuditTable); err != nil { - c.logger.Error("core: failed to decode local audit table", "error", err) + c.logger.Error("failed to decode local audit table", "error", err) return errLoadAuditFailed } if localAuditTable != nil && len(localAuditTable.Entries) > 0 { @@ -237,13 +237,13 @@ func (c *Core) loadAudits(ctx context.Context) error { // persistAudit is used to persist the audit table after modification func (c *Core) persistAudit(ctx context.Context, table *MountTable, localOnly bool) error { if table.Type != auditTableType { - c.logger.Error("core: given table to persist has wrong type", "actual_type", table.Type, "expected_type", auditTableType) + c.logger.Error("given table to persist has wrong type", "actual_type", table.Type, "expected_type", auditTableType) return fmt.Errorf("invalid table type given, not persisting") } for _, entry := range table.Entries { if entry.Table != table.Type { - c.logger.Error("core: given entry to persist in audit table has wrong table value", "path", entry.Path, "entry_table_type", entry.Table, "actual_type", 
table.Type) + c.logger.Error("given entry to persist in audit table has wrong table value", "path", entry.Path, "entry_table_type", entry.Table, "actual_type", table.Type) return fmt.Errorf("invalid audit entry found, not persisting") } } @@ -268,7 +268,7 @@ func (c *Core) persistAudit(ctx context.Context, table *MountTable, localOnly bo // Marshal the table compressedBytes, err := jsonutil.EncodeJSONAndCompress(nonLocalAudit, nil) if err != nil { - c.logger.Error("core: failed to encode and/or compress audit table", "error", err) + c.logger.Error("failed to encode and/or compress audit table", "error", err) return err } @@ -280,7 +280,7 @@ func (c *Core) persistAudit(ctx context.Context, table *MountTable, localOnly bo // Write to the physical backend if err := c.barrier.Put(ctx, entry); err != nil { - c.logger.Error("core: failed to persist audit table", "error", err) + c.logger.Error("failed to persist audit table", "error", err) return err } } @@ -288,7 +288,7 @@ func (c *Core) persistAudit(ctx context.Context, table *MountTable, localOnly bo // Repeat with local audit compressedBytes, err := jsonutil.EncodeJSONAndCompress(localAudit, nil) if err != nil { - c.logger.Error("core: failed to encode and/or compress local audit table", "error", err) + c.logger.Error("failed to encode and/or compress local audit table", "error", err) return err } @@ -298,7 +298,7 @@ func (c *Core) persistAudit(ctx context.Context, table *MountTable, localOnly bo } if err := c.barrier.Put(ctx, entry); err != nil { - c.logger.Error("core: failed to persist local audit table", "error", err) + c.logger.Error("failed to persist local audit table", "error", err) return err } @@ -308,7 +308,7 @@ func (c *Core) persistAudit(ctx context.Context, table *MountTable, localOnly bo // setupAudit is invoked after we've loaded the audit able to // initialize the audit backends func (c *Core) setupAudits(ctx context.Context) error { - broker := NewAuditBroker(c.logger) + broker := 
NewAuditBroker(c.logger.ResetNamed("audit")) c.auditLock.Lock() defer c.auditLock.Unlock() @@ -329,11 +329,11 @@ func (c *Core) setupAudits(ctx context.Context) error { // Initialize the backend backend, err := c.newAuditBackend(ctx, entry, view, entry.Options) if err != nil { - c.logger.Error("core: failed to create audit entry", "path", entry.Path, "error", err) + c.logger.Error("failed to create audit entry", "path", entry.Path, "error", err) continue } if backend == nil { - c.logger.Error("core: created audit entry was nil", "path", entry.Path, "type", entry.Type) + c.logger.Error("created audit entry was nil", "path", entry.Path, "type", entry.Type) continue } @@ -377,7 +377,7 @@ func (c *Core) removeAuditReloadFunc(entry *MountEntry) { c.reloadFuncsLock.Lock() if c.logger.IsDebug() { - c.logger.Debug("audit: removing reload function", "path", entry.Path) + c.logger.ResetNamed("audit").Debug("removing reload function", "path", entry.Path) } delete(c.reloadFuncs, key) @@ -410,37 +410,39 @@ func (c *Core) newAuditBackend(ctx context.Context, entry *MountEntry, view logi return nil, fmt.Errorf("nil backend returned from %q factory function", entry.Type) } + auditLogger := c.logger.ResetNamed("audit") + switch entry.Type { case "file": key := "audit_file|" + entry.Path c.reloadFuncsLock.Lock() - if c.logger.IsDebug() { - c.logger.Debug("audit: adding reload function", "path", entry.Path) + if auditLogger.IsDebug() { + auditLogger.Debug("adding reload function", "path", entry.Path) if entry.Options != nil { - c.logger.Debug("audit: file backend options", "path", entry.Path, "file_path", entry.Options["file_path"]) + auditLogger.Debug("file backend options", "path", entry.Path, "file_path", entry.Options["file_path"]) } } c.reloadFuncs[key] = append(c.reloadFuncs[key], func(map[string]interface{}) error { - if c.logger.IsInfo() { - c.logger.Info("audit: reloading file audit backend", "path", entry.Path) + if auditLogger.IsInfo() { + auditLogger.Info("reloading file 
audit backend", "path", entry.Path) } return be.Reload(ctx) }) c.reloadFuncsLock.Unlock() case "socket": - if c.logger.IsDebug() { + if auditLogger.IsDebug() { if entry.Options != nil { - c.logger.Debug("audit: socket backend options", "path", entry.Path, "address", entry.Options["address"], "socket type", entry.Options["socket_type"]) + auditLogger.Debug("socket backend options", "path", entry.Path, "address", entry.Options["address"], "socket type", entry.Options["socket_type"]) } } case "syslog": - if c.logger.IsDebug() { + if auditLogger.IsDebug() { if entry.Options != nil { - c.logger.Debug("audit: syslog backend options", "path", entry.Path, "facility", entry.Options["facility"], "tag", entry.Options["tag"]) + auditLogger.Debug("syslog backend options", "path", entry.Path, "facility", entry.Options["facility"], "tag", entry.Options["tag"]) } } } diff --git a/vault/audit_broker.go b/vault/audit_broker.go index d39856d8d6..8dc1b909c4 100644 --- a/vault/audit_broker.go +++ b/vault/audit_broker.go @@ -7,9 +7,9 @@ import ( "time" metrics "github.com/armon/go-metrics" + log "github.com/hashicorp/go-hclog" multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/vault/audit" - log "github.com/mgutz/logxi/v1" ) type backendEntry struct { @@ -82,7 +82,7 @@ func (a *AuditBroker) LogRequest(ctx context.Context, in *audit.LogInput, header defer func() { if r := recover(); r != nil { - a.logger.Error("audit: panic during logging", "request_path", in.Request.Path, "error", r) + a.logger.Error("panic during logging", "request_path", in.Request.Path, "error", r) retErr = multierror.Append(retErr, fmt.Errorf("panic generating audit log")) } @@ -96,7 +96,7 @@ func (a *AuditBroker) LogRequest(ctx context.Context, in *audit.LogInput, header // All logged requests must have an identifier //if req.ID == "" { - // a.logger.Error("audit: missing identifier in request object", "request_path", req.Path) + // a.logger.Error("missing identifier in request object", 
"request_path", req.Path) // retErr = multierror.Append(retErr, fmt.Errorf("missing identifier in request object: %s", req.Path)) // return //} @@ -112,7 +112,7 @@ func (a *AuditBroker) LogRequest(ctx context.Context, in *audit.LogInput, header in.Request.Headers = nil transHeaders, thErr := headersConfig.ApplyConfig(ctx, headers, be.backend.GetHash) if thErr != nil { - a.logger.Error("audit: backend failed to include headers", "backend", name, "error", thErr) + a.logger.Error("backend failed to include headers", "backend", name, "error", thErr) continue } in.Request.Headers = transHeaders @@ -121,7 +121,7 @@ func (a *AuditBroker) LogRequest(ctx context.Context, in *audit.LogInput, header lrErr := be.backend.LogRequest(ctx, in) metrics.MeasureSince([]string{"audit", name, "log_request"}, start) if lrErr != nil { - a.logger.Error("audit: backend failed to log request", "backend", name, "error", lrErr) + a.logger.Error("backend failed to log request", "backend", name, "error", lrErr) } else { anyLogged = true } @@ -144,7 +144,7 @@ func (a *AuditBroker) LogResponse(ctx context.Context, in *audit.LogInput, heade defer func() { if r := recover(); r != nil { - a.logger.Error("audit: panic during logging", "request_path", in.Request.Path, "error", r) + a.logger.Error("panic during logging", "request_path", in.Request.Path, "error", r) retErr = multierror.Append(retErr, fmt.Errorf("panic generating audit log")) } @@ -168,7 +168,7 @@ func (a *AuditBroker) LogResponse(ctx context.Context, in *audit.LogInput, heade in.Request.Headers = nil transHeaders, thErr := headersConfig.ApplyConfig(ctx, headers, be.backend.GetHash) if thErr != nil { - a.logger.Error("audit: backend failed to include headers", "backend", name, "error", thErr) + a.logger.Error("backend failed to include headers", "backend", name, "error", thErr) continue } in.Request.Headers = transHeaders @@ -177,7 +177,7 @@ func (a *AuditBroker) LogResponse(ctx context.Context, in *audit.LogInput, heade lrErr := 
be.backend.LogResponse(ctx, in) metrics.MeasureSince([]string{"audit", name, "log_response"}, start) if lrErr != nil { - a.logger.Error("audit: backend failed to log response", "backend", name, "error", lrErr) + a.logger.Error("backend failed to log response", "backend", name, "error", lrErr) } else { anyLogged = true } diff --git a/vault/audit_test.go b/vault/audit_test.go index b40bba9878..2b1e36fbc2 100644 --- a/vault/audit_test.go +++ b/vault/audit_test.go @@ -12,13 +12,13 @@ import ( "errors" "github.com/hashicorp/errwrap" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/audit" "github.com/hashicorp/vault/helper/jsonutil" - "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/helper/salt" "github.com/hashicorp/vault/logical" - log "github.com/mgutz/logxi/v1" "github.com/mitchellh/copystructure" ) @@ -432,7 +432,7 @@ func verifyDefaultAuditTable(t *testing.T, table *MountTable) { } func TestAuditBroker_LogRequest(t *testing.T) { - l := logformat.NewVaultLogger(log.LevelTrace) + l := logging.NewVaultLogger(log.Trace) b := NewAuditBroker(l) a1 := &NoopAudit{} a2 := &NoopAudit{} @@ -518,7 +518,7 @@ func TestAuditBroker_LogRequest(t *testing.T) { } func TestAuditBroker_LogResponse(t *testing.T) { - l := logformat.NewVaultLogger(log.LevelTrace) + l := logging.NewVaultLogger(log.Trace) b := NewAuditBroker(l) a1 := &NoopAudit{} a2 := &NoopAudit{} @@ -622,7 +622,7 @@ func TestAuditBroker_LogResponse(t *testing.T) { } func TestAuditBroker_AuditHeaders(t *testing.T) { - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Trace) b := NewAuditBroker(logger) _, barrier, _ := mockBarrier(t) view := NewBarrierView(barrier, "headers/") diff --git a/vault/auth.go b/vault/auth.go index 523dab2b10..f5d2dbc30d 100644 --- a/vault/auth.go +++ b/vault/auth.go @@ -146,7 +146,7 @@ func (c *Core) enableCredential(ctx context.Context, 
entry *MountEntry) error { } if c.logger.IsInfo() { - c.logger.Info("core: enabled credential backend", "path", entry.Path, "type", entry.Type) + c.logger.Info("enabled credential backend", "path", entry.Path, "type", entry.Type) } return nil } @@ -205,7 +205,7 @@ func (c *Core) disableCredential(ctx context.Context, path string) error { case entry.Local, !c.ReplicationState().HasState(consts.ReplicationPerformanceSecondary): // Have writable storage, remove the whole thing if err := logical.ClearView(ctx, view); err != nil { - c.logger.Error("core: failed to clear view for path being unmounted", "error", err, "path", path) + c.logger.Error("failed to clear view for path being unmounted", "error", err, "path", path) return err } @@ -216,7 +216,7 @@ func (c *Core) disableCredential(ctx context.Context, path string) error { return err } if c.logger.IsInfo() { - c.logger.Info("core: disabled credential backend", "path", path) + c.logger.Info("disabled credential backend", "path", path) } return nil } @@ -230,7 +230,7 @@ func (c *Core) removeCredEntry(ctx context.Context, path string) error { newTable := c.auth.shallowClone() entry := newTable.remove(path) if entry == nil { - c.logger.Error("core: nil entry found removing entry in auth table", "path", path) + c.logger.Error("nil entry found removing entry in auth table", "path", path) return logical.CodedError(500, "failed to remove entry in auth table") } @@ -296,12 +296,12 @@ func (c *Core) loadCredentials(ctx context.Context) error { // Load the existing mount table raw, err := c.barrier.Get(ctx, coreAuthConfigPath) if err != nil { - c.logger.Error("core: failed to read auth table", "error", err) + c.logger.Error("failed to read auth table", "error", err) return errLoadAuthFailed } rawLocal, err := c.barrier.Get(ctx, coreLocalAuthConfigPath) if err != nil { - c.logger.Error("core: failed to read local auth table", "error", err) + c.logger.Error("failed to read local auth table", "error", err) return 
errLoadAuthFailed } @@ -310,7 +310,7 @@ func (c *Core) loadCredentials(ctx context.Context) error { if raw != nil { if err := jsonutil.DecodeJSON(raw.Value, authTable); err != nil { - c.logger.Error("core: failed to decode auth table", "error", err) + c.logger.Error("failed to decode auth table", "error", err) return errLoadAuthFailed } c.auth = authTable @@ -324,7 +324,7 @@ func (c *Core) loadCredentials(ctx context.Context) error { if rawLocal != nil { if err := jsonutil.DecodeJSON(rawLocal.Value, localAuthTable); err != nil { - c.logger.Error("core: failed to decode local auth table", "error", err) + c.logger.Error("failed to decode local auth table", "error", err) return errLoadAuthFailed } if localAuthTable != nil && len(localAuthTable.Entries) > 0 { @@ -370,7 +370,7 @@ func (c *Core) loadCredentials(ctx context.Context) error { } if err := c.persistAuth(ctx, c.auth, false); err != nil { - c.logger.Error("core: failed to persist auth table", "error", err) + c.logger.Error("failed to persist auth table", "error", err) return errLoadAuthFailed } return nil @@ -379,13 +379,13 @@ func (c *Core) loadCredentials(ctx context.Context) error { // persistAuth is used to persist the auth table after modification func (c *Core) persistAuth(ctx context.Context, table *MountTable, localOnly bool) error { if table.Type != credentialTableType { - c.logger.Error("core: given table to persist has wrong type", "actual_type", table.Type, "expected_type", credentialTableType) + c.logger.Error("given table to persist has wrong type", "actual_type", table.Type, "expected_type", credentialTableType) return fmt.Errorf("invalid table type given, not persisting") } for _, entry := range table.Entries { if entry.Table != table.Type { - c.logger.Error("core: given entry to persist in auth table has wrong table value", "path", entry.Path, "entry_table_type", entry.Table, "actual_type", table.Type) + c.logger.Error("given entry to persist in auth table has wrong table value", "path", 
entry.Path, "entry_table_type", entry.Table, "actual_type", table.Type) return fmt.Errorf("invalid auth entry found, not persisting") } } @@ -410,7 +410,7 @@ func (c *Core) persistAuth(ctx context.Context, table *MountTable, localOnly boo // Marshal the table compressedBytes, err := jsonutil.EncodeJSONAndCompress(nonLocalAuth, nil) if err != nil { - c.logger.Error("core: failed to encode and/or compress auth table", "error", err) + c.logger.Error("failed to encode and/or compress auth table", "error", err) return err } @@ -422,7 +422,7 @@ func (c *Core) persistAuth(ctx context.Context, table *MountTable, localOnly boo // Write to the physical backend if err := c.barrier.Put(ctx, entry); err != nil { - c.logger.Error("core: failed to persist auth table", "error", err) + c.logger.Error("failed to persist auth table", "error", err) return err } } @@ -430,7 +430,7 @@ func (c *Core) persistAuth(ctx context.Context, table *MountTable, localOnly boo // Repeat with local auth compressedBytes, err := jsonutil.EncodeJSONAndCompress(localAuth, nil) if err != nil { - c.logger.Error("core: failed to encode and/or compress local auth table", "error", err) + c.logger.Error("failed to encode and/or compress local auth table", "error", err) return err } @@ -440,7 +440,7 @@ func (c *Core) persistAuth(ctx context.Context, table *MountTable, localOnly boo } if err := c.barrier.Put(ctx, entry); err != nil { - c.logger.Error("core: failed to persist local auth table", "error", err) + c.logger.Error("failed to persist local auth table", "error", err) return err } @@ -475,12 +475,12 @@ func (c *Core) setupCredentials(ctx context.Context) error { backend, err = c.newCredentialBackend(ctx, entry, sysView, view) if err != nil { - c.logger.Error("core: failed to create credential entry", "path", entry.Path, "error", err) + c.logger.Error("failed to create credential entry", "path", entry.Path, "error", err) if entry.Type == "plugin" { // If we encounter an error instantiating the backend due 
to an error, // skip backend initialization but register the entry to the mount table // to preserve storage and path. - c.logger.Warn("core: skipping plugin-based credential entry", "path", entry.Path) + c.logger.Warn("skipping plugin-based credential entry", "path", entry.Path) goto ROUTER_MOUNT } return errLoadAuthFailed @@ -500,7 +500,7 @@ func (c *Core) setupCredentials(ctx context.Context) error { path := credentialRoutePrefix + entry.Path err = c.router.Mount(backend, path, entry, view) if err != nil { - c.logger.Error("core: failed to mount auth entry", "path", entry.Path, "error", err) + c.logger.Error("failed to mount auth entry", "path", entry.Path, "error", err) return errLoadAuthFailed } @@ -569,7 +569,7 @@ func (c *Core) newCredentialBackend(ctx context.Context, entry *MountEntry, sysV config := &logical.BackendConfig{ StorageView: view, - Logger: c.logger, + Logger: c.logger.ResetNamed(fmt.Sprintf("auth.%s.%s", t, entry.Accessor)), Config: conf, System: sysView, BackendUUID: entry.BackendAwareUUID, diff --git a/vault/barrier_aes_gcm_test.go b/vault/barrier_aes_gcm_test.go index 410e5b47de..71270e2626 100644 --- a/vault/barrier_aes_gcm_test.go +++ b/vault/barrier_aes_gcm_test.go @@ -6,14 +6,14 @@ import ( "encoding/json" "testing" - "github.com/hashicorp/vault/helper/logformat" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/physical" "github.com/hashicorp/vault/physical/inmem" - log "github.com/mgutz/logxi/v1" ) var ( - logger = logformat.NewVaultLogger(log.LevelTrace) + logger = logging.NewVaultLogger(log.Trace) ) // mockBarrier returns a physical backend, security barrier, and master key diff --git a/vault/cluster.go b/vault/cluster.go index 9e5a9d8eaf..91ab84f880 100644 --- a/vault/cluster.go +++ b/vault/cluster.go @@ -106,19 +106,19 @@ func (c *Core) loadLocalClusterTLS(adv activeAdvertisement) (retErr error) { return nil case adv.ClusterKeyParams == nil: - c.logger.Error("core: no 
key params found loading local cluster TLS information") + c.logger.Error("no key params found loading local cluster TLS information") return fmt.Errorf("no local cluster key params found") case adv.ClusterKeyParams.X == nil, adv.ClusterKeyParams.Y == nil, adv.ClusterKeyParams.D == nil: - c.logger.Error("core: failed to parse local cluster key due to missing params") + c.logger.Error("failed to parse local cluster key due to missing params") return fmt.Errorf("failed to parse local cluster key") case adv.ClusterKeyParams.Type != corePrivateKeyTypeP521: - c.logger.Error("core: unknown local cluster key type", "key_type", adv.ClusterKeyParams.Type) + c.logger.Error("unknown local cluster key type", "key_type", adv.ClusterKeyParams.Type) return fmt.Errorf("failed to find valid local cluster key type") case adv.ClusterCert == nil || len(adv.ClusterCert) == 0: - c.logger.Error("core: no local cluster cert found") + c.logger.Error("no local cluster cert found") return fmt.Errorf("no local cluster cert found") } @@ -138,7 +138,7 @@ func (c *Core) loadLocalClusterTLS(adv activeAdvertisement) (retErr error) { cert, err := x509.ParseCertificate(adv.ClusterCert) if err != nil { - c.logger.Error("core: failed parsing local cluster certificate", "error", err) + c.logger.Error("failed parsing local cluster certificate", "error", err) return fmt.Errorf("error parsing local cluster certificate: %v", err) } @@ -158,7 +158,7 @@ func (c *Core) setupCluster(ctx context.Context) error { // Check if storage index is already present or not cluster, err := c.Cluster(ctx) if err != nil { - c.logger.Error("core: failed to get cluster details", "error", err) + c.logger.Error("failed to get cluster details", "error", err) return err } @@ -171,10 +171,10 @@ func (c *Core) setupCluster(ctx context.Context) error { if cluster.Name == "" { // If cluster name is not supplied, generate one if c.clusterName == "" { - c.logger.Trace("core: cluster name not found/set, generating new") + 
c.logger.Debug("cluster name not found/set, generating new") clusterNameBytes, err := uuid.GenerateRandomBytes(4) if err != nil { - c.logger.Error("core: failed to generate cluster name", "error", err) + c.logger.Error("failed to generate cluster name", "error", err) return err } @@ -183,21 +183,21 @@ func (c *Core) setupCluster(ctx context.Context) error { cluster.Name = c.clusterName if c.logger.IsDebug() { - c.logger.Debug("core: cluster name set", "name", cluster.Name) + c.logger.Debug("cluster name set", "name", cluster.Name) } modified = true } if cluster.ID == "" { - c.logger.Trace("core: cluster ID not found, generating new") + c.logger.Debug("cluster ID not found, generating new") // Generate a clusterID cluster.ID, err = uuid.GenerateUUID() if err != nil { - c.logger.Error("core: failed to generate cluster identifier", "error", err) + c.logger.Error("failed to generate cluster identifier", "error", err) return err } if c.logger.IsDebug() { - c.logger.Debug("core: cluster ID set", "id", cluster.ID) + c.logger.Debug("cluster ID set", "id", cluster.ID) } modified = true } @@ -206,10 +206,10 @@ func (c *Core) setupCluster(ctx context.Context) error { if c.ha != nil { // Create a private key if c.localClusterPrivateKey.Load().(*ecdsa.PrivateKey) == nil { - c.logger.Trace("core: generating cluster private key") + c.logger.Trace("generating cluster private key") key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) if err != nil { - c.logger.Error("core: failed to generate local cluster key", "error", err) + c.logger.Error("failed to generate local cluster key", "error", err) return err } @@ -218,7 +218,7 @@ func (c *Core) setupCluster(ctx context.Context) error { // Create a certificate if c.localClusterCert.Load().([]byte) == nil { - c.logger.Trace("core: generating local cluster certificate") + c.logger.Debug("generating local cluster certificate") host, err := uuid.GenerateUUID() if err != nil { @@ -245,13 +245,13 @@ func (c *Core) setupCluster(ctx 
context.Context) error { certBytes, err := x509.CreateCertificate(rand.Reader, template, template, c.localClusterPrivateKey.Load().(*ecdsa.PrivateKey).Public(), c.localClusterPrivateKey.Load().(*ecdsa.PrivateKey)) if err != nil { - c.logger.Error("core: error generating self-signed cert", "error", err) + c.logger.Error("error generating self-signed cert", "error", err) return errwrap.Wrapf("unable to generate local cluster certificate: {{err}}", err) } parsedCert, err := x509.ParseCertificate(certBytes) if err != nil { - c.logger.Error("core: error parsing self-signed cert", "error", err) + c.logger.Error("error parsing self-signed cert", "error", err) return errwrap.Wrapf("error parsing generated certificate: {{err}}", err) } @@ -264,7 +264,7 @@ func (c *Core) setupCluster(ctx context.Context) error { // Encode the cluster information into as a JSON string rawCluster, err := json.Marshal(cluster) if err != nil { - c.logger.Error("core: failed to encode cluster details", "error", err) + c.logger.Error("failed to encode cluster details", "error", err) return err } @@ -274,7 +274,7 @@ func (c *Core) setupCluster(ctx context.Context) error { Value: rawCluster, }) if err != nil { - c.logger.Error("core: failed to store cluster details", "error", err) + c.logger.Error("failed to store cluster details", "error", err) return err } } @@ -288,16 +288,16 @@ func (c *Core) setupCluster(ctx context.Context) error { // be built in the same mechanism or started independently. 
func (c *Core) startClusterListener(ctx context.Context) error { if c.clusterAddr == "" { - c.logger.Info("core: clustering disabled, not starting listeners") + c.logger.Info("clustering disabled, not starting listeners") return nil } if c.clusterListenerAddrs == nil || len(c.clusterListenerAddrs) == 0 { - c.logger.Warn("core: clustering not disabled but no addresses to listen on") + c.logger.Warn("clustering not disabled but no addresses to listen on") return fmt.Errorf("cluster addresses not found") } - c.logger.Trace("core: starting cluster listeners") + c.logger.Debug("starting cluster listeners") err := c.startForwarding(ctx) if err != nil { @@ -311,15 +311,16 @@ func (c *Core) startClusterListener(ctx context.Context) error { // assumed that the state lock is held while this is run. func (c *Core) stopClusterListener() { if c.clusterAddr == "" { - c.logger.Trace("core: clustering disabled, not stopping listeners") + + c.logger.Debug("clustering disabled, not stopping listeners") return } if !c.clusterListenersRunning { - c.logger.Info("core: cluster listeners not running") + c.logger.Info("cluster listeners not running") return } - c.logger.Info("core: stopping cluster listeners") + c.logger.Info("stopping cluster listeners") // Tell the goroutine managing the listeners to perform the shutdown // process @@ -328,11 +329,12 @@ func (c *Core) stopClusterListener() { // The reason for this loop-de-loop is that we may be unsealing again // quickly, and if the listeners are not yet closed, we will get socket // bind errors. This ensures proper ordering. 
- c.logger.Trace("core: waiting for success notification while stopping cluster listeners") + + c.logger.Debug("waiting for success notification while stopping cluster listeners") <-c.clusterListenerShutdownSuccessCh c.clusterListenersRunning = false - c.logger.Info("core: cluster listeners successfully shut down") + c.logger.Info("cluster listeners successfully shut down") } // ClusterTLSConfig generates a TLS configuration based on the local/replicated @@ -352,8 +354,6 @@ func (c *Core) ClusterTLSConfig(ctx context.Context, repClusters *ReplicatedClus localCert := make([]byte, len(currCert)) copy(localCert, currCert) - //c.logger.Trace("core: performing cert name lookup", "hello_server_name", clientHello.ServerName, "local_cluster_cert_name", parsedCert.Subject.CommonName) - return &tls.Certificate{ Certificate: [][]byte{localCert}, PrivateKey: c.localClusterPrivateKey.Load().(*ecdsa.PrivateKey), @@ -363,7 +363,6 @@ func (c *Core) ClusterTLSConfig(ctx context.Context, repClusters *ReplicatedClus } clientLookup := func(requestInfo *tls.CertificateRequestInfo) (*tls.Certificate, error) { - //c.logger.Trace("core: performing client cert lookup") if len(requestInfo.AcceptableCAs) != 1 { return nil, fmt.Errorf("expected only a single acceptable CA") @@ -385,7 +384,7 @@ func (c *Core) ClusterTLSConfig(ctx context.Context, repClusters *ReplicatedClus } serverConfigLookup := func(clientHello *tls.ClientHelloInfo) (*tls.Config, error) { - //c.logger.Trace("core: performing server config lookup") + for _, v := range clientHello.SupportedProtos { switch v { case "h2", requestForwardingALPN: diff --git a/vault/cluster_test.go b/vault/cluster_test.go index 40048a3abc..282c3701b3 100644 --- a/vault/cluster_test.go +++ b/vault/cluster_test.go @@ -10,12 +10,12 @@ import ( "testing" "time" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/helper/consts" - "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/helper/logging" 
"github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/physical" "github.com/hashicorp/vault/physical/inmem" - log "github.com/mgutz/logxi/v1" ) var ( @@ -41,7 +41,7 @@ func TestClusterFetching(t *testing.T) { } func TestClusterHAFetching(t *testing.T) { - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Trace) redirect := "http://127.0.0.1:8200" diff --git a/vault/core.go b/vault/core.go index d255e6f6c3..fa9dccedd7 100644 --- a/vault/core.go +++ b/vault/core.go @@ -16,7 +16,7 @@ import ( "time" "github.com/armon/go-metrics" - log "github.com/mgutz/logxi/v1" + log "github.com/hashicorp/go-hclog" "google.golang.org/grpc" @@ -28,7 +28,7 @@ import ( "github.com/hashicorp/vault/helper/errutil" "github.com/hashicorp/vault/helper/identity" "github.com/hashicorp/vault/helper/jsonutil" - "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/helper/mlock" "github.com/hashicorp/vault/helper/reload" "github.com/hashicorp/vault/helper/tlsutil" @@ -471,7 +471,7 @@ func NewCore(conf *CoreConfig) (*Core, error) { // Make a default logger if not provided if conf.Logger == nil { - conf.Logger = logformat.NewVaultLogger(log.LevelTrace) + conf.Logger = logging.NewVaultLogger(log.Trace) } // Setup the core @@ -484,7 +484,7 @@ func NewCore(conf *CoreConfig) (*Core, error) { router: NewRouter(), sealed: true, standby: true, - logger: conf.Logger, + logger: conf.Logger.Named("core"), defaultLeaseTTL: conf.DefaultLeaseTTL, maxLeaseTTL: conf.MaxLeaseTTL, cachingDisabled: conf.DisableCache, @@ -527,15 +527,15 @@ func NewCore(conf *CoreConfig) (*Core, error) { } c.seal.SetCore(c) - c.sealUnwrapper = NewSealUnwrapper(phys, conf.Logger) + c.sealUnwrapper = NewSealUnwrapper(phys, conf.Logger.Named("sealunwrapper")) var ok bool // Wrap the physical backend in a cache layer if enabled if txnOK { - c.physical = physical.NewTransactionalCache(c.sealUnwrapper, conf.CacheSize, 
conf.Logger) + c.physical = physical.NewTransactionalCache(c.sealUnwrapper, conf.CacheSize, conf.Logger.ResetNamed("storage.cache")) } else { - c.physical = physical.NewCache(c.sealUnwrapper, conf.CacheSize, conf.Logger) + c.physical = physical.NewCache(c.sealUnwrapper, conf.CacheSize, conf.Logger.Named("storage.cache")) } c.physicalCache = c.physical.(physical.ToggleablePurgemonster) @@ -592,7 +592,7 @@ func NewCore(conf *CoreConfig) (*Core, error) { } logicalBackends["cubbyhole"] = CubbyholeBackendFactory logicalBackends["system"] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) { - b := NewSystemBackend(c) + b := NewSystemBackend(c, conf.Logger.Named("system")) if err := b.Setup(ctx, config); err != nil { return nil, err } @@ -600,7 +600,7 @@ func NewCore(conf *CoreConfig) (*Core, error) { } logicalBackends["identity"] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) { - return NewIdentityStore(ctx, c, config) + return NewIdentityStore(ctx, c, config, conf.Logger.Named("identity")) } c.logicalBackends = logicalBackends @@ -610,7 +610,7 @@ func NewCore(conf *CoreConfig) (*Core, error) { credentialBackends[k] = f } credentialBackends["token"] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) { - return NewTokenStore(ctx, c, config) + return NewTokenStore(ctx, conf.Logger.Named("token"), c, config) } c.credentialBackends = credentialBackends @@ -628,7 +628,7 @@ func NewCore(conf *CoreConfig) (*Core, error) { // problem. It is only used to gracefully quit in the case of HA so that failover // happens as quickly as possible. 
func (c *Core) Shutdown() error { - c.logger.Trace("core: shutdown called") + c.logger.Debug("shutdown called") c.stateLock.RLock() // Tell any requests that know about this to stop if c.activeContextCancelFunc != nil { @@ -636,12 +636,12 @@ func (c *Core) Shutdown() error { } c.stateLock.RUnlock() - c.logger.Trace("core: shutdown initiating internal seal") + c.logger.Debug("shutdown initiating internal seal") // Seal the Vault, causes a leader stepdown c.stateLock.Lock() defer c.stateLock.Unlock() - c.logger.Trace("core: shutdown running internal seal") + c.logger.Debug("shutdown running internal seal") return c.sealInternal(false) } @@ -694,12 +694,12 @@ func (c *Core) fetchEntityAndDerivedPolicies(entityID string) (*identity.Entity, return nil, nil, nil } - //c.logger.Debug("core: entity set on the token", "entity_id", te.EntityID) + //c.logger.Debug("entity set on the token", "entity_id", te.EntityID) // Fetch the entity entity, err := c.identityStore.MemDBEntityByID(entityID, false) if err != nil { - c.logger.Error("core: failed to lookup entity using its ID", "error", err) + c.logger.Error("failed to lookup entity using its ID", "error", err) return nil, nil, err } @@ -709,21 +709,21 @@ func (c *Core) fetchEntityAndDerivedPolicies(entityID string) (*identity.Entity, // finding entity based on the merged entity index. entity, err = c.identityStore.MemDBEntityByMergedEntityID(entityID, false) if err != nil { - c.logger.Error("core: failed to lookup entity in merged entity ID index", "error", err) + c.logger.Error("failed to lookup entity in merged entity ID index", "error", err) return nil, nil, err } } var policies []string if entity != nil { - //c.logger.Debug("core: entity successfully fetched; adding entity policies to token's policies to create ACL") + //c.logger.Debug("entity successfully fetched; adding entity policies to token's policies to create ACL") // Attach the policies on the entity policies = append(policies, entity.Policies...) 
groupPolicies, err := c.identityStore.groupPoliciesByEntityID(entity.ID) if err != nil { - c.logger.Error("core: failed to fetch group policies", "error", err) + c.logger.Error("failed to fetch group policies", "error", err) return nil, nil, err } @@ -743,14 +743,14 @@ func (c *Core) fetchACLTokenEntryAndEntity(clientToken string) (*ACL, *TokenEntr } if c.tokenStore == nil { - c.logger.Error("core: token store is unavailable") + c.logger.Error("token store is unavailable") return nil, nil, nil, ErrInternalError } // Resolve the token policy te, err := c.tokenStore.Lookup(c.activeContext, clientToken) if err != nil { - c.logger.Error("core: failed to lookup token", "error", err) + c.logger.Error("failed to lookup token", "error", err) return nil, nil, nil, ErrInternalError } @@ -771,7 +771,7 @@ func (c *Core) fetchACLTokenEntryAndEntity(clientToken string) (*ACL, *TokenEntr // Construct the corresponding ACL object acl, err := c.policyStore.ACL(c.activeContext, tokenPolicies...) if err != nil { - c.logger.Error("core: failed to construct ACL", "error", err) + c.logger.Error("failed to construct ACL", "error", err) return nil, nil, nil, ErrInternalError } @@ -820,7 +820,7 @@ func (c *Core) checkToken(ctx context.Context, req *logical.Request, unauth bool case nil: // Continue on default: - c.logger.Error("core: failed to run existence check", "error", err) + c.logger.Error("failed to run existence check", "error", err) if _, ok := err.(errutil.UserError); ok { return nil, nil, err } else { @@ -935,7 +935,7 @@ func (c *Core) Leader() (isLeader bool, leaderAddr, clusterAddr string, err erro return false, localRedirAddr, localClusterAddr, nil } - c.logger.Trace("core: found new active node information, refreshing") + c.logger.Trace("found new active node information, refreshing") c.clusterLeaderParamsLock.Lock() defer c.clusterLeaderParamsLock.Unlock() @@ -962,12 +962,12 @@ func (c *Core) Leader() (isLeader bool, leaderAddr, clusterAddr string, err erro if err != nil { 
// Fall back to pre-struct handling adv.RedirectAddr = string(entry.Value) - c.logger.Trace("core: parsed redirect addr for new active node", "redirect_addr", adv.RedirectAddr) + c.logger.Debug("parsed redirect addr for new active node", "redirect_addr", adv.RedirectAddr) oldAdv = true } if !oldAdv { - c.logger.Trace("core: parsing information for new active node", "active_cluster_addr", adv.ClusterAddr, "active_redirect_addr", adv.RedirectAddr) + c.logger.Debug("parsing information for new active node", "active_cluster_addr", adv.ClusterAddr, "active_redirect_addr", adv.RedirectAddr) // Ensure we are using current values err = c.loadLocalClusterTLS(adv) @@ -1141,7 +1141,7 @@ func (c *Core) unsealPart(ctx context.Context, config *SealConfig, key []byte, u // the call only if we have met the threshold if len(c.unlockInfo.Parts) < config.SecretThreshold { if c.logger.IsDebug() { - c.logger.Debug("core: cannot unseal, not enough keys", "keys", len(c.unlockInfo.Parts), "threshold", config.SecretThreshold, "nonce", c.unlockInfo.Nonce) + c.logger.Debug("cannot unseal, not enough keys", "keys", len(c.unlockInfo.Parts), "threshold", config.SecretThreshold, "nonce", c.unlockInfo.Nonce) } return nil, nil } @@ -1213,7 +1213,7 @@ func (c *Core) unsealInternal(ctx context.Context, masterKey []byte) (bool, erro return false, err } if c.logger.IsInfo() { - c.logger.Info("core: vault is unsealed") + c.logger.Info("vault is unsealed") } // Do post-unseal setup if HA is not enabled @@ -1221,16 +1221,16 @@ func (c *Core) unsealInternal(ctx context.Context, masterKey []byte) (bool, erro // We still need to set up cluster info even if it's not part of a // cluster right now. This also populates the cached cluster object. 
if err := c.setupCluster(ctx); err != nil { - c.logger.Error("core: cluster setup failed", "error", err) + c.logger.Error("cluster setup failed", "error", err) c.barrier.Seal() - c.logger.Warn("core: vault is sealed") + c.logger.Warn("vault is sealed") return false, err } if err := c.postUnseal(); err != nil { - c.logger.Error("core: post-unseal setup failed", "error", err) + c.logger.Error("post-unseal setup failed", "error", err) c.barrier.Seal() - c.logger.Warn("core: vault is sealed") + c.logger.Warn("vault is sealed") return false, err } @@ -1256,7 +1256,7 @@ func (c *Core) unsealInternal(ctx context.Context, masterKey []byte) (bool, erro if ok { if err := sd.NotifySealedStateChange(); err != nil { if c.logger.IsWarn() { - c.logger.Warn("core: failed to notify unsealed status", "error", err) + c.logger.Warn("failed to notify unsealed status", "error", err) } } } @@ -1325,7 +1325,7 @@ func (c *Core) sealInitCommon(ctx context.Context, req *logical.Request) (retErr // just returning with an error and recommending a vault restart, which // essentially does the same thing. 
if c.standby { - c.logger.Error("core: vault cannot seal when in standby mode; please restart instead") + c.logger.Error("vault cannot seal when in standby mode; please restart instead") retErr = multierror.Append(retErr, errors.New("vault cannot seal when in standby mode; please restart instead")) c.stateLock.RUnlock() return retErr @@ -1349,7 +1349,7 @@ func (c *Core) sealInitCommon(ctx context.Context, req *logical.Request) (retErr Request: req, } if err := c.auditBroker.LogRequest(ctx, logInput, c.auditedHeaders); err != nil { - c.logger.Error("core: failed to audit request", "request_path", req.Path, "error", err) + c.logger.Error("failed to audit request", "request_path", req.Path, "error", err) retErr = multierror.Append(retErr, errors.New("failed to audit request, cannot continue")) c.stateLock.RUnlock() return retErr @@ -1360,7 +1360,7 @@ func (c *Core) sealInitCommon(ctx context.Context, req *logical.Request) (retErr if te != nil { te, err = c.tokenStore.UseToken(ctx, te) if err != nil { - c.logger.Error("core: failed to use token", "error", err) + c.logger.Error("failed to use token", "error", err) retErr = multierror.Append(retErr, ErrInternalError) c.stateLock.RUnlock() return retErr @@ -1393,7 +1393,7 @@ func (c *Core) sealInitCommon(ctx context.Context, req *logical.Request) (retErr // we won't have a token store after sealing. 
err = c.tokenStore.Revoke(c.activeContext, te.ID) if err != nil { - c.logger.Error("core: token needed revocation before seal but failed to revoke", "error", err) + c.logger.Error("token needed revocation before seal but failed to revoke", "error", err) retErr = multierror.Append(retErr, ErrInternalError) } } @@ -1458,7 +1458,7 @@ func (c *Core) StepDown(req *logical.Request) (retErr error) { Request: req, } if err := c.auditBroker.LogRequest(ctx, logInput, c.auditedHeaders); err != nil { - c.logger.Error("core: failed to audit request", "request_path", req.Path, "error", err) + c.logger.Error("failed to audit request", "request_path", req.Path, "error", err) retErr = multierror.Append(retErr, errors.New("failed to audit request, cannot continue")) return retErr } @@ -1467,7 +1467,7 @@ func (c *Core) StepDown(req *logical.Request) (retErr error) { if te != nil { te, err = c.tokenStore.UseToken(ctx, te) if err != nil { - c.logger.Error("core: failed to use token", "error", err) + c.logger.Error("failed to use token", "error", err) retErr = multierror.Append(retErr, ErrInternalError) return retErr } @@ -1496,7 +1496,7 @@ func (c *Core) StepDown(req *logical.Request) (retErr error) { // we won't have a token store after sealing. 
err = c.tokenStore.Revoke(c.activeContext, te.ID) if err != nil { - c.logger.Error("core: token needed revocation before step-down but failed to revoke", "error", err) + c.logger.Error("token needed revocation before step-down but failed to revoke", "error", err) retErr = multierror.Append(retErr, ErrInternalError) } } @@ -1504,7 +1504,7 @@ func (c *Core) StepDown(req *logical.Request) (retErr error) { select { case c.manualStepDownCh <- struct{}{}: default: - c.logger.Warn("core: manual step-down operation already queued") + c.logger.Warn("manual step-down operation already queued") } return retErr @@ -1520,7 +1520,7 @@ func (c *Core) sealInternal(keepLock bool) error { // Enable that we are sealed to prevent further transactions c.sealed = true - c.logger.Debug("core: marked as sealed") + c.logger.Debug("marked as sealed") // Clear forwarding clients c.requestForwardingConnectionLock.Lock() @@ -1532,7 +1532,7 @@ func (c *Core) sealInternal(keepLock bool) error { // Even in a non-HA context we key off of this for some things c.standby = true if err := c.preSeal(); err != nil { - c.logger.Error("core: pre-seal teardown failed", "error", err) + c.logger.Error("pre-seal teardown failed", "error", err) return fmt.Errorf("internal error") } } else { @@ -1545,17 +1545,17 @@ func (c *Core) sealInternal(keepLock bool) error { // for completion. We have the state lock here so nothing else should // be toggling standby status. 
close(c.standbyStopCh) - c.logger.Trace("core: finished triggering standbyStopCh for runStandby") + c.logger.Debug("finished triggering standbyStopCh for runStandby") // Wait for runStandby to stop <-c.standbyDoneCh atomic.StoreUint32(&c.keepHALockOnStepDown, 0) - c.logger.Trace("core: runStandby done") + c.logger.Debug("runStandby done") } - c.logger.Debug("core: sealing barrier") + c.logger.Debug("sealing barrier") if err := c.barrier.Seal(); err != nil { - c.logger.Error("core: error sealing barrier", "error", err) + c.logger.Error("error sealing barrier", "error", err) return err } @@ -1564,13 +1564,13 @@ func (c *Core) sealInternal(keepLock bool) error { if ok { if err := sd.NotifySealedStateChange(); err != nil { if c.logger.IsWarn() { - c.logger.Warn("core: failed to notify sealed status", "error", err) + c.logger.Warn("failed to notify sealed status", "error", err) } } } } - c.logger.Info("core: vault is sealed") + c.logger.Info("vault is sealed") return nil } @@ -1591,7 +1591,7 @@ func (c *Core) postUnseal() (retErr error) { c.preSeal() } }() - c.logger.Info("core: post-unseal setup starting") + c.logger.Info("post-unseal setup starting") // Clear forwarding clients; we're active c.requestForwardingConnectionLock.Lock() @@ -1669,7 +1669,7 @@ func (c *Core) postUnseal() (retErr error) { } c.metricsCh = make(chan struct{}) go c.emitMetrics(c.metricsCh) - c.logger.Info("core: post-unseal setup complete") + c.logger.Info("post-unseal setup complete") return nil } @@ -1677,7 +1677,7 @@ func (c *Core) postUnseal() (retErr error) { // for any state teardown required. 
func (c *Core) preSeal() error { defer metrics.MeasureSince([]string{"core", "pre_seal"}, time.Now()) - c.logger.Info("core: pre-seal teardown starting") + c.logger.Info("pre-seal teardown starting") // Clear any rekey progress c.barrierRekeyConfig = nil @@ -1726,7 +1726,7 @@ func (c *Core) preSeal() error { c.physicalCache.SetEnabled(false) c.physicalCache.Purge(c.activeContext) - c.logger.Info("core: pre-seal teardown complete") + c.logger.Info("pre-seal teardown complete") return result } @@ -1752,7 +1752,7 @@ func stopReplicationImpl(c *Core) error { func (c *Core) runStandby(doneCh, manualStepDownCh, stopCh chan struct{}) { defer close(doneCh) defer close(manualStepDownCh) - c.logger.Info("core: entering standby mode") + c.logger.Info("entering standby mode") // Monitor for key rotation keyRotateDone := make(chan struct{}) @@ -1763,13 +1763,13 @@ func (c *Core) runStandby(doneCh, manualStepDownCh, stopCh chan struct{}) { checkLeaderStop := make(chan struct{}) go c.periodicLeaderRefresh(checkLeaderDone, checkLeaderStop) defer func() { - c.logger.Trace("core: closed periodic key rotation checker stop channel") + c.logger.Debug("closed periodic key rotation checker stop channel") close(keyRotateStop) <-keyRotateDone close(checkLeaderStop) - c.logger.Trace("core: closed periodic leader refresh stop channel") + c.logger.Debug("closed periodic leader refresh stop channel") <-checkLeaderDone - c.logger.Trace("core: periodic leader refresh returned") + c.logger.Debug("periodic leader refresh returned") }() var manualStepDown bool @@ -1777,7 +1777,7 @@ func (c *Core) runStandby(doneCh, manualStepDownCh, stopCh chan struct{}) { // Check for a shutdown select { case <-stopCh: - c.logger.Trace("core: stop channel triggered in runStandby") + c.logger.Debug("stop channel triggered in runStandby") return default: // If we've just down, we could instantly grab the lock again. 
Give @@ -1791,12 +1791,12 @@ func (c *Core) runStandby(doneCh, manualStepDownCh, stopCh chan struct{}) { // Create a lock uuid, err := uuid.GenerateUUID() if err != nil { - c.logger.Error("core: failed to generate uuid", "error", err) + c.logger.Error("failed to generate uuid", "error", err) return } lock, err := c.ha.LockWith(coreLockPath, uuid) if err != nil { - c.logger.Error("core: failed to create lock", "error", err) + c.logger.Error("failed to create lock", "error", err) return } @@ -1807,7 +1807,7 @@ func (c *Core) runStandby(doneCh, manualStepDownCh, stopCh chan struct{}) { if leaderLostCh == nil { return } - c.logger.Info("core: acquired lock, enabling active operation") + c.logger.Info("acquired lock, enabling active operation") // This is used later to log a metrics event; this can be helpful to // detect flapping @@ -1842,7 +1842,7 @@ func (c *Core) runStandby(doneCh, manualStepDownCh, stopCh chan struct{}) { } if c.sealed { - c.logger.Warn("core: grabbed HA lock but already sealed, exiting") + c.logger.Warn("grabbed HA lock but already sealed, exiting") lock.Unlock() c.stateLock.Unlock() metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime) @@ -1869,7 +1869,7 @@ func (c *Core) runStandby(doneCh, manualStepDownCh, stopCh chan struct{}) { // statelock and have this shut us down; sealInternal has a // workflow where it watches for the stopCh to close so we want // to return from here - c.logger.Error("core: error performing key upgrades", "error", err) + c.logger.Error("error performing key upgrades", "error", err) go c.Shutdown() c.heldHALock = nil lock.Unlock() @@ -1889,7 +1889,7 @@ func (c *Core) runStandby(doneCh, manualStepDownCh, stopCh chan struct{}) { c.heldHALock = nil lock.Unlock() c.stateLock.Unlock() - c.logger.Error("core: cluster setup failed", "error", err) + c.logger.Error("cluster setup failed", "error", err) metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime) continue } @@ -1899,7 
+1899,7 @@ func (c *Core) runStandby(doneCh, manualStepDownCh, stopCh chan struct{}) { c.heldHALock = nil lock.Unlock() c.stateLock.Unlock() - c.logger.Error("core: leader advertisement setup failed", "error", err) + c.logger.Error("leader advertisement setup failed", "error", err) metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime) continue } @@ -1914,7 +1914,7 @@ func (c *Core) runStandby(doneCh, manualStepDownCh, stopCh chan struct{}) { // Handle a failure to unseal if err != nil { - c.logger.Error("core: post-unseal setup failed", "error", err) + c.logger.Error("post-unseal setup failed", "error", err) lock.Unlock() metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime) continue @@ -1925,7 +1925,7 @@ func (c *Core) runStandby(doneCh, manualStepDownCh, stopCh chan struct{}) { grabStateLock := true select { case <-leaderLostCh: - c.logger.Warn("core: leadership lost, stopping active operation") + c.logger.Warn("leadership lost, stopping active operation") case <-stopCh: // This case comes from sealInternal; we will already be having the // state lock held so we do toggle grabStateLock to false @@ -1934,7 +1934,7 @@ func (c *Core) runStandby(doneCh, manualStepDownCh, stopCh chan struct{}) { } grabStateLock = false case <-manualStepDownCh: - c.logger.Warn("core: stepping down from active operation to standby") + c.logger.Warn("stepping down from active operation to standby") manualStepDown = true } @@ -1957,7 +1957,7 @@ func (c *Core) runStandby(doneCh, manualStepDownCh, stopCh chan struct{}) { if releaseHALock { if err := c.clearLeader(uuid); err != nil { - c.logger.Error("core: clearing leader advertisement failed", "error", err) + c.logger.Error("clearing leader advertisement failed", "error", err) } c.heldHALock.Unlock() c.heldHALock = nil @@ -1965,7 +1965,7 @@ func (c *Core) runStandby(doneCh, manualStepDownCh, stopCh chan struct{}) { // Check for a failure to prepare to seal if preSealErr != nil { - 
c.logger.Error("core: pre-seal teardown failed", "error", err) + c.logger.Error("pre-seal teardown failed", "error", err) } } } @@ -2027,13 +2027,13 @@ func (c *Core) periodicCheckKeyUpgrade(ctx context.Context, doneCh, stopCh chan // be unsealed again. entry, _ := c.barrier.Get(ctx, poisonPillPath) if entry != nil && len(entry.Value) > 0 { - c.logger.Warn("core: encryption keys have changed out from underneath us (possibly due to replication enabling), must be unsealed again") + c.logger.Warn("encryption keys have changed out from underneath us (possibly due to replication enabling), must be unsealed again") go c.Shutdown() return } if err := c.checkKeyUpgrades(ctx); err != nil { - c.logger.Error("core: key rotation periodic upgrade check failed", "error", err) + c.logger.Error("key rotation periodic upgrade check failed", "error", err) } }() case <-stopCh: @@ -2057,7 +2057,7 @@ func (c *Core) checkKeyUpgrades(ctx context.Context) error { break } if c.logger.IsInfo() { - c.logger.Info("core: upgraded to new key term", "term", newTerm) + c.logger.Info("upgraded to new key term", "term", newTerm) } } return nil @@ -2081,17 +2081,17 @@ func (c *Core) scheduleUpgradeCleanup(ctx context.Context) error { time.AfterFunc(keyRotateGracePeriod, func() { sealed, err := c.barrier.Sealed() if err != nil { - c.logger.Warn("core: failed to check barrier status at upgrade cleanup time") + c.logger.Warn("failed to check barrier status at upgrade cleanup time") return } if sealed { - c.logger.Warn("core: barrier sealed at upgrade cleanup time") + c.logger.Warn("barrier sealed at upgrade cleanup time") return } for _, upgrade := range upgrades { path := fmt.Sprintf("%s%s", keyringUpgradePrefix, upgrade) if err := c.barrier.Delete(ctx, path); err != nil { - c.logger.Error("core: failed to cleanup upgrade", "path", path, "error", err) + c.logger.Error("failed to cleanup upgrade", "path", path, "error", err) } } }) @@ -2128,7 +2128,7 @@ func (c *Core) acquireLock(lock physical.Lock, 
stopCh <-chan struct{}) <-chan st } // Retry the acquisition - c.logger.Error("core: failed to acquire lock", "error", err) + c.logger.Error("failed to acquire lock", "error", err) select { case <-time.After(lockRetryInterval): case <-stopCh: @@ -2146,7 +2146,7 @@ func (c *Core) advertiseLeader(ctx context.Context, uuid string, leaderLostCh <- case *ecdsa.PrivateKey: key = c.localClusterPrivateKey.Load().(*ecdsa.PrivateKey) default: - c.logger.Error("core: unknown cluster private key type", "key_type", fmt.Sprintf("%T", c.localClusterPrivateKey.Load())) + c.logger.Error("unknown cluster private key type", "key_type", fmt.Sprintf("%T", c.localClusterPrivateKey.Load())) return fmt.Errorf("unknown cluster private key type %T", c.localClusterPrivateKey.Load()) } @@ -2183,7 +2183,7 @@ func (c *Core) advertiseLeader(ctx context.Context, uuid string, leaderLostCh <- if ok { if err := sd.NotifyActiveStateChange(); err != nil { if c.logger.IsWarn() { - c.logger.Warn("core: failed to notify active status", "error", err) + c.logger.Warn("failed to notify active status", "error", err) } } } @@ -2193,7 +2193,7 @@ func (c *Core) advertiseLeader(ctx context.Context, uuid string, leaderLostCh <- func (c *Core) cleanLeaderPrefix(ctx context.Context, uuid string, leaderLostCh <-chan struct{}) { keys, err := c.barrier.List(ctx, coreLeaderPrefix) if err != nil { - c.logger.Error("core: failed to list entries in core/leader", "error", err) + c.logger.Error("failed to list entries in core/leader", "error", err) return } for len(keys) > 0 { @@ -2219,7 +2219,7 @@ func (c *Core) clearLeader(uuid string) error { if ok { if err := sd.NotifyActiveStateChange(); err != nil { if c.logger.IsWarn() { - c.logger.Warn("core: failed to notify standby status", "error", err) + c.logger.Warn("failed to notify standby status", "error", err) } } } diff --git a/vault/core_test.go b/vault/core_test.go index f890df3134..a58fc3da75 100644 --- a/vault/core_test.go +++ b/vault/core_test.go @@ -7,14 +7,14 @@ 
import ( "time" "github.com/hashicorp/errwrap" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/audit" "github.com/hashicorp/vault/helper/consts" - "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/physical" "github.com/hashicorp/vault/physical/inmem" - log "github.com/mgutz/logxi/v1" ) var ( @@ -23,7 +23,7 @@ var ( ) func TestNewCore_badRedirectAddr(t *testing.T) { - logger = logformat.NewVaultLogger(log.LevelTrace) + logger = logging.NewVaultLogger(log.Trace) inm, err := inmem.NewInmem(nil, logger) if err != nil { @@ -1117,7 +1117,7 @@ func TestCore_LimitedUseToken(t *testing.T) { func TestCore_Standby_Seal(t *testing.T) { // Create the first core and initialize it - logger = logformat.NewVaultLogger(log.LevelTrace) + logger = logging.NewVaultLogger(log.Trace) inm, err := inmem.NewInmemHA(nil, logger) if err != nil { @@ -1235,7 +1235,7 @@ func TestCore_Standby_Seal(t *testing.T) { func TestCore_StepDown(t *testing.T) { // Create the first core and initialize it - logger = logformat.NewVaultLogger(log.LevelTrace) + logger = logging.NewVaultLogger(log.Trace) inm, err := inmem.NewInmemHA(nil, logger) if err != nil { @@ -1433,7 +1433,7 @@ func TestCore_StepDown(t *testing.T) { func TestCore_CleanLeaderPrefix(t *testing.T) { // Create the first core and initialize it - logger = logformat.NewVaultLogger(log.LevelTrace) + logger = logging.NewVaultLogger(log.Trace) inm, err := inmem.NewInmemHA(nil, logger) if err != nil { @@ -1602,7 +1602,7 @@ func TestCore_CleanLeaderPrefix(t *testing.T) { } func TestCore_Standby(t *testing.T) { - logger = logformat.NewVaultLogger(log.LevelTrace) + logger = logging.NewVaultLogger(log.Trace) inmha, err := inmem.NewInmemHA(nil, logger) if err != nil { @@ -1613,7 +1613,7 @@ func TestCore_Standby(t *testing.T) { } func TestCore_Standby_SeparateHA(t *testing.T) { - logger = 
logformat.NewVaultLogger(log.LevelTrace) + logger = logging.NewVaultLogger(log.Trace) inmha, err := inmem.NewInmemHA(nil, logger) if err != nil { @@ -2192,7 +2192,7 @@ func TestCore_HandleRequest_MountPointType(t *testing.T) { func TestCore_Standby_Rotate(t *testing.T) { // Create the first core and initialize it - logger = logformat.NewVaultLogger(log.LevelTrace) + logger = logging.NewVaultLogger(log.Trace) inm, err := inmem.NewInmemHA(nil, logger) if err != nil { diff --git a/vault/dynamic_system_view.go b/vault/dynamic_system_view.go index a1b3052bc6..e54e9f005e 100644 --- a/vault/dynamic_system_view.go +++ b/vault/dynamic_system_view.go @@ -32,7 +32,7 @@ func (d dynamicSystemView) SudoPrivilege(ctx context.Context, path string, token // Resolve the token policy te, err := d.core.tokenStore.Lookup(ctx, token) if err != nil { - d.core.logger.Error("core: failed to lookup token", "error", err) + d.core.logger.Error("failed to lookup token", "error", err) return false } diff --git a/vault/expiration.go b/vault/expiration.go index dcb8f0f1b4..d9b0f76405 100644 --- a/vault/expiration.go +++ b/vault/expiration.go @@ -12,7 +12,7 @@ import ( "time" "github.com/armon/go-metrics" - log "github.com/mgutz/logxi/v1" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/errwrap" multierror "github.com/hashicorp/go-multierror" @@ -80,13 +80,13 @@ type ExpirationManager struct { // NewExpirationManager creates a new ExpirationManager that is backed // using a given view, and uses the provided router for revocation. 
-func NewExpirationManager(c *Core, view *BarrierView) *ExpirationManager { +func NewExpirationManager(c *Core, view *BarrierView, logger log.Logger) *ExpirationManager { exp := &ExpirationManager{ router: c.router, idView: view.SubView(leaseViewPrefix), tokenView: view.SubView(tokenViewPrefix), tokenStore: c.tokenStore, - logger: c.logger, + logger: logger, pending: make(map[string]*time.Timer), // new instances of the expiration manager will go immediately into @@ -101,7 +101,8 @@ func NewExpirationManager(c *Core, view *BarrierView) *ExpirationManager { } if exp.logger == nil { - exp.logger = log.New("expiration_manager") + opts := log.LoggerOptions{Name: "expiration_manager"} + exp.logger = log.New(&opts) } return exp @@ -116,18 +117,18 @@ func (c *Core) setupExpiration() error { view := c.systemBarrierView.SubView(expirationSubPath) // Create the manager - mgr := NewExpirationManager(c, view) + mgr := NewExpirationManager(c, view, c.logger.ResetNamed("expiration")) c.expiration = mgr // Link the token store to this c.tokenStore.SetExpirationManager(mgr) // Restore the existing state - c.logger.Info("expiration: restoring leases") + c.logger.Info("restoring leases") errorFunc := func() { - c.logger.Error("expiration: shutting down") + c.logger.Error("shutting down") if err := c.Shutdown(); err != nil { - c.logger.Error("expiration: error shutting down core: %v", err) + c.logger.Error("error shutting down core", "error", err) } } go c.expiration.Restore(errorFunc) @@ -178,14 +179,14 @@ func (m *ExpirationManager) Tidy() error { var tidyErrors *multierror.Error if !atomic.CompareAndSwapInt32(&m.tidyLock, 0, 1) { - m.logger.Warn("expiration: tidy operation on leases is already in progress") + m.logger.Warn("tidy operation on leases is already in progress") return fmt.Errorf("tidy operation on leases is already in progress") } defer atomic.CompareAndSwapInt32(&m.tidyLock, 1, 0) - m.logger.Info("expiration: beginning tidy operation on leases") - defer 
m.logger.Info("expiration: finished tidy operation on leases") + m.logger.Info("beginning tidy operation on leases") + defer m.logger.Info("finished tidy operation on leases") // Create a cache to keep track of looked up tokens tokenCache := make(map[string]bool) @@ -194,7 +195,7 @@ func (m *ExpirationManager) Tidy() error { tidyFunc := func(leaseID string) { countLease++ if countLease%500 == 0 { - m.logger.Info("expiration: tidying leases", "progress", countLease) + m.logger.Info("tidying leases", "progress", countLease) } le, err := m.loadEntry(leaseID) @@ -211,7 +212,7 @@ func (m *ExpirationManager) Tidy() error { var isValid, ok bool revokeLease := false if le.ClientToken == "" { - m.logger.Trace("expiration: revoking lease which has an empty token", "lease_id", leaseID) + m.logger.Debug("revoking lease which has an empty token", "lease_id", leaseID) revokeLease = true deletedCountEmptyToken++ goto REVOKE_CHECK @@ -235,7 +236,7 @@ func (m *ExpirationManager) Tidy() error { } if te == nil { - m.logger.Trace("expiration: revoking lease which holds an invalid token", "lease_id", leaseID) + m.logger.Debug("revoking lease which holds an invalid token", "lease_id", leaseID) revokeLease = true deletedCountInvalidToken++ tokenCache[le.ClientToken] = false @@ -248,7 +249,7 @@ func (m *ExpirationManager) Tidy() error { return } - m.logger.Trace("expiration: revoking lease which contains an invalid token", "lease_id", leaseID) + m.logger.Debug("revoking lease which contains an invalid token", "lease_id", leaseID) revokeLease = true deletedCountInvalidToken++ goto REVOKE_CHECK @@ -271,10 +272,10 @@ func (m *ExpirationManager) Tidy() error { return err } - m.logger.Debug("expiration: number of leases scanned", "count", countLease) - m.logger.Debug("expiration: number of leases which had empty tokens", "count", deletedCountEmptyToken) - m.logger.Debug("expiration: number of leases which had invalid tokens", "count", deletedCountInvalidToken) - m.logger.Debug("expiration: 
number of leases successfully revoked", "count", revokedCount) + m.logger.Info("number of leases scanned", "count", countLease) + m.logger.Info("number of leases which had empty tokens", "count", deletedCountEmptyToken) + m.logger.Info("number of leases which had invalid tokens", "count", deletedCountInvalidToken) + m.logger.Info("number of leases successfully revoked", "count", revokedCount) return tidyErrors.ErrorOrNil() } @@ -293,10 +294,10 @@ func (m *ExpirationManager) Restore(errorFunc func()) (retErr error) { case retErr == nil: case errwrap.Contains(retErr, ErrBarrierSealed.Error()): // Don't run error func because we're likely already shutting down - m.logger.Warn("expiration: barrier sealed while restoring leases, stopping lease loading") + m.logger.Warn("barrier sealed while restoring leases, stopping lease loading") retErr = nil default: - m.logger.Error("expiration: error restoring leases", "error", retErr) + m.logger.Error("error restoring leases", "error", retErr) if errorFunc != nil { errorFunc() } @@ -304,12 +305,12 @@ func (m *ExpirationManager) Restore(errorFunc func()) (retErr error) { }() // Accumulate existing leases - m.logger.Debug("expiration: collecting leases") + m.logger.Debug("collecting leases") existing, err := logical.CollectKeys(m.quitContext, m.idView) if err != nil { return errwrap.Wrapf("failed to scan for leases: {{err}}", err) } - m.logger.Debug("expiration: leases collected", "num_existing", len(existing)) + m.logger.Debug("leases collected", "num_existing", len(existing)) // Make the channels used for the worker pool broker := make(chan string) @@ -361,7 +362,7 @@ func (m *ExpirationManager) Restore(errorFunc func()) (retErr error) { defer wg.Done() for i, leaseID := range existing { if i > 0 && i%500 == 0 { - m.logger.Trace("expiration: leases loading", "progress", i) + m.logger.Debug("leases loading", "progress", i) } select { @@ -405,7 +406,7 @@ func (m *ExpirationManager) Restore(errorFunc func()) (retErr error) { 
atomic.StoreInt32(&m.restoreMode, 0) m.restoreModeLock.Unlock() - m.logger.Info("expiration: lease restore complete") + m.logger.Info("lease restore complete") return nil } @@ -440,8 +441,8 @@ func (m *ExpirationManager) processRestore(leaseID string) error { // This must be called before sealing the view. func (m *ExpirationManager) Stop() error { // Stop all the pending expiration timers - m.logger.Debug("expiration: stop triggered") - defer m.logger.Debug("expiration: finished stopping") + m.logger.Debug("stop triggered") + defer m.logger.Debug("finished stopping") // Do this before stopping pending timers to avoid potential races with // expiring timers @@ -523,7 +524,7 @@ func (m *ExpirationManager) revokeCommon(leaseID string, force, skipToken bool) m.pendingLock.Unlock() if m.logger.IsInfo() { - m.logger.Info("expiration: revoked lease", "lease_id", leaseID) + m.logger.Info("revoked lease", "lease_id", leaseID) } return nil @@ -1017,14 +1018,14 @@ func (m *ExpirationManager) expireID(leaseID string) { for attempt := uint(0); attempt < maxRevokeAttempts; attempt++ { select { case <-m.quitCh: - m.logger.Error("expiration: shutting down, not attempting further revocation of lease", "lease_id", leaseID) + m.logger.Error("shutting down, not attempting further revocation of lease", "lease_id", leaseID) return default: } m.coreStateLock.RLock() if m.quitContext.Err() == context.Canceled { - m.logger.Error("expiration: core context canceled, not attempting further revocation of lease", "lease_id", leaseID) + m.logger.Error("core context canceled, not attempting further revocation of lease", "lease_id", leaseID) m.coreStateLock.RUnlock() return } @@ -1036,10 +1037,10 @@ func (m *ExpirationManager) expireID(leaseID string) { } m.coreStateLock.RUnlock() - m.logger.Error("expiration: failed to revoke lease", "lease_id", leaseID, "error", err) + m.logger.Error("failed to revoke lease", "lease_id", leaseID, "error", err) time.Sleep((1 << attempt) * revokeRetryBase) } - 
m.logger.Error("expiration: maximum revoke attempts reached", "lease_id", leaseID) + m.logger.Error("maximum revoke attempts reached", "lease_id", leaseID) } // revokeEntry is used to attempt revocation of an internal entry @@ -1279,7 +1280,7 @@ func (m *ExpirationManager) emitMetrics() { // Check if lease count is greater than the threshold if num > maxLeaseThreshold { if atomic.LoadUint32(&m.leaseCheckCounter) > 59 { - m.logger.Warn("expiration: lease count exceeds warning lease threshold") + m.logger.Warn("lease count exceeds warning lease threshold") atomic.StoreUint32(&m.leaseCheckCounter, 0) } else { atomic.AddUint32(&m.leaseCheckCounter, 1) diff --git a/vault/expiration_test.go b/vault/expiration_test.go index 3f336f262a..fbe7d08683 100644 --- a/vault/expiration_test.go +++ b/vault/expiration_test.go @@ -10,13 +10,13 @@ import ( "testing" "time" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-uuid" - "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/framework" "github.com/hashicorp/vault/physical" "github.com/hashicorp/vault/physical/inmem" - log "github.com/mgutz/logxi/v1" ) var ( @@ -252,7 +252,7 @@ func BenchmarkExpiration_Restore_Etcd(b *testing.B) { addr := os.Getenv("PHYSICAL_BACKEND_BENCHMARK_ADDR") randPath := fmt.Sprintf("vault-%d/", time.Now().Unix()) - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Trace) physicalBackend, err := physEtcd.NewEtcdBackend(map[string]string{ "address": addr, "path": randPath, @@ -269,7 +269,7 @@ func BenchmarkExpiration_Restore_Consul(b *testing.B) { addr := os.Getenv("PHYSICAL_BACKEND_BENCHMARK_ADDR") randPath := fmt.Sprintf("vault-%d/", time.Now().Unix()) - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Trace) physicalBackend, err := physConsul.NewConsulBackend(map[string]string{ "address": addr, "path": 
randPath, @@ -284,7 +284,7 @@ func BenchmarkExpiration_Restore_Consul(b *testing.B) { */ func BenchmarkExpiration_Restore_InMem(b *testing.B) { - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Trace) inm, err := inmem.NewInmem(nil, logger) if err != nil { b.Fatal(err) diff --git a/vault/generate_root.go b/vault/generate_root.go index e611b21bf2..8df81be132 100644 --- a/vault/generate_root.go +++ b/vault/generate_root.go @@ -34,11 +34,11 @@ type generateStandardRootToken struct{} func (g generateStandardRootToken) generate(ctx context.Context, c *Core) (string, func(), error) { te, err := c.tokenStore.rootToken(ctx) if err != nil { - c.logger.Error("core: root token generation failed", "error", err) + c.logger.Error("root token generation failed", "error", err) return "", nil, err } if te == nil { - c.logger.Error("core: got nil token entry back from root generation") + c.logger.Error("got nil token entry back from root generation") return "", nil, fmt.Errorf("got nil token entry back from root generation") } @@ -170,7 +170,7 @@ func (c *Core) GenerateRootInit(otp, pgpKey string, strategy GenerateRootStrateg } if c.logger.IsInfo() { - c.logger.Info("core: root generation initialized", "nonce", c.generateRootConfig.Nonce) + c.logger.Info("root generation initialized", "nonce", c.generateRootConfig.Nonce) } return nil } @@ -247,7 +247,7 @@ func (c *Core) GenerateRootUpdate(ctx context.Context, key []byte, nonce string, // Check if we don't have enough keys to unlock if len(c.generateRootProgress) < config.SecretThreshold { if c.logger.IsDebug() { - c.logger.Debug("core: cannot generate root, not enough keys", "keys", progress, "threshold", config.SecretThreshold) + c.logger.Debug("cannot generate root, not enough keys", "keys", progress, "threshold", config.SecretThreshold) } return &GenerateRootResult{ Progress: progress, @@ -272,12 +272,12 @@ func (c *Core) GenerateRootUpdate(ctx context.Context, key []byte, nonce string, 
// Verify the master key if c.seal.RecoveryKeySupported() { if err := c.seal.VerifyRecoveryKey(ctx, masterKey); err != nil { - c.logger.Error("core: root generation aborted, recovery key verification failed", "error", err) + c.logger.Error("root generation aborted, recovery key verification failed", "error", err) return nil, err } } else { if err := c.barrier.VerifyMaster(masterKey); err != nil { - c.logger.Error("core: root generation aborted, master key verification failed", "error", err) + c.logger.Error("root generation aborted, master key verification failed", "error", err) return nil, err } } @@ -291,12 +291,12 @@ func (c *Core) GenerateRootUpdate(ctx context.Context, key []byte, nonce string, uuidBytes, err := uuid.ParseUUID(tokenUUID) if err != nil { cleanupFunc() - c.logger.Error("core: error getting generated token bytes", "error", err) + c.logger.Error("error getting generated token bytes", "error", err) return nil, err } if uuidBytes == nil { cleanupFunc() - c.logger.Error("core: got nil parsed UUID bytes") + c.logger.Error("got nil parsed UUID bytes") return nil, fmt.Errorf("got nil parsed UUID bytes") } @@ -310,7 +310,7 @@ func (c *Core) GenerateRootUpdate(ctx context.Context, key []byte, nonce string, tokenBytes, err = xor.XORBase64(c.generateRootConfig.OTP, base64.StdEncoding.EncodeToString(uuidBytes)) if err != nil { cleanupFunc() - c.logger.Error("core: xor of root token failed", "error", err) + c.logger.Error("xor of root token failed", "error", err) return nil, err } @@ -318,7 +318,7 @@ func (c *Core) GenerateRootUpdate(ctx context.Context, key []byte, nonce string, _, tokenBytesArr, err := pgpkeys.EncryptShares([][]byte{[]byte(tokenUUID)}, []string{c.generateRootConfig.PGPKey}) if err != nil { cleanupFunc() - c.logger.Error("core: error encrypting new root token", "error", err) + c.logger.Error("error encrypting new root token", "error", err) return nil, err } tokenBytes = tokenBytesArr[0] @@ -336,7 +336,7 @@ func (c *Core) 
GenerateRootUpdate(ctx context.Context, key []byte, nonce string, } if c.logger.IsInfo() { - c.logger.Info("core: root generation finished", "nonce", c.generateRootConfig.Nonce) + c.logger.Info("root generation finished", "nonce", c.generateRootConfig.Nonce) } c.generateRootProgress = nil diff --git a/vault/identity_store.go b/vault/identity_store.go index 60cd0262fb..63e29f33ea 100644 --- a/vault/identity_store.go +++ b/vault/identity_store.go @@ -6,6 +6,7 @@ import ( "strings" "github.com/golang/protobuf/ptypes" + log "github.com/hashicorp/go-hclog" memdb "github.com/hashicorp/go-memdb" "github.com/hashicorp/vault/helper/identity" "github.com/hashicorp/vault/helper/locksutil" @@ -23,7 +24,7 @@ func (c *Core) IdentityStore() *IdentityStore { } // NewIdentityStore creates a new identity store -func NewIdentityStore(ctx context.Context, core *Core, config *logical.BackendConfig) (*IdentityStore, error) { +func NewIdentityStore(ctx context.Context, core *Core, config *logical.BackendConfig, logger log.Logger) (*IdentityStore, error) { var err error // Create a new in-memory database for the identity store @@ -36,7 +37,7 @@ func NewIdentityStore(ctx context.Context, core *Core, config *logical.BackendCo view: config.StorageView, db: db, entityLocks: locksutil.CreateLocks(), - logger: core.logger, + logger: logger, validateMountAccessorFunc: core.router.validateMountByAccessor, } @@ -76,7 +77,7 @@ func NewIdentityStore(ctx context.Context, core *Core, config *logical.BackendCo // storage entries that get updated. The value needs to be read and MemDB needs // to be updated accordingly. 
func (i *IdentityStore) Invalidate(ctx context.Context, key string) { - i.logger.Debug("identity: invalidate notification received", "key", key) + i.logger.Debug("invalidate notification received", "key", key) switch { // Check if the key is a storage entry key for an entity bucket @@ -326,7 +327,7 @@ func (i *IdentityStore) CreateOrFetchEntity(alias *logical.Alias) (*identity.Ent return entity, nil } - i.logger.Debug("identity: creating a new entity", "alias", alias) + i.logger.Debug("creating a new entity", "alias", alias) entity = &identity.Entity{} diff --git a/vault/identity_store_structs.go b/vault/identity_store_structs.go index 067fdcdf9d..a6b4430fb6 100644 --- a/vault/identity_store_structs.go +++ b/vault/identity_store_structs.go @@ -4,13 +4,13 @@ import ( "regexp" "sync" + log "github.com/hashicorp/go-hclog" memdb "github.com/hashicorp/go-memdb" "github.com/hashicorp/vault/helper/identity" "github.com/hashicorp/vault/helper/locksutil" "github.com/hashicorp/vault/helper/storagepacker" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/framework" - log "github.com/mgutz/logxi/v1" ) const ( diff --git a/vault/identity_store_util.go b/vault/identity_store_util.go index fe3f9d9b91..6c41465cb1 100644 --- a/vault/identity_store_util.go +++ b/vault/identity_store_util.go @@ -42,7 +42,7 @@ func (i *IdentityStore) loadGroups(ctx context.Context) error { if err != nil { return fmt.Errorf("failed to scan for groups: %v", err) } - i.logger.Debug("identity: groups collected", "num_existing", len(existing)) + i.logger.Debug("groups collected", "num_existing", len(existing)) i.groupLock.Lock() defer i.groupLock.Unlock() @@ -66,8 +66,8 @@ func (i *IdentityStore) loadGroups(ctx context.Context) error { continue } - if i.logger.IsTrace() { - i.logger.Trace("loading group", "name", group.Name, "id", group.ID) + if i.logger.IsDebug() { + i.logger.Debug("loading group", "name", group.Name, "id", group.ID) } txn := i.db.Txn(true) @@ -83,7 +83,7 @@ func (i 
*IdentityStore) loadGroups(ctx context.Context) error { } if i.logger.IsInfo() { - i.logger.Info("identity: groups restored") + i.logger.Info("groups restored") } return nil @@ -91,12 +91,12 @@ func (i *IdentityStore) loadGroups(ctx context.Context) error { func (i *IdentityStore) loadEntities(ctx context.Context) error { // Accumulate existing entities - i.logger.Debug("identity: loading entities") + i.logger.Debug("loading entities") existing, err := i.entityPacker.View().List(ctx, storagepacker.StoragePackerBucketsPrefix) if err != nil { return fmt.Errorf("failed to scan for entities: %v", err) } - i.logger.Debug("identity: entities collected", "num_existing", len(existing)) + i.logger.Debug("entities collected", "num_existing", len(existing)) // Make the channels used for the worker pool broker := make(chan string) @@ -146,7 +146,7 @@ func (i *IdentityStore) loadEntities(ctx context.Context) error { defer wg.Done() for j, bucketKey := range existing { if j%500 == 0 { - i.logger.Trace("identity: entities loading", "progress", j) + i.logger.Debug("entities loading", "progress", j) } select { @@ -200,7 +200,7 @@ func (i *IdentityStore) loadEntities(ctx context.Context) error { wg.Wait() if i.logger.IsInfo() { - i.logger.Info("identity: entities restored") + i.logger.Info("entities restored") } return nil diff --git a/vault/init.go b/vault/init.go index 3a26ea6906..837531d23c 100644 --- a/vault/init.go +++ b/vault/init.go @@ -31,11 +31,11 @@ func (c *Core) Initialized(ctx context.Context) (bool, error) { // Check the barrier first init, err := c.barrier.Initialized(ctx) if err != nil { - c.logger.Error("core: barrier init check failed", "error", err) + c.logger.Error("barrier init check failed", "error", err) return false, err } if !init { - c.logger.Info("core: security barrier not initialized") + c.logger.Info("security barrier not initialized") return false, nil } @@ -104,14 +104,14 @@ func (c *Core) Initialize(ctx context.Context, initParams *InitParams) 
(*InitRes // Check if the seal configuration is valid if err := recoveryConfig.Validate(); err != nil { - c.logger.Error("core: invalid recovery configuration", "error", err) + c.logger.Error("invalid recovery configuration", "error", err) return nil, fmt.Errorf("invalid recovery configuration: %v", err) } } // Check if the seal configuration is valid if err := barrierConfig.Validate(); err != nil { - c.logger.Error("core: invalid seal configuration", "error", err) + c.logger.Error("invalid seal configuration", "error", err) return nil, fmt.Errorf("invalid seal configuration: %v", err) } @@ -130,28 +130,28 @@ func (c *Core) Initialize(ctx context.Context, initParams *InitParams) (*InitRes err = c.seal.Init(ctx) if err != nil { - c.logger.Error("core: failed to initialize seal", "error", err) + c.logger.Error("failed to initialize seal", "error", err) return nil, fmt.Errorf("error initializing seal: %v", err) } barrierKey, barrierUnsealKeys, err := c.generateShares(barrierConfig) if err != nil { - c.logger.Error("core: error generating shares", "error", err) + c.logger.Error("error generating shares", "error", err) return nil, err } // Initialize the barrier if err := c.barrier.Initialize(ctx, barrierKey); err != nil { - c.logger.Error("core: failed to initialize barrier", "error", err) + c.logger.Error("failed to initialize barrier", "error", err) return nil, fmt.Errorf("failed to initialize barrier: %v", err) } if c.logger.IsInfo() { - c.logger.Info("core: security barrier initialized", "shares", barrierConfig.SecretShares, "threshold", barrierConfig.SecretThreshold) + c.logger.Info("security barrier initialized", "shares", barrierConfig.SecretShares, "threshold", barrierConfig.SecretThreshold) } // Unseal the barrier if err := c.barrier.Unseal(ctx, barrierKey); err != nil { - c.logger.Error("core: failed to unseal barrier", "error", err) + c.logger.Error("failed to unseal barrier", "error", err) return nil, fmt.Errorf("failed to unseal barrier: %v", err) } @@ 
-161,13 +161,13 @@ func (c *Core) Initialize(ctx context.Context, initParams *InitParams) (*InitRes // happens before sealing. preSeal also stops, so we just make the // stopping safe against multiple calls. if err := c.barrier.Seal(); err != nil { - c.logger.Error("core: failed to seal barrier", "error", err) + c.logger.Error("failed to seal barrier", "error", err) } }() err = c.seal.SetBarrierConfig(ctx, barrierConfig) if err != nil { - c.logger.Error("core: failed to save barrier configuration", "error", err) + c.logger.Error("failed to save barrier configuration", "error", err) return nil, fmt.Errorf("barrier configuration saving failed: %v", err) } @@ -180,7 +180,7 @@ func (c *Core) Initialize(ctx context.Context, initParams *InitParams) (*InitRes barrierUnsealKeys = barrierUnsealKeys[1:] } if err := c.seal.SetStoredKeys(ctx, keysToStore); err != nil { - c.logger.Error("core: failed to store keys", "error", err) + c.logger.Error("failed to store keys", "error", err) return nil, fmt.Errorf("failed to store keys: %v", err) } } @@ -191,11 +191,11 @@ func (c *Core) Initialize(ctx context.Context, initParams *InitParams) (*InitRes // Perform initial setup if err := c.setupCluster(ctx); err != nil { - c.logger.Error("core: cluster setup failed during init", "error", err) + c.logger.Error("cluster setup failed during init", "error", err) return nil, err } if err := c.postUnseal(); err != nil { - c.logger.Error("core: post-unseal setup failed during init", "error", err) + c.logger.Error("post-unseal setup failed during init", "error", err) return nil, err } @@ -205,14 +205,14 @@ func (c *Core) Initialize(ctx context.Context, initParams *InitParams) (*InitRes if c.seal.RecoveryKeySupported() { err = c.seal.SetRecoveryConfig(ctx, recoveryConfig) if err != nil { - c.logger.Error("core: failed to save recovery configuration", "error", err) + c.logger.Error("failed to save recovery configuration", "error", err) return nil, fmt.Errorf("recovery configuration saving failed: 
%v", err) } if recoveryConfig.SecretShares > 0 { recoveryKey, recoveryUnsealKeys, err := c.generateShares(recoveryConfig) if err != nil { - c.logger.Error("core: failed to generate recovery shares", "error", err) + c.logger.Error("failed to generate recovery shares", "error", err) return nil, err } @@ -228,16 +228,16 @@ func (c *Core) Initialize(ctx context.Context, initParams *InitParams) (*InitRes // Generate a new root token rootToken, err := c.tokenStore.rootToken(ctx) if err != nil { - c.logger.Error("core: root token generation failed", "error", err) + c.logger.Error("root token generation failed", "error", err) return nil, err } results.RootToken = rootToken.ID - c.logger.Info("core: root token generated") + c.logger.Info("root token generated") if initParams.RootTokenPGPKey != "" { _, encryptedVals, err := pgpkeys.EncryptShares([][]byte{[]byte(results.RootToken)}, []string{initParams.RootTokenPGPKey}) if err != nil { - c.logger.Error("core: root token encryption failed", "error", err) + c.logger.Error("root token encryption failed", "error", err) return nil, err } results.RootToken = base64.StdEncoding.EncodeToString(encryptedVals[0]) @@ -245,7 +245,7 @@ func (c *Core) Initialize(ctx context.Context, initParams *InitParams) (*InitRes // Prepare to re-seal if err := c.preSeal(); err != nil { - c.logger.Error("core: pre-seal teardown failed", "error", err) + c.logger.Error("pre-seal teardown failed", "error", err) return nil, err } @@ -260,28 +260,28 @@ func (c *Core) UnsealWithStoredKeys(ctx context.Context) error { sealed, err := c.Sealed() if err != nil { - c.logger.Error("core: error checking sealed status in auto-unseal", "error", err) + c.logger.Error("error checking sealed status in auto-unseal", "error", err) return fmt.Errorf("error checking sealed status in auto-unseal: %s", err) } if !sealed { return nil } - c.logger.Info("core: stored unseal keys supported, attempting fetch") + c.logger.Info("stored unseal keys supported, attempting fetch") keys, 
err := c.seal.GetStoredKeys(ctx) if err != nil { - c.logger.Error("core: fetching stored unseal keys failed", "error", err) + c.logger.Error("fetching stored unseal keys failed", "error", err) return &NonFatalError{Err: fmt.Errorf("fetching stored unseal keys failed: %v", err)} } if len(keys) == 0 { - c.logger.Warn("core: stored unseal key(s) supported but none found") + c.logger.Warn("stored unseal key(s) supported but none found") } else { unsealed := false keysUsed := 0 for _, key := range keys { unsealed, err = c.Unseal(key) if err != nil { - c.logger.Error("core: unseal with stored unseal key failed", "error", err) + c.logger.Error("unseal with stored unseal key failed", "error", err) return &NonFatalError{Err: fmt.Errorf("unseal with stored key failed: %v", err)} } keysUsed += 1 @@ -291,11 +291,11 @@ func (c *Core) UnsealWithStoredKeys(ctx context.Context) error { } if !unsealed { if c.logger.IsWarn() { - c.logger.Warn("core: stored unseal key(s) used but Vault not unsealed yet", "stored_keys_used", keysUsed) + c.logger.Warn("stored unseal key(s) used but Vault not unsealed yet", "stored_keys_used", keysUsed) } } else { if c.logger.IsInfo() { - c.logger.Info("core: successfully unsealed with stored key(s)", "stored_keys_used", keysUsed) + c.logger.Info("successfully unsealed with stored key(s)", "stored_keys_used", keysUsed) } } } diff --git a/vault/init_test.go b/vault/init_test.go index 51dace3336..8ed537da1a 100644 --- a/vault/init_test.go +++ b/vault/init_test.go @@ -5,9 +5,9 @@ import ( "reflect" "testing" - log "github.com/mgutz/logxi/v1" + log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/physical/inmem" ) @@ -22,7 +22,7 @@ func TestCore_Init(t *testing.T) { } func testCore_NewTestCore(t *testing.T, seal Seal) (*Core, *CoreConfig) { - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := 
logging.NewVaultLogger(log.Trace) inm, err := inmem.NewInmem(nil, logger) if err != nil { diff --git a/vault/logical_system.go b/vault/logical_system.go index 1d04ebca7c..9d2772f80d 100644 --- a/vault/logical_system.go +++ b/vault/logical_system.go @@ -17,6 +17,7 @@ import ( "time" "github.com/hashicorp/errwrap" + log "github.com/hashicorp/go-hclog" uuid "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/helper/compressutil" "github.com/hashicorp/vault/helper/consts" @@ -24,7 +25,6 @@ import ( "github.com/hashicorp/vault/helper/wrapping" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/framework" - log "github.com/mgutz/logxi/v1" "github.com/mitchellh/mapstructure" ) @@ -55,10 +55,10 @@ var ( } ) -func NewSystemBackend(core *Core) *SystemBackend { +func NewSystemBackend(core *Core, logger log.Logger) *SystemBackend { b := &SystemBackend{ Core: core, - logger: core.logger, + logger: logger, } b.Backend = &framework.Backend{ @@ -1116,7 +1116,7 @@ func (b *SystemBackend) handleCORSDelete(ctx context.Context, req *logical.Reque func (b *SystemBackend) handleTidyLeases(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { err := b.Core.expiration.Tidy() if err != nil { - b.Backend.Logger().Error("sys: failed to tidy leases", "error", err) + b.Backend.Logger().Error("failed to tidy leases", "error", err) return handleErrorNoReadOnlyForward(err) } return nil, err @@ -1125,7 +1125,7 @@ func (b *SystemBackend) handleTidyLeases(ctx context.Context, req *logical.Reque func (b *SystemBackend) invalidate(ctx context.Context, key string) { /* if b.Core.logger.IsTrace() { - b.Core.logger.Trace("sys: invalidating key", "key", key) + b.Core.logger.Trace("invalidating key", "key", key) } */ switch { @@ -1642,7 +1642,7 @@ func (b *SystemBackend) handleMount(ctx context.Context, req *logical.Request, d // Attempt mount if err := b.Core.mount(ctx, me); err != nil { - b.Backend.Logger().Error("sys: mount failed", 
"path", me.Path, "error", err) + b.Backend.Logger().Error("mount failed", "path", me.Path, "error", err) return handleError(err) } @@ -1698,7 +1698,7 @@ func (b *SystemBackend) handleUnmount(ctx context.Context, req *logical.Request, // Attempt unmount if err := b.Core.unmount(ctx, path); err != nil { - b.Backend.Logger().Error("sys: unmount failed", "path", path, "error", err) + b.Backend.Logger().Error("unmount failed", "path", path, "error", err) return handleError(err) } @@ -1728,7 +1728,7 @@ func (b *SystemBackend) handleRemount(ctx context.Context, req *logical.Request, // Attempt remount if err := b.Core.remount(ctx, fromPath, toPath); err != nil { - b.Backend.Logger().Error("sys: remount failed", "from_path", fromPath, "to_path", toPath, "error", err) + b.Backend.Logger().Error("remount failed", "from_path", fromPath, "to_path", toPath, "error", err) return handleError(err) } @@ -1767,13 +1767,13 @@ func (b *SystemBackend) handleTuneReadCommon(path string) (*logical.Response, er sysView := b.Core.router.MatchingSystemView(path) if sysView == nil { - b.Backend.Logger().Error("sys: cannot fetch sysview", "path", path) + b.Backend.Logger().Error("cannot fetch sysview", "path", path) return handleError(fmt.Errorf("sys: cannot fetch sysview for path %s", path)) } mountEntry := b.Core.router.MatchingMountEntry(path) if mountEntry == nil { - b.Backend.Logger().Error("sys: cannot fetch mount entry", "path", path) + b.Backend.Logger().Error("cannot fetch mount entry", "path", path) return handleError(fmt.Errorf("sys: cannot fetch mount entry for path %s", path)) } @@ -1840,14 +1840,14 @@ func (b *SystemBackend) handleTuneWriteCommon(ctx context.Context, path string, // Prevent protected paths from being changed for _, p := range untunableMounts { if strings.HasPrefix(path, p) { - b.Backend.Logger().Error("sys: cannot tune this mount", "path", path) + b.Backend.Logger().Error("cannot tune this mount", "path", path) return handleError(fmt.Errorf("sys: cannot tune 
'%s'", path)) } } mountEntry := b.Core.router.MatchingMountEntry(path) if mountEntry == nil { - b.Backend.Logger().Error("sys: tune failed: no mount entry found", "path", path) + b.Backend.Logger().Error("tune failed: no mount entry found", "path", path) return handleError(fmt.Errorf("sys: tune of path '%s' failed: no mount entry found", path)) } if mountEntry != nil && !mountEntry.Local && repState.HasState(consts.ReplicationPerformanceSecondary) { @@ -1868,7 +1868,7 @@ func (b *SystemBackend) handleTuneWriteCommon(ctx context.Context, path string, // Check again after grabbing the lock mountEntry = b.Core.router.MatchingMountEntry(path) if mountEntry == nil { - b.Backend.Logger().Error("sys: tune failed: no mount entry found", "path", path) + b.Backend.Logger().Error("tune failed: no mount entry found", "path", path) return handleError(fmt.Errorf("sys: tune of path '%s' failed: no mount entry found", path)) } if mountEntry != nil && !mountEntry.Local && repState.HasState(consts.ReplicationPerformanceSecondary) { @@ -1910,7 +1910,7 @@ func (b *SystemBackend) handleTuneWriteCommon(ctx context.Context, path string, newMax != mountEntry.Config.MaxLeaseTTL { if err := b.tuneMountTTLs(ctx, path, mountEntry, newDefault, newMax); err != nil { - b.Backend.Logger().Error("sys: tuning failed", "path", path, "error", err) + b.Backend.Logger().Error("tuning failed", "path", path, "error", err) return handleError(err) } } @@ -2109,7 +2109,7 @@ func (b *SystemBackend) handleLeaseLookup(ctx context.Context, req *logical.Requ leaseTimes, err := b.Core.expiration.FetchLeaseTimes(leaseID) if err != nil { - b.Backend.Logger().Error("sys: error retrieving lease", "lease_id", leaseID, "error", err) + b.Backend.Logger().Error("error retrieving lease", "lease_id", leaseID, "error", err) return handleError(err) } if leaseTimes == nil { @@ -2146,7 +2146,7 @@ func (b *SystemBackend) handleLeaseLookupList(ctx context.Context, req *logical. 
keys, err := b.Core.expiration.idView.List(ctx, prefix) if err != nil { - b.Backend.Logger().Error("sys: error listing leases", "prefix", prefix, "error", err) + b.Backend.Logger().Error("error listing leases", "prefix", prefix, "error", err) return handleErrorNoReadOnlyForward(err) } return logical.ListResponse(keys), nil @@ -2171,7 +2171,7 @@ func (b *SystemBackend) handleRenew(ctx context.Context, req *logical.Request, d // Invoke the expiration manager directly resp, err := b.Core.expiration.Renew(leaseID, increment) if err != nil { - b.Backend.Logger().Error("sys: lease renewal failed", "lease_id", leaseID, "error", err) + b.Backend.Logger().Error("lease renewal failed", "lease_id", leaseID, "error", err) return handleErrorNoReadOnlyForward(err) } return resp, err @@ -2191,7 +2191,7 @@ func (b *SystemBackend) handleRevoke(ctx context.Context, req *logical.Request, // Invoke the expiration manager directly if err := b.Core.expiration.Revoke(leaseID); err != nil { - b.Backend.Logger().Error("sys: lease revocation failed", "lease_id", leaseID, "error", err) + b.Backend.Logger().Error("lease revocation failed", "lease_id", leaseID, "error", err) return handleErrorNoReadOnlyForward(err) } return nil, nil @@ -2221,7 +2221,7 @@ func (b *SystemBackend) handleRevokePrefixCommon( err = b.Core.expiration.RevokePrefix(prefix) } if err != nil { - b.Backend.Logger().Error("sys: revoke prefix failed", "prefix", prefix, "error", err) + b.Backend.Logger().Error("revoke prefix failed", "prefix", prefix, "error", err) return handleErrorNoReadOnlyForward(err) } return nil, nil @@ -2388,7 +2388,7 @@ func (b *SystemBackend) handleEnableAuth(ctx context.Context, req *logical.Reque // Attempt enabling if err := b.Core.enableCredential(ctx, me); err != nil { - b.Backend.Logger().Error("sys: enable auth mount failed", "path", me.Path, "error", err) + b.Backend.Logger().Error("enable auth mount failed", "path", me.Path, "error", err) return handleError(err) } return nil, nil @@ -2416,7 
+2416,7 @@ func (b *SystemBackend) handleDisableAuth(ctx context.Context, req *logical.Requ // Attempt disable if err := b.Core.disableCredential(ctx, path); err != nil { - b.Backend.Logger().Error("sys: disable auth mount failed", "path", path, "error", err) + b.Backend.Logger().Error("disable auth mount failed", "path", path, "error", err) return handleError(err) } return nil, nil @@ -2672,7 +2672,7 @@ func (b *SystemBackend) handleEnableAudit(ctx context.Context, req *logical.Requ // Attempt enabling if err := b.Core.enableAudit(ctx, me); err != nil { - b.Backend.Logger().Error("sys: enable audit mount failed", "path", me.Path, "error", err) + b.Backend.Logger().Error("enable audit mount failed", "path", me.Path, "error", err) return handleError(err) } return nil, nil @@ -2684,7 +2684,7 @@ func (b *SystemBackend) handleDisableAudit(ctx context.Context, req *logical.Req // Attempt disable if existed, err := b.Core.disableAudit(ctx, path); existed && err != nil { - b.Backend.Logger().Error("sys: disable audit mount failed", "path", path, "error", err) + b.Backend.Logger().Error("disable audit mount failed", "path", path, "error", err) return handleError(err) } return nil, nil @@ -2822,22 +2822,22 @@ func (b *SystemBackend) handleRotate(ctx context.Context, req *logical.Request, // Rotate to the new term newTerm, err := b.Core.barrier.Rotate(ctx) if err != nil { - b.Backend.Logger().Error("sys: failed to create new encryption key", "error", err) + b.Backend.Logger().Error("failed to create new encryption key", "error", err) return handleError(err) } - b.Backend.Logger().Info("sys: installed new encryption key") + b.Backend.Logger().Info("installed new encryption key") // In HA mode, we need to an upgrade path for the standby instances if b.Core.ha != nil { // Create the upgrade path to the new term if err := b.Core.barrier.CreateUpgrade(ctx, newTerm); err != nil { - b.Backend.Logger().Error("sys: failed to create new upgrade", "term", newTerm, "error", err) + 
b.Backend.Logger().Error("failed to create new upgrade", "term", newTerm, "error", err) } // Schedule the destroy of the upgrade path time.AfterFunc(keyRotateGracePeriod, func() { if err := b.Core.barrier.DestroyUpgrade(ctx, newTerm); err != nil { - b.Backend.Logger().Error("sys: failed to destroy upgrade", "term", newTerm, "error", err) + b.Backend.Logger().Error("failed to destroy upgrade", "term", newTerm, "error", err) } }) } diff --git a/vault/logical_system_helpers.go b/vault/logical_system_helpers.go index 56052de1f1..b0ba8602bb 100644 --- a/vault/logical_system_helpers.go +++ b/vault/logical_system_helpers.go @@ -48,7 +48,7 @@ func (b *SystemBackend) tuneMountTTLs(ctx context.Context, path string, me *Moun return fmt.Errorf("failed to update mount table, rolling back TTL changes") } if b.Core.logger.IsInfo() { - b.Core.logger.Info("core: mount tuning of leases successful", "path", path) + b.Core.logger.Info("mount tuning of leases successful", "path", path) } return nil diff --git a/vault/logical_system_test.go b/vault/logical_system_test.go index 7c2ad71656..6c6472eec6 100644 --- a/vault/logical_system_test.go +++ b/vault/logical_system_test.go @@ -15,6 +15,7 @@ import ( "time" "github.com/fatih/structs" + hclog "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/audit" "github.com/hashicorp/vault/helper/builtinplugins" "github.com/hashicorp/vault/helper/salt" @@ -1271,7 +1272,7 @@ func TestSystemBackend_revokePrefixAuth(t *testing.T) { MaxLeaseTTLVal: time.Hour * 24 * 32, }, } - b := NewSystemBackend(core) + b := NewSystemBackend(core, hclog.New(&hclog.LoggerOptions{})) err := b.Backend.Setup(context.Background(), bc) if err != nil { t.Fatal(err) @@ -1335,7 +1336,7 @@ func TestSystemBackend_revokePrefixAuth_origUrl(t *testing.T) { MaxLeaseTTLVal: time.Hour * 24 * 32, }, } - b := NewSystemBackend(core) + b := NewSystemBackend(core, hclog.New(&hclog.LoggerOptions{})) err := b.Backend.Setup(context.Background(), bc) if err != nil { t.Fatal(err) @@ 
-1956,7 +1957,7 @@ func testSystemBackendInternal(t *testing.T, c *Core) logical.Backend { }, } - b := NewSystemBackend(c) + b := NewSystemBackend(c, hclog.New(&hclog.LoggerOptions{})) err := b.Backend.Setup(context.Background(), bc) if err != nil { t.Fatal(err) diff --git a/vault/mount.go b/vault/mount.go index 21c0b353a8..80dd331fdd 100644 --- a/vault/mount.go +++ b/vault/mount.go @@ -337,7 +337,7 @@ func (c *Core) mountInternal(ctx context.Context, entry *MountEntry) error { newTable := c.mounts.shallowClone() newTable.Entries = append(newTable.Entries, entry) if err := c.persistMounts(ctx, newTable, entry.Local); err != nil { - c.logger.Error("core: failed to update mount table", "error", err) + c.logger.Error("failed to update mount table", "error", err) return logical.CodedError(500, "failed to update mount table") } c.mounts = newTable @@ -347,7 +347,7 @@ func (c *Core) mountInternal(ctx context.Context, entry *MountEntry) error { } if c.logger.IsInfo() { - c.logger.Info("core: successful mount", "path", entry.Path, "type", entry.Type) + c.logger.Info("successful mount", "path", entry.Path, "type", entry.Type) } return nil } @@ -386,7 +386,7 @@ func (c *Core) unmountInternal(ctx context.Context, path string) error { // Mark the entry as tainted if err := c.taintMountEntry(ctx, path); err != nil { - c.logger.Error("core: failed to taint mount entry for path being unmounted", "error", err, "path", path) + c.logger.Error("failed to taint mount entry for path being unmounted", "error", err, "path", path) return err } @@ -420,19 +420,19 @@ func (c *Core) unmountInternal(ctx context.Context, path string) error { case entry.Local, !c.ReplicationState().HasState(consts.ReplicationPerformanceSecondary): // Have writable storage, remove the whole thing if err := logical.ClearView(ctx, view); err != nil { - c.logger.Error("core: failed to clear view for path being unmounted", "error", err, "path", path) + c.logger.Error("failed to clear view for path being unmounted", 
"error", err, "path", path) return err } } // Remove the mount table entry if err := c.removeMountEntry(ctx, path); err != nil { - c.logger.Error("core: failed to remove mount entry for path being unmounted", "error", err, "path", path) + c.logger.Error("failed to remove mount entry for path being unmounted", "error", err, "path", path) return err } if c.logger.IsInfo() { - c.logger.Info("core: successfully unmounted", "path", path) + c.logger.Info("successfully unmounted", "path", path) } return nil } @@ -446,7 +446,7 @@ func (c *Core) removeMountEntry(ctx context.Context, path string) error { newTable := c.mounts.shallowClone() entry := newTable.remove(path) if entry == nil { - c.logger.Error("core: nil entry found removing entry in mounts table", "path", path) + c.logger.Error("nil entry found removing entry in mounts table", "path", path) return logical.CodedError(500, "failed to remove entry in mounts table") } @@ -458,7 +458,7 @@ func (c *Core) removeMountEntry(ctx context.Context, path string) error { // Update the mount table if err := c.persistMounts(ctx, newTable, entry.Local); err != nil { - c.logger.Error("core: failed to remove entry from mounts table", "error", err) + c.logger.Error("failed to remove entry from mounts table", "error", err) return logical.CodedError(500, "failed to remove entry from mounts table") } @@ -475,13 +475,13 @@ func (c *Core) taintMountEntry(ctx context.Context, path string) error { // we simply use the original entry := c.mounts.setTaint(path, true) if entry == nil { - c.logger.Error("core: nil entry found tainting entry in mounts table", "path", path) + c.logger.Error("nil entry found tainting entry in mounts table", "path", path) return logical.CodedError(500, "failed to taint entry in mounts table") } // Update the mount table if err := c.persistMounts(ctx, c.mounts, entry.Local); err != nil { - c.logger.Error("core: failed to taint entry in mounts table", "error", err) + c.logger.Error("failed to taint entry in mounts 
table", "error", err) return logical.CodedError(500, "failed to taint entry in mounts table") } @@ -566,7 +566,7 @@ func (c *Core) remount(ctx context.Context, src, dst string) error { if entry == nil { c.mountsLock.Unlock() - c.logger.Error("core: failed to find entry in mounts table") + c.logger.Error("failed to find entry in mounts table") return logical.CodedError(500, "failed to find entry in mounts table") } @@ -575,7 +575,7 @@ func (c *Core) remount(ctx context.Context, src, dst string) error { entry.Path = src entry.Tainted = true c.mountsLock.Unlock() - c.logger.Error("core: failed to update mounts table", "error", err) + c.logger.Error("failed to update mounts table", "error", err) return logical.CodedError(500, "failed to update mounts table") } c.mountsLock.Unlock() @@ -591,7 +591,7 @@ func (c *Core) remount(ctx context.Context, src, dst string) error { } if c.logger.IsInfo() { - c.logger.Info("core: successful remount", "old_path", src, "new_path", dst) + c.logger.Info("successful remount", "old_path", src, "new_path", dst) } return nil } @@ -603,12 +603,12 @@ func (c *Core) loadMounts(ctx context.Context) error { // Load the existing mount table raw, err := c.barrier.Get(ctx, coreMountConfigPath) if err != nil { - c.logger.Error("core: failed to read mount table", "error", err) + c.logger.Error("failed to read mount table", "error", err) return errLoadMountsFailed } rawLocal, err := c.barrier.Get(ctx, coreLocalMountConfigPath) if err != nil { - c.logger.Error("core: failed to read local mount table", "error", err) + c.logger.Error("failed to read local mount table", "error", err) return errLoadMountsFailed } @@ -620,7 +620,7 @@ func (c *Core) loadMounts(ctx context.Context) error { // yes, decompress the table and then JSON decode it. If not, // simply JSON decode it. 
if err := jsonutil.DecodeJSON(raw.Value, mountTable); err != nil { - c.logger.Error("core: failed to decompress and/or decode the mount table", "error", err) + c.logger.Error("failed to decompress and/or decode the mount table", "error", err) return err } c.mounts = mountTable @@ -634,7 +634,7 @@ func (c *Core) loadMounts(ctx context.Context) error { if rawLocal != nil { if err := jsonutil.DecodeJSON(rawLocal.Value, localMountTable); err != nil { - c.logger.Error("core: failed to decompress and/or decode the local mount table", "error", err) + c.logger.Error("failed to decompress and/or decode the local mount table", "error", err) return err } if localMountTable != nil && len(localMountTable.Entries) > 0 { @@ -711,7 +711,7 @@ func (c *Core) loadMounts(ctx context.Context) error { } if err := c.persistMounts(ctx, c.mounts, false); err != nil { - c.logger.Error("core: failed to persist mount table", "error", err) + c.logger.Error("failed to persist mount table", "error", err) return errLoadMountsFailed } return nil @@ -720,13 +720,13 @@ func (c *Core) loadMounts(ctx context.Context) error { // persistMounts is used to persist the mount table after modification func (c *Core) persistMounts(ctx context.Context, table *MountTable, localOnly bool) error { if table.Type != mountTableType { - c.logger.Error("core: given table to persist has wrong type", "actual_type", table.Type, "expected_type", mountTableType) + c.logger.Error("given table to persist has wrong type", "actual_type", table.Type, "expected_type", mountTableType) return fmt.Errorf("invalid table type given, not persisting") } for _, entry := range table.Entries { if entry.Table != table.Type { - c.logger.Error("core: given entry to persist in mount table has wrong table value", "path", entry.Path, "entry_table_type", entry.Table, "actual_type", table.Type) + c.logger.Error("given entry to persist in mount table has wrong table value", "path", entry.Path, "entry_table_type", entry.Table, "actual_type", 
table.Type) return fmt.Errorf("invalid mount entry found, not persisting") } } @@ -751,7 +751,7 @@ func (c *Core) persistMounts(ctx context.Context, table *MountTable, localOnly b // Encode the mount table into JSON and compress it (lzw). compressedBytes, err := jsonutil.EncodeJSONAndCompress(nonLocalMounts, nil) if err != nil { - c.logger.Error("core: failed to encode and/or compress the mount table", "error", err) + c.logger.Error("failed to encode and/or compress the mount table", "error", err) return err } @@ -763,7 +763,7 @@ func (c *Core) persistMounts(ctx context.Context, table *MountTable, localOnly b // Write to the physical backend if err := c.barrier.Put(ctx, entry); err != nil { - c.logger.Error("core: failed to persist mount table", "error", err) + c.logger.Error("failed to persist mount table", "error", err) return err } } @@ -771,7 +771,7 @@ func (c *Core) persistMounts(ctx context.Context, table *MountTable, localOnly b // Repeat with local mounts compressedBytes, err := jsonutil.EncodeJSONAndCompress(localMounts, nil) if err != nil { - c.logger.Error("core: failed to encode and/or compress the local mount table", "error", err) + c.logger.Error("failed to encode and/or compress the local mount table", "error", err) return err } @@ -781,7 +781,7 @@ func (c *Core) persistMounts(ctx context.Context, table *MountTable, localOnly b } if err := c.barrier.Put(ctx, entry); err != nil { - c.logger.Error("core: failed to persist local mount table", "error", err) + c.logger.Error("failed to persist local mount table", "error", err) return err } @@ -820,12 +820,12 @@ func (c *Core) setupMounts(ctx context.Context) error { // Create the new backend backend, err = c.newLogicalBackend(ctx, entry, sysView, view) if err != nil { - c.logger.Error("core: failed to create mount entry", "path", entry.Path, "error", err) + c.logger.Error("failed to create mount entry", "path", entry.Path, "error", err) if entry.Type == "plugin" { // If we encounter an error instantiating 
the backend due to an error, // skip backend initialization but register the entry to the mount table // to preserve storage and path. - c.logger.Warn("core: skipping plugin-based mount entry", "path", entry.Path) + c.logger.Warn("skipping plugin-based mount entry", "path", entry.Path) goto ROUTER_MOUNT } return errLoadMountsFailed @@ -846,12 +846,12 @@ func (c *Core) setupMounts(ctx context.Context) error { // Mount the backend err = c.router.Mount(backend, entry.Path, entry, view) if err != nil { - c.logger.Error("core: failed to mount entry", "path", entry.Path, "error", err) + c.logger.Error("failed to mount entry", "path", entry.Path, "error", err) return errLoadMountsFailed } if c.logger.IsInfo() { - c.logger.Info("core: successfully mounted backend", "type", entry.Type, "path", entry.Path) + c.logger.Info("successfully mounted backend", "type", entry.Type, "path", entry.Path) } // Ensure the path is tainted if set in the mount table @@ -906,7 +906,7 @@ func (c *Core) newLogicalBackend(ctx context.Context, entry *MountEntry, sysView config := &logical.BackendConfig{ StorageView: view, - Logger: c.logger, + Logger: c.logger.ResetNamed(fmt.Sprintf("secrets.%s.%s", t, entry.Accessor)), Config: conf, System: sysView, BackendUUID: entry.BackendAwareUUID, diff --git a/vault/plugin_catalog.go b/vault/plugin_catalog.go index fc8e8ca200..28af949915 100644 --- a/vault/plugin_catalog.go +++ b/vault/plugin_catalog.go @@ -40,7 +40,7 @@ func (c *Core) setupPluginCatalog() error { } if c.logger.IsInfo() { - c.logger.Info("core: successfully setup plugin catalog", "plugin-directory", c.pluginDirectory) + c.logger.Info("successfully setup plugin catalog", "plugin-directory", c.pluginDirectory) } return nil diff --git a/vault/plugin_reload.go b/vault/plugin_reload.go index 1d3bdd5729..6636420f2d 100644 --- a/vault/plugin_reload.go +++ b/vault/plugin_reload.go @@ -36,7 +36,7 @@ func (c *Core) reloadMatchingPluginMounts(ctx context.Context, mounts []string) errors = 
multierror.Append(errors, fmt.Errorf("cannot reload plugin on %s: %v", mount, err)) continue } - c.logger.Info("core: successfully reloaded plugin", "plugin", entry.Config.PluginName, "path", entry.Path) + c.logger.Info("successfully reloaded plugin", "plugin", entry.Config.PluginName, "path", entry.Path) } } return errors @@ -56,7 +56,7 @@ func (c *Core) reloadMatchingPlugin(ctx context.Context, pluginName string) erro if err != nil { return err } - c.logger.Info("core: successfully reloaded plugin", "plugin", pluginName, "path", entry.Path) + c.logger.Info("successfully reloaded plugin", "plugin", pluginName, "path", entry.Path) } } @@ -67,7 +67,7 @@ func (c *Core) reloadMatchingPlugin(ctx context.Context, pluginName string) erro if err != nil { return err } - c.logger.Info("core: successfully reloaded plugin", "plugin", pluginName, "path", entry.Path) + c.logger.Info("successfully reloaded plugin", "plugin", pluginName, "path", entry.Path) } } diff --git a/vault/policy_store.go b/vault/policy_store.go index 98646ad6f7..328a24a84d 100644 --- a/vault/policy_store.go +++ b/vault/policy_store.go @@ -9,11 +9,11 @@ import ( "github.com/armon/go-metrics" "github.com/hashicorp/errwrap" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/golang-lru" "github.com/hashicorp/vault/helper/consts" "github.com/hashicorp/vault/helper/strutil" "github.com/hashicorp/vault/logical" - log "github.com/mgutz/logxi/v1" ) const ( @@ -173,7 +173,7 @@ func NewPolicyStore(ctx context.Context, core *Core, baseView *BarrierView, syst keys, err := logical.CollectKeys(ctx, ps.aclView) if err != nil { - ps.logger.Error("policy: error collecting acl policy keys", "error", err) + ps.logger.Error("error collecting acl policy keys", "error", err) return nil } for _, key := range keys { @@ -189,7 +189,7 @@ func NewPolicyStore(ctx context.Context, core *Core, baseView *BarrierView, syst func (c *Core) setupPolicyStore(ctx context.Context) error { // Create the policy store sysView := 
&dynamicSystemView{core: c} - c.policyStore = NewPolicyStore(ctx, c, c.systemBarrierView, sysView, c.logger) + c.policyStore = NewPolicyStore(ctx, c, c.systemBarrierView, sysView, c.logger.ResetNamed("policy")) if c.ReplicationState().HasState(consts.ReplicationPerformanceSecondary) { // Policies will sync from the primary @@ -235,7 +235,7 @@ func (ps *PolicyStore) invalidate(ctx context.Context, name string, policyType P // Force a reload _, err := ps.GetPolicy(ctx, name, policyType) if err != nil { - ps.logger.Error("policy: error fetching policy after invalidation", "name", saneName) + ps.logger.Error("error fetching policy after invalidation", "name", saneName) } } diff --git a/vault/policy_store_test.go b/vault/policy_store_test.go index 6d16029a87..24798e26ae 100644 --- a/vault/policy_store_test.go +++ b/vault/policy_store_test.go @@ -5,15 +5,15 @@ import ( "reflect" "testing" - "github.com/hashicorp/vault/helper/logformat" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/logical" - log "github.com/mgutz/logxi/v1" ) func mockPolicyStore(t *testing.T) *PolicyStore { _, barrier, _ := mockBarrier(t) view := NewBarrierView(barrier, "foo/") - p := NewPolicyStore(context.Background(), nil, view, logical.TestSystemView(), logformat.NewVaultLogger(log.LevelTrace)) + p := NewPolicyStore(context.Background(), nil, view, logical.TestSystemView(), logging.NewVaultLogger(log.Trace)) return p } @@ -22,7 +22,7 @@ func mockPolicyStoreNoCache(t *testing.T) *PolicyStore { sysView.CachingDisabledVal = true _, barrier, _ := mockBarrier(t) view := NewBarrierView(barrier, "foo/") - p := NewPolicyStore(context.Background(), nil, view, sysView, logformat.NewVaultLogger(log.LevelTrace)) + p := NewPolicyStore(context.Background(), nil, view, sysView, logging.NewVaultLogger(log.Trace)) return p } diff --git a/vault/rekey.go b/vault/rekey.go index 6fbb8abaf5..d70119b248 100644 --- a/vault/rekey.go +++ b/vault/rekey.go @@ 
-150,12 +150,12 @@ func (c *Core) BarrierRekeyInit(config *SealConfig) error { } if c.seal.RecoveryKeySupported() && c.seal.RecoveryType() == config.Type { - c.logger.Debug("core: using recovery seal configuration to rekey barrier key") + c.logger.Debug("using recovery seal configuration to rekey barrier key") } // Check if the seal configuration is valid if err := config.Validate(); err != nil { - c.logger.Error("core: invalid rekey seal configuration", "error", err) + c.logger.Error("invalid rekey seal configuration", "error", err) return fmt.Errorf("invalid rekey seal configuration: %v", err) } @@ -188,7 +188,7 @@ func (c *Core) BarrierRekeyInit(config *SealConfig) error { c.barrierRekeyConfig.Nonce = nonce if c.logger.IsInfo() { - c.logger.Info("core: rekey initialized", "nonce", c.barrierRekeyConfig.Nonce, "shares", c.barrierRekeyConfig.SecretShares, "threshold", c.barrierRekeyConfig.SecretThreshold) + c.logger.Info("rekey initialized", "nonce", c.barrierRekeyConfig.Nonce, "shares", c.barrierRekeyConfig.SecretShares, "threshold", c.barrierRekeyConfig.SecretThreshold) } return nil } @@ -201,7 +201,7 @@ func (c *Core) RecoveryRekeyInit(config *SealConfig) error { // Check if the seal configuration is valid if err := config.Validate(); err != nil { - c.logger.Error("core: invalid recovery configuration", "error", err) + c.logger.Error("invalid recovery configuration", "error", err) return fmt.Errorf("invalid recovery configuration: %v", err) } @@ -238,7 +238,7 @@ func (c *Core) RecoveryRekeyInit(config *SealConfig) error { c.recoveryRekeyConfig.Nonce = nonce if c.logger.IsInfo() { - c.logger.Info("core: rekey initialized", "nonce", c.recoveryRekeyConfig.Nonce, "shares", c.recoveryRekeyConfig.SecretShares, "threshold", c.recoveryRekeyConfig.SecretThreshold) + c.logger.Info("rekey initialized", "nonce", c.recoveryRekeyConfig.Nonce, "shares", c.recoveryRekeyConfig.SecretShares, "threshold", c.recoveryRekeyConfig.SecretThreshold) } return nil } @@ -321,7 +321,7 @@ 
func (c *Core) BarrierRekeyUpdate(ctx context.Context, key []byte, nonce string) // Check if we don't have enough keys to unlock if len(c.barrierRekeyProgress) < existingConfig.SecretThreshold { if c.logger.IsDebug() { - c.logger.Debug("core: cannot rekey yet, not enough keys", "keys", len(c.barrierRekeyProgress), "threshold", existingConfig.SecretThreshold) + c.logger.Debug("cannot rekey yet, not enough keys", "keys", len(c.barrierRekeyProgress), "threshold", existingConfig.SecretThreshold) } return nil, nil } @@ -341,12 +341,12 @@ func (c *Core) BarrierRekeyUpdate(ctx context.Context, key []byte, nonce string) if useRecovery { if err := c.seal.VerifyRecoveryKey(ctx, recoveredKey); err != nil { - c.logger.Error("core: rekey aborted, recovery key verification failed", "error", err) + c.logger.Error("rekey aborted, recovery key verification failed", "error", err) return nil, err } } else { if err := c.barrier.VerifyMaster(recoveredKey); err != nil { - c.logger.Error("core: rekey aborted, master key verification failed", "error", err) + c.logger.Error("rekey aborted, master key verification failed", "error", err) return nil, err } } @@ -354,7 +354,7 @@ func (c *Core) BarrierRekeyUpdate(ctx context.Context, key []byte, nonce string) // Generate a new master key newMasterKey, err := c.barrier.GenerateKey() if err != nil { - c.logger.Error("core: failed to generate master key", "error", err) + c.logger.Error("failed to generate master key", "error", err) return nil, fmt.Errorf("master key generation failed: %v", err) } @@ -369,7 +369,7 @@ func (c *Core) BarrierRekeyUpdate(ctx context.Context, key []byte, nonce string) // Split the master key using the Shamir algorithm shares, err := shamir.Split(newMasterKey, c.barrierRekeyConfig.SecretShares, c.barrierRekeyConfig.SecretThreshold) if err != nil { - c.logger.Error("core: failed to generate shares", "error", err) + c.logger.Error("failed to generate shares", "error", err) return nil, fmt.Errorf("failed to generate shares: 
%v", err) } results.SecretShares = shares @@ -414,7 +414,7 @@ func (c *Core) BarrierRekeyUpdate(ctx context.Context, key []byte, nonce string) } buf, err := json.Marshal(backupVals) if err != nil { - c.logger.Error("core: failed to marshal unseal key backup", "error", err) + c.logger.Error("failed to marshal unseal key backup", "error", err) return nil, fmt.Errorf("failed to marshal unseal key backup: %v", err) } pe := &physical.Entry{ @@ -422,7 +422,7 @@ func (c *Core) BarrierRekeyUpdate(ctx context.Context, key []byte, nonce string) Value: buf, } if err = c.physical.Put(ctx, pe); err != nil { - c.logger.Error("core: failed to save unseal key backup", "error", err) + c.logger.Error("failed to save unseal key backup", "error", err) return nil, fmt.Errorf("failed to save unseal key backup: %v", err) } } @@ -430,21 +430,21 @@ func (c *Core) BarrierRekeyUpdate(ctx context.Context, key []byte, nonce string) if keysToStore != nil { if err := c.seal.SetStoredKeys(ctx, keysToStore); err != nil { - c.logger.Error("core: failed to store keys", "error", err) + c.logger.Error("failed to store keys", "error", err) return nil, fmt.Errorf("failed to store keys: %v", err) } } // Rekey the barrier if err := c.barrier.Rekey(ctx, newMasterKey); err != nil { - c.logger.Error("core: failed to rekey barrier", "error", err) + c.logger.Error("failed to rekey barrier", "error", err) return nil, fmt.Errorf("failed to rekey barrier: %v", err) } if c.logger.IsInfo() { - c.logger.Info("core: security barrier rekeyed", "shares", c.barrierRekeyConfig.SecretShares, "threshold", c.barrierRekeyConfig.SecretThreshold) + c.logger.Info("security barrier rekeyed", "shares", c.barrierRekeyConfig.SecretShares, "threshold", c.barrierRekeyConfig.SecretThreshold) } if err := c.seal.SetBarrierConfig(ctx, c.barrierRekeyConfig); err != nil { - c.logger.Error("core: error saving rekey seal configuration", "error", err) + c.logger.Error("error saving rekey seal configuration", "error", err) return nil, 
fmt.Errorf("failed to save rekey seal configuration: %v", err) } @@ -454,7 +454,7 @@ func (c *Core) BarrierRekeyUpdate(ctx context.Context, key []byte, nonce string) Key: coreKeyringCanaryPath, Value: []byte(c.barrierRekeyConfig.Nonce), }); err != nil { - c.logger.Error("core: error saving keyring canary", "error", err) + c.logger.Error("error saving keyring canary", "error", err) return nil, fmt.Errorf("failed to save keyring canary: %v", err) } @@ -522,7 +522,7 @@ func (c *Core) RecoveryRekeyUpdate(ctx context.Context, key []byte, nonce string // Check if we don't have enough keys to unlock if len(c.recoveryRekeyProgress) < existingConfig.SecretThreshold { if c.logger.IsDebug() { - c.logger.Debug("core: cannot rekey yet, not enough keys", "keys", len(c.recoveryRekeyProgress), "threshold", existingConfig.SecretThreshold) + c.logger.Debug("cannot rekey yet, not enough keys", "keys", len(c.recoveryRekeyProgress), "threshold", existingConfig.SecretThreshold) } return nil, nil } @@ -542,14 +542,14 @@ func (c *Core) RecoveryRekeyUpdate(ctx context.Context, key []byte, nonce string // Verify the recovery key if err := c.seal.VerifyRecoveryKey(ctx, recoveryKey); err != nil { - c.logger.Error("core: rekey aborted, recovery key verification failed", "error", err) + c.logger.Error("rekey aborted, recovery key verification failed", "error", err) return nil, err } // Generate a new master key newMasterKey, err := c.barrier.GenerateKey() if err != nil { - c.logger.Error("core: failed to generate recovery key", "error", err) + c.logger.Error("failed to generate recovery key", "error", err) return nil, fmt.Errorf("recovery key generation failed: %v", err) } @@ -564,7 +564,7 @@ func (c *Core) RecoveryRekeyUpdate(ctx context.Context, key []byte, nonce string // Split the master key using the Shamir algorithm shares, err := shamir.Split(newMasterKey, c.recoveryRekeyConfig.SecretShares, c.recoveryRekeyConfig.SecretThreshold) if err != nil { - c.logger.Error("core: failed to generate 
shares", "error", err) + c.logger.Error("failed to generate shares", "error", err) return nil, fmt.Errorf("failed to generate shares: %v", err) } results.SecretShares = shares @@ -597,7 +597,7 @@ func (c *Core) RecoveryRekeyUpdate(ctx context.Context, key []byte, nonce string } buf, err := json.Marshal(backupVals) if err != nil { - c.logger.Error("core: failed to marshal recovery key backup", "error", err) + c.logger.Error("failed to marshal recovery key backup", "error", err) return nil, fmt.Errorf("failed to marshal recovery key backup: %v", err) } pe := &physical.Entry{ @@ -605,19 +605,19 @@ func (c *Core) RecoveryRekeyUpdate(ctx context.Context, key []byte, nonce string Value: buf, } if err = c.physical.Put(ctx, pe); err != nil { - c.logger.Error("core: failed to save unseal key backup", "error", err) + c.logger.Error("failed to save unseal key backup", "error", err) return nil, fmt.Errorf("failed to save unseal key backup: %v", err) } } } if err := c.seal.SetRecoveryKey(ctx, newMasterKey); err != nil { - c.logger.Error("core: failed to set recovery key", "error", err) + c.logger.Error("failed to set recovery key", "error", err) return nil, fmt.Errorf("failed to set recovery key: %v", err) } if err := c.seal.SetRecoveryConfig(ctx, c.recoveryRekeyConfig); err != nil { - c.logger.Error("core: error saving rekey seal configuration", "error", err) + c.logger.Error("error saving rekey seal configuration", "error", err) return nil, fmt.Errorf("failed to save rekey seal configuration: %v", err) } @@ -627,7 +627,7 @@ func (c *Core) RecoveryRekeyUpdate(ctx context.Context, key []byte, nonce string Key: coreKeyringCanaryPath, Value: []byte(c.recoveryRekeyConfig.Nonce), }); err != nil { - c.logger.Error("core: error saving keyring canary", "error", err) + c.logger.Error("error saving keyring canary", "error", err) return nil, fmt.Errorf("failed to save keyring canary: %v", err) } diff --git a/vault/rekey_test.go b/vault/rekey_test.go index a3650f1c2f..3cafdd9697 100644 
--- a/vault/rekey_test.go +++ b/vault/rekey_test.go @@ -6,9 +6,9 @@ import ( "reflect" "testing" - log "github.com/mgutz/logxi/v1" + log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/physical" "github.com/hashicorp/vault/physical/inmem" ) @@ -365,7 +365,7 @@ func testCore_Rekey_Invalid_Common(t *testing.T, c *Core, keys [][]byte, recover func TestCore_Standby_Rekey(t *testing.T) { // Create the first core and initialize it - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Trace) inm, err := inmem.NewInmemHA(nil, logger) if err != nil { diff --git a/vault/request_forwarding.go b/vault/request_forwarding.go index f0f6a4c4bc..472ed6cc40 100644 --- a/vault/request_forwarding.go +++ b/vault/request_forwarding.go @@ -33,8 +33,8 @@ var ( // Starts the listeners and servers necessary to handle forwarded requests func (c *Core) startForwarding(ctx context.Context) error { - c.logger.Trace("core: cluster listener setup function") - defer c.logger.Trace("core: leaving cluster listener setup function") + c.logger.Debug("cluster listener setup function") + defer c.logger.Debug("leaving cluster listener setup function") // Clean up in case we have transitioned from a client to a server c.requestForwardingConnectionLock.Lock() @@ -47,7 +47,7 @@ func (c *Core) startForwarding(ctx context.Context) error { // Get our TLS config tlsConfig, err := c.ClusterTLSConfig(ctx, nil) if err != nil { - c.logger.Error("core: failed to get tls configuration when starting forwarding", "error", err) + c.logger.Error("failed to get tls configuration when starting forwarding", "error", err) return err } @@ -55,7 +55,7 @@ func (c *Core) startForwarding(ctx context.Context) error { tlsConfig.NextProtos = []string{"h2", requestForwardingALPN} if !atomic.CompareAndSwapUint32(c.rpcServerActive, 0, 1) { - c.logger.Warn("core: forwarding rpc server already 
running") + c.logger.Warn("forwarding rpc server already running") return nil } @@ -133,7 +133,7 @@ func (c *Core) startForwarding(ctx context.Context) error { conn, err := tlsLn.Accept() if err != nil { if err, ok := err.(net.Error); ok && !err.Timeout() { - c.logger.Debug("core: non-timeout error accepting on cluster port", "error", err) + c.logger.Debug("non-timeout error accepting on cluster port", "error", err) } if conn != nil { conn.Close() @@ -150,7 +150,7 @@ func (c *Core) startForwarding(ctx context.Context) error { err = tlsConn.Handshake() if err != nil { if c.logger.IsDebug() { - c.logger.Debug("core: error handshaking cluster connection", "error", err) + c.logger.Debug("error handshaking cluster connection", "error", err) } tlsConn.Close() continue @@ -163,7 +163,7 @@ func (c *Core) startForwarding(ctx context.Context) error { continue } - c.logger.Trace("core: got request forwarding connection") + c.logger.Debug("got request forwarding connection") shutdownWg.Add(2) // quitCh is used to close the connection and the second @@ -189,7 +189,7 @@ func (c *Core) startForwarding(ctx context.Context) error { }() default: - c.logger.Debug("core: unknown negotiated protocol on cluster port") + c.logger.Debug("unknown negotiated protocol on cluster port") tlsConn.Close() continue } @@ -208,17 +208,17 @@ func (c *Core) startForwarding(ctx context.Context) error { <-c.clusterListenerShutdownCh // Stop the RPC server - c.logger.Info("core: shutting down forwarding rpc listeners") + c.logger.Info("shutting down forwarding rpc listeners") fwRPCServer.Stop() // Set the shutdown flag. 
This will cause the listeners to shut down // within the deadline in clusterListenerAcceptDeadline atomic.StoreUint32(&shutdown, 1) - c.logger.Info("core: forwarding rpc listeners stopped") + c.logger.Info("forwarding rpc listeners stopped") // Wait for them all to shut down shutdownWg.Wait() - c.logger.Info("core: rpc listeners successfully shut down") + c.logger.Info("rpc listeners successfully shut down") // Clear us up to run this function again atomic.StoreUint32(c.rpcServerActive, 0) @@ -234,8 +234,8 @@ func (c *Core) startForwarding(ctx context.Context) error { // alive and that the current active address value matches the most // recently-known address. func (c *Core) refreshRequestForwardingConnection(ctx context.Context, clusterAddr string) error { - c.logger.Trace("core: refreshing forwarding connection") - defer c.logger.Trace("core: done refreshing forwarding connection") + c.logger.Debug("refreshing forwarding connection") + defer c.logger.Debug("done refreshing forwarding connection") c.requestForwardingConnectionLock.Lock() defer c.requestForwardingConnectionLock.Unlock() @@ -250,7 +250,7 @@ func (c *Core) refreshRequestForwardingConnection(ctx context.Context, clusterAd clusterURL, err := url.Parse(clusterAddr) if err != nil { - c.logger.Error("core: error parsing cluster address attempting to refresh forwarding connection", "error", err) + c.logger.Error("error parsing cluster address attempting to refresh forwarding connection", "error", err) return err } @@ -271,7 +271,7 @@ func (c *Core) refreshRequestForwardingConnection(ctx context.Context, clusterAd )) if err != nil { cancelFunc() - c.logger.Error("core: err setting up forwarding rpc client", "error", err) + c.logger.Error("err setting up forwarding rpc client", "error", err) return err } c.rpcClientConnContext = dctx @@ -288,8 +288,8 @@ func (c *Core) refreshRequestForwardingConnection(ctx context.Context, clusterAd } func (c *Core) clearForwardingClients() { - c.logger.Trace("core: 
clearing forwarding clients") - defer c.logger.Trace("core: done clearing forwarding clients") + c.logger.Debug("clearing forwarding clients") + defer c.logger.Debug("done clearing forwarding clients") if c.rpcClientConnCancelFunc != nil { c.rpcClientConnCancelFunc() @@ -316,16 +316,16 @@ func (c *Core) ForwardRequest(req *http.Request) (int, http.Header, []byte, erro freq, err := forwarding.GenerateForwardedRequest(req) if err != nil { - c.logger.Error("core: error creating forwarding RPC request", "error", err) + c.logger.Error("error creating forwarding RPC request", "error", err) return 0, nil, nil, fmt.Errorf("error creating forwarding RPC request") } if freq == nil { - c.logger.Error("core: got nil forwarding RPC request") + c.logger.Error("got nil forwarding RPC request") return 0, nil, nil, fmt.Errorf("got nil forwarding RPC request") } resp, err := c.rpcForwardingClient.ForwardRequest(c.rpcClientConnContext, freq) if err != nil { - c.logger.Error("core: error during forwarded RPC request", "error", err) + c.logger.Error("error during forwarded RPC request", "error", err) return 0, nil, nil, fmt.Errorf("error during forwarding RPC request") } @@ -347,7 +347,7 @@ func (c *Core) getGRPCDialer(ctx context.Context, alpnProto, serverName string, return func(addr string, timeout time.Duration) (net.Conn, error) { tlsConfig, err := c.ClusterTLSConfig(ctx, repClusters) if err != nil { - c.logger.Error("core: failed to get tls configuration", "error", err) + c.logger.Error("failed to get tls configuration", "error", err) return nil, err } if serverName != "" { @@ -359,7 +359,7 @@ func (c *Core) getGRPCDialer(ctx context.Context, alpnProto, serverName string, tlsConfig.RootCAs = pool tlsConfig.ClientCAs = pool } - c.logger.Trace("core: creating rpc dialer", "host", tlsConfig.ServerName) + c.logger.Debug("creating rpc dialer", "host", tlsConfig.ServerName) tlsConfig.NextProtos = []string{alpnProto} dialer := &net.Dialer{ @@ -375,7 +375,7 @@ type 
forwardedRequestRPCServer struct { } func (s *forwardedRequestRPCServer) ForwardRequest(ctx context.Context, freq *forwarding.Request) (*forwarding.Response, error) { - //s.core.logger.Trace("forwarding: serving rpc forwarded request") + //s.core.logger.Debug("forwarding: serving rpc forwarded request") // Parse an http.Request out of it req, err := forwarding.ParseForwardedRequest(freq) @@ -468,7 +468,7 @@ func (c *forwardingClient) startHeartbeat() { // Store the active node's replication state to display in // sys/health calls atomic.StoreUint32(c.core.activeNodeReplicationState, resp.ReplicationState) - //c.core.logger.Trace("forwarding: successful heartbeat") + //c.core.logger.Debug("forwarding: successful heartbeat") } tick() @@ -477,7 +477,7 @@ func (c *forwardingClient) startHeartbeat() { select { case <-c.echoContext.Done(): c.echoTicker.Stop() - c.core.logger.Trace("forwarding: stopping heartbeating") + c.core.logger.Debug("forwarding: stopping heartbeating") atomic.StoreUint32(c.core.activeNodeReplicationState, uint32(consts.ReplicationUnknown)) return case <-c.echoTicker.C: diff --git a/vault/request_handling.go b/vault/request_handling.go index 8cbc010446..c360778f5a 100644 --- a/vault/request_handling.go +++ b/vault/request_handling.go @@ -107,7 +107,7 @@ func (c *Core) HandleRequest(req *logical.Request) (resp *logical.Response, err httpResp := &logical.HTTPResponse{} err := jsonutil.DecodeJSON(resp.Data[logical.HTTPRawBody].([]byte), httpResp) if err != nil { - c.logger.Error("core: failed to unmarshal wrapped HTTP response for audit logging", "error", err) + c.logger.Error("failed to unmarshal wrapped HTTP response for audit logging", "error", err) return nil, ErrInternalError } @@ -141,7 +141,7 @@ func (c *Core) HandleRequest(req *logical.Request) (resp *logical.Response, err NonHMACRespDataKeys: nonHMACRespDataKeys, } if auditErr := c.auditBroker.LogResponse(ctx, logInput, c.auditedHeaders); auditErr != nil { - c.logger.Error("core: failed to 
audit response", "request_path", req.Path, "error", auditErr) + c.logger.Error("failed to audit response", "request_path", req.Path, "error", auditErr) return nil, ErrInternalError } @@ -168,7 +168,7 @@ func (c *Core) handleRequest(ctx context.Context, req *logical.Request) (retResp var err error te, err = c.tokenStore.UseToken(ctx, te) if err != nil { - c.logger.Error("core: failed to use token", "error", err) + c.logger.Error("failed to use token", "error", err) retErr = multierror.Append(retErr, ErrInternalError) return nil, nil, retErr } @@ -184,7 +184,7 @@ func (c *Core) handleRequest(ctx context.Context, req *logical.Request) (retResp defer func(id string) { err = c.tokenStore.Revoke(ctx, id) if err != nil { - c.logger.Error("core: failed to revoke token", "error", err) + c.logger.Error("failed to revoke token", "error", err) retResp = nil retAuth = nil retErr = multierror.Append(retErr, ErrInternalError) @@ -214,7 +214,7 @@ func (c *Core) handleRequest(ctx context.Context, req *logical.Request) (retResp NonHMACReqDataKeys: nonHMACReqDataKeys, } if err := c.auditBroker.LogRequest(ctx, logInput, c.auditedHeaders); err != nil { - c.logger.Error("core: failed to audit request", "path", req.Path, "error", err) + c.logger.Error("failed to audit request", "path", req.Path, "error", err) } if errType != nil { @@ -236,7 +236,7 @@ func (c *Core) handleRequest(ctx context.Context, req *logical.Request) (retResp NonHMACReqDataKeys: nonHMACReqDataKeys, } if err := c.auditBroker.LogRequest(ctx, logInput, c.auditedHeaders); err != nil { - c.logger.Error("core: failed to audit request", "path", req.Path, "error", err) + c.logger.Error("failed to audit request", "path", req.Path, "error", err) retErr = multierror.Append(retErr, ErrInternalError) return nil, auth, retErr } @@ -293,7 +293,7 @@ func (c *Core) handleRequest(ctx context.Context, req *logical.Request) (retResp // Get the SystemView for the mount sysView := c.router.MatchingSystemView(req.Path) if sysView == nil { 
- c.logger.Error("core: unable to retrieve system view from router") + c.logger.Error("unable to retrieve system view from router") retErr = multierror.Append(retErr, ErrInternalError) return nil, auth, retErr } @@ -315,7 +315,7 @@ func (c *Core) handleRequest(ctx context.Context, req *logical.Request) (retResp matchingMountEntry := c.router.MatchingMountEntry(req.Path) if matchingMountEntry == nil { - c.logger.Error("core: unable to retrieve kv mount entry from router") + c.logger.Error("unable to retrieve kv mount entry from router") retErr = multierror.Append(retErr, ErrInternalError) return nil, auth, retErr } @@ -326,7 +326,7 @@ func (c *Core) handleRequest(ctx context.Context, req *logical.Request) (retResp // backend, and otherwise check the mount entry options. matchingBackend := c.router.MatchingBackend(req.Path) if matchingBackend == nil { - c.logger.Error("core: unable to retrieve kv backend from router") + c.logger.Error("unable to retrieve kv backend from router") retErr = multierror.Append(retErr, ErrInternalError) return nil, auth, retErr } @@ -353,7 +353,7 @@ func (c *Core) handleRequest(ctx context.Context, req *logical.Request) (retResp if registerLease { leaseID, err := c.expiration.Register(req, resp) if err != nil { - c.logger.Error("core: failed to register lease", "request_path", req.Path, "error", err) + c.logger.Error("failed to register lease", "request_path", req.Path, "error", err) retErr = multierror.Append(retErr, ErrInternalError) return nil, auth, retErr } @@ -370,7 +370,7 @@ func (c *Core) handleRequest(ctx context.Context, req *logical.Request) (retResp resp.Auth.GroupAliases != nil { err := c.identityStore.refreshExternalGroupMembershipsByEntityID(resp.Auth.EntityID, resp.Auth.GroupAliases) if err != nil { - c.logger.Error("core: failed to refresh external group memberships", "error", err) + c.logger.Error("failed to refresh external group memberships", "error", err) retErr = multierror.Append(retErr, ErrInternalError) return nil, 
auth, retErr } @@ -381,7 +381,7 @@ func (c *Core) handleRequest(ctx context.Context, req *logical.Request) (retResp // since it does not need to be re-registered if resp != nil && resp.Auth != nil && !strings.HasPrefix(req.Path, "auth/token/renew") { if !strings.HasPrefix(req.Path, "auth/token/") { - c.logger.Error("core: unexpected Auth response for non-token backend", "request_path", req.Path) + c.logger.Error("unexpected Auth response for non-token backend", "request_path", req.Path) retErr = multierror.Append(retErr, ErrInternalError) return nil, auth, retErr } @@ -390,14 +390,14 @@ func (c *Core) handleRequest(ctx context.Context, req *logical.Request) (retResp // here because roles allow suffixes. te, err := c.tokenStore.Lookup(ctx, resp.Auth.ClientToken) if err != nil { - c.logger.Error("core: failed to look up token", "error", err) + c.logger.Error("failed to look up token", "error", err) retErr = multierror.Append(retErr, ErrInternalError) return nil, auth, retErr } if err := c.expiration.RegisterAuth(te.Path, resp.Auth); err != nil { c.tokenStore.Revoke(ctx, te.ID) - c.logger.Error("core: failed to register token lease", "request_path", req.Path, "error", err) + c.logger.Error("failed to register token lease", "request_path", req.Path, "error", err) retErr = multierror.Append(retErr, ErrInternalError) return nil, auth, retErr } @@ -434,14 +434,14 @@ func (c *Core) handleLoginRequest(ctx context.Context, req *logical.Request) (re Request: req, } if err := c.auditBroker.LogRequest(ctx, logInput, c.auditedHeaders); err != nil { - c.logger.Error("core: failed to audit request", "path", req.Path, "error", err) + c.logger.Error("failed to audit request", "path", req.Path, "error", err) return nil, nil, ErrInternalError } // The token store uses authentication even when creating a new token, // so it's handled in handleRequest. It should not be reached here. 
if strings.HasPrefix(req.Path, "auth/token/") { - c.logger.Error("core: unexpected login request for token backend", "request_path", req.Path) + c.logger.Error("unexpected login request for token backend", "request_path", req.Path) return nil, nil, ErrInternalError } @@ -490,7 +490,7 @@ func (c *Core) handleLoginRequest(ctx context.Context, req *logical.Request) (re // A login request should never return a secret! if resp != nil && resp.Secret != nil { - c.logger.Error("core: unexpected Secret response for login path", "request_path", req.Path) + c.logger.Error("unexpected Secret response for login path", "request_path", req.Path) return nil, nil, ErrInternalError } @@ -545,7 +545,7 @@ func (c *Core) handleLoginRequest(ctx context.Context, req *logical.Request) (re sysView := c.router.MatchingSystemView(req.Path) if sysView == nil { - c.logger.Error("core: unable to look up sys view for login path", "request_path", req.Path) + c.logger.Error("unable to look up sys view for login path", "request_path", req.Path) return nil, nil, ErrInternalError } @@ -594,7 +594,7 @@ func (c *Core) handleLoginRequest(ctx context.Context, req *logical.Request) (re } if err := c.tokenStore.create(ctx, &te); err != nil { - c.logger.Error("core: failed to create token", "error", err) + c.logger.Error("failed to create token", "error", err) return nil, auth, ErrInternalError } @@ -607,7 +607,7 @@ func (c *Core) handleLoginRequest(ctx context.Context, req *logical.Request) (re // Register with the expiration manager if err := c.expiration.RegisterAuth(te.Path, auth); err != nil { c.tokenStore.Revoke(ctx, te.ID) - c.logger.Error("core: failed to register token lease", "request_path", req.Path, "error", err) + c.logger.Error("failed to register token lease", "request_path", req.Path, "error", err) return nil, auth, ErrInternalError } diff --git a/vault/rollback.go b/vault/rollback.go index 4c85aaba0e..9954246319 100644 --- a/vault/rollback.go +++ b/vault/rollback.go @@ -6,7 +6,7 @@ import ( 
"sync" "time" - log "github.com/mgutz/logxi/v1" + log "github.com/hashicorp/go-hclog" "github.com/armon/go-metrics" "github.com/hashicorp/vault/logical" @@ -92,7 +92,7 @@ func (m *RollbackManager) Stop() { // run is a long running routine to periodically invoke rollback func (m *RollbackManager) run() { - m.logger.Info("rollback: starting rollback manager") + m.logger.Info("starting rollback manager") tick := time.NewTicker(m.period) defer tick.Stop() defer close(m.doneCh) @@ -102,7 +102,7 @@ func (m *RollbackManager) run() { m.triggerRollbacks() case <-m.shutdownCh: - m.logger.Info("rollback: stopping rollback manager") + m.logger.Info("stopping rollback manager") return } } @@ -150,8 +150,8 @@ func (m *RollbackManager) startRollback(path string) *rollbackState { // attemptRollback invokes a RollbackOperation for the given path func (m *RollbackManager) attemptRollback(ctx context.Context, path string, rs *rollbackState) (err error) { defer metrics.MeasureSince([]string{"rollback", "attempt", strings.Replace(path, "/", "-", -1)}, time.Now()) - if m.logger.IsTrace() { - m.logger.Trace("rollback: attempting rollback", "path", path) + if m.logger.IsDebug() { + m.logger.Debug("attempting rollback", "path", path) } defer func() { @@ -180,7 +180,7 @@ func (m *RollbackManager) attemptRollback(ctx context.Context, path string, rs * err = nil } if err != nil { - m.logger.Error("rollback: error rolling back", "path", path, "error", err) + m.logger.Error("error rolling back", "path", path, "error", err) } return } @@ -229,7 +229,7 @@ func (c *Core) startRollback() error { } return ret } - c.rollback = NewRollbackManager(c.logger, backendsFunc, c.router, c.activeContext) + c.rollback = NewRollbackManager(c.logger.ResetNamed("rollback"), backendsFunc, c.router, c.activeContext) c.rollback.Start() return nil } diff --git a/vault/rollback_test.go b/vault/rollback_test.go index e78740b64a..b30756189e 100644 --- a/vault/rollback_test.go +++ b/vault/rollback_test.go @@ -6,10 +6,10 
@@ import ( "testing" "time" - log "github.com/mgutz/logxi/v1" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-uuid" - "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/helper/logging" ) // mockRollback returns a mock rollback manager @@ -38,7 +38,7 @@ func mockRollback(t *testing.T) (*RollbackManager, *NoopBackend) { return mounts.Entries } - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Trace) rb := NewRollbackManager(logger, mountsFunc, router, context.Background()) rb.period = 10 * time.Millisecond diff --git a/vault/router_test.go b/vault/router_test.go index 2d1fdf705c..b5957ad819 100644 --- a/vault/router_test.go +++ b/vault/router_test.go @@ -3,17 +3,15 @@ package vault import ( "context" "fmt" - "io/ioutil" "reflect" "strings" "sync" "testing" "time" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-uuid" - "github.com/hashicorp/vault/helper/logformat" "github.com/hashicorp/vault/logical" - log "github.com/mgutz/logxi/v1" ) type NoopBackend struct { @@ -84,7 +82,7 @@ func (n *NoopBackend) Setup(ctx context.Context, config *logical.BackendConfig) } func (n *NoopBackend) Logger() log.Logger { - return logformat.NewVaultLoggerWithWriter(ioutil.Discard, log.LevelOff) + return log.NewNullLogger() } func (n *NoopBackend) Initialize(ctx context.Context) error { diff --git a/vault/seal.go b/vault/seal.go index fba2b2777b..98b997dd32 100644 --- a/vault/seal.go +++ b/vault/seal.go @@ -147,13 +147,13 @@ func (d *defaultSeal) BarrierConfig(ctx context.Context) (*SealConfig, error) { // Fetch the core configuration pe, err := d.core.physical.Get(ctx, barrierSealConfigPath) if err != nil { - d.core.logger.Error("core: failed to read seal configuration", "error", err) + d.core.logger.Error("failed to read seal configuration", "error", err) return nil, fmt.Errorf("failed to check seal configuration: %v", err) } // If the seal configuration is missing, we are not initialized 
if pe == nil { - d.core.logger.Info("core: seal configuration missing, not initialized") + d.core.logger.Info("seal configuration missing, not initialized") return nil, nil } @@ -161,7 +161,7 @@ func (d *defaultSeal) BarrierConfig(ctx context.Context) (*SealConfig, error) { // Decode the barrier entry if err := jsonutil.DecodeJSON(pe.Value, &conf); err != nil { - d.core.logger.Error("core: failed to decode seal configuration", "error", err) + d.core.logger.Error("failed to decode seal configuration", "error", err) return nil, fmt.Errorf("failed to decode seal configuration: %v", err) } @@ -171,13 +171,13 @@ func (d *defaultSeal) BarrierConfig(ctx context.Context) (*SealConfig, error) { conf.Type = d.BarrierType() case d.BarrierType(): default: - d.core.logger.Error("core: barrier seal type does not match loaded type", "barrier_seal_type", conf.Type, "loaded_seal_type", d.BarrierType()) + d.core.logger.Error("barrier seal type does not match loaded type", "barrier_seal_type", conf.Type, "loaded_seal_type", d.BarrierType()) return nil, fmt.Errorf("barrier seal type of %s does not match loaded type of %s", conf.Type, d.BarrierType()) } // Check for a valid seal configuration if err := conf.Validate(); err != nil { - d.core.logger.Error("core: invalid seal configuration", "error", err) + d.core.logger.Error("invalid seal configuration", "error", err) return nil, fmt.Errorf("seal validation failed: %v", err) } @@ -212,7 +212,7 @@ func (d *defaultSeal) SetBarrierConfig(ctx context.Context, config *SealConfig) } if err := d.core.physical.Put(ctx, pe); err != nil { - d.core.logger.Error("core: failed to write seal configuration", "error", err) + d.core.logger.Error("failed to write seal configuration", "error", err) return fmt.Errorf("failed to write seal configuration: %v", err) } diff --git a/vault/sealunwrapper.go b/vault/sealunwrapper.go index f635b4c547..5a884ae92a 100644 --- a/vault/sealunwrapper.go +++ b/vault/sealunwrapper.go @@ -11,9 +11,9 @@ import ( 
"sync/atomic" proto "github.com/golang/protobuf/proto" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/helper/locksutil" "github.com/hashicorp/vault/physical" - log "github.com/mgutz/logxi/v1" ) // NewSealUnwrapper creates a new seal unwrapper diff --git a/vault/sealunwrapper_test.go b/vault/sealunwrapper_test.go index e148df2189..60857e64de 100644 --- a/vault/sealunwrapper_test.go +++ b/vault/sealunwrapper_test.go @@ -12,39 +12,38 @@ import ( "testing" proto "github.com/golang/protobuf/proto" - hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/helper/logbridge" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/physical" "github.com/hashicorp/vault/physical/inmem" ) func TestSealUnwrapper(t *testing.T) { - logger := logbridge.NewLogger(hclog.New(&hclog.LoggerOptions{ + logger := log.New(&log.LoggerOptions{ Mutex: &sync.Mutex{}, - })) + }) // Test without transactions - phys, err := inmem.NewInmemHA(nil, logger.LogxiLogger()) + phys, err := inmem.NewInmemHA(nil, logger) if err != nil { t.Fatal(err) } performTestSealUnwrapper(t, phys, logger) // Test with transactions - tPhys, err := inmem.NewTransactionalInmemHA(nil, logger.LogxiLogger()) + tPhys, err := inmem.NewTransactionalInmemHA(nil, logger) if err != nil { t.Fatal(err) } performTestSealUnwrapper(t, tPhys, logger) } -func performTestSealUnwrapper(t *testing.T, phys physical.Backend, logger *logbridge.Logger) { +func performTestSealUnwrapper(t *testing.T, phys physical.Backend, logger log.Logger) { ctx := context.Background() base := &CoreConfig{ Physical: phys, } cluster := NewTestCluster(t, base, &TestClusterOptions{ - RawLogger: logger, + Logger: logger, }) cluster.Start() defer cluster.Cleanup() diff --git a/vault/testing.go b/vault/testing.go index 77168d0de1..e7804621b0 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -25,7 +25,7 @@ import ( "sync" "time" - log "github.com/mgutz/logxi/v1" + log "github.com/hashicorp/go-hclog" 
"github.com/mitchellh/copystructure" "golang.org/x/crypto/ssh" @@ -35,8 +35,7 @@ import ( "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/audit" - "github.com/hashicorp/vault/helper/logbridge" - "github.com/hashicorp/vault/helper/logformat" + "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/helper/reload" "github.com/hashicorp/vault/helper/salt" "github.com/hashicorp/vault/logical" @@ -106,7 +105,7 @@ func TestCoreNewSeal(t testing.T) *Core { // TestCoreWithSeal returns a pure in-memory, uninitialized core with the // specified seal for testing. func TestCoreWithSeal(t testing.T, testSeal Seal, enableRaw bool) *Core { - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Trace) physicalBackend, err := physInmem.NewInmem(nil, logger) if err != nil { t.Fatal(err) @@ -274,7 +273,7 @@ func testCoreUnsealed(t testing.T, core *Core) (*Core, [][]byte, string) { func TestCoreUnsealedBackend(t testing.T, backend physical.Backend) (*Core, [][]byte, string) { t.Helper() - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Trace) conf := testCoreConfig(t, backend, logger) conf.Seal = NewTestSeal(t, nil) @@ -677,7 +676,7 @@ func (n *rawHTTP) System() logical.SystemView { } func (n *rawHTTP) Logger() log.Logger { - return logformat.NewVaultLogger(log.LevelTrace) + return logging.NewVaultLogger(log.Trace) } func (n *rawHTTP) Cleanup(ctx context.Context) { @@ -920,7 +919,7 @@ type TestClusterOptions struct { BaseListenAddress string NumCores int SealFunc func() Seal - RawLogger interface{} + Logger log.Logger TempDir string CACert []byte CAKey *ecdsa.PrivateKey @@ -1107,7 +1106,7 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te // // Listener setup // - logger := logformat.NewVaultLogger(log.LevelTrace) + logger := logging.NewVaultLogger(log.Trace) ports := make([]int, numCores) if baseAddr != nil { for i 
:= 0; i < numCores; i++ { @@ -1273,13 +1272,8 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te coreConfig.Seal = opts.SealFunc() } - if opts != nil && opts.RawLogger != nil { - switch opts.RawLogger.(type) { - case *logbridge.Logger: - coreConfig.Logger = opts.RawLogger.(*logbridge.Logger).Named(fmt.Sprintf("core%d", i)).LogxiLogger() - case *logbridge.LogxiLogger: - coreConfig.Logger = opts.RawLogger.(*logbridge.LogxiLogger).Named(fmt.Sprintf("core%d", i)) - } + if opts != nil && opts.Logger != nil { + coreConfig.Logger = opts.Logger.Named(fmt.Sprintf("core%d", i)) } c, err := NewCore(coreConfig) diff --git a/vault/token_store.go b/vault/token_store.go index 14fcdc53f6..9377d474cf 100644 --- a/vault/token_store.go +++ b/vault/token_store.go @@ -11,7 +11,7 @@ import ( "strings" "time" - log "github.com/mgutz/logxi/v1" + log "github.com/hashicorp/go-hclog" "github.com/armon/go-metrics" "github.com/hashicorp/go-multierror" @@ -107,7 +107,7 @@ type TokenStore struct { // NewTokenStore is used to construct a token store that is // backed by the given barrier view. 
-func NewTokenStore(ctx context.Context, c *Core, config *logical.BackendConfig) (*TokenStore, error) { +func NewTokenStore(ctx context.Context, logger log.Logger, c *Core, config *logical.BackendConfig) (*TokenStore, error) { // Create a sub-view view := c.systemBarrierView.SubView(tokenSubPath) @@ -115,7 +115,7 @@ func NewTokenStore(ctx context.Context, c *Core, config *logical.BackendConfig) t := &TokenStore{ view: view, cubbyholeDestroyer: destroyCubbyhole, - logger: c.logger, + logger: logger, tokenLocks: locksutil.CreateLocks(), saltLock: sync.RWMutex{}, } @@ -481,7 +481,7 @@ func NewTokenStore(ctx context.Context, c *Core, config *logical.BackendConfig) } func (ts *TokenStore) Invalidate(ctx context.Context, key string) { - //ts.logger.Trace("token: invalidating key", "key", key) + //ts.logger.Debug("invalidating key", "key", key) switch key { case tokenSubPath + salt.DefaultLocation: @@ -1334,14 +1334,14 @@ func (ts *TokenStore) handleTidy(ctx context.Context, req *logical.Request, data var tidyErrors *multierror.Error if !atomic.CompareAndSwapInt64(&ts.tidyLock, 0, 1) { - ts.logger.Warn("token: tidy operation on tokens is already in progress") + ts.logger.Warn("tidy operation on tokens is already in progress") return nil, fmt.Errorf("tidy operation on tokens is already in progress") } defer atomic.CompareAndSwapInt64(&ts.tidyLock, 1, 0) - ts.logger.Info("token: beginning tidy operation on tokens") - defer ts.logger.Info("token: finished tidy operation on tokens") + ts.logger.Info("beginning tidy operation on tokens") + defer ts.logger.Info("finished tidy operation on tokens") // List out all the accessors saltedAccessorList, err := ts.view.List(ctx, accessorPrefix) @@ -1375,14 +1375,14 @@ func (ts *TokenStore) handleTidy(ctx context.Context, req *logical.Request, data originalChildrenCount := int64(len(children)) exists, _ := ts.lookupSalted(ctx, strings.TrimSuffix(parent, "/"), true) if exists == nil { - ts.logger.Trace("token: deleting invalid parent 
prefix entry", "index", parentPrefix+parent) + ts.logger.Debug("deleting invalid parent prefix entry", "index", parentPrefix+parent) } var deletedChildrenCount int64 for _, child := range children { countParentList++ if countParentList%500 == 0 { - ts.logger.Info("token: checking validity of tokens in secondary index list", "progress", countParentList) + ts.logger.Info("checking validity of tokens in secondary index list", "progress", countParentList) } // Look up tainted entries so we can be sure that if this isn't @@ -1408,7 +1408,7 @@ func (ts *TokenStore) handleTidy(ctx context.Context, req *logical.Request, data // on with the delete on the secondary index if te == nil || exists == nil { index := parentPrefix + parent + child - ts.logger.Trace("token: deleting invalid secondary index", "index", index) + ts.logger.Debug("deleting invalid secondary index", "index", index) err = ts.view.Delete(ctx, index) if err != nil { tidyErrors = multierror.Append(tidyErrors, fmt.Errorf("failed to delete secondary index: %v", err)) @@ -1438,7 +1438,7 @@ func (ts *TokenStore) handleTidy(ctx context.Context, req *logical.Request, data for _, saltedAccessor := range saltedAccessorList { countAccessorList++ if countAccessorList%500 == 0 { - ts.logger.Info("token: checking if accessors contain valid tokens", "progress", countAccessorList) + ts.logger.Info("checking if accessors contain valid tokens", "progress", countAccessorList) } accessorEntry, err := ts.lookupBySaltedAccessor(ctx, saltedAccessor, true) @@ -1486,7 +1486,7 @@ func (ts *TokenStore) handleTidy(ctx context.Context, req *logical.Request, data // more and conclude that accessor, leases, and secondary index entries // for this token should not exist as well. if te == nil { - ts.logger.Info("token: deleting token with nil entry", "salted_token", saltedId) + ts.logger.Info("deleting token with nil entry", "salted_token", saltedId) // RevokeByToken expects a '*TokenEntry'. 
For the // purposes of tidying, it is sufficient if the token @@ -1519,14 +1519,14 @@ func (ts *TokenStore) handleTidy(ctx context.Context, req *logical.Request, data } } - ts.logger.Debug("token: number of entries scanned in parent prefix", "count", countParentEntries) - ts.logger.Debug("token: number of entries deleted in parent prefix", "count", deletedCountParentEntries) - ts.logger.Debug("token: number of tokens scanned in parent index list", "count", countParentList) - ts.logger.Debug("token: number of tokens revoked in parent index list", "count", deletedCountParentList) - ts.logger.Debug("token: number of accessors scanned", "count", countAccessorList) - ts.logger.Debug("token: number of deleted accessors which had empty tokens", "count", deletedCountAccessorEmptyToken) - ts.logger.Debug("token: number of revoked tokens which were invalid but present in accessors", "count", deletedCountInvalidTokenInAccessor) - ts.logger.Debug("token: number of deleted accessors which had invalid tokens", "count", deletedCountAccessorInvalidToken) + ts.logger.Info("number of entries scanned in parent prefix", "count", countParentEntries) + ts.logger.Info("number of entries deleted in parent prefix", "count", deletedCountParentEntries) + ts.logger.Info("number of tokens scanned in parent index list", "count", countParentList) + ts.logger.Info("number of tokens revoked in parent index list", "count", deletedCountParentList) + ts.logger.Info("number of accessors scanned", "count", countAccessorList) + ts.logger.Info("number of deleted accessors which had empty tokens", "count", deletedCountAccessorEmptyToken) + ts.logger.Info("number of revoked tokens which were invalid but present in accessors", "count", deletedCountInvalidTokenInAccessor) + ts.logger.Info("number of deleted accessors which had invalid tokens", "count", deletedCountAccessorInvalidToken) return nil, tidyErrors.ErrorOrNil() } diff --git a/vault/token_store_test.go b/vault/token_store_test.go index 
dee26268f6..f933caed87 100644 --- a/vault/token_store_test.go +++ b/vault/token_store_test.go @@ -13,6 +13,7 @@ import ( "testing" "time" + hclog "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/helper/locksutil" "github.com/hashicorp/vault/logical" @@ -451,7 +452,7 @@ func TestTokenStore_CreateLookup(t *testing.T) { } // New store should share the salt - ts2, err := NewTokenStore(context.Background(), c, getBackendConfig(c)) + ts2, err := NewTokenStore(context.Background(), hclog.New(&hclog.LoggerOptions{}), c, getBackendConfig(c)) if err != nil { t.Fatalf("err: %v", err) } @@ -494,7 +495,7 @@ func TestTokenStore_CreateLookup_ProvidedID(t *testing.T) { } // New store should share the salt - ts2, err := NewTokenStore(context.Background(), c, getBackendConfig(c)) + ts2, err := NewTokenStore(context.Background(), hclog.New(&hclog.LoggerOptions{}), c, getBackendConfig(c)) if err != nil { t.Fatalf("err: %v", err) } diff --git a/vault/wrapping.go b/vault/wrapping.go index e8daaaa080..fd48d60293 100644 --- a/vault/wrapping.go +++ b/vault/wrapping.go @@ -68,7 +68,7 @@ func (c *Core) ensureWrappingKey(ctx context.Context) error { D: keyParams.D, } - c.logger.Info("core: loaded wrapping token key") + c.logger.Info("loaded wrapping token key") return nil } @@ -111,7 +111,7 @@ func (c *Core) wrapInCubbyhole(ctx context.Context, req *logical.Request, resp * } if err := c.tokenStore.create(ctx, &te); err != nil { - c.logger.Error("core: failed to create wrapping token", "error", err) + c.logger.Error("failed to create wrapping token", "error", err) return nil, ErrInternalError } @@ -151,7 +151,7 @@ func (c *Core) wrapInCubbyhole(ctx context.Context, req *logical.Request, resp * jwt := jws.NewJWT(claims, crypto.SigningMethodES512) serWebToken, err := jwt.Serialize(c.wrappingJWTKey) if err != nil { - c.logger.Error("core: failed to serialize JWT", "error", err) + c.logger.Error("failed to serialize JWT", "error", err) return nil, 
ErrInternalError } resp.WrapInfo.Token = string(serWebToken) @@ -191,7 +191,7 @@ func (c *Core) wrapInCubbyhole(ctx context.Context, req *logical.Request, resp * marshaledResponse, err := json.Marshal(httpResponse) if err != nil { - c.logger.Error("core: failed to marshal wrapped response", "error", err) + c.logger.Error("failed to marshal wrapped response", "error", err) return nil, ErrInternalError } @@ -204,12 +204,12 @@ func (c *Core) wrapInCubbyhole(ctx context.Context, req *logical.Request, resp * if err != nil { // Revoke since it's not yet being tracked for expiration c.tokenStore.Revoke(ctx, te.ID) - c.logger.Error("core: failed to store wrapped response information", "error", err) + c.logger.Error("failed to store wrapped response information", "error", err) return nil, ErrInternalError } if cubbyResp != nil && cubbyResp.IsError() { c.tokenStore.Revoke(ctx, te.ID) - c.logger.Error("core: failed to store wrapped response information", "error", cubbyResp.Data["error"]) + c.logger.Error("failed to store wrapped response information", "error", cubbyResp.Data["error"]) return cubbyResp, nil } @@ -230,12 +230,12 @@ func (c *Core) wrapInCubbyhole(ctx context.Context, req *logical.Request, resp * if err != nil { // Revoke since it's not yet being tracked for expiration c.tokenStore.Revoke(ctx, te.ID) - c.logger.Error("core: failed to store wrapping information", "error", err) + c.logger.Error("failed to store wrapping information", "error", err) return nil, ErrInternalError } if cubbyResp != nil && cubbyResp.IsError() { c.tokenStore.Revoke(ctx, te.ID) - c.logger.Error("core: failed to store wrapping information", "error", cubbyResp.Data["error"]) + c.logger.Error("failed to store wrapping information", "error", cubbyResp.Data["error"]) return cubbyResp, nil } @@ -252,7 +252,7 @@ func (c *Core) wrapInCubbyhole(ctx context.Context, req *logical.Request, resp * if err := c.expiration.RegisterAuth(te.Path, wAuth); err != nil { // Revoke since it's not yet being 
tracked for expiration c.tokenStore.Revoke(ctx, te.ID) - c.logger.Error("core: failed to register cubbyhole wrapping token lease", "request_path", req.Path, "error", err) + c.logger.Error("failed to register cubbyhole wrapping token lease", "request_path", req.Path, "error", err) return nil, ErrInternalError } diff --git a/vendor/github.com/mgutz/logxi/LICENSE b/vendor/github.com/mgutz/logxi/LICENSE deleted file mode 100644 index 7e601d4a94..0000000000 --- a/vendor/github.com/mgutz/logxi/LICENSE +++ /dev/null @@ -1,8 +0,0 @@ -The MIT License (MIT) -Copyright (c) 2016 Mario Gutierrez - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/mgutz/logxi/v1/callstack.go b/vendor/github.com/mgutz/logxi/v1/callstack.go deleted file mode 100644 index 208eb4054e..0000000000 --- a/vendor/github.com/mgutz/logxi/v1/callstack.go +++ /dev/null @@ -1,261 +0,0 @@ -package log - -import ( - "bufio" - "fmt" - "os" - "path/filepath" - "strconv" - "strings" - - "github.com/mgutz/ansi" -) - -type sourceLine struct { - lineno int - line string -} - -type frameInfo struct { - filename string - lineno int - method string - context []*sourceLine - contextLines int -} - -func (ci *frameInfo) readSource(contextLines int) error { - if ci.lineno == 0 || disableCallstack { - return nil - } - start := maxInt(1, ci.lineno-contextLines) - end := ci.lineno + contextLines - - f, err := os.Open(ci.filename) - if err != nil { - // if we can't read a file, it means user is running this in production - disableCallstack = true - return err - } - defer f.Close() - - lineno := 1 - scanner := bufio.NewScanner(f) - for scanner.Scan() { - if start <= lineno && lineno <= end { - line := scanner.Text() - line = expandTabs(line, 4) - ci.context = append(ci.context, &sourceLine{lineno: lineno, line: line}) - } - lineno++ - } - - if err := scanner.Err(); err != nil { - InternalLog.Warn("scanner error", "file", ci.filename, "err", err) - } - return nil -} - -func (ci *frameInfo) String(color string, sourceColor string) string { - buf := pool.Get() - defer pool.Put(buf) - - if disableCallstack { - buf.WriteString(color) - buf.WriteString(Separator) - buf.WriteString(indent) - buf.WriteString(ci.filename) - buf.WriteRune(':') - buf.WriteString(strconv.Itoa(ci.lineno)) - return buf.String() - } - - // skip anything in the logxi package - if isLogxiCode(ci.filename) { - return "" - } - - // make path relative to current working directory or home - tildeFilename, err := filepath.Rel(wd, ci.filename) - if err != nil { - InternalLog.Warn("Could not make path relative", "path", ci.filename) - return "" - } - // ../../../ is 
too complex. Make path relative to home - if strings.HasPrefix(tildeFilename, strings.Repeat(".."+string(os.PathSeparator), 3)) { - tildeFilename = strings.Replace(tildeFilename, home, "~", 1) - } - - buf.WriteString(color) - buf.WriteString(Separator) - buf.WriteString(indent) - buf.WriteString("in ") - buf.WriteString(ci.method) - buf.WriteString("(") - buf.WriteString(tildeFilename) - buf.WriteRune(':') - buf.WriteString(strconv.Itoa(ci.lineno)) - buf.WriteString(")") - - if ci.contextLines == -1 { - return buf.String() - } - buf.WriteString("\n") - - // the width of the printed line number - var linenoWidth int - // trim spaces at start of source code based on common spaces - var skipSpaces = 1000 - - // calculate width of lineno and number of leading spaces that can be - // removed - for _, li := range ci.context { - linenoWidth = maxInt(linenoWidth, len(fmt.Sprintf("%d", li.lineno))) - index := indexOfNonSpace(li.line) - if index > -1 && index < skipSpaces { - skipSpaces = index - } - } - - for _, li := range ci.context { - var format string - format = fmt.Sprintf("%%s%%%dd: %%s\n", linenoWidth) - - if li.lineno == ci.lineno { - buf.WriteString(color) - if ci.contextLines > 2 { - format = fmt.Sprintf("%%s=> %%%dd: %%s\n", linenoWidth) - } - } else { - buf.WriteString(sourceColor) - if ci.contextLines > 2 { - // account for "=> " - format = fmt.Sprintf("%%s%%%dd: %%s\n", linenoWidth+3) - } - } - // trim spaces at start - idx := minInt(len(li.line), skipSpaces) - buf.WriteString(fmt.Sprintf(format, Separator+indent+indent, li.lineno, li.line[idx:])) - } - // get rid of last \n - buf.Truncate(buf.Len() - 1) - if !disableColors { - buf.WriteString(ansi.Reset) - } - return buf.String() -} - -// parseDebugStack parases a stack created by debug.Stack() -// -// This is what the string looks like -// /Users/mgutz/go/src/github.com/mgutz/logxi/v1/jsonFormatter.go:45 (0x5fa70) -// (*JSONFormatter).writeError: jf.writeString(buf, err.Error()+"\n"+string(debug.Stack())) 
-// /Users/mgutz/go/src/github.com/mgutz/logxi/v1/jsonFormatter.go:82 (0x5fdc3) -// (*JSONFormatter).appendValue: jf.writeError(buf, err) -// /Users/mgutz/go/src/github.com/mgutz/logxi/v1/jsonFormatter.go:109 (0x605ca) -// (*JSONFormatter).set: jf.appendValue(buf, val) -// ... -// /Users/mgutz/goroot/src/runtime/asm_amd64.s:2232 (0x38bf1) -// goexit: -func parseDebugStack(stack string, skip int, ignoreRuntime bool) []*frameInfo { - frames := []*frameInfo{} - // BUG temporarily disable since there is a bug with embedded newlines - if true { - return frames - } - - lines := strings.Split(stack, "\n") - - for i := skip * 2; i < len(lines); i += 2 { - ci := &frameInfo{} - sourceLine := lines[i] - if sourceLine == "" { - break - } - if ignoreRuntime && strings.Contains(sourceLine, filepath.Join("src", "runtime")) { - break - } - - colon := strings.Index(sourceLine, ":") - slash := strings.Index(sourceLine, "/") - if colon < slash { - // must be on Windows where paths look like c:/foo/bar.go:lineno - colon = strings.Index(sourceLine[slash:], ":") + slash - } - space := strings.Index(sourceLine, " ") - ci.filename = sourceLine[0:colon] - - // BUG with callstack where the error message has embedded newlines - // if colon > space { - // fmt.Println("lines", lines) - // } - // fmt.Println("SOURCELINE", sourceLine, "len", len(sourceLine), "COLON", colon, "SPACE", space) - numstr := sourceLine[colon+1 : space] - lineno, err := strconv.Atoi(numstr) - if err != nil { - InternalLog.Warn("Could not parse line number", "sourceLine", sourceLine, "numstr", numstr) - continue - } - ci.lineno = lineno - - methodLine := lines[i+1] - colon = strings.Index(methodLine, ":") - ci.method = strings.Trim(methodLine[0:colon], "\t ") - frames = append(frames, ci) - } - return frames -} - -// parseDebugStack parases a stack created by debug.Stack() -// -// This is what the string looks like -// /Users/mgutz/go/src/github.com/mgutz/logxi/v1/jsonFormatter.go:45 (0x5fa70) -// 
(*JSONFormatter).writeError: jf.writeString(buf, err.Error()+"\n"+string(debug.Stack())) -// /Users/mgutz/go/src/github.com/mgutz/logxi/v1/jsonFormatter.go:82 (0x5fdc3) -// (*JSONFormatter).appendValue: jf.writeError(buf, err) -// /Users/mgutz/go/src/github.com/mgutz/logxi/v1/jsonFormatter.go:109 (0x605ca) -// (*JSONFormatter).set: jf.appendValue(buf, val) -// ... -// /Users/mgutz/goroot/src/runtime/asm_amd64.s:2232 (0x38bf1) -// goexit: -func trimDebugStack(stack string) string { - buf := pool.Get() - defer pool.Put(buf) - lines := strings.Split(stack, "\n") - for i := 0; i < len(lines); i += 2 { - sourceLine := lines[i] - if sourceLine == "" { - break - } - - colon := strings.Index(sourceLine, ":") - slash := strings.Index(sourceLine, "/") - if colon < slash { - // must be on Windows where paths look like c:/foo/bar.go:lineno - colon = strings.Index(sourceLine[slash:], ":") + slash - } - filename := sourceLine[0:colon] - // skip anything in the logxi package - if isLogxiCode(filename) { - continue - } - buf.WriteString(sourceLine) - buf.WriteRune('\n') - buf.WriteString(lines[i+1]) - buf.WriteRune('\n') - } - return buf.String() -} - -func parseLogxiStack(entry map[string]interface{}, skip int, ignoreRuntime bool) []*frameInfo { - kv := entry[KeyMap.CallStack] - if kv == nil { - return nil - } - - var frames []*frameInfo - if stack, ok := kv.(string); ok { - frames = parseDebugStack(stack, skip, ignoreRuntime) - } - return frames -} diff --git a/vendor/github.com/mgutz/logxi/v1/concurrentWriter.go b/vendor/github.com/mgutz/logxi/v1/concurrentWriter.go deleted file mode 100644 index 960f97e7c2..0000000000 --- a/vendor/github.com/mgutz/logxi/v1/concurrentWriter.go +++ /dev/null @@ -1,25 +0,0 @@ -package log - -import ( - "io" - "sync" -) - -// ConcurrentWriter is a concurrent safe wrapper around io.Writer -type ConcurrentWriter struct { - writer io.Writer - sync.Mutex -} - -// NewConcurrentWriter crates a new concurrent writer wrapper around existing writer. 
-func NewConcurrentWriter(writer io.Writer) io.Writer { - return &ConcurrentWriter{writer: writer} -} - -func (cw *ConcurrentWriter) Write(p []byte) (n int, err error) { - cw.Lock() - defer cw.Unlock() - // This is basically the same logic as in go's log.Output() which - // doesn't look at the returned number of bytes returned - return cw.writer.Write(p) -} diff --git a/vendor/github.com/mgutz/logxi/v1/defaultLogger.go b/vendor/github.com/mgutz/logxi/v1/defaultLogger.go deleted file mode 100644 index 40fb5132ab..0000000000 --- a/vendor/github.com/mgutz/logxi/v1/defaultLogger.go +++ /dev/null @@ -1,149 +0,0 @@ -package log - -import ( - "fmt" - "io" -) - -// DefaultLogger is the default logger for this package. -type DefaultLogger struct { - writer io.Writer - name string - level int - formatter Formatter -} - -// NewLogger creates a new default logger. If writer is not concurrent -// safe, wrap it with NewConcurrentWriter. -func NewLogger(writer io.Writer, name string) Logger { - formatter, err := createFormatter(name, logxiFormat) - if err != nil { - panic("Could not create formatter") - } - return NewLogger3(writer, name, formatter) -} - -// NewLogger3 creates a new logger with a writer, name and formatter. If writer is not concurrent -// safe, wrap it with NewConcurrentWriter. -func NewLogger3(writer io.Writer, name string, formatter Formatter) Logger { - var level int - if name != "__logxi" { - // if err is returned, then it means the log is disabled - level = getLogLevel(name) - if level == LevelOff { - return NullLog - } - } - - log := &DefaultLogger{ - formatter: formatter, - writer: writer, - name: name, - level: level, - } - - // TODO loggers will be used when watching changes to configuration such - // as in consul, etcd - loggers.Lock() - loggers.loggers[name] = log - loggers.Unlock() - return log -} - -// New creates a colorable default logger. -func New(name string) Logger { - return NewLogger(colorableStdout, name) -} - -// Trace logs a debug entry. 
-func (l *DefaultLogger) Trace(msg string, args ...interface{}) { - l.Log(LevelTrace, msg, args) -} - -// Debug logs a debug entry. -func (l *DefaultLogger) Debug(msg string, args ...interface{}) { - l.Log(LevelDebug, msg, args) -} - -// Info logs an info entry. -func (l *DefaultLogger) Info(msg string, args ...interface{}) { - l.Log(LevelInfo, msg, args) -} - -// Warn logs a warn entry. -func (l *DefaultLogger) Warn(msg string, args ...interface{}) error { - if l.IsWarn() { - defer l.Log(LevelWarn, msg, args) - - for _, arg := range args { - if err, ok := arg.(error); ok { - return err - } - } - - return nil - } - return nil -} - -func (l *DefaultLogger) extractLogError(level int, msg string, args []interface{}) error { - defer l.Log(level, msg, args) - - for _, arg := range args { - if err, ok := arg.(error); ok { - return err - } - } - return fmt.Errorf(msg) -} - -// Error logs an error entry. -func (l *DefaultLogger) Error(msg string, args ...interface{}) error { - return l.extractLogError(LevelError, msg, args) -} - -// Fatal logs a fatal entry then panics. -func (l *DefaultLogger) Fatal(msg string, args ...interface{}) { - l.extractLogError(LevelFatal, msg, args) - defer panic("Exit due to fatal error: ") -} - -// Log logs a leveled entry. -func (l *DefaultLogger) Log(level int, msg string, args []interface{}) { - // log if the log level (warn=4) >= level of message (err=3) - if l.level < level || silent { - return - } - l.formatter.Format(l.writer, level, msg, args) -} - -// IsTrace determines if this logger logs a debug statement. -func (l *DefaultLogger) IsTrace() bool { - // DEBUG(7) >= TRACE(10) - return l.level >= LevelTrace -} - -// IsDebug determines if this logger logs a debug statement. -func (l *DefaultLogger) IsDebug() bool { - return l.level >= LevelDebug -} - -// IsInfo determines if this logger logs an info statement. 
-func (l *DefaultLogger) IsInfo() bool { - return l.level >= LevelInfo -} - -// IsWarn determines if this logger logs a warning statement. -func (l *DefaultLogger) IsWarn() bool { - return l.level >= LevelWarn -} - -// SetLevel sets the level of this logger. -func (l *DefaultLogger) SetLevel(level int) { - l.level = level -} - -// SetFormatter set the formatter for this logger. -func (l *DefaultLogger) SetFormatter(formatter Formatter) { - l.formatter = formatter -} diff --git a/vendor/github.com/mgutz/logxi/v1/env.go b/vendor/github.com/mgutz/logxi/v1/env.go deleted file mode 100644 index c61c452a69..0000000000 --- a/vendor/github.com/mgutz/logxi/v1/env.go +++ /dev/null @@ -1,166 +0,0 @@ -package log - -import ( - "os" - "strconv" - "strings" -) - -var contextLines int - -// Configuration comes from environment or external services like -// consul, etcd. -type Configuration struct { - Format string `json:"format"` - Colors string `json:"colors"` - Levels string `json:"levels"` -} - -func readFromEnviron() *Configuration { - conf := &Configuration{} - - var envOrDefault = func(name, val string) string { - result := os.Getenv(name) - if result == "" { - result = val - } - return result - } - - conf.Levels = envOrDefault("LOGXI", defaultLogxiEnv) - conf.Format = envOrDefault("LOGXI_FORMAT", defaultLogxiFormatEnv) - conf.Colors = envOrDefault("LOGXI_COLORS", defaultLogxiColorsEnv) - return conf -} - -// ProcessEnv (re)processes environment. 
-func ProcessEnv(env *Configuration) { - // TODO: allow reading from etcd - - ProcessLogxiEnv(env.Levels) - ProcessLogxiColorsEnv(env.Colors) - ProcessLogxiFormatEnv(env.Format) -} - -// ProcessLogxiFormatEnv parses LOGXI_FORMAT -func ProcessLogxiFormatEnv(env string) { - logxiFormat = env - m := parseKVList(logxiFormat, ",") - formatterFormat := "" - tFormat := "" - for key, value := range m { - switch key { - default: - formatterFormat = key - case "t": - tFormat = value - case "pretty": - isPretty = value != "false" && value != "0" - case "maxcol": - col, err := strconv.Atoi(value) - if err == nil { - maxCol = col - } else { - maxCol = defaultMaxCol - } - case "context": - lines, err := strconv.Atoi(value) - if err == nil { - contextLines = lines - } else { - contextLines = defaultContextLines - } - case "LTSV": - formatterFormat = "text" - AssignmentChar = ltsvAssignmentChar - Separator = ltsvSeparator - } - } - if formatterFormat == "" || formatterCreators[formatterFormat] == nil { - formatterFormat = defaultFormat - } - logxiFormat = formatterFormat - if tFormat == "" { - tFormat = defaultTimeFormat - } - timeFormat = tFormat -} - -// ProcessLogxiEnv parses LOGXI variable -func ProcessLogxiEnv(env string) { - logxiEnable := env - if logxiEnable == "" { - logxiEnable = defaultLogxiEnv - } - - logxiNameLevelMap = map[string]int{} - m := parseKVList(logxiEnable, ",") - if m == nil { - logxiNameLevelMap["*"] = defaultLevel - } - for key, value := range m { - if strings.HasPrefix(key, "-") { - // LOGXI=*,-foo => disable foo - logxiNameLevelMap[key[1:]] = LevelOff - } else if value == "" { - // LOGXI=* => default to all - logxiNameLevelMap[key] = LevelAll - } else { - // LOGXI=*=ERR => use user-specified level - level := LevelAtoi[value] - if level == 0 { - InternalLog.Error("Unknown level in LOGXI environment variable", "key", key, "value", value, "LOGXI", env) - level = defaultLevel - } - logxiNameLevelMap[key] = level - } - } - - // must always have global 
default, otherwise errs may get eaten up - if _, ok := logxiNameLevelMap["*"]; !ok { - logxiNameLevelMap["*"] = LevelError - } -} - -func getLogLevel(name string) int { - var wildcardLevel int - var result int - - for k, v := range logxiNameLevelMap { - if k == name { - result = v - } else if k == "*" { - wildcardLevel = v - } else if strings.HasPrefix(k, "*") && strings.HasSuffix(name, k[1:]) { - result = v - } else if strings.HasSuffix(k, "*") && strings.HasPrefix(name, k[:len(k)-1]) { - result = v - } - } - - if result == LevelOff { - return LevelOff - } - - if result > 0 { - return result - } - - if wildcardLevel > 0 { - return wildcardLevel - } - - return LevelOff -} - -// ProcessLogxiColorsEnv parases LOGXI_COLORS -func ProcessLogxiColorsEnv(env string) { - colors := env - if colors == "" { - colors = defaultLogxiColorsEnv - } else if colors == "*=off" { - // disable all colors - disableColors = true - } - theme = parseTheme(colors) -} diff --git a/vendor/github.com/mgutz/logxi/v1/formatter.go b/vendor/github.com/mgutz/logxi/v1/formatter.go deleted file mode 100644 index 93573948c1..0000000000 --- a/vendor/github.com/mgutz/logxi/v1/formatter.go +++ /dev/null @@ -1,61 +0,0 @@ -package log - -var formatterCreators = map[string]CreateFormatterFunc{} - -// CreateFormatterFunc is a function which creates a new instance -// of a Formatter. -type CreateFormatterFunc func(name, kind string) (Formatter, error) - -// createFormatter creates formatters. It accepts a kind in {"text", "JSON"} -// which correspond to TextFormatter and JSONFormatter, and the name of the -// logger. 
-func createFormatter(name string, kind string) (Formatter, error) { - if kind == FormatEnv { - kind = logxiFormat - } - if kind == "" { - kind = FormatText - } - - fn := formatterCreators[kind] - if fn == nil { - fn = formatterCreators[FormatText] - } - - formatter, err := fn(name, kind) - if err != nil { - return nil, err - } - // custom formatter may have not returned a formatter - if formatter == nil { - formatter, err = formatFactory(name, FormatText) - } - return formatter, err -} - -func formatFactory(name string, kind string) (Formatter, error) { - var formatter Formatter - var err error - switch kind { - default: - formatter = NewTextFormatter(name) - case FormatHappy: - formatter = NewHappyDevFormatter(name) - case FormatText: - formatter = NewTextFormatter(name) - case FormatJSON: - formatter = NewJSONFormatter(name) - } - return formatter, err -} - -// RegisterFormatFactory registers a format factory function. -func RegisterFormatFactory(kind string, fn CreateFormatterFunc) { - if kind == "" { - panic("kind is empty string") - } - if fn == nil { - panic("creator is nil") - } - formatterCreators[kind] = fn -} diff --git a/vendor/github.com/mgutz/logxi/v1/happyDevFormatter.go b/vendor/github.com/mgutz/logxi/v1/happyDevFormatter.go deleted file mode 100644 index 3931b3691c..0000000000 --- a/vendor/github.com/mgutz/logxi/v1/happyDevFormatter.go +++ /dev/null @@ -1,373 +0,0 @@ -package log - -import ( - "encoding/json" - "fmt" - "io" - "runtime/debug" - "strings" - - "github.com/mgutz/ansi" -) - -// colorScheme defines a color theme for HappyDevFormatter -type colorScheme struct { - Key string - Message string - Value string - Misc string - Source string - - Trace string - Debug string - Info string - Warn string - Error string -} - -var indent = " " -var maxCol = defaultMaxCol -var theme *colorScheme - -func parseKVList(s, separator string) map[string]string { - pairs := strings.Split(s, separator) - if len(pairs) == 0 { - return nil - } - m := 
map[string]string{} - for _, pair := range pairs { - if pair == "" { - continue - } - parts := strings.Split(pair, "=") - switch len(parts) { - case 1: - m[parts[0]] = "" - case 2: - m[parts[0]] = parts[1] - } - } - return m -} - -func parseTheme(theme string) *colorScheme { - m := parseKVList(theme, ",") - cs := &colorScheme{} - var wildcard string - - var color = func(key string) string { - if disableColors { - return "" - } - style := m[key] - c := ansi.ColorCode(style) - if c == "" { - c = wildcard - } - //fmt.Printf("plain=%b [%s] %s=%q\n", ansi.DefaultFG, key, style, c) - - return c - } - wildcard = color("*") - - if wildcard != ansi.Reset { - cs.Key = wildcard - cs.Value = wildcard - cs.Misc = wildcard - cs.Source = wildcard - cs.Message = wildcard - - cs.Trace = wildcard - cs.Debug = wildcard - cs.Warn = wildcard - cs.Info = wildcard - cs.Error = wildcard - } - - cs.Key = color("key") - cs.Value = color("value") - cs.Misc = color("misc") - cs.Source = color("source") - cs.Message = color("message") - - cs.Trace = color("TRC") - cs.Debug = color("DBG") - cs.Warn = color("WRN") - cs.Info = color("INF") - cs.Error = color("ERR") - return cs -} - -// HappyDevFormatter is the formatter used for terminals. It is -// colorful, dev friendly and provides meaningful logs when -// warnings and errors occur. -// -// HappyDevFormatter does not worry about performance. It's at least 3-4X -// slower than JSONFormatter since it delegates to JSONFormatter to marshal -// then unmarshal JSON. Then it does other stuff like read source files, sort -// keys all to give a developer more information. -// -// SHOULD NOT be used in production for extended period of time. However, it -// works fine in SSH terminals and binary deployments. -type HappyDevFormatter struct { - name string - col int - // always use the production formatter - jsonFormatter *JSONFormatter -} - -// NewHappyDevFormatter returns a new instance of HappyDevFormatter. 
-func NewHappyDevFormatter(name string) *HappyDevFormatter { - jf := NewJSONFormatter(name) - return &HappyDevFormatter{ - name: name, - jsonFormatter: jf, - } -} - -func (hd *HappyDevFormatter) writeKey(buf bufferWriter, key string) { - // assumes this is not the first key - hd.writeString(buf, Separator) - if key == "" { - return - } - buf.WriteString(theme.Key) - hd.writeString(buf, key) - hd.writeString(buf, AssignmentChar) - if !disableColors { - buf.WriteString(ansi.Reset) - } -} - -func (hd *HappyDevFormatter) set(buf bufferWriter, key string, value interface{}, color string) { - var str string - if s, ok := value.(string); ok { - str = s - } else if s, ok := value.(fmt.Stringer); ok { - str = s.String() - } else { - str = fmt.Sprintf("%v", value) - } - val := strings.Trim(str, "\n ") - if (isPretty && key != "") || hd.col+len(key)+2+len(val) >= maxCol { - buf.WriteString("\n") - hd.col = 0 - hd.writeString(buf, indent) - } - hd.writeKey(buf, key) - if color != "" { - buf.WriteString(color) - } - hd.writeString(buf, val) - if color != "" && !disableColors { - buf.WriteString(ansi.Reset) - } -} - -// Write a string and tracks the position of the string so we can break lines -// cleanly. 
Do not send ANSI escape sequences, just raw strings -func (hd *HappyDevFormatter) writeString(buf bufferWriter, s string) { - buf.WriteString(s) - hd.col += len(s) -} - -func (hd *HappyDevFormatter) getContext(color string) string { - if disableCallstack { - return "" - } - frames := parseDebugStack(string(debug.Stack()), 5, true) - if len(frames) == 0 { - return "" - } - for _, frame := range frames { - context := frame.String(color, theme.Source) - if context != "" { - return context - } - } - return "" -} - -func (hd *HappyDevFormatter) getLevelContext(level int, entry map[string]interface{}) (message string, context string, color string) { - - switch level { - case LevelTrace: - color = theme.Trace - context = hd.getContext(color) - context += "\n" - case LevelDebug: - color = theme.Debug - case LevelInfo: - color = theme.Info - // case LevelWarn: - // color = theme.Warn - // context = hd.getContext(color) - // context += "\n" - case LevelWarn, LevelError, LevelFatal: - - // warnings return an error but if it does not have an error - // then print line info only - if level == LevelWarn { - color = theme.Warn - kv := entry[KeyMap.CallStack] - if kv == nil { - context = hd.getContext(color) - context += "\n" - break - } - } else { - color = theme.Error - } - - if disableCallstack || contextLines == -1 { - context = trimDebugStack(string(debug.Stack())) - break - } - frames := parseLogxiStack(entry, 4, true) - if frames == nil { - frames = parseDebugStack(string(debug.Stack()), 4, true) - } - - if len(frames) == 0 { - break - } - errbuf := pool.Get() - defer pool.Put(errbuf) - lines := 0 - for _, frame := range frames { - err := frame.readSource(contextLines) - if err != nil { - // by setting to empty, the original stack is used - errbuf.Reset() - break - } - ctx := frame.String(color, theme.Source) - if ctx == "" { - continue - } - errbuf.WriteString(ctx) - errbuf.WriteRune('\n') - lines++ - } - context = errbuf.String() - default: - panic("should never get 
here") - } - return message, context, color -} - -// Format a log entry. -func (hd *HappyDevFormatter) Format(writer io.Writer, level int, msg string, args []interface{}) { - buf := pool.Get() - defer pool.Put(buf) - - if len(args) == 1 { - args = append(args, 0) - copy(args[1:], args[0:]) - args[0] = singleArgKey - } - - // warn about reserved, bad and complex keys - for i := 0; i < len(args); i += 2 { - isReserved, err := isReservedKey(args[i]) - if err != nil { - InternalLog.Error("Key is not a string.", "err", fmt.Errorf("args[%d]=%v", i, args[i])) - } else if isReserved { - InternalLog.Fatal("Key conflicts with reserved key. Avoiding using single rune keys.", "key", args[i].(string)) - } else { - // Ensure keys are simple strings. The JSONFormatter doesn't escape - // keys as a performance tradeoff. This panics if the JSON key - // value has a different value than a simple quoted string. - key := args[i].(string) - b, err := json.Marshal(key) - if err != nil { - panic("Key is invalid. " + err.Error()) - } - if string(b) != `"`+key+`"` { - panic("Key is complex. Use simpler key for: " + fmt.Sprintf("%q", key)) - } - } - } - - // use the production JSON formatter to format the log first. This - // ensures JSON will marshal/unmarshal correctly in production. - entry := hd.jsonFormatter.LogEntry(level, msg, args) - - // reset the column tracker used for fancy formatting - hd.col = 0 - - // timestamp - buf.WriteString(theme.Misc) - hd.writeString(buf, entry[KeyMap.Time].(string)) - if !disableColors { - buf.WriteString(ansi.Reset) - } - - // emphasize warnings and errors - message, context, color := hd.getLevelContext(level, entry) - if message == "" { - message = entry[KeyMap.Message].(string) - } - - // DBG, INF ... 
- hd.set(buf, "", entry[KeyMap.Level].(string), color) - // logger name - hd.set(buf, "", entry[KeyMap.Name], theme.Misc) - // message from user - hd.set(buf, "", message, theme.Message) - - // Preserve key order in the sequencethey were added by developer.This - // makes it easier for developers to follow the log. - order := []string{} - lenArgs := len(args) - for i := 0; i < len(args); i += 2 { - if i+1 >= lenArgs { - continue - } - if key, ok := args[i].(string); ok { - order = append(order, key) - } else { - order = append(order, badKeyAtIndex(i)) - } - } - - for _, key := range order { - // skip reserved keys which were already added to buffer above - isReserved, err := isReservedKey(key) - if err != nil { - panic("key is invalid. Should never get here. " + err.Error()) - } else if isReserved { - continue - } - hd.set(buf, key, entry[key], theme.Value) - } - - addLF := true - hasCallStack := entry[KeyMap.CallStack] != nil - // WRN,ERR file, line number context - - if context != "" { - // warnings and traces are single line, space can be optimized - if level == LevelTrace || (level == LevelWarn && !hasCallStack) { - // gets rid of "in " - idx := strings.IndexRune(context, 'n') - hd.set(buf, "in", context[idx+2:], color) - } else { - buf.WriteRune('\n') - if !disableColors { - buf.WriteString(color) - } - addLF = context[len(context)-1:len(context)] != "\n" - buf.WriteString(context) - if !disableColors { - buf.WriteString(ansi.Reset) - } - } - } else if hasCallStack { - hd.set(buf, "", entry[KeyMap.CallStack], color) - } - if addLF { - buf.WriteRune('\n') - } - buf.WriteTo(writer) -} diff --git a/vendor/github.com/mgutz/logxi/v1/init.go b/vendor/github.com/mgutz/logxi/v1/init.go deleted file mode 100644 index 57c914049c..0000000000 --- a/vendor/github.com/mgutz/logxi/v1/init.go +++ /dev/null @@ -1,200 +0,0 @@ -package log - -import ( - "fmt" - "io" - "os" - "runtime" - "strconv" - "sync" - - "github.com/mattn/go-colorable" - "github.com/mattn/go-isatty" -) - 
-// scream so user fixes it -const warnImbalancedKey = "FIX_IMBALANCED_PAIRS" -const warnImbalancedPairs = warnImbalancedKey + " => " -const singleArgKey = "_" - -func badKeyAtIndex(i int) string { - return "BAD_KEY_AT_INDEX_" + strconv.Itoa(i) -} - -// DefaultLogLog is the default log for this package. -var DefaultLog Logger - -// Suppress supresses logging and is useful to supress output in -// in unit tests. -// -// Example -// log.Suppress(true) -// defer log.suppress(false) -func Suppress(quiet bool) { - silent = quiet -} - -var silent bool - -// internalLog is the logger used by logxi itself -var InternalLog Logger - -type loggerMap struct { - sync.Mutex - loggers map[string]Logger -} - -var loggers = &loggerMap{ - loggers: map[string]Logger{}, -} - -func (lm *loggerMap) set(name string, logger Logger) { - lm.loggers[name] = logger -} - -// The assignment character between key-value pairs -var AssignmentChar = ": " - -// Separator is the separator to use between key value pairs -//var Separator = "{~}" -var Separator = " " - -const ltsvAssignmentChar = ":" -const ltsvSeparator = "\t" - -// logxiEnabledMap maps log name patterns to levels -var logxiNameLevelMap map[string]int - -// logxiFormat is the formatter kind to create -var logxiFormat string - -var colorableStdout io.Writer -var defaultContextLines = 2 -var defaultFormat string -var defaultLevel int -var defaultLogxiEnv string -var defaultLogxiFormatEnv string -var defaultMaxCol = 80 -var defaultPretty = false -var defaultLogxiColorsEnv string -var defaultTimeFormat string -var disableCallstack bool -var disableCheckKeys bool -var disableColors bool -var home string -var isPretty bool -var isTerminal bool -var isWindows = runtime.GOOS == "windows" -var pkgMutex sync.Mutex -var pool = NewBufferPool() -var timeFormat string -var wd string -var pid = os.Getpid() -var pidStr = strconv.Itoa(os.Getpid()) - -// KeyMapping is the key map used to print built-in log entry fields. 
-type KeyMapping struct { - Level string - Message string - Name string - PID string - Time string - CallStack string -} - -// KeyMap is the key map to use when printing log statements. -var KeyMap = &KeyMapping{ - Level: "_l", - Message: "_m", - Name: "_n", - PID: "_p", - Time: "_t", - CallStack: "_c", -} - -var logxiKeys []string - -func setDefaults(isTerminal bool) { - var err error - contextLines = defaultContextLines - wd, err = os.Getwd() - if err != nil { - InternalLog.Error("Could not get working directory") - } - - logxiKeys = []string{KeyMap.Level, KeyMap.Message, KeyMap.Name, KeyMap.Time, KeyMap.CallStack, KeyMap.PID} - - if isTerminal { - defaultLogxiEnv = "*=WRN" - defaultLogxiFormatEnv = "happy,fit,maxcol=80,t=15:04:05.000000,context=-1" - defaultFormat = FormatHappy - defaultLevel = LevelWarn - defaultTimeFormat = "15:04:05.000000" - } else { - defaultLogxiEnv = "*=ERR" - defaultLogxiFormatEnv = "JSON,t=2006-01-02T15:04:05-0700" - defaultFormat = FormatJSON - defaultLevel = LevelError - defaultTimeFormat = "2006-01-02T15:04:05-0700" - disableColors = true - } - - if isWindows { - home = os.Getenv("HOMEPATH") - if os.Getenv("ConEmuANSI") == "ON" { - defaultLogxiColorsEnv = "key=cyan+h,value,misc=blue+h,source=yellow,TRC,DBG,WRN=yellow+h,INF=green+h,ERR=red+h" - } else { - colorableStdout = NewConcurrentWriter(colorable.NewColorableStdout()) - defaultLogxiColorsEnv = "ERR=red,misc=cyan,key=cyan" - } - // DefaultScheme is a color scheme optimized for dark background - // but works well with light backgrounds - } else { - home = os.Getenv("HOME") - term := os.Getenv("TERM") - if term == "xterm-256color" { - defaultLogxiColorsEnv = "key=cyan+h,value,misc=blue,source=88,TRC,DBG,WRN=yellow,INF=green+h,ERR=red+h,message=magenta+h" - } else { - defaultLogxiColorsEnv = "key=cyan+h,value,misc=blue,source=magenta,TRC,DBG,WRN=yellow,INF=green,ERR=red+h" - } - } -} - -func isReservedKey(k interface{}) (bool, error) { - key, ok := k.(string) - if !ok { - return 
false, fmt.Errorf("Key is not a string") - } - - // check if reserved - for _, key2 := range logxiKeys { - if key == key2 { - return true, nil - } - } - return false, nil -} - -func init() { - colorableStdout = NewConcurrentWriter(os.Stdout) - - isTerminal = isatty.IsTerminal(os.Stdout.Fd()) - - // the internal logger to report errors - if isTerminal { - InternalLog = NewLogger3(NewConcurrentWriter(os.Stdout), "__logxi", NewTextFormatter("__logxi")) - } else { - InternalLog = NewLogger3(NewConcurrentWriter(os.Stdout), "__logxi", NewJSONFormatter("__logxi")) - } - InternalLog.SetLevel(LevelError) - - setDefaults(isTerminal) - - RegisterFormatFactory(FormatHappy, formatFactory) - RegisterFormatFactory(FormatText, formatFactory) - RegisterFormatFactory(FormatJSON, formatFactory) - ProcessEnv(readFromEnviron()) - - // package logger for users - DefaultLog = New("~") -} diff --git a/vendor/github.com/mgutz/logxi/v1/jsonFormatter.go b/vendor/github.com/mgutz/logxi/v1/jsonFormatter.go deleted file mode 100644 index b21dd08ca3..0000000000 --- a/vendor/github.com/mgutz/logxi/v1/jsonFormatter.go +++ /dev/null @@ -1,205 +0,0 @@ -package log - -import ( - "encoding/json" - "fmt" - "io" - "reflect" - "runtime/debug" - "strconv" - "time" -) - -type bufferWriter interface { - Write(p []byte) (nn int, err error) - WriteRune(r rune) (n int, err error) - WriteString(s string) (n int, err error) -} - -// JSONFormatter is a fast, efficient JSON formatter optimized for logging. -// -// * log entry keys are not escaped -// Who uses complex keys when coding? Checked by HappyDevFormatter in case user does. -// Nested object keys are escaped by json.Marshal(). -// * Primitive types uses strconv -// * Logger reserved key values (time, log name, level) require no conversion -// * sync.Pool buffer for bytes.Buffer -type JSONFormatter struct { - name string -} - -// NewJSONFormatter creates a new instance of JSONFormatter. 
-func NewJSONFormatter(name string) *JSONFormatter { - return &JSONFormatter{name: name} -} - -func (jf *JSONFormatter) writeString(buf bufferWriter, s string) { - b, err := json.Marshal(s) - if err != nil { - InternalLog.Error("Could not json.Marshal string.", "str", s) - buf.WriteString(`"Could not marshal this key's string"`) - return - } - buf.Write(b) -} - -func (jf *JSONFormatter) writeError(buf bufferWriter, err error) { - jf.writeString(buf, err.Error()) - jf.set(buf, KeyMap.CallStack, string(debug.Stack())) - return -} - -func (jf *JSONFormatter) appendValue(buf bufferWriter, val interface{}) { - if val == nil { - buf.WriteString("null") - return - } - - // always show error stack even at cost of some performance. there's - // nothing worse than looking at production logs without a clue - if err, ok := val.(error); ok { - jf.writeError(buf, err) - return - } - - value := reflect.ValueOf(val) - kind := value.Kind() - if kind == reflect.Ptr { - if value.IsNil() { - buf.WriteString("null") - return - } - value = value.Elem() - kind = value.Kind() - } - switch kind { - case reflect.Bool: - if value.Bool() { - buf.WriteString("true") - } else { - buf.WriteString("false") - } - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - buf.WriteString(strconv.FormatInt(value.Int(), 10)) - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - buf.WriteString(strconv.FormatUint(value.Uint(), 10)) - - case reflect.Float32: - buf.WriteString(strconv.FormatFloat(value.Float(), 'g', -1, 32)) - - case reflect.Float64: - buf.WriteString(strconv.FormatFloat(value.Float(), 'g', -1, 64)) - - default: - var err error - var b []byte - if stringer, ok := val.(fmt.Stringer); ok { - b, err = json.Marshal(stringer.String()) - } else { - b, err = json.Marshal(val) - } - - if err != nil { - InternalLog.Error("Could not json.Marshal value: ", "formatter", "JSONFormatter", "err", err.Error()) - if s, ok := 
val.(string); ok { - b, err = json.Marshal(s) - } else if s, ok := val.(fmt.Stringer); ok { - b, err = json.Marshal(s.String()) - } else { - b, err = json.Marshal(fmt.Sprintf("%#v", val)) - } - - if err != nil { - // should never get here, but JSONFormatter should never panic - msg := "Could not Sprintf value" - InternalLog.Error(msg) - buf.WriteString(`"` + msg + `"`) - return - } - } - buf.Write(b) - } -} - -func (jf *JSONFormatter) set(buf bufferWriter, key string, val interface{}) { - // WARNING: assumes this is not first key - buf.WriteString(`, "`) - buf.WriteString(key) - buf.WriteString(`":`) - jf.appendValue(buf, val) -} - -// Format formats log entry as JSON. -func (jf *JSONFormatter) Format(writer io.Writer, level int, msg string, args []interface{}) { - buf := pool.Get() - defer pool.Put(buf) - - const lead = `", "` - const colon = `":"` - - buf.WriteString(`{"`) - buf.WriteString(KeyMap.Time) - buf.WriteString(`":"`) - buf.WriteString(time.Now().Format(timeFormat)) - - buf.WriteString(`", "`) - buf.WriteString(KeyMap.PID) - buf.WriteString(`":"`) - buf.WriteString(pidStr) - - buf.WriteString(`", "`) - buf.WriteString(KeyMap.Level) - buf.WriteString(`":"`) - buf.WriteString(LevelMap[level]) - - buf.WriteString(`", "`) - buf.WriteString(KeyMap.Name) - buf.WriteString(`":"`) - buf.WriteString(jf.name) - - buf.WriteString(`", "`) - buf.WriteString(KeyMap.Message) - buf.WriteString(`":`) - jf.appendValue(buf, msg) - - var lenArgs = len(args) - if lenArgs > 0 { - if lenArgs == 1 { - jf.set(buf, singleArgKey, args[0]) - } else if lenArgs%2 == 0 { - for i := 0; i < lenArgs; i += 2 { - if key, ok := args[i].(string); ok { - if key == "" { - // show key is invalid - jf.set(buf, badKeyAtIndex(i), args[i+1]) - } else { - jf.set(buf, key, args[i+1]) - } - } else { - // show key is invalid - jf.set(buf, badKeyAtIndex(i), args[i+1]) - } - } - } else { - jf.set(buf, warnImbalancedKey, args) - } - } - buf.WriteString("}\n") - buf.WriteTo(writer) -} - -// LogEntry 
returns the JSON log entry object built by Format(). Used by -// HappyDevFormatter to ensure any data logged while developing properly -// logs in production. -func (jf *JSONFormatter) LogEntry(level int, msg string, args []interface{}) map[string]interface{} { - buf := pool.Get() - defer pool.Put(buf) - jf.Format(buf, level, msg, args) - var entry map[string]interface{} - err := json.Unmarshal(buf.Bytes(), &entry) - if err != nil { - panic("Unable to unmarhsal entry from JSONFormatter: " + err.Error() + " \"" + string(buf.Bytes()) + "\"") - } - return entry -} diff --git a/vendor/github.com/mgutz/logxi/v1/logger.go b/vendor/github.com/mgutz/logxi/v1/logger.go deleted file mode 100644 index 113a38ace2..0000000000 --- a/vendor/github.com/mgutz/logxi/v1/logger.go +++ /dev/null @@ -1,153 +0,0 @@ -package log - -/* -http://en.wikipedia.org/wiki/Syslog - -Code Severity Keyword -0 Emergency emerg (panic) System is unusable. - - A "panic" condition usually affecting multiple apps/servers/sites. At this - level it would usually notify all tech staff on call. - -1 Alert alert Action must be taken immediately. - - Should be corrected immediately, therefore notify staff who can fix the - problem. An example would be the loss of a primary ISP connection. - -2 Critical crit Critical conditions. - - Should be corrected immediately, but indicates failure in a secondary - system, an example is a loss of a backup ISP connection. - -3 Error err (error) Error conditions. - - Non-urgent failures, these should be relayed to developers or admins; each - item must be resolved within a given time. - -4 Warning warning (warn) Warning conditions. - - Warning messages, not an error, but indication that an error will occur if - action is not taken, e.g. file system 85% full - each item must be resolved - within a given time. - -5 Notice notice Normal but significant condition. 
- - Events that are unusual but not error conditions - might be summarized in - an email to developers or admins to spot potential problems - no immediate - action required. - -6 Informational info Informational messages. - - Normal operational messages - may be harvested for reporting, measuring - throughput, etc. - no action required. - -7 Debug debug Debug-level messages. - - Info useful to developers for debugging the application, not useful during operations. -*/ - -const ( - // LevelEnv chooses level from LOGXI environment variable or defaults - // to LevelInfo - LevelEnv = -10000 - - // LevelOff means logging is disabled for logger. This should always - // be first - LevelOff = -1000 - - // LevelEmergency is usually 0 but that is also the "zero" value - // for Go, which means whenever we do any lookup in string -> int - // map 0 is returned (not good). - LevelEmergency = -1 - - // LevelAlert means action must be taken immediately. - LevelAlert = 1 - - // LevelFatal means it should be corrected immediately, eg cannot connect to database. - LevelFatal = 2 - - // LevelCritical is alias for LevelFatal - LevelCritical = 2 - - // LevelError is a non-urgen failure to notify devlopers or admins - LevelError = 3 - - // LevelWarn indiates an error will occur if action is not taken, eg file system 85% full - LevelWarn = 4 - - // LevelNotice is normal but significant condition. - LevelNotice = 5 - - // LevelInfo is info level - LevelInfo = 6 - - // LevelDebug is debug level - LevelDebug = 7 - - // LevelTrace is trace level and displays file and line in terminal - LevelTrace = 10 - - // LevelAll is all levels - LevelAll = 1000 -) - -// FormatHappy uses HappyDevFormatter -const FormatHappy = "happy" - -// FormatText uses TextFormatter -const FormatText = "text" - -// FormatJSON uses JSONFormatter -const FormatJSON = "JSON" - -// FormatEnv selects formatter based on LOGXI_FORMAT environment variable -const FormatEnv = "" - -// LevelMap maps int enums to string level. 
-var LevelMap = map[int]string{ - LevelFatal: "FTL", - LevelError: "ERR", - LevelWarn: "WRN", - LevelInfo: "INF", - LevelDebug: "DBG", - LevelTrace: "TRC", -} - -// LevelMap maps int enums to string level. -var LevelAtoi = map[string]int{ - "OFF": LevelOff, - "FTL": LevelFatal, - "ERR": LevelError, - "WRN": LevelWarn, - "INF": LevelInfo, - "DBG": LevelDebug, - "TRC": LevelTrace, - "ALL": LevelAll, - - "off": LevelOff, - "fatal": LevelFatal, - "error": LevelError, - "warn": LevelWarn, - "info": LevelInfo, - "debug": LevelDebug, - "trace": LevelTrace, - "all": LevelAll, -} - -// Logger is the interface for logging. -type Logger interface { - Trace(msg string, args ...interface{}) - Debug(msg string, args ...interface{}) - Info(msg string, args ...interface{}) - Warn(msg string, args ...interface{}) error - Error(msg string, args ...interface{}) error - Fatal(msg string, args ...interface{}) - Log(level int, msg string, args []interface{}) - - SetLevel(int) - IsTrace() bool - IsDebug() bool - IsInfo() bool - IsWarn() bool - // Error, Fatal not needed, those SHOULD always be logged -} diff --git a/vendor/github.com/mgutz/logxi/v1/methods.go b/vendor/github.com/mgutz/logxi/v1/methods.go deleted file mode 100644 index 7297b90c12..0000000000 --- a/vendor/github.com/mgutz/logxi/v1/methods.go +++ /dev/null @@ -1,51 +0,0 @@ -package log - -// Trace logs a trace statement. On terminals file and line number are logged. -func Trace(msg string, args ...interface{}) { - DefaultLog.Trace(msg, args...) -} - -// Debug logs a debug statement. -func Debug(msg string, args ...interface{}) { - DefaultLog.Debug(msg, args...) -} - -// Info logs an info statement. -func Info(msg string, args ...interface{}) { - DefaultLog.Info(msg, args...) -} - -// Warn logs a warning statement. On terminals it logs file and line number. -func Warn(msg string, args ...interface{}) { - DefaultLog.Warn(msg, args...) -} - -// Error logs an error statement with callstack. 
-func Error(msg string, args ...interface{}) { - DefaultLog.Error(msg, args...) -} - -// Fatal logs a fatal statement. -func Fatal(msg string, args ...interface{}) { - DefaultLog.Fatal(msg, args...) -} - -// IsTrace determines if this logger logs a trace statement. -func IsTrace() bool { - return DefaultLog.IsTrace() -} - -// IsDebug determines if this logger logs a debug statement. -func IsDebug() bool { - return DefaultLog.IsDebug() -} - -// IsInfo determines if this logger logs an info statement. -func IsInfo() bool { - return DefaultLog.IsInfo() -} - -// IsWarn determines if this logger logs a warning statement. -func IsWarn() bool { - return DefaultLog.IsWarn() -} diff --git a/vendor/github.com/mgutz/logxi/v1/nullLogger.go b/vendor/github.com/mgutz/logxi/v1/nullLogger.go deleted file mode 100644 index 8da9187558..0000000000 --- a/vendor/github.com/mgutz/logxi/v1/nullLogger.go +++ /dev/null @@ -1,66 +0,0 @@ -package log - -// NullLog is a noop logger. Think of it as /dev/null. -var NullLog = &NullLogger{} - -// NullLogger is the default logger for this package. -type NullLogger struct{} - -// Trace logs a debug entry. -func (l *NullLogger) Trace(msg string, args ...interface{}) { -} - -// Debug logs a debug entry. -func (l *NullLogger) Debug(msg string, args ...interface{}) { -} - -// Info logs an info entry. -func (l *NullLogger) Info(msg string, args ...interface{}) { -} - -// Warn logs a warn entry. -func (l *NullLogger) Warn(msg string, args ...interface{}) error { - return nil -} - -// Error logs an error entry. -func (l *NullLogger) Error(msg string, args ...interface{}) error { - return nil -} - -// Fatal logs a fatal entry then panics. -func (l *NullLogger) Fatal(msg string, args ...interface{}) { - panic("exit due to fatal error") -} - -// Log logs a leveled entry. -func (l *NullLogger) Log(level int, msg string, args []interface{}) { -} - -// IsTrace determines if this logger logs a trace statement. 
-func (l *NullLogger) IsTrace() bool { - return false -} - -// IsDebug determines if this logger logs a debug statement. -func (l *NullLogger) IsDebug() bool { - return false -} - -// IsInfo determines if this logger logs an info statement. -func (l *NullLogger) IsInfo() bool { - return false -} - -// IsWarn determines if this logger logs a warning statement. -func (l *NullLogger) IsWarn() bool { - return false -} - -// SetLevel sets the level of this logger. -func (l *NullLogger) SetLevel(level int) { -} - -// SetFormatter set the formatter for this logger. -func (l *NullLogger) SetFormatter(formatter Formatter) { -} diff --git a/vendor/github.com/mgutz/logxi/v1/pool.go b/vendor/github.com/mgutz/logxi/v1/pool.go deleted file mode 100644 index 3f06bfedcc..0000000000 --- a/vendor/github.com/mgutz/logxi/v1/pool.go +++ /dev/null @@ -1,29 +0,0 @@ -package log - -import ( - "bytes" - "sync" -) - -type BufferPool struct { - sync.Pool -} - -func NewBufferPool() *BufferPool { - return &BufferPool{ - Pool: sync.Pool{New: func() interface{} { - b := bytes.NewBuffer(make([]byte, 128)) - b.Reset() - return b - }}, - } -} - -func (bp *BufferPool) Get() *bytes.Buffer { - return bp.Pool.Get().(*bytes.Buffer) -} - -func (bp *BufferPool) Put(b *bytes.Buffer) { - b.Reset() - bp.Pool.Put(b) -} diff --git a/vendor/github.com/mgutz/logxi/v1/textFormatter.go b/vendor/github.com/mgutz/logxi/v1/textFormatter.go deleted file mode 100644 index f5be9ad404..0000000000 --- a/vendor/github.com/mgutz/logxi/v1/textFormatter.go +++ /dev/null @@ -1,107 +0,0 @@ -package log - -import ( - "fmt" - "io" - "runtime/debug" - "time" -) - -// Formatter records log entries. -type Formatter interface { - Format(writer io.Writer, level int, msg string, args []interface{}) -} - -// TextFormatter is the default recorder used if one is unspecified when -// creating a new Logger. 
-type TextFormatter struct { - name string - itoaLevelMap map[int]string - timeLabel string -} - -// NewTextFormatter returns a new instance of TextFormatter. SetName -// must be called befored using it. -func NewTextFormatter(name string) *TextFormatter { - timeLabel := KeyMap.Time + AssignmentChar - levelLabel := Separator + KeyMap.Level + AssignmentChar - messageLabel := Separator + KeyMap.Message + AssignmentChar - nameLabel := Separator + KeyMap.Name + AssignmentChar - pidLabel := Separator + KeyMap.PID + AssignmentChar - - var buildKV = func(level string) string { - buf := pool.Get() - defer pool.Put(buf) - - buf.WriteString(pidLabel) - buf.WriteString(pidStr) - - //buf.WriteString(Separator) - buf.WriteString(nameLabel) - buf.WriteString(name) - - //buf.WriteString(Separator) - buf.WriteString(levelLabel) - buf.WriteString(level) - - //buf.WriteString(Separator) - buf.WriteString(messageLabel) - - return buf.String() - } - itoaLevelMap := map[int]string{ - LevelDebug: buildKV(LevelMap[LevelDebug]), - LevelWarn: buildKV(LevelMap[LevelWarn]), - LevelInfo: buildKV(LevelMap[LevelInfo]), - LevelError: buildKV(LevelMap[LevelError]), - LevelFatal: buildKV(LevelMap[LevelFatal]), - } - return &TextFormatter{itoaLevelMap: itoaLevelMap, name: name, timeLabel: timeLabel} -} - -func (tf *TextFormatter) set(buf bufferWriter, key string, val interface{}) { - buf.WriteString(Separator) - buf.WriteString(key) - buf.WriteString(AssignmentChar) - if err, ok := val.(error); ok { - buf.WriteString(err.Error()) - buf.WriteRune('\n') - buf.WriteString(string(debug.Stack())) - return - } - buf.WriteString(fmt.Sprintf("%v", val)) -} - -// Format records a log entry. 
-func (tf *TextFormatter) Format(writer io.Writer, level int, msg string, args []interface{}) { - buf := pool.Get() - defer pool.Put(buf) - buf.WriteString(tf.timeLabel) - buf.WriteString(time.Now().Format(timeFormat)) - buf.WriteString(tf.itoaLevelMap[level]) - buf.WriteString(msg) - var lenArgs = len(args) - if lenArgs > 0 { - if lenArgs == 1 { - tf.set(buf, singleArgKey, args[0]) - } else if lenArgs%2 == 0 { - for i := 0; i < lenArgs; i += 2 { - if key, ok := args[i].(string); ok { - if key == "" { - // show key is invalid - tf.set(buf, badKeyAtIndex(i), args[i+1]) - } else { - tf.set(buf, key, args[i+1]) - } - } else { - // show key is invalid - tf.set(buf, badKeyAtIndex(i), args[i+1]) - } - } - } else { - tf.set(buf, warnImbalancedKey, args) - } - } - buf.WriteRune('\n') - buf.WriteTo(writer) -} diff --git a/vendor/github.com/mgutz/logxi/v1/util.go b/vendor/github.com/mgutz/logxi/v1/util.go deleted file mode 100644 index 22f3130212..0000000000 --- a/vendor/github.com/mgutz/logxi/v1/util.go +++ /dev/null @@ -1,53 +0,0 @@ -package log - -import ( - "path/filepath" - "strings" -) - -func expandTabs(s string, tabLen int) string { - if s == "" { - return s - } - parts := strings.Split(s, "\t") - buf := pool.Get() - defer pool.Put(buf) - for _, part := range parts { - buf.WriteString(part) - buf.WriteString(strings.Repeat(" ", tabLen-len(part)%tabLen)) - } - return buf.String() -} - -func maxInt(a, b int) int { - if a > b { - return a - } - return b -} -func minInt(a, b int) int { - if a < b { - return a - } - return b -} - -func indexOfNonSpace(s string) int { - if s == "" { - return -1 - } - for i, r := range s { - if r != ' ' { - return i - } - } - return -1 -} - -var inLogxiPath = filepath.Join("mgutz", "logxi", "v"+strings.Split(Version, ".")[0]) - -func isLogxiCode(filename string) bool { - // need to see errors in tests - return strings.HasSuffix(filepath.Dir(filename), inLogxiPath) && - !strings.HasSuffix(filename, "_test.go") -} diff --git 
a/vendor/github.com/mgutz/logxi/v1/version.go b/vendor/github.com/mgutz/logxi/v1/version.go deleted file mode 100644 index a7ec7b0e78..0000000000 --- a/vendor/github.com/mgutz/logxi/v1/version.go +++ /dev/null @@ -1,4 +0,0 @@ -package log - -// Version is the version of this package -const Version = "1.0.0-pre" diff --git a/vendor/vendor.json b/vendor/vendor.json index 86627e54ea..749c8f3934 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -1494,12 +1494,6 @@ "revision": "9520e82c474b0a04dd04f8a40959027271bab992", "revisionTime": "2017-02-06T15:57:36Z" }, - { - "checksumSHA1": "DUdWW5gRHBk2zTxGQZoH1KLxFtc=", - "path": "github.com/mgutz/logxi/v1", - "revision": "aebf8a7d67ab4625e0fd4a665766fef9a709161b", - "revisionTime": "2016-10-27T14:08:23Z" - }, { "checksumSHA1": "0sodX5Pa1TxO+QslATEY81VGklc=", "path": "github.com/michaelklishin/rabbit-hole",