diff --git a/physical/alicloudoss/alicloudoss.go b/physical/alicloudoss/alicloudoss.go index c58ece46b9..40f3da6d56 100644 --- a/physical/alicloudoss/alicloudoss.go +++ b/physical/alicloudoss/alicloudoss.go @@ -14,7 +14,6 @@ import ( "github.com/aliyun/aliyun-oss-go-sdk/oss" "github.com/armon/go-metrics" - "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/sdk/physical" ) @@ -92,7 +91,7 @@ func NewAliCloudOSSBackend(conf map[string]string, logger log.Logger) (physical. _, err = bucketObj.ListObjects() if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("unable to access bucket %q at endpoint %q: {{err}}", bucket, endpoint), err) + return nil, fmt.Errorf("unable to access bucket %q at endpoint %q: %w", bucket, endpoint, err) } maxParStr, ok := conf["max_parallel"] @@ -100,7 +99,7 @@ func NewAliCloudOSSBackend(conf map[string]string, logger log.Logger) (physical. if ok { maxParInt, err = strconv.Atoi(maxParStr) if err != nil { - return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err) + return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err) } if logger.IsDebug() { logger.Debug("max_parallel set", "max_parallel", maxParInt) diff --git a/physical/azure/azure.go b/physical/azure/azure.go index 4def98ce99..9b8e92dff8 100644 --- a/physical/azure/azure.go +++ b/physical/azure/azure.go @@ -16,7 +16,6 @@ import ( "github.com/Azure/go-autorest/autorest/adal" "github.com/Azure/go-autorest/autorest/azure" "github.com/armon/go-metrics" - "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/sdk/helper/strutil" "github.com/hashicorp/vault/sdk/physical" @@ -90,16 +89,12 @@ func NewAzureBackend(conf map[string]string, logger log.Logger) (physical.Backen if environmentURL != "" { environment, err = azure.EnvironmentFromURL(environmentURL) if err != nil { - errorMsg := fmt.Sprintf("failed to look up Azure environment descriptor for URL %q: {{err}}", - environmentURL) - return nil, errwrap.Wrapf(errorMsg, err) + return nil, fmt.Errorf("failed to look up Azure environment descriptor for URL %q: %w", environmentURL, err) } } else { environment, err = azure.EnvironmentFromName(environmentName) if err != nil { - errorMsg := fmt.Sprintf("failed to look up Azure environment descriptor for name %q: {{err}}", - environmentName) - return nil, errwrap.Wrapf(errorMsg, err) + return nil, fmt.Errorf("failed to look up Azure environment descriptor for name %q: %w", environmentName, err) } } @@ -107,9 +102,7 @@ func NewAzureBackend(conf map[string]string, logger log.Logger) (physical.Backen if useMSI { authToken, err := getAuthTokenFromIMDS(environment.ResourceIdentifiers.Storage) if err != nil { - errorMsg := fmt.Sprintf("failed to obtain auth token from IMDS %q: {{err}}", - environmentName) - return nil, errwrap.Wrapf(errorMsg, err) + return nil, fmt.Errorf("failed to obtain auth token from IMDS %q: %w", environmentName, err) } credential = azblob.NewTokenCredential(authToken.OAuthToken(), func(c azblob.TokenCredential) time.Duration { @@ -134,14 +127,14 @@ func NewAzureBackend(conf map[string]string, logger log.Logger) (physical.Backen } else { credential, err = azblob.NewSharedKeyCredential(accountName, accountKey) if err != nil { - return nil, errwrap.Wrapf("failed to create Azure client: {{err}}", err) + return nil, fmt.Errorf("failed to create Azure client: %w", err) } } URL, err := url.Parse( fmt.Sprintf("https://%s.blob.%s/%s", accountName, environment.StorageEndpointSuffix, name)) if 
err != nil { - return nil, errwrap.Wrapf("failed to create Azure client: {{err}}", err) + return nil, fmt.Errorf("failed to create Azure client: %w", err) } p := azblob.NewPipeline(credential, azblob.PipelineOptions{}) @@ -158,10 +151,10 @@ func NewAzureBackend(conf map[string]string, logger log.Logger) (physical.Backen case azblob.ServiceCodeContainerNotFound: _, err := containerURL.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone) if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("failed to create %q container: {{err}}", name), err) + return nil, fmt.Errorf("failed to create %q container: %w", name, err) } default: - return nil, errwrap.Wrapf(fmt.Sprintf("failed to get properties for container %q: {{err}}", name), err) + return nil, fmt.Errorf("failed to get properties for container %q: %w", name, err) } } } @@ -171,7 +164,7 @@ func NewAzureBackend(conf map[string]string, logger log.Logger) (physical.Backen if ok { maxParInt, err = strconv.Atoi(maxParStr) if err != nil { - return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err) + return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err) } if logger.IsDebug() { logger.Debug("max_parallel set", "max_parallel", maxParInt) @@ -221,7 +214,7 @@ func (a *AzureBackend) Get(ctx context.Context, key string) (*physical.Entry, er case azblob.ServiceCodeBlobNotFound: return nil, nil default: - return nil, errwrap.Wrapf(fmt.Sprintf("failed to download blob %q: {{err}}", key), err) + return nil, fmt.Errorf("failed to download blob %q: %w", key, err) } } return nil, err @@ -256,7 +249,7 @@ func (a *AzureBackend) Delete(ctx context.Context, key string) error { case azblob.ServiceCodeBlobNotFound: return nil default: - return errwrap.Wrapf(fmt.Sprintf("failed to delete blob %q: {{err}}", key), err) + return fmt.Errorf("failed to delete blob %q: %w", key, err) } } } diff --git a/physical/cassandra/cassandra.go b/physical/cassandra/cassandra.go index 93c5721ebd..f20b992055 100644 --- a/physical/cassandra/cassandra.go +++ b/physical/cassandra/cassandra.go @@ -12,7 +12,6 @@ import ( metrics "github.com/armon/go-metrics" "github.com/gocql/gocql" - "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/physical" @@ -169,11 +168,11 @@ func setupCassandraTLS(conf map[string]string, cluster *gocql.ClusterConfig) err if pemBundlePath, ok := conf["pem_bundle_file"]; ok { pemBundleData, err := ioutil.ReadFile(pemBundlePath) if err != nil { - return errwrap.Wrapf(fmt.Sprintf("error reading pem bundle from %q: {{err}}", pemBundlePath), err) + return fmt.Errorf("error reading pem bundle from %q: %w", pemBundlePath, err) } pemBundle, err := certutil.ParsePEMBundle(string(pemBundleData)) if err != nil { - return errwrap.Wrapf("error parsing 'pem_bundle': {{err}}", err) + return fmt.Errorf("error parsing 'pem_bundle': %w", err) } tlsConfig, err = pemBundle.GetTLSConfig(certutil.TLSClient) if err != nil { @@ -182,7 +181,7 @@ func setupCassandraTLS(conf map[string]string, cluster *gocql.ClusterConfig) err } else if pemJSONPath, ok := conf["pem_json_file"]; ok { pemJSONData, err := ioutil.ReadFile(pemJSONPath) if err != nil { - return errwrap.Wrapf(fmt.Sprintf("error reading json bundle from %q: {{err}}", pemJSONPath), err) + return fmt.Errorf("error reading json bundle from %q: %w", pemJSONPath, err) } pemJSON, err := certutil.ParsePKIJSON([]byte(pemJSONData)) if err != nil { diff --git a/physical/cockroachdb/cockroachdb.go 
b/physical/cockroachdb/cockroachdb.go index 587146f2a5..ee91dbfb40 100644 --- a/physical/cockroachdb/cockroachdb.go +++ b/physical/cockroachdb/cockroachdb.go @@ -12,7 +12,6 @@ import ( metrics "github.com/armon/go-metrics" "github.com/cockroachdb/cockroach-go/crdb" - "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-multierror" "github.com/hashicorp/vault/sdk/helper/strutil" @@ -59,7 +58,7 @@ func NewCockroachDBBackend(conf map[string]string, logger log.Logger) (physical. err := validateDBTable(dbTable) if err != nil { - return nil, errwrap.Wrapf("invalid table: {{err}}", err) + return nil, fmt.Errorf("invalid table: %w", err) } maxParStr, ok := conf["max_parallel"] @@ -67,7 +66,7 @@ func NewCockroachDBBackend(conf map[string]string, logger log.Logger) (physical. if ok { maxParInt, err = strconv.Atoi(maxParStr) if err != nil { - return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err) + return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err) } if logger.IsDebug() { logger.Debug("max_parallel set", "max_parallel", maxParInt) @@ -77,14 +76,14 @@ func NewCockroachDBBackend(conf map[string]string, logger log.Logger) (physical. // Create CockroachDB handle for the database. db, err := sql.Open("postgres", connURL) if err != nil { - return nil, errwrap.Wrapf("failed to connect to cockroachdb: {{err}}", err) + return nil, fmt.Errorf("failed to connect to cockroachdb: %w", err) } // Create the required table if it doesn't exists. createQuery := "CREATE TABLE IF NOT EXISTS " + dbTable + " (path STRING, value BYTES, PRIMARY KEY (path))" if _, err := db.Exec(createQuery); err != nil { - return nil, errwrap.Wrapf("failed to create mysql table: {{err}}", err) + return nil, fmt.Errorf("failed to create mysql table: %w", err) } // Setup the backend @@ -117,7 +116,7 @@ func NewCockroachDBBackend(conf map[string]string, logger log.Logger) (physical. 
func (c *CockroachDBBackend) prepare(name, query string) error { stmt, err := c.client.Prepare(query) if err != nil { - return errwrap.Wrapf(fmt.Sprintf("failed to prepare %q: {{err}}", name), err) + return fmt.Errorf("failed to prepare %q: %w", name, err) } c.statements[name] = stmt return nil @@ -194,7 +193,7 @@ func (c *CockroachDBBackend) List(ctx context.Context, prefix string) ([]string, var key string err = rows.Scan(&key) if err != nil { - return nil, errwrap.Wrapf("failed to scan rows: {{err}}", err) + return nil, fmt.Errorf("failed to scan rows: %w", err) } key = strings.TrimPrefix(key, prefix) diff --git a/physical/consul/consul.go b/physical/consul/consul.go index ae0703e4ad..814a341117 100644 --- a/physical/consul/consul.go +++ b/physical/consul/consul.go @@ -11,7 +11,6 @@ import ( "github.com/armon/go-metrics" "github.com/hashicorp/consul/api" - "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-multierror" "github.com/hashicorp/vault/sdk/helper/consts" @@ -81,7 +80,7 @@ func NewConsulBackend(conf map[string]string, logger log.Logger) (physical.Backe if ok { _, err := parseutil.ParseDurationSecond(sessionTTLStr) if err != nil { - return nil, errwrap.Wrapf("invalid session_ttl: {{err}}", err) + return nil, fmt.Errorf("invalid session_ttl: %w", err) } sessionTTL = sessionTTLStr if logger.IsDebug() { @@ -94,7 +93,7 @@ func NewConsulBackend(conf map[string]string, logger log.Logger) (physical.Backe if ok { d, err := parseutil.ParseDurationSecond(lockWaitTimeRaw) if err != nil { - return nil, errwrap.Wrapf("invalid lock_wait_time: {{err}}", err) + return nil, fmt.Errorf("invalid lock_wait_time: %w", err) } lockWaitTime = d if logger.IsDebug() { @@ -107,7 +106,7 @@ func NewConsulBackend(conf map[string]string, logger log.Logger) (physical.Backe if ok { maxParInt, err := strconv.Atoi(maxParStr) if err != nil { - return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err) + return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err) } if logger.IsDebug() { logger.Debug("max_parallel set", "max_parallel", maxParInt) @@ -135,7 +134,7 @@ func NewConsulBackend(conf map[string]string, logger log.Logger) (physical.Backe consulConf.HttpClient = &http.Client{Transport: consulConf.Transport} client, err := api.NewClient(consulConf) if err != nil { - return nil, errwrap.Wrapf("client setup failed: {{err}}", err) + return nil, fmt.Errorf("client setup failed: %w", err) } // Setup the backend @@ -249,7 +248,7 @@ func (c *ConsulBackend) Transaction(ctx context.Context, txns []*physical.TxnEnt ok, resp, _, err := c.kv.Txn(ops, queryOpts) if err != nil { if strings.Contains(err.Error(), "is too large") { - return errwrap.Wrapf(fmt.Sprintf("%s: {{err}}", physical.ErrValueTooLarge), err) + return fmt.Errorf("%s: %w", physical.ErrValueTooLarge, err) } return err } @@ -283,7 +282,7 @@ func (c *ConsulBackend) Put(ctx context.Context, entry *physical.Entry) error { _, err := c.kv.Put(pair, writeOpts) if err != nil { if strings.Contains(err.Error(), "Value exceeds") { - return errwrap.Wrapf(fmt.Sprintf("%s: {{err}}", physical.ErrValueTooLarge), err) + return fmt.Errorf("%s: %w", physical.ErrValueTooLarge, err) } return err } @@ -372,7 +371,7 @@ func (c *ConsulBackend) LockWith(key, value string) (physical.Lock, error) { } lock, err := c.client.LockOpts(opts) if err != nil { - return nil, errwrap.Wrapf("failed to create lock: {{err}}", err) + return nil, fmt.Errorf("failed to create lock: %w", err) } cl := &ConsulLock{ client: 
c.client, diff --git a/physical/couchdb/couchdb.go b/physical/couchdb/couchdb.go index f5bf6b77f0..86fc139ed9 100644 --- a/physical/couchdb/couchdb.go +++ b/physical/couchdb/couchdb.go @@ -14,7 +14,6 @@ import ( "time" metrics "github.com/armon/go-metrics" - "github.com/hashicorp/errwrap" cleanhttp "github.com/hashicorp/go-cleanhttp" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/sdk/physical" @@ -179,7 +178,7 @@ func buildCouchDBBackend(conf map[string]string, logger log.Logger) (*CouchDBBac if ok { maxParInt, err = strconv.Atoi(maxParStr) if err != nil { - return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err) + return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err) } if logger.IsDebug() { logger.Debug("max_parallel set", "max_parallel", maxParInt) diff --git a/physical/dynamodb/dynamodb.go b/physical/dynamodb/dynamodb.go index 3f79ef781c..44e3ac0309 100644 --- a/physical/dynamodb/dynamodb.go +++ b/physical/dynamodb/dynamodb.go @@ -22,7 +22,6 @@ import ( "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/dynamodb" "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute" - "github.com/hashicorp/errwrap" cleanhttp "github.com/hashicorp/go-cleanhttp" uuid "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/sdk/helper/awsutil" @@ -213,7 +212,7 @@ func NewDynamoDBBackend(conf map[string]string, logger log.Logger) (physical.Bac awsSession, err := session.NewSession(awsConf) if err != nil { - return nil, errwrap.Wrapf("Could not establish AWS session: {{err}}", err) + return nil, fmt.Errorf("Could not establish AWS session: %w", err) } client := dynamodb.New(awsSession) @@ -233,7 +232,7 @@ func NewDynamoDBBackend(conf map[string]string, logger log.Logger) (physical.Bac if ok { maxParInt, err = strconv.Atoi(maxParStr) if err != nil { - return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err) + return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err) } if logger.IsDebug() { logger.Debug("max_parallel set", "max_parallel", maxParInt) @@ -260,7 +259,7 @@ func (d *DynamoDBBackend) Put(ctx context.Context, entry *physical.Entry) error } item, err := dynamodbattribute.MarshalMap(record) if err != nil { - return errwrap.Wrapf("could not convert prefix record to DynamoDB item: {{err}}", err) + return fmt.Errorf("could not convert prefix record to DynamoDB item: %w", err) } requests := []*dynamodb.WriteRequest{{ PutRequest: &dynamodb.PutRequest{ @@ -275,7 +274,7 @@ func (d *DynamoDBBackend) Put(ctx context.Context, entry *physical.Entry) error } item, err := dynamodbattribute.MarshalMap(record) if err != nil { - return errwrap.Wrapf("could not convert prefix record to DynamoDB item: {{err}}", err) + return fmt.Errorf("could not convert prefix record to DynamoDB item: %w", err) } requests = append(requests, &dynamodb.WriteRequest{ PutRequest: &dynamodb.PutRequest{ diff --git a/physical/etcd/etcd.go b/physical/etcd/etcd.go index c624a69886..ca573c44fd 100644 --- a/physical/etcd/etcd.go +++ b/physical/etcd/etcd.go @@ -3,12 +3,12 @@ package etcd import ( "context" "errors" + "fmt" "net/url" "os" "strings" "github.com/coreos/go-semver/semver" - "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/sdk/physical" "go.etcd.io/etcd/client" @@ -137,7 +137,7 @@ func getEtcdEndpoints(conf map[string]string) ([]string, error) { discoverer := client.NewSRVDiscover() endpoints, err := discoverer.Discover(domain, srvName) if err != nil { - 
return nil, errwrap.Wrapf("failed to discover etcd endpoints through SRV discovery: {{err}}", err) + return nil, fmt.Errorf("failed to discover etcd endpoints through SRV discovery: %w", err) } return endpoints, nil } diff --git a/physical/etcd/etcd3.go b/physical/etcd/etcd3.go index 942eaa5c93..91350d0724 100644 --- a/physical/etcd/etcd3.go +++ b/physical/etcd/etcd3.go @@ -12,7 +12,6 @@ import ( "time" metrics "github.com/armon/go-metrics" - "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/sdk/helper/parseutil" "github.com/hashicorp/vault/sdk/helper/strutil" @@ -115,7 +114,7 @@ func newEtcd3Backend(conf map[string]string, logger log.Logger) (physical.Backen // grpc converts this to uint32 internally, so parse as that to avoid passing invalid values val, err := strconv.ParseUint(maxReceive, 10, 32) if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("value of 'max_receive_size' (%v) could not be understood: {{err}}", maxReceive), err) + return nil, fmt.Errorf("value of 'max_receive_size' (%v) could not be understood: %w", maxReceive, err) } cfg.MaxCallRecvMsgSize = int(val) } @@ -133,7 +132,7 @@ func newEtcd3Backend(conf map[string]string, logger log.Logger) (physical.Backen } reqTimeout, err := parseutil.ParseDurationSecond(sReqTimeout) if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("value [%v] of 'request_timeout' could not be understood: {{err}}", sReqTimeout), err) + return nil, fmt.Errorf("value [%v] of 'request_timeout' could not be understood: %w", sReqTimeout, err) } ssync, ok := conf["sync"] @@ -142,7 +141,7 @@ func newEtcd3Backend(conf map[string]string, logger log.Logger) (physical.Backen } sync, err := strconv.ParseBool(ssync) if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("value of 'sync' (%v) could not be understood: {{err}}", ssync), err) + return nil, fmt.Errorf("value of 'sync' (%v) could not be understood: %w", ssync, err) } if sync { @@ -161,7 +160,7 @@ func newEtcd3Backend(conf map[string]string, logger log.Logger) (physical.Backen } lock, err := parseutil.ParseDurationSecond(sLock) if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("value [%v] of 'lock_timeout' could not be understood: {{err}}", sLock), err) + return nil, fmt.Errorf("value [%v] of 'lock_timeout' could not be understood: %w", sLock, err) } return &EtcdBackend{ diff --git a/physical/foundationdb/foundationdb.go b/physical/foundationdb/foundationdb.go index ad43a487d5..442c0bed2c 100644 --- a/physical/foundationdb/foundationdb.go +++ b/physical/foundationdb/foundationdb.go @@ -21,7 +21,6 @@ import ( "github.com/apple/foundationdb/bindings/go/src/fdb/tuple" metrics "github.com/armon/go-metrics" - "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/sdk/physical" ) @@ -165,7 +164,7 @@ func NewFDBBackend(conf map[string]string, logger log.Logger) (physical.Backend, fdbApiVersionInt, err := strconv.Atoi(fdbApiVersionStr) if err != nil { - return nil, errwrap.Wrapf("failed to parse fdb_api_version parameter: {{err}}", err) + return nil, fmt.Errorf("failed to parse fdb_api_version parameter: %w", err) } // Check requested FDB API version against minimum required API version @@ -186,18 +185,18 @@ func NewFDBBackend(conf map[string]string, logger log.Logger) (physical.Backend, if ok { haEnabled, err = strconv.ParseBool(haEnabledStr) if err != nil { - return nil, errwrap.Wrapf("failed to parse ha_enabled parameter: {{err}}", err) + return nil, fmt.Errorf("failed to parse ha_enabled parameter: %w", err) } } instanceUUID, err := 
uuid.GenerateUUID() if err != nil { - return nil, errwrap.Wrapf("could not generate instance UUID: {{err}}", err) + return nil, fmt.Errorf("could not generate instance UUID: %w", err) } logger.Debug("Instance UUID", "uuid", instanceUUID) if err := fdb.APIVersion(fdbApiVersionInt); err != nil { - return nil, errwrap.Wrapf("failed to set FDB API version: {{err}}", err) + return nil, fmt.Errorf("failed to set FDB API version: %w", err) } if tlsEnabled { @@ -207,39 +206,39 @@ func NewFDBBackend(conf map[string]string, logger log.Logger) (physical.Backend, if ok { err := opts.SetTLSPassword(tlsPassword) if err != nil { - return nil, errwrap.Wrapf("failed to set TLS password: {{err}}", err) + return nil, fmt.Errorf("failed to set TLS password: %w", err) } } err := opts.SetTLSCaPath(tlsCAFile) if err != nil { - return nil, errwrap.Wrapf("failed to set TLS CA bundle path: {{err}}", err) + return nil, fmt.Errorf("failed to set TLS CA bundle path: %w", err) } err = opts.SetTLSCertPath(tlsCertFile) if err != nil { - return nil, errwrap.Wrapf("failed to set TLS certificate path: {{err}}", err) + return nil, fmt.Errorf("failed to set TLS certificate path: %w", err) } err = opts.SetTLSKeyPath(tlsKeyFile) if err != nil { - return nil, errwrap.Wrapf("failed to set TLS key path: {{err}}", err) + return nil, fmt.Errorf("failed to set TLS key path: %w", err) } err = opts.SetTLSVerifyPeers([]byte(tlsVerifyPeers)) if err != nil { - return nil, errwrap.Wrapf("failed to set TLS peer verification criteria: {{err}}", err) + return nil, fmt.Errorf("failed to set TLS peer verification criteria: %w", err) } } db, err := fdb.Open(fdbClusterFile, []byte("DB")) if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("failed to open database with cluster file '%s': {{err}}", fdbClusterFile), err) + return nil, fmt.Errorf("failed to open database with cluster file '%s': %w", fdbClusterFile, err) } topDir, err := directory.CreateOrOpen(db, dirPath, nil) if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("failed to create/open top-level directory '%s': {{err}}", path), err) + return nil, fmt.Errorf("failed to create/open top-level directory '%s': %w", path, err) } // Setup the backend @@ -262,7 +261,7 @@ func (f *FDBBackend) incDirsRefcount(tr fdb.Transaction, path string) error { for i := len(pathElements) - 1; i != 0; i-- { dPath, err := decoratePath(strings.Join(pathElements[:i], "/") + "/") if err != nil { - return errwrap.Wrapf("error incrementing directories refcount: {{err}}", err) + return fmt.Errorf("error incrementing directories refcount: %w", err) } // Atomic +1 @@ -287,7 +286,7 @@ func (f *FDBBackend) decDirsRefcount(tr fdb.Transaction, path string) error { for i := len(pathElements) - 1; i != 0; i-- { dPath, err := decoratePath(strings.Join(pathElements[:i], "/") + "/") if err != nil { - return errwrap.Wrapf("error decrementing directories refcount: {{err}}", err) + return fmt.Errorf("error decrementing directories refcount: %w", err) } metaFKey := fdb.Key(concat(f.metaKeysSpace.Bytes(), dPath...)) @@ -306,7 +305,7 @@ func (f *FDBBackend) decDirsRefcount(tr fdb.Transaction, path string) error { for _, todo := range dirsTodo { value, err := todo.future.Get() if err != nil { - return errwrap.Wrapf("error getting directory refcount while decrementing: {{err}}", err) + return fmt.Errorf("error getting directory refcount while decrementing: %w", err) } // The directory entry does not exist; this is not expected @@ -317,7 +316,7 @@ func (f *FDBBackend) decDirsRefcount(tr fdb.Transaction, path string) error { 
var count int64 err = binary.Read(bytes.NewReader(value), binary.LittleEndian, &count) if err != nil { - return errwrap.Wrapf("error reading directory refcount while decrementing: {{err}}", err) + return fmt.Errorf("error reading directory refcount while decrementing: %w", err) } if count > 1 { @@ -346,7 +345,7 @@ func (f *FDBBackend) internalPut(tr fdb.Transaction, decoratedPath []byte, path value, err := metaFuture.Get() if err != nil { - return errwrap.Wrapf("Put error while getting meta key: {{err}}", err) + return fmt.Errorf("Put error while getting meta key: %w", err) } if value == nil { @@ -366,7 +365,7 @@ func (f *FDBBackend) internalClear(tr fdb.Transaction, decoratedPath []byte, pat value, err := tr.Get(metaFKey).Get() if err != nil { - return errwrap.Wrapf("Delete error while getting meta key: {{err}}", err) + return fmt.Errorf("Delete error while getting meta key: %w", err) } if value != nil { @@ -399,7 +398,7 @@ func (f *FDBBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry) decoratedPath, err := decoratePath(op.Entry.Key) if err != nil { - return errwrap.Wrapf(fmt.Sprintf("could not build decorated path for transaction item %s: {{err}}", op.Entry.Key), err) + return fmt.Errorf("could not build decorated path for transaction item %s: %w", op.Entry.Key, err) } todo[i] = &TxnTodo{ @@ -419,14 +418,14 @@ func (f *FDBBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry) } if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("operation %s failed for transaction item %s: {{err}}", txnTodo.op.Operation, txnTodo.op.Entry.Key), err) + return nil, fmt.Errorf("operation %s failed for transaction item %s: %w", txnTodo.op.Operation, txnTodo.op.Entry.Key, err) } } return nil, nil }) if err != nil { - return errwrap.Wrapf("transaction failed: {{err}}", err) + return fmt.Errorf("transaction failed: %w", err) } return nil @@ -438,7 +437,7 @@ func (f *FDBBackend) Put(ctx context.Context, entry *physical.Entry) error { decoratedPath, err := decoratePath(entry.Key) if err != nil { - return errwrap.Wrapf(fmt.Sprintf("could not build decorated path to put item %s: {{err}}", entry.Key), err) + return fmt.Errorf("could not build decorated path to put item %s: %w", entry.Key, err) } _, err = f.db.Transact(func(tr fdb.Transaction) (interface{}, error) { @@ -451,7 +450,7 @@ func (f *FDBBackend) Put(ctx context.Context, entry *physical.Entry) error { }) if err != nil { - return errwrap.Wrapf(fmt.Sprintf("put failed for item %s: {{err}}", entry.Key), err) + return fmt.Errorf("put failed for item %s: %w", entry.Key, err) } return nil @@ -464,7 +463,7 @@ func (f *FDBBackend) Get(ctx context.Context, key string) (*physical.Entry, erro decoratedPath, err := decoratePath(key) if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("could not build decorated path to get item %s: {{err}}", key), err) + return nil, fmt.Errorf("could not build decorated path to get item %s: %w", key, err) } fkey := fdb.Key(concat(f.dataSpace.Bytes(), decoratedPath...)) @@ -478,7 +477,7 @@ func (f *FDBBackend) Get(ctx context.Context, key string) (*physical.Entry, erro return value, nil }) if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("get failed for item %s: {{err}}", key), err) + return nil, fmt.Errorf("get failed for item %s: %w", key, err) } if value.([]byte) == nil { return nil, nil @@ -496,7 +495,7 @@ func (f *FDBBackend) Delete(ctx context.Context, key string) error { decoratedPath, err := decoratePath(key) if err != nil { - return errwrap.Wrapf(fmt.Sprintf("could not build 
decorated path to delete item %s: {{err}}", key), err) + return fmt.Errorf("could not build decorated path to delete item %s: %w", key, err) } _, err = f.db.Transact(func(tr fdb.Transaction) (interface{}, error) { @@ -509,7 +508,7 @@ func (f *FDBBackend) Delete(ctx context.Context, key string) error { }) if err != nil { - return errwrap.Wrapf(fmt.Sprintf("delete failed for item %s: {{err}}", key), err) + return fmt.Errorf("delete failed for item %s: %w", key, err) } return nil @@ -525,7 +524,7 @@ func (f *FDBBackend) List(ctx context.Context, prefix string) ([]string, error) decoratedPrefix, err := decoratePrefix(prefix) if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("could not build decorated path to list prefix %s: {{err}}", prefix), err) + return nil, fmt.Errorf("could not build decorated path to list prefix %s: %w", prefix, err) } // The beginning of the range is /\x02foo/\x02bar/\x01 (the decorated prefix) to list foo/bar/ @@ -551,7 +550,7 @@ func (f *FDBBackend) List(ctx context.Context, prefix string) ([]string, error) return dirList, nil }) if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("could not list prefix %s: {{err}}", prefix), err) + return nil, fmt.Errorf("could not list prefix %s: %w", prefix, err) } return content.([]string), nil @@ -635,7 +634,7 @@ func (fl *FDBBackendLock) getLockContent(tr fdb.Transaction) (*FDBBackendLockCon content, err := unpackLock(tupleContent) if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("failed to unpack lock %s: {{err}}", fl.key), err) + return nil, fmt.Errorf("failed to unpack lock %s: %w", fl.key, err) } return content, nil @@ -657,14 +656,14 @@ func (fl *FDBBackendLock) acquireTryLock(acquired chan struct{}, errors chan err wonTheRace, err := fl.f.db.Transact(func(tr fdb.Transaction) (interface{}, error) { tupleContent, err := tr.Get(fl.fkey).Get() if err != nil { - return nil, errwrap.Wrapf("could not read lock: {{err}}", err) + return nil, fmt.Errorf("could not read lock: %w", err) } // Lock exists if tupleContent != nil { content, err := unpackLock(tupleContent) if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("failed to unpack lock %s: {{err}}", fl.key), err) + return nil, fmt.Errorf("failed to unpack lock %s: %w", fl.key, err) } if fl.isOwned(content) { @@ -842,7 +841,7 @@ func (fl *FDBBackendLock) Unlock() error { _, err := fl.f.db.Transact(func(tr fdb.Transaction) (interface{}, error) { content, err := fl.getLockContent(tr) if err != nil { - return nil, errwrap.Wrapf("could not get lock content: {{err}}", err) + return nil, fmt.Errorf("could not get lock content: %w", err) } // We don't own the lock @@ -855,7 +854,7 @@ func (fl *FDBBackendLock) Unlock() error { return nil, nil }) if err != nil { - return errwrap.Wrapf("unlock failed: {{err}}", err) + return fmt.Errorf("unlock failed: %w", err) } return nil @@ -865,13 +864,13 @@ func (fl *FDBBackendLock) Value() (bool, string, error) { tupleContent, err := fl.f.db.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) { tupleContent, err := rtr.Get(fl.fkey).Get() if err != nil { - return nil, errwrap.Wrapf("could not read lock: {{err}}", err) + return nil, fmt.Errorf("could not read lock: %w", err) } return tupleContent, nil }) if err != nil { - return false, "", errwrap.Wrapf(fmt.Sprintf("get lock value failed for lock %s: {{err}}", fl.key), err) + return false, "", fmt.Errorf("get lock value failed for lock %s: %w", fl.key, err) } if tupleContent.([]byte) == nil { return false, "", nil @@ -879,7 +878,7 @@ func (fl *FDBBackendLock) Value() 
(bool, string, error) { content, err := unpackLock(tupleContent.([]byte)) if err != nil { - return false, "", errwrap.Wrapf(fmt.Sprintf("get lock value failed to unpack lock %s: {{err}}", fl.key), err) + return false, "", fmt.Errorf("get lock value failed to unpack lock %s: %w", fl.key, err) } return true, content.value, nil diff --git a/physical/foundationdb/foundationdb_test.go b/physical/foundationdb/foundationdb_test.go index 97cdb50d4a..9c08b812bc 100644 --- a/physical/foundationdb/foundationdb_test.go +++ b/physical/foundationdb/foundationdb_test.go @@ -10,7 +10,6 @@ import ( "testing" "time" - "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" uuid "github.com/hashicorp/go-uuid" @@ -25,12 +24,12 @@ import ( func connectToFoundationDB(clusterFile string) (*fdb.Database, error) { if err := fdb.APIVersion(520); err != nil { - return nil, errwrap.Wrapf("failed to set FDB API version: {{err}}", err) + return nil, fmt.Errorf("failed to set FDB API version: %w", err) } db, err := fdb.Open(clusterFile, []byte("DB")) if err != nil { - return nil, errwrap.Wrapf("failed to open database: {{err}}", err) + return nil, fmt.Errorf("failed to open database: %w", err) } return &db, nil @@ -39,11 +38,11 @@ func connectToFoundationDB(clusterFile string) (*fdb.Database, error) { func cleanupTopDir(clusterFile, topDir string) error { db, err := connectToFoundationDB(clusterFile) if err != nil { - return errwrap.Wrapf("could not connect to FDB for cleanup: {{err}}", err) + return fmt.Errorf("could not connect to FDB for cleanup: %w", err) } if _, err := directory.Root().Remove(db, []string{topDir}); err != nil { - return errwrap.Wrapf("could not remove directory: {{err}}", err) + return fmt.Errorf("could not remove directory: %w", err) } return nil @@ -170,16 +169,16 @@ func prepareFoundationDBTestDirectory(t *testing.T, topDir string) (func(), stri connectString := fmt.Sprintf("foundationdb:foundationdb@127.0.0.1:%s", resource.GetPort("4500/tcp")) if err := tmpFile.Truncate(0); err != nil { - return errwrap.Wrapf("could not truncate cluster file: {{err}}", err) + return fmt.Errorf("could not truncate cluster file: %w", err) } _, err := tmpFile.WriteAt([]byte(connectString), 0) if err != nil { - return errwrap.Wrapf("could not write cluster file: {{err}}", err) + return fmt.Errorf("could not write cluster file: %w", err) } if _, err := connectToFoundationDB(clusterFile); err != nil { - return errwrap.Wrapf("could not connect to FoundationDB after starting container: %s", err) + return fmt.Errorf("could not connect to FoundationDB after starting container: %w", err) } return nil diff --git a/physical/gcs/gcs.go b/physical/gcs/gcs.go index 2e63b37185..f38ffa53d3 100644 --- a/physical/gcs/gcs.go +++ b/physical/gcs/gcs.go @@ -13,7 +13,6 @@ import ( "time" metrics "github.com/armon/go-metrics" - "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/vault/sdk/helper/useragent" @@ -116,7 +115,7 @@ func NewBackend(c map[string]string, logger log.Logger) (physical.Backend, error } chunkSize, err := strconv.Atoi(chunkSizeStr) if err != nil { - return nil, errwrap.Wrapf("failed to parse chunk_size: {{err}}", err) + return nil, fmt.Errorf("failed to parse chunk_size: %w", err) } // Values are specified as kb, but the API expects them as bytes. 
@@ -133,7 +132,7 @@ func NewBackend(c map[string]string, logger log.Logger) (physical.Backend, error var err error haEnabled, err = strconv.ParseBool(haEnabledStr) if err != nil { - return nil, errwrap.Wrapf("failed to parse HA enabled: {{err}}", err) + return nil, fmt.Errorf("failed to parse HA enabled: %w", err) } } if haEnabled { @@ -142,14 +141,14 @@ func NewBackend(c map[string]string, logger log.Logger) (physical.Backend, error ctx := context.Background() haClient, err = storage.NewClient(ctx, option.WithUserAgent(useragent.String())) if err != nil { - return nil, errwrap.Wrapf("failed to create HA storage client: {{err}}", err) + return nil, fmt.Errorf("failed to create HA storage client: %w", err) } } // Max parallel maxParallel, err := extractInt(c["max_parallel"]) if err != nil { - return nil, errwrap.Wrapf("failed to parse max_parallel: {{err}}", err) + return nil, fmt.Errorf("failed to parse max_parallel: %w", err) } logger.Debug("configuration", @@ -163,7 +162,7 @@ func NewBackend(c map[string]string, logger log.Logger) (physical.Backend, error ctx := context.Background() client, err := storage.NewClient(ctx, option.WithUserAgent(useragent.String())) if err != nil { - return nil, errwrap.Wrapf("failed to create storage client: {{err}}", err) + return nil, fmt.Errorf("failed to create storage client: %w", err) } return &Backend{ @@ -195,12 +194,12 @@ func (b *Backend) Put(ctx context.Context, entry *physical.Entry) (retErr error) defer func() { closeErr := w.Close() if closeErr != nil { - retErr = multierror.Append(retErr, errwrap.Wrapf("error closing connection: {{err}}", closeErr)) + retErr = multierror.Append(retErr, fmt.Errorf("error closing connection: %w", closeErr)) } }() if _, err := w.Write(entry.Value); err != nil { - return errwrap.Wrapf("failed to put data: {{err}}", err) + return fmt.Errorf("failed to put data: %w", err) } return nil } @@ -219,19 +218,19 @@ func (b *Backend) Get(ctx context.Context, key string) (retEntry *physical.Entry return nil, nil } if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("failed to read value for %q: {{err}}", key), err) + return nil, fmt.Errorf("failed to read value for %q: %w", key, err) } defer func() { closeErr := r.Close() if closeErr != nil { - retErr = multierror.Append(retErr, errwrap.Wrapf("error closing connection: {{err}}", closeErr)) + retErr = multierror.Append(retErr, fmt.Errorf("error closing connection: %w", closeErr)) } }() value, err := ioutil.ReadAll(r) if err != nil { - return nil, errwrap.Wrapf("failed to read value into a string: {{err}}", err) + return nil, fmt.Errorf("failed to read value into a string: %w", err) } return &physical.Entry{ @@ -251,7 +250,7 @@ func (b *Backend) Delete(ctx context.Context, key string) error { // Delete err := b.client.Bucket(b.bucket).Object(key).Delete(ctx) if err != nil && err != storage.ErrObjectNotExist { - return errwrap.Wrapf(fmt.Sprintf("failed to delete key %q: {{err}}", key), err) + return fmt.Errorf("failed to delete key %q: %w", key, err) } return nil } @@ -279,7 +278,7 @@ func (b *Backend) List(ctx context.Context, prefix string) ([]string, error) { break } if err != nil { - return nil, errwrap.Wrapf("failed to read object: {{err}}", err) + return nil, fmt.Errorf("failed to read object: %w", err) } var path string diff --git a/physical/gcs/gcs_ha.go b/physical/gcs/gcs_ha.go index 39f30f7e0b..7ad57a0f48 100644 --- a/physical/gcs/gcs_ha.go +++ b/physical/gcs/gcs_ha.go @@ -9,7 +9,6 @@ import ( "cloud.google.com/go/storage" metrics "github.com/armon/go-metrics" - 
"github.com/hashicorp/errwrap" uuid "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/sdk/physical" "github.com/pkg/errors" @@ -109,7 +108,7 @@ func (b *Backend) HAEnabled() bool { func (b *Backend) LockWith(key, value string) (physical.Lock, error) { identity, err := uuid.GenerateUUID() if err != nil { - return nil, errwrap.Wrapf("lock with: {{err}}", err) + return nil, fmt.Errorf("lock with: %w", err) } return &Lock{ backend: b, @@ -142,7 +141,7 @@ func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { // occurs. acquired, err := l.attemptLock(stopCh) if err != nil { - return nil, errwrap.Wrapf("lock: {{err}}", err) + return nil, fmt.Errorf("lock: %w", err) } if !acquired { return nil, nil @@ -187,7 +186,7 @@ func (l *Lock) Unlock() error { ctx := context.Background() r, err := l.get(ctx) if err != nil { - return errwrap.Wrapf("failed to read lock for deletion: {{err}}", err) + return fmt.Errorf("failed to read lock for deletion: %w", err) } if r != nil && r.Identity == l.identity { ctx := context.Background() @@ -203,7 +202,7 @@ func (l *Lock) Unlock() error { if terr, ok := err.(*googleapi.Error); ok && terr.Code == 412 { l.backend.logger.Debug("unlock: preconditions failed (lock already taken by someone else?)") } else { - return errwrap.Wrapf("failed to delete lock: {{err}}", err) + return fmt.Errorf("failed to delete lock: %w", err) } } } @@ -240,7 +239,7 @@ func (l *Lock) attemptLock(stopCh <-chan struct{}) (bool, error) { case <-ticker.C: acquired, err := l.writeLock() if err != nil { - return false, errwrap.Wrapf("attempt lock: {{err}}", err) + return false, fmt.Errorf("attempt lock: %w", err) } if !acquired { continue @@ -345,7 +344,7 @@ func (l *Lock) writeLock() (bool, error) { // Read the record r, err := l.get(ctx) if err != nil { - return false, errwrap.Wrapf("write lock: {{err}}", err) + return false, fmt.Errorf("write lock: %w", err) } if r != nil { // If the key is empty or the identity is ours or the ttl expired, we can @@ -370,7 +369,7 @@ func (l *Lock) writeLock() (bool, error) { Timestamp: time.Now().UTC(), }) if err != nil { - return false, errwrap.Wrapf("write lock: failed to encode JSON: {{err}}", err) + return false, fmt.Errorf("write lock: failed to encode JSON: %w", err) } // Write the object @@ -399,7 +398,7 @@ func (l *Lock) get(ctx context.Context) (*LockRecord, error) { return nil, nil } if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("failed to read attrs for %q: {{err}}", l.key), err) + return nil, fmt.Errorf("failed to read attrs for %q: %w", l.key, err) } // If we got this far, we have attributes, meaning the lockfile exists. 
@@ -407,7 +406,7 @@ func (l *Lock) get(ctx context.Context) (*LockRecord, error) { r.attrs = attrs lockData := []byte(attrs.Metadata["lock"]) if err := json.Unmarshal(lockData, &r); err != nil { - return nil, errwrap.Wrapf("failed to decode lock: {{err}}", err) + return nil, fmt.Errorf("failed to decode lock: %w", err) } return &r, nil } diff --git a/physical/manta/manta.go b/physical/manta/manta.go index a02bf3d479..390683d369 100644 --- a/physical/manta/manta.go +++ b/physical/manta/manta.go @@ -13,7 +13,6 @@ import ( "time" metrics "github.com/armon/go-metrics" - "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/sdk/physical" triton "github.com/joyent/triton-go" @@ -63,7 +62,7 @@ func NewMantaBackend(conf map[string]string, logger log.Logger) (physical.Backen } signer, err := authentication.NewSSHAgentSigner(input) if err != nil { - return nil, errwrap.Wrapf("Error Creating SSH Agent Signer: {{err}}", err) + return nil, fmt.Errorf("Error Creating SSH Agent Signer: %w", err) } maxParStr, ok := conf["max_parallel"] @@ -71,7 +70,7 @@ func NewMantaBackend(conf map[string]string, logger log.Logger) (physical.Backen if ok { maxParInt, err = strconv.Atoi(maxParStr) if err != nil { - return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err) + return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err) } if logger.IsDebug() { logger.Debug("max_parallel set", "max_parallel", maxParInt) @@ -86,7 +85,7 @@ func NewMantaBackend(conf map[string]string, logger log.Logger) (physical.Backen client, err := storage.NewClient(config) if err != nil { - return nil, errwrap.Wrapf("failed initialising Storage client: {{err}}", err) + return nil, fmt.Errorf("failed initialising Storage client: %w", err) } return &MantaBackend{ diff --git a/physical/mssql/mssql.go b/physical/mssql/mssql.go index f6e3906230..fa145bfe1f 100644 --- a/physical/mssql/mssql.go +++ b/physical/mssql/mssql.go @@ -11,7 +11,6 @@ import ( metrics "github.com/armon/go-metrics" _ "github.com/denisenkom/go-mssqldb" - "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/sdk/helper/strutil" "github.com/hashicorp/vault/sdk/physical" @@ -55,7 +54,7 @@ func NewMSSQLBackend(conf map[string]string, logger log.Logger) (physical.Backen if ok { maxParInt, err = strconv.Atoi(maxParStr) if err != nil { - return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err) + return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err) } if logger.IsDebug() { logger.Debug("max_parallel set", "max_parallel", maxParInt) @@ -109,13 +108,13 @@ func NewMSSQLBackend(conf map[string]string, logger log.Logger) (physical.Backen db, err := sql.Open("mssql", connectionString) if err != nil { - return nil, errwrap.Wrapf("failed to connect to mssql: {{err}}", err) + return nil, fmt.Errorf("failed to connect to mssql: %w", err) } db.SetMaxOpenConns(maxParInt) if _, err := db.Exec("IF NOT EXISTS(SELECT * FROM sys.databases WHERE name = '" + database + "') CREATE DATABASE " + database); err != nil { - return nil, errwrap.Wrapf("failed to create mssql database: {{err}}", err) + return nil, fmt.Errorf("failed to create mssql database: %w", err) } dbTable := database + "." + schema + "." 
+ table @@ -130,16 +129,16 @@ func NewMSSQLBackend(conf map[string]string, logger log.Logger) (physical.Backen switch { case err == sql.ErrNoRows: if _, err := db.Exec("USE " + database + "; EXEC ('CREATE SCHEMA " + schema + "')"); err != nil { - return nil, errwrap.Wrapf("failed to create mssql schema: {{err}}", err) + return nil, fmt.Errorf("failed to create mssql schema: %w", err) } case err != nil: - return nil, errwrap.Wrapf("failed to check if mssql schema exists: {{err}}", err) + return nil, fmt.Errorf("failed to check if mssql schema exists: %w", err) } } if _, err := db.Exec(createQuery); err != nil { - return nil, errwrap.Wrapf("failed to create mssql table: {{err}}", err) + return nil, fmt.Errorf("failed to create mssql table: %w", err) } m := &MSSQLBackend{ @@ -170,7 +169,7 @@ func NewMSSQLBackend(conf map[string]string, logger log.Logger) (physical.Backen func (m *MSSQLBackend) prepare(name, query string) error { stmt, err := m.client.Prepare(query) if err != nil { - return errwrap.Wrapf(fmt.Sprintf("failed to prepare %q: {{err}}", name), err) + return fmt.Errorf("failed to prepare %q: %w", name, err) } m.statements[name] = stmt @@ -246,7 +245,7 @@ func (m *MSSQLBackend) List(ctx context.Context, prefix string) ([]string, error var key string err = rows.Scan(&key) if err != nil { - return nil, errwrap.Wrapf("failed to scan rows: {{err}}", err) + return nil, fmt.Errorf("failed to scan rows: %w", err) } key = strings.TrimPrefix(key, prefix) diff --git a/physical/mysql/mysql.go b/physical/mysql/mysql.go index 41393977b0..3f7577011f 100644 --- a/physical/mysql/mysql.go +++ b/physical/mysql/mysql.go @@ -22,7 +22,6 @@ import ( metrics "github.com/armon/go-metrics" mysql "github.com/go-sql-driver/mysql" - "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/sdk/helper/strutil" "github.com/hashicorp/vault/sdk/physical" ) @@ -84,7 +83,7 @@ func NewMySQLBackend(conf map[string]string, logger log.Logger) (physical.Backen if ok { maxParInt, err = strconv.Atoi(maxParStr) if err != nil { - return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err) + return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err) } if logger.IsDebug() { logger.Debug("max_parallel set", "max_parallel", maxParInt) @@ -97,7 +96,7 @@ func NewMySQLBackend(conf map[string]string, logger log.Logger) (physical.Backen var schemaExist bool schemaRows, err := db.Query("SELECT SCHEMA_NAME FROM information_schema.SCHEMATA WHERE SCHEMA_NAME = ?", database) if err != nil { - return nil, errwrap.Wrapf("failed to check mysql schema exist: {{err}}", err) + return nil, fmt.Errorf("failed to check mysql schema exist: %w", err) } defer schemaRows.Close() schemaExist = schemaRows.Next() @@ -106,7 +105,7 @@ func NewMySQLBackend(conf map[string]string, logger log.Logger) (physical.Backen var tableExist bool tableRows, err := db.Query("SELECT TABLE_NAME FROM information_schema.TABLES WHERE TABLE_NAME = ? AND TABLE_SCHEMA = ?", table, database) if err != nil { - return nil, errwrap.Wrapf("failed to check mysql table exist: {{err}}", err) + return nil, fmt.Errorf("failed to check mysql table exist: %w", err) } defer tableRows.Close() tableExist = tableRows.Next() @@ -114,7 +113,7 @@ func NewMySQLBackend(conf map[string]string, logger log.Logger) (physical.Backen // Create the required database if it doesn't exists. 
if !schemaExist { if _, err := db.Exec("CREATE DATABASE IF NOT EXISTS `" + database + "`"); err != nil { - return nil, errwrap.Wrapf("failed to create mysql database: {{err}}", err) + return nil, fmt.Errorf("failed to create mysql database: %w", err) } } @@ -123,7 +122,7 @@ func NewMySQLBackend(conf map[string]string, logger log.Logger) (physical.Backen create_query := "CREATE TABLE IF NOT EXISTS " + dbTable + " (vault_key varbinary(512), vault_value mediumblob, PRIMARY KEY (vault_key))" if _, err := db.Exec(create_query); err != nil { - return nil, errwrap.Wrapf("failed to create mysql table: {{err}}", err) + return nil, fmt.Errorf("failed to create mysql table: %w", err) } } @@ -150,7 +149,7 @@ func NewMySQLBackend(conf map[string]string, logger log.Logger) (physical.Backen var lockTableExist bool lockTableRows, err := db.Query("SELECT TABLE_NAME FROM information_schema.TABLES WHERE TABLE_NAME = ? AND TABLE_SCHEMA = ?", locktable, database) if err != nil { - return nil, errwrap.Wrapf("failed to check mysql table exist: {{err}}", err) + return nil, fmt.Errorf("failed to check mysql table exist: %w", err) } defer lockTableRows.Close() lockTableExist = lockTableRows.Next() @@ -160,7 +159,7 @@ func NewMySQLBackend(conf map[string]string, logger log.Logger) (physical.Backen create_query := "CREATE TABLE IF NOT EXISTS " + dbLockTable + " (node_job varbinary(512), current_leader varbinary(512), PRIMARY KEY (node_job))" if _, err := db.Exec(create_query); err != nil { - return nil, errwrap.Wrapf("failed to create mysql table: {{err}}", err) + return nil, fmt.Errorf("failed to create mysql table: %w", err) } } } @@ -286,7 +285,7 @@ func NewMySQLClient(conf map[string]string, logger log.Logger) (*sql.DB, error) if ok { maxIdleConnInt, err = strconv.Atoi(maxIdleConnStr) if err != nil { - return nil, errwrap.Wrapf("failed parsing max_idle_connections parameter: {{err}}", err) + return nil, fmt.Errorf("failed parsing max_idle_connections parameter: %w", err) } if logger.IsDebug() { logger.Debug("max_idle_connections set", "max_idle_connections", maxIdleConnInt) @@ -298,7 +297,7 @@ func NewMySQLClient(conf map[string]string, logger log.Logger) (*sql.DB, error) if ok { maxConnLifeInt, err = strconv.Atoi(maxConnLifeStr) if err != nil { - return nil, errwrap.Wrapf("failed parsing max_connection_lifetime parameter: {{err}}", err) + return nil, fmt.Errorf("failed parsing max_connection_lifetime parameter: %w", err) } if logger.IsDebug() { logger.Debug("max_connection_lifetime set", "max_connection_lifetime", maxConnLifeInt) @@ -310,7 +309,7 @@ func NewMySQLClient(conf map[string]string, logger log.Logger) (*sql.DB, error) if ok { maxParInt, err = strconv.Atoi(maxParStr) if err != nil { - return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err) + return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err) } if logger.IsDebug() { logger.Debug("max_parallel set", "max_parallel", maxParInt) @@ -323,7 +322,7 @@ func NewMySQLClient(conf map[string]string, logger log.Logger) (*sql.DB, error) tlsCaFile, tlsOk := conf["tls_ca_file"] if tlsOk { if err := setupMySQLTLSConfig(tlsCaFile); err != nil { - return nil, errwrap.Wrapf("failed register TLS config: {{err}}", err) + return nil, fmt.Errorf("failed to register TLS config: %w", err) } dsnParams.Add("tls", mysqlTLSKey) @@ -337,7 +336,7 @@ func NewMySQLClient(conf map[string]string, logger log.Logger) (*sql.DB, error) dsn := username + ":" + password + "@tcp(" + address + ")/?" 
+ dsnParams.Encode() db, err := sql.Open("mysql", dsn) if err != nil { - return nil, errwrap.Wrapf("failed to connect to mysql: {{err}}", err) + return nil, fmt.Errorf("failed to connect to mysql: %w", err) } db.SetMaxOpenConns(maxParInt) if maxIdleConnInt != 0 { @@ -354,7 +353,7 @@ func NewMySQLClient(conf map[string]string, logger log.Logger) (*sql.DB, error) func (m *MySQLBackend) prepare(name, query string) error { stmt, err := m.client.Prepare(query) if err != nil { - return errwrap.Wrapf(fmt.Sprintf("failed to prepare %q: {{err}}", name), err) + return fmt.Errorf("failed to prepare %q: %w", name, err) } m.statements[name] = stmt return nil @@ -423,7 +422,7 @@ func (m *MySQLBackend) List(ctx context.Context, prefix string) ([]string, error likePrefix := prefix + "%" rows, err := m.statements["list"].Query(likePrefix) if err != nil { - return nil, errwrap.Wrapf("failed to execute statement: {{err}}", err) + return nil, fmt.Errorf("failed to execute statement: %w", err) } var keys []string @@ -431,7 +430,7 @@ func (m *MySQLBackend) List(ctx context.Context, prefix string) ([]string, error var key string err = rows.Scan(&key) if err != nil { - return nil, errwrap.Wrapf("failed to scan rows: {{err}}", err) + return nil, fmt.Errorf("failed to scan rows: %w", err) } key = strings.TrimPrefix(key, prefix) @@ -672,7 +671,7 @@ func NewMySQLLock(in *MySQLBackend, l log.Logger, key, value string) (*MySQLLock func (m *MySQLLock) prepare(name, query string) error { stmt, err := m.in.Prepare(query) if err != nil { - return errwrap.Wrapf(fmt.Sprintf("failed to prepare %q: {{err}}", name), err) + return fmt.Errorf("failed to prepare %q: %w", name, err) } m.statements[name] = stmt return nil diff --git a/physical/oci/oci.go b/physical/oci/oci.go index 3e81376754..cd80f6e9ab 100644 --- a/physical/oci/oci.go +++ b/physical/oci/oci.go @@ -13,7 +13,6 @@ import ( "time" "github.com/armon/go-metrics" - "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/sdk/helper/strutil" @@ -89,7 +88,7 @@ func NewBackend(conf map[string]string, logger log.Logger) (physical.Backend, er if haEnabledStr != "" { haEnabled, err = strconv.ParseBool(haEnabledStr) if err != nil { - return nil, errwrap.Wrapf("failed to parse HA enabled: {{err}}", err) + return nil, fmt.Errorf("failed to parse HA enabled: %w", err) } if haEnabled { @@ -105,7 +104,7 @@ func NewBackend(conf map[string]string, logger log.Logger) (physical.Backend, er if authTypeAPIKeyStr != "" { authTypeAPIKeyBool, err = strconv.ParseBool(authTypeAPIKeyStr) if err != nil { - return nil, errwrap.Wrapf("failed parsing auth_type_api_key parameter: {{err}}", err) + return nil, fmt.Errorf("failed parsing auth_type_api_key parameter: %w", err) } } @@ -115,13 +114,13 @@ func NewBackend(conf map[string]string, logger log.Logger) (physical.Backend, er } else { cp, err = auth.InstancePrincipalConfigurationProvider() if err != nil { - return nil, errwrap.Wrapf("failed creating InstancePrincipalConfigurationProvider: {{err}}", err) + return nil, fmt.Errorf("failed creating InstancePrincipalConfigurationProvider: %w", err) } } objectStorageClient, err := objectstorage.NewObjectStorageClientWithConfigurationProvider(cp) if err != nil { - return nil, errwrap.Wrapf("failed creating NewObjectStorageClientWithConfigurationProvider: {{err}}", err) + return nil, fmt.Errorf("failed creating NewObjectStorageClientWithConfigurationProvider: %w", err) } region := conf["region"] @@ -164,7 +163,7 @@ func (o *Backend) 
Put(ctx context.Context, entry *physical.Entry) error { if err != nil { metrics.IncrCounter(metricPutFailed, 1) o.logger.Error("failed to generate UUID") - return errwrap.Wrapf("failed to generate UUID: {{err}}", err) + return fmt.Errorf("failed to generate UUID: %w", err) } o.logger.Debug("PUT", "opc-client-request-id", opcClientRequestId) @@ -185,7 +184,7 @@ func (o *Backend) Put(ctx context.Context, entry *physical.Entry) error { if err != nil { metrics.IncrCounter(metricPutFailed, 1) - return errwrap.Wrapf("failed to put data: {{err}}", err) + return fmt.Errorf("failed to put data: %w", err) } o.logRequest("PUT", resp.RawResponse, resp.OpcClientRequestId, resp.OpcRequestId, err) @@ -207,7 +206,7 @@ func (o *Backend) Get(ctx context.Context, key string) (*physical.Entry, error) opcClientRequestId, err := uuid.GenerateUUID() if err != nil { o.logger.Error("failed to generate UUID") - return nil, errwrap.Wrapf("failed to generate UUID: {{err}}", err) + return nil, fmt.Errorf("failed to generate UUID: %w", err) } o.logger.Debug("GET", "opc-client-request-id", opcClientRequestId) request := objectstorage.GetObjectRequest{ @@ -228,13 +227,13 @@ func (o *Backend) Get(ctx context.Context, key string) (*physical.Entry, error) return nil, nil } metrics.IncrCounter(metricGetFailed, 1) - return nil, errwrap.Wrapf(fmt.Sprintf("failed to read Value: {{err}}"), err) + return nil, fmt.Errorf("failed to read Value: %w", err) } body, err := ioutil.ReadAll(resp.Content) if err != nil { metrics.IncrCounter(metricGetFailed, 1) - return nil, errwrap.Wrapf("failed to decode Value into bytes: {{err}}", err) + return nil, fmt.Errorf("failed to decode Value into bytes: %w", err) } o.logger.Debug("GET completed") @@ -258,7 +257,7 @@ func (o *Backend) Delete(ctx context.Context, key string) error { opcClientRequestId, err := uuid.GenerateUUID() if err != nil { o.logger.Error("Delete: error generating UUID") - return errwrap.Wrapf("failed to generate UUID: {{err}}", err) + return fmt.Errorf("failed to generate UUID: %w", err) } o.logger.Debug("Delete", "opc-client-request-id", opcClientRequestId) request := objectstorage.DeleteObjectRequest{ @@ -280,7 +279,7 @@ func (o *Backend) Delete(ctx context.Context, key string) error { return nil } metrics.IncrCounter(metricDeleteFailed, 1) - return errwrap.Wrapf("failed to delete Key: {{err}}", err) + return fmt.Errorf("failed to delete Key: %w", err) } o.logger.Debug("DELETE completed") @@ -305,7 +304,7 @@ func (o *Backend) List(ctx context.Context, prefix string) ([]string, error) { opcClientRequestId, err := uuid.GenerateUUID() if err != nil { o.logger.Error("List: error generating UUID") - return nil, errwrap.Wrapf("failed to generate UUID {{err}}", err) + return nil, fmt.Errorf("failed to generate UUID: %w", err) } o.logger.Debug("LIST", "opc-client-request-id", opcClientRequestId) request := objectstorage.ListObjectsRequest{ @@ -322,7 +321,7 @@ func (o *Backend) List(ctx context.Context, prefix string) ([]string, error) { if err != nil { metrics.IncrCounter(metricListFailed, 1) - return nil, errwrap.Wrapf("failed to list using prefix: {{err}}", err) + return nil, fmt.Errorf("failed to list using prefix: %w", err) } for _, commonPrefix := range resp.Prefixes { diff --git a/physical/oci/oci_ha.go b/physical/oci/oci_ha.go index 9fe3012810..a4c6ad52ea 100644 --- a/physical/oci/oci_ha.go +++ b/physical/oci/oci_ha.go @@ -14,7 +14,6 @@ import ( "time" "github.com/armon/go-metrics" - "github.com/hashicorp/errwrap" "github.com/hashicorp/go-uuid" 
"github.com/hashicorp/vault/sdk/physical" "github.com/oracle/oci-go-sdk/objectstorage" @@ -118,7 +117,7 @@ func (b *Backend) HAEnabled() bool { func (b *Backend) LockWith(key, value string) (physical.Lock, error) { identity, err := uuid.GenerateUUID() if err != nil { - return nil, errwrap.Wrapf("Lock with: {{err}}", err) + return nil, fmt.Errorf("Lock with: %w", err) } return &Lock{ backend: b, @@ -148,7 +147,7 @@ func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { // occurs. acquired, err := l.attemptLock(stopCh) if err != nil { - return nil, errwrap.Wrapf("lock: {{err}}", err) + return nil, fmt.Errorf("lock: %w", err) } if !acquired { return nil, nil @@ -183,7 +182,7 @@ func (l *Lock) attemptLock(stopCh <-chan struct{}) (bool, error) { case <-ticker.C: acquired, err := l.writeLock() if err != nil { - return false, errwrap.Wrapf("attempt lock: {{err}}", err) + return false, fmt.Errorf("attempt lock: %w", err) } if !acquired { continue @@ -314,7 +313,7 @@ func (l *Lock) Unlock() error { // Get current lock record currentLockRecord, etag, err := l.get(context.Background()) if err != nil { - return errwrap.Wrapf("error reading lock record: {{err}}", err) + return fmt.Errorf("error reading lock record: %w", err) } if currentLockRecord != nil && currentLockRecord.Identity == l.identity { @@ -323,7 +322,7 @@ func (l *Lock) Unlock() error { opcClientRequestId, err := uuid.GenerateUUID() if err != nil { l.backend.logger.Debug("Unlock: error generating UUID") - return errwrap.Wrapf("failed to generate UUID: {{err}}", err) + return fmt.Errorf("failed to generate UUID: %w", err) } l.backend.logger.Debug("Unlock", "opc-client-request-id", opcClientRequestId) request := objectstorage.DeleteObjectRequest{ @@ -339,7 +338,7 @@ func (l *Lock) Unlock() error { if err != nil { metrics.IncrCounter(metricDeleteFailed, 1) - return errwrap.Wrapf("write lock: {{err}}", err) + return fmt.Errorf("write lock: %w", err) } } @@ -370,7 +369,7 @@ func (l *Lock) get(ctx context.Context) (*LockRecord, string, error) { opcClientRequestId, err := uuid.GenerateUUID() if err != nil { l.backend.logger.Error("getHa: error generating UUID") - return nil, "", errwrap.Wrapf("failed to generate UUID: {{err}}", err) + return nil, "", fmt.Errorf("failed to generate UUID: %w", err) } l.backend.logger.Debug("getHa", "opc-client-request-id", opcClientRequestId) @@ -394,7 +393,7 @@ func (l *Lock) get(ctx context.Context) (*LockRecord, string, error) { metrics.IncrCounter(metricGetFailed, 1) l.backend.logger.Error("Error calling GET", "err", err) - return nil, "", errwrap.Wrapf(fmt.Sprintf("failed to read Value for %q: {{err}}", l.key), err) + return nil, "", fmt.Errorf("failed to read Value for %q: %w", l.key, err) } defer response.RawResponse.Body.Close() @@ -403,7 +402,7 @@ func (l *Lock) get(ctx context.Context) (*LockRecord, string, error) { if err != nil { metrics.IncrCounter(metricGetFailed, 1) l.backend.logger.Error("Error reading content", "err", err) - return nil, "", errwrap.Wrapf("failed to decode Value into bytes: {{err}}", err) + return nil, "", fmt.Errorf("failed to decode Value into bytes: %w", err) } var lockRecord LockRecord @@ -411,7 +410,7 @@ func (l *Lock) get(ctx context.Context) (*LockRecord, string, error) { if err != nil { metrics.IncrCounter(metricGetFailed, 1) l.backend.logger.Error("Error un-marshalling content", "err", err) - return nil, "", errwrap.Wrapf(fmt.Sprintf("failed to read Value for %q: {{err}}", l.key), err) + return nil, "", fmt.Errorf("failed to read Value for %q: %w", l.key, 
err) } return &lockRecord, *response.ETag, nil @@ -442,7 +441,7 @@ func (l *Lock) writeLock() (bool, error) { // case secondary currentLockRecord, currentEtag, err := l.get(ctx) if err != nil { - return false, errwrap.Wrapf("error reading lock record: {{err}}", err) + return false, fmt.Errorf("error reading lock record: %w", err) } if (lockRecordCache == nil) || lockRecordCache.etag != currentEtag { @@ -471,7 +470,7 @@ func (l *Lock) writeLock() (bool, error) { newLockRecordJson, err := json.Marshal(newLockRecord) if err != nil { - return false, errwrap.Wrapf("error reading lock record: {{err}}", err) + return false, fmt.Errorf("error reading lock record: %w", err) } defer metrics.MeasureSince(metricPutHa, time.Now()) @@ -479,7 +478,7 @@ func (l *Lock) writeLock() (bool, error) { opcClientRequestId, err := uuid.GenerateUUID() if err != nil { l.backend.logger.Error("putHa: error generating UUID") - return false, errwrap.Wrapf("failed to generate UUID", err) + return false, fmt.Errorf("failed to generate UUID: %w", err) } l.backend.logger.Debug("putHa", "opc-client-request-id", opcClientRequestId) size := int64(len(newLockRecordJson)) @@ -536,7 +535,7 @@ func (l *Lock) writeLock() (bool, error) { } if err != nil { - return false, errwrap.Wrapf("write lock: {{err}}", err) + return false, fmt.Errorf("write lock: %w", err) } l.backend.logger.Debug("Lock written", string(newLockRecordJson)) diff --git a/physical/postgresql/postgresql.go b/physical/postgresql/postgresql.go index 669aba98da..6766567341 100644 --- a/physical/postgresql/postgresql.go +++ b/physical/postgresql/postgresql.go @@ -10,7 +10,6 @@ import ( "sync" "time" - "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/sdk/physical" log "github.com/hashicorp/go-hclog" @@ -108,7 +107,7 @@ func NewPostgreSQLBackend(conf map[string]string, logger log.Logger) (physical.B if ok { maxParInt, err = strconv.Atoi(maxParStr) if err != nil { - return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err) + return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err) } if logger.IsDebug() { logger.Debug("max_parallel set", "max_parallel", maxParInt) @@ -122,7 +121,7 @@ func NewPostgreSQLBackend(conf map[string]string, logger log.Logger) (physical.B if maxIdleConnsIsSet { maxIdleConns, err = strconv.Atoi(maxIdleConnsStr) if err != nil { - return nil, errwrap.Wrapf("failed parsing max_idle_connections parameter: {{err}}", err) + return nil, fmt.Errorf("failed parsing max_idle_connections parameter: %w", err) } if logger.IsDebug() { logger.Debug("max_idle_connections set", "max_idle_connections", maxIdleConnsStr) @@ -132,7 +131,7 @@ func NewPostgreSQLBackend(conf map[string]string, logger log.Logger) (physical.B // Create PostgreSQL handle for the database. 
db, err := sql.Open("postgres", connURL) if err != nil { - return nil, errwrap.Wrapf("failed to connect to postgres: {{err}}", err) + return nil, fmt.Errorf("failed to connect to postgres: %w", err) } db.SetMaxOpenConns(maxParInt) @@ -144,7 +143,7 @@ func NewPostgreSQLBackend(conf map[string]string, logger log.Logger) (physical.B var upsertAvailable bool upsertAvailableQuery := "SELECT current_setting('server_version_num')::int >= 90500" if err := db.QueryRow(upsertAvailableQuery).Scan(&upsertAvailable); err != nil { - return nil, errwrap.Wrapf("failed to check for native upsert: {{err}}", err) + return nil, fmt.Errorf("failed to check for native upsert: %w", err) } if !upsertAvailable && conf["ha_enabled"] == "true" { @@ -313,7 +312,7 @@ func (m *PostgreSQLBackend) List(ctx context.Context, prefix string) ([]string, var key string err = rows.Scan(&key) if err != nil { - return nil, errwrap.Wrapf("failed to scan rows: {{err}}", err) + return nil, fmt.Errorf("failed to scan rows: %w", err) } keys = append(keys, key) diff --git a/physical/raft/fsm.go b/physical/raft/fsm.go index 86cb08279e..934c5726c8 100644 --- a/physical/raft/fsm.go +++ b/physical/raft/fsm.go @@ -15,7 +15,6 @@ import ( metrics "github.com/armon/go-metrics" "github.com/golang/protobuf/proto" - "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-raftchunking" @@ -125,7 +124,7 @@ func NewFSM(path string, localID string, logger log.Logger) (*FSM, error) { dbPath := filepath.Join(path, databaseFilename) if err := f.openDBFile(dbPath); err != nil { - return nil, errwrap.Wrapf("failed to open bolt file: {{err}}", err) + return nil, fmt.Errorf("failed to open bolt file: %w", err) } return f, nil @@ -792,7 +791,7 @@ func (f *FSM) Restore(r io.ReadCloser) error { var retErr *multierror.Error if err := snapshotInstaller.Install(dbPath); err != nil { f.logger.Error("failed to install snapshot", "error", err) - retErr = multierror.Append(retErr, errwrap.Wrapf("failed to install snapshot database: {{err}}", err)) + retErr = multierror.Append(retErr, fmt.Errorf("failed to install snapshot database: %w", err)) } else { f.logger.Info("snapshot installed") } @@ -801,7 +800,7 @@ func (f *FSM) Restore(r io.ReadCloser) error { // worked. If the install failed we should try to open the old DB file. if err := f.openDBFile(dbPath); err != nil { f.logger.Error("failed to open new database file", "error", err) - retErr = multierror.Append(retErr, errwrap.Wrapf("failed to open new bolt file: {{err}}", err)) + retErr = multierror.Append(retErr, fmt.Errorf("failed to open new bolt file: %w", err)) } // Handle local node config restore. lnConfig should not be nil here, but @@ -810,7 +809,7 @@ func (f *FSM) Restore(r io.ReadCloser) error { // Persist the local node config on the restored fsm. 
if err := f.persistDesiredSuffrage(lnConfig); err != nil { f.logger.Error("failed to persist local node config from before the restore", "error", err) - retErr = multierror.Append(retErr, errwrap.Wrapf("failed to persist local node config from before the restore: {{err}}", err)) + retErr = multierror.Append(retErr, fmt.Errorf("failed to persist local node config from before the restore: %w", err)) } } @@ -890,7 +889,7 @@ func (f *FSMChunkStorage) chunkPaths(chunk *raftchunking.ChunkInfo) (string, str func (f *FSMChunkStorage) StoreChunk(chunk *raftchunking.ChunkInfo) (bool, error) { b, err := jsonutil.EncodeJSON(chunk) if err != nil { - return false, errwrap.Wrapf("error encoding chunk info: {{err}}", err) + return false, fmt.Errorf("error encoding chunk info: %w", err) } prefix, key := f.chunkPaths(chunk) @@ -907,7 +906,7 @@ func (f *FSMChunkStorage) StoreChunk(chunk *raftchunking.ChunkInfo) (bool, error done := new(bool) if err := f.f.db.Update(func(tx *bolt.Tx) error { if err := tx.Bucket(dataBucketName).Put([]byte(entry.Key), entry.Value); err != nil { - return errwrap.Wrapf("error storing chunk info: {{err}}", err) + return fmt.Errorf("error storing chunk info: %w", err) } // Assume bucket exists and has keys @@ -940,12 +939,12 @@ func (f *FSMChunkStorage) StoreChunk(chunk *raftchunking.ChunkInfo) (bool, error func (f *FSMChunkStorage) FinalizeOp(opNum uint64) ([]*raftchunking.ChunkInfo, error) { ret, err := f.chunksForOpNum(opNum) if err != nil { - return nil, errwrap.Wrapf("error getting chunks for op keys: {{err}}", err) + return nil, fmt.Errorf("error getting chunks for op keys: %w", err) } prefix, _ := f.chunkPaths(&raftchunking.ChunkInfo{OpNum: opNum}) if err := f.f.DeletePrefix(f.ctx, prefix); err != nil { - return nil, errwrap.Wrapf("error deleting prefix after op finalization: {{err}}", err) + return nil, fmt.Errorf("error deleting prefix after op finalization: %w", err) } return ret, nil @@ -956,7 +955,7 @@ func (f *FSMChunkStorage) chunksForOpNum(opNum uint64) ([]*raftchunking.ChunkInf opChunkKeys, err := f.f.List(f.ctx, prefix) if err != nil { - return nil, errwrap.Wrapf("error fetching op chunk keys: {{err}}", err) + return nil, fmt.Errorf("error fetching op chunk keys: %w", err) } if len(opChunkKeys) == 0 { @@ -968,17 +967,17 @@ func (f *FSMChunkStorage) chunksForOpNum(opNum uint64) ([]*raftchunking.ChunkInf for _, v := range opChunkKeys { seqNum, err := strconv.ParseInt(v, 10, 64) if err != nil { - return nil, errwrap.Wrapf("error converting seqnum to integer: {{err}}", err) + return nil, fmt.Errorf("error converting seqnum to integer: %w", err) } entry, err := f.f.Get(f.ctx, prefix+v) if err != nil { - return nil, errwrap.Wrapf("error fetching chunkinfo: {{err}}", err) + return nil, fmt.Errorf("error fetching chunkinfo: %w", err) } var ci raftchunking.ChunkInfo if err := jsonutil.DecodeJSON(entry.Value, &ci); err != nil { - return nil, errwrap.Wrapf("error decoding chunkinfo json: {{err}}", err) + return nil, fmt.Errorf("error decoding chunkinfo json: %w", err) } if ret == nil { @@ -994,7 +993,7 @@ func (f *FSMChunkStorage) chunksForOpNum(opNum uint64) ([]*raftchunking.ChunkInf func (f *FSMChunkStorage) GetChunks() (raftchunking.ChunkMap, error) { opNums, err := f.f.List(f.ctx, chunkingPrefix) if err != nil { - return nil, errwrap.Wrapf("error doing recursive list for chunk saving: {{err}}", err) + return nil, fmt.Errorf("error doing recursive list for chunk saving: %w", err) } if len(opNums) == 0 { @@ -1005,12 +1004,12 @@ func (f *FSMChunkStorage) GetChunks() 
(raftchunking.ChunkMap, error) { for _, opNumStr := range opNums { opNum, err := strconv.ParseInt(opNumStr, 10, 64) if err != nil { - return nil, errwrap.Wrapf("error parsing op num during chunk saving: {{err}}", err) + return nil, fmt.Errorf("error parsing op num during chunk saving: %w", err) } opChunks, err := f.chunksForOpNum(uint64(opNum)) if err != nil { - return nil, errwrap.Wrapf("error getting chunks for op keys during chunk saving: {{err}}", err) + return nil, fmt.Errorf("error getting chunks for op keys during chunk saving: %w", err) } ret[uint64(opNum)] = opChunks @@ -1021,7 +1020,7 @@ func (f *FSMChunkStorage) GetChunks() (raftchunking.ChunkMap, error) { func (f *FSMChunkStorage) RestoreChunks(chunks raftchunking.ChunkMap) error { if err := f.f.DeletePrefix(f.ctx, chunkingPrefix); err != nil { - return errwrap.Wrapf("error deleting prefix for chunk restoration: {{err}}", err) + return fmt.Errorf("error deleting prefix for chunk restoration: %w", err) } if len(chunks) == 0 { return nil @@ -1036,7 +1035,7 @@ func (f *FSMChunkStorage) RestoreChunks(chunks raftchunking.ChunkMap) error { return errors.New("unexpected op number in chunk") } if _, err := f.StoreChunk(chunk); err != nil { - return errwrap.Wrapf("error storing chunk during restoration: {{err}}", err) + return fmt.Errorf("error storing chunk during restoration: %w", err) } } } diff --git a/physical/raft/raft.go b/physical/raft/raft.go index 2a458b4d83..bf05c3b6a3 100644 --- a/physical/raft/raft.go +++ b/physical/raft/raft.go @@ -15,7 +15,6 @@ import ( "github.com/armon/go-metrics" "github.com/golang/protobuf/proto" - "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" wrapping "github.com/hashicorp/go-kms-wrapping" "github.com/hashicorp/go-raftchunking" @@ -219,7 +218,7 @@ func (b *RaftBackend) JoinConfig() ([]*LeaderJoinInfo, error) { var leaderInfos []*LeaderJoinInfo err := jsonutil.DecodeJSON([]byte(config), &leaderInfos) if err != nil { - return nil, errwrap.Wrapf("failed to decode retry_join config: {{err}}", err) + return nil, fmt.Errorf("failed to decode retry_join config: %w", err) } if len(leaderInfos) == 0 { @@ -238,7 +237,7 @@ func (b *RaftBackend) JoinConfig() ([]*LeaderJoinInfo, error) { info.Retry = true info.TLSConfig, err = parseTLSInfo(info) if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("failed to create tls config to communicate with leader node (retry_join index: %d): {{err}}", i), err) + return nil, fmt.Errorf("failed to create tls config to communicate with leader node (retry_join index: %d): %w", i, err) } } @@ -804,7 +803,7 @@ func (b *RaftBackend) SetupCluster(ctx context.Context, opts SetupOpts) error { recoveryConfig, err := raft.ReadConfigJSON(peersFile) if err != nil { - return errwrap.Wrapf("raft recovery failed to parse peers.json: {{err}}", err) + return fmt.Errorf("raft recovery failed to parse peers.json: %w", err) } // Non-voting servers are only allowed in enterprise. 
If Suffrage is disabled, @@ -819,12 +818,12 @@ func (b *RaftBackend) SetupCluster(ctx context.Context, opts SetupOpts) error { err = raft.RecoverCluster(raftConfig, b.fsm, b.logStore, b.stableStore, b.snapStore, b.raftTransport, recoveryConfig) if err != nil { - return errwrap.Wrapf("raft recovery failed: {{err}}", err) + return fmt.Errorf("raft recovery failed: %w", err) } err = os.Remove(peersFile) if err != nil { - return errwrap.Wrapf("raft recovery failed to delete peers.json; please delete manually: {{err}}", err) + return fmt.Errorf("raft recovery failed to delete peers.json; please delete manually: %w", err) } b.logger.Info("raft recovery deleted peers.json") } @@ -832,7 +831,7 @@ func (b *RaftBackend) SetupCluster(ctx context.Context, opts SetupOpts) error { if opts.RecoveryModeConfig != nil { err = raft.RecoverCluster(raftConfig, b.fsm, b.logStore, b.stableStore, b.snapStore, b.raftTransport, *opts.RecoveryModeConfig) if err != nil { - return errwrap.Wrapf("recovering raft cluster failed: {{err}}", err) + return fmt.Errorf("recovering raft cluster failed: %w", err) } } @@ -857,7 +856,7 @@ func (b *RaftBackend) SetupCluster(ctx context.Context, opts SetupOpts) error { case <-ctx.Done(): future := raftObj.Shutdown() if future.Error() != nil { - return errwrap.Wrapf("shutdown while waiting for leadership: {{err}}", future.Error()) + return fmt.Errorf("shutdown while waiting for leadership: %w", future.Error()) } return errors.New("shutdown while waiting for leadership") diff --git a/physical/raft/streamlayer.go b/physical/raft/streamlayer.go index 349606b527..ed154f8bcd 100644 --- a/physical/raft/streamlayer.go +++ b/physical/raft/streamlayer.go @@ -19,7 +19,6 @@ import ( "sync" "time" - "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" uuid "github.com/hashicorp/go-uuid" "github.com/hashicorp/raft" @@ -119,7 +118,7 @@ func GenerateTLSKey(reader io.Reader) (*TLSKey, error) { certBytes, err := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key) if err != nil { - return nil, errwrap.Wrapf("unable to generate local cluster certificate: {{err}}", err) + return nil, fmt.Errorf("unable to generate local cluster certificate: %w", err) } return &TLSKey{ @@ -226,7 +225,7 @@ func (l *raftLayer) setTLSKeyring(keyring *TLSKeyring) error { parsedCert, err := x509.ParseCertificate(key.CertBytes) if err != nil { - return errwrap.Wrapf("error parsing raft cluster certificate: {{err}}", err) + return fmt.Errorf("error parsing raft cluster certificate: %w", err) } key.parsedCert = parsedCert diff --git a/physical/s3/s3.go b/physical/s3/s3.go index 7c4822a3a3..2329580145 100644 --- a/physical/s3/s3.go +++ b/physical/s3/s3.go @@ -18,7 +18,6 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" - "github.com/hashicorp/errwrap" "github.com/hashicorp/go-cleanhttp" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/sdk/helper/awsutil" @@ -129,7 +128,7 @@ func NewS3Backend(conf map[string]string, logger log.Logger) (physical.Backend, _, err = s3conn.ListObjects(&s3.ListObjectsInput{Bucket: &bucket}) if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("unable to access bucket %q in region %q: {{err}}", bucket, region), err) + return nil, fmt.Errorf("unable to access bucket %q in region %q: %w", bucket, region, err) } maxParStr, ok := conf["max_parallel"] @@ -137,7 +136,7 @@ func NewS3Backend(conf map[string]string, logger log.Logger) (physical.Backend, if ok { maxParInt, err = 
strconv.Atoi(maxParStr) if err != nil { - return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err) + return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err) } if logger.IsDebug() { logger.Debug("max_parallel set", "max_parallel", maxParInt) diff --git a/physical/spanner/spanner.go b/physical/spanner/spanner.go index 8e4e13265f..1202f9c9f1 100644 --- a/physical/spanner/spanner.go +++ b/physical/spanner/spanner.go @@ -10,7 +10,6 @@ import ( "time" metrics "github.com/armon/go-metrics" - "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/sdk/helper/strutil" "github.com/hashicorp/vault/sdk/helper/useragent" @@ -147,7 +146,7 @@ func NewBackend(c map[string]string, logger log.Logger) (physical.Backend, error var err error haEnabled, err = strconv.ParseBool(haEnabledStr) if err != nil { - return nil, errwrap.Wrapf("failed to parse HA enabled: {{err}}", err) + return nil, fmt.Errorf("failed to parse HA enabled: %w", err) } } if haEnabled { @@ -158,14 +157,14 @@ func NewBackend(c map[string]string, logger log.Logger) (physical.Backend, error option.WithUserAgent(useragent.String()), ) if err != nil { - return nil, errwrap.Wrapf("failed to create HA client: {{err}}", err) + return nil, fmt.Errorf("failed to create HA client: %w", err) } } // Max parallel maxParallel, err := extractInt(c["max_parallel"]) if err != nil { - return nil, errwrap.Wrapf("failed to parse max_parallel: {{err}}", err) + return nil, fmt.Errorf("failed to parse max_parallel: %w", err) } logger.Debug("configuration", @@ -182,7 +181,7 @@ func NewBackend(c map[string]string, logger log.Logger) (physical.Backend, error option.WithUserAgent(useragent.String()), ) if err != nil { - return nil, errwrap.Wrapf("failed to create spanner client: {{err}}", err) + return nil, fmt.Errorf("failed to create spanner client: %w", err) } return &Backend{ @@ -213,7 +212,7 @@ func (b *Backend) Put(ctx context.Context, entry *physical.Entry) error { "Value": entry.Value, }) if _, err := b.client.Apply(ctx, []*spanner.Mutation{m}); err != nil { - return errwrap.Wrapf("failed to put data: {{err}}", err) + return fmt.Errorf("failed to put data: %w", err) } return nil } @@ -232,12 +231,12 @@ func (b *Backend) Get(ctx context.Context, key string) (*physical.Entry, error) return nil, nil } if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("failed to read value for %q: {{err}}", key), err) + return nil, fmt.Errorf("failed to read value for %q: %w", key, err) } var value []byte if err := row.Column(0, &value); err != nil { - return nil, errwrap.Wrapf("failed to decode value into bytes: {{err}}", err) + return nil, fmt.Errorf("failed to decode value into bytes: %w", err) } return &physical.Entry{ @@ -257,7 +256,7 @@ func (b *Backend) Delete(ctx context.Context, key string) error { // Delete m := spanner.Delete(b.table, spanner.Key{key}) if _, err := b.client.Apply(ctx, []*spanner.Mutation{m}); err != nil { - return errwrap.Wrapf("failed to delete key: {{err}}", err) + return fmt.Errorf("failed to delete key: %w", err) } return nil @@ -291,12 +290,12 @@ func (b *Backend) List(ctx context.Context, prefix string) ([]string, error) { break } if err != nil { - return nil, errwrap.Wrapf("failed to read row: {{err}}", err) + return nil, fmt.Errorf("failed to read row: %w", err) } var key string if err := row.Column(0, &key); err != nil { - return nil, errwrap.Wrapf("failed to decode key into string: {{err}}", err) + return nil, fmt.Errorf("failed to decode key into string: 
%w", err) } // The results will include the full prefix (folder) and any deeply-nested @@ -351,7 +350,7 @@ func (b *Backend) Transaction(ctx context.Context, txns []*physical.TxnEntry) er // Transactivate! if _, err := b.client.Apply(ctx, ms); err != nil { - return errwrap.Wrapf("failed to commit transaction: {{err}}", err) + return fmt.Errorf("failed to commit transaction: %w", err) } return nil diff --git a/physical/spanner/spanner_ha.go b/physical/spanner/spanner_ha.go index ab9c9a855c..f3284fc270 100644 --- a/physical/spanner/spanner_ha.go +++ b/physical/spanner/spanner_ha.go @@ -8,7 +8,6 @@ import ( "cloud.google.com/go/spanner" metrics "github.com/armon/go-metrics" - "github.com/hashicorp/errwrap" uuid "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/sdk/physical" "github.com/pkg/errors" @@ -104,7 +103,7 @@ func (b *Backend) HAEnabled() bool { func (b *Backend) LockWith(key, value string) (physical.Lock, error) { identity, err := uuid.GenerateUUID() if err != nil { - return nil, errwrap.Wrapf("lock with: {{err}}", err) + return nil, fmt.Errorf("lock with: %w", err) } return &Lock{ backend: b, @@ -137,7 +136,7 @@ func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { // occurs. acquired, err := l.attemptLock(stopCh) if err != nil { - return nil, errwrap.Wrapf("lock: {{err}}", err) + return nil, fmt.Errorf("lock: %w", err) } if !acquired { return nil, nil @@ -190,7 +189,7 @@ func (l *Lock) Unlock() error { var r LockRecord if derr := row.ToStruct(&r); derr != nil { - return errwrap.Wrapf("failed to decode to struct: {{err}}", derr) + return fmt.Errorf("failed to decode to struct: %w", derr) } // If the identity is different, that means that between the time that after @@ -204,7 +203,7 @@ func (l *Lock) Unlock() error { spanner.Delete(l.backend.haTable, spanner.Key{l.key}), }) }); err != nil { - return errwrap.Wrapf("unlock: {{err}}", err) + return fmt.Errorf("unlock: %w", err) } // We are no longer holding the lock @@ -239,7 +238,7 @@ func (l *Lock) attemptLock(stopCh <-chan struct{}) (bool, error) { case <-ticker.C: acquired, err := l.writeLock() if err != nil { - return false, errwrap.Wrapf("attempt lock: {{err}}", err) + return false, fmt.Errorf("attempt lock: %w", err) } if !acquired { continue @@ -353,7 +352,7 @@ func (l *Lock) writeLock() (bool, error) { if row != nil { var r LockRecord if derr := row.ToStruct(&r); derr != nil { - return errwrap.Wrapf("failed to decode to struct: {{err}}", derr) + return fmt.Errorf("failed to decode to struct: %w", derr) } // If the key is empty or the identity is ours or the ttl expired, we can @@ -370,10 +369,10 @@ func (l *Lock) writeLock() (bool, error) { Timestamp: time.Now().UTC(), }) if err != nil { - return errwrap.Wrapf("failed to generate struct: {{err}}", err) + return fmt.Errorf("failed to generate struct: %w", err) } if err := txn.BufferWrite([]*spanner.Mutation{m}); err != nil { - return errwrap.Wrapf("failed to write: {{err}}", err) + return fmt.Errorf("failed to write: %w", err) } // Mark that the lock was acquired @@ -382,7 +381,7 @@ func (l *Lock) writeLock() (bool, error) { return nil }) if err != nil { - return false, errwrap.Wrapf("write lock: {{err}}", err) + return false, fmt.Errorf("write lock: %w", err) } return lockWritten, nil @@ -396,12 +395,12 @@ func (l *Lock) get(ctx context.Context) (*LockRecord, error) { return nil, nil } if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("failed to read value for %q: {{err}}", l.key), err) + return nil, fmt.Errorf("failed to read value for %q: %w", 
l.key, err) } var r LockRecord if err := row.ToStruct(&r); err != nil { - return nil, errwrap.Wrapf("failed to decode lock: {{err}}", err) + return nil, fmt.Errorf("failed to decode lock: %w", err) } return &r, nil } diff --git a/physical/swift/swift.go b/physical/swift/swift.go index 260a5bedc5..20de749b19 100644 --- a/physical/swift/swift.go +++ b/physical/swift/swift.go @@ -12,7 +12,6 @@ import ( log "github.com/hashicorp/go-hclog" metrics "github.com/armon/go-metrics" - "github.com/hashicorp/errwrap" cleanhttp "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/vault/sdk/helper/strutil" "github.com/hashicorp/vault/sdk/physical" @@ -128,7 +127,7 @@ func NewSwiftBackend(conf map[string]string, logger log.Logger) (physical.Backen _, _, err = c.Container(container) if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("Unable to access container %q: {{err}}", container), err) + return nil, fmt.Errorf("Unable to access container %q: %w", container, err) } maxParStr, ok := conf["max_parallel"] @@ -136,7 +135,7 @@ func NewSwiftBackend(conf map[string]string, logger log.Logger) (physical.Backen if ok { maxParInt, err = strconv.Atoi(maxParStr) if err != nil { - return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err) + return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err) } if logger.IsDebug() { logger.Debug("max_parallel set", "max_parallel", maxParInt) diff --git a/physical/zookeeper/zookeeper.go b/physical/zookeeper/zookeeper.go index 47a0fb3eb7..870999220c 100644 --- a/physical/zookeeper/zookeeper.go +++ b/physical/zookeeper/zookeeper.go @@ -13,7 +13,6 @@ import ( "sync" "time" - "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/sdk/helper/parseutil" "github.com/hashicorp/vault/sdk/physical" @@ -129,14 +128,14 @@ func NewZooKeeperBackend(conf map[string]string, logger log.Logger) (physical.Ba // We have all of the configuration in hand - let's try and connect to ZK client, _, err := createClient(conf, machines, time.Second) if err != nil { - return nil, errwrap.Wrapf("client setup failed: {{err}}", err) + return nil, fmt.Errorf("client setup failed: %w", err) } // ZK AddAuth API if the user asked for it if useAddAuth { err = client.AddAuth(schema, []byte(owner)) if err != nil { - return nil, errwrap.Wrapf("ZooKeeper rejected authentication information provided at auth_info: {{err}}", err) + return nil, fmt.Errorf("ZooKeeper rejected authentication information provided at auth_info: %w", err) } } @@ -163,7 +162,7 @@ func createClient(conf map[string]string, machines string, timeout time.Duration if ok && isTlsEnabledStr != "" { parsedBoolval, err := parseutil.ParseBool(isTlsEnabledStr) if err != nil { - return nil, nil, errwrap.Wrapf("failed parsing tls_enabled parameter: {{err}}", err) + return nil, nil, fmt.Errorf("failed parsing tls_enabled parameter: %w", err) } isTlsEnabled = parsedBoolval } @@ -194,7 +193,7 @@ func customTLSDial(conf map[string]string, machines string) zk.Dialer { if strings.Contains(sParseErr.Error(), "missing port") { serverName = addr } else { - return nil, errwrap.Wrapf("failed parsing the server address for 'serverName' setting {{err}}", sParseErr) + return nil, fmt.Errorf("failed parsing the server address for 'serverName' setting: %w", sParseErr) } } @@ -204,7 +203,7 @@ func customTLSDial(conf map[string]string, machines string) zk.Dialer { if ok && tlsSkipVerify != "" { b, err := parseutil.ParseBool(tlsSkipVerify) if err != nil { - return nil, errwrap.Wrapf("failed
parsing tls_skip_verify parameter: {{err}}", err) + return nil, fmt.Errorf("failed parsing tls_skip_verify parameter: %w", err) } insecureSkipVerify = b } @@ -220,7 +219,7 @@ func customTLSDial(conf map[string]string, machines string) zk.Dialer { if lookupOk && configVal != "" { parsedIpSanCheck, ipSanErr := parseutil.ParseBool(configVal) if ipSanErr != nil { - return nil, errwrap.Wrapf("failed parsing tls_verify_ip parameter: {{err}}", ipSanErr) + return nil, fmt.Errorf("failed parsing tls_verify_ip parameter: %w", ipSanErr) } ipSanCheck = parsedIpSanCheck } @@ -270,7 +269,7 @@ func customTLSDial(conf map[string]string, machines string) zk.Dialer { if okCert && okKey { tlsCert, err := tls.LoadX509KeyPair(conf["tls_cert_file"], conf["tls_key_file"]) if err != nil { - return nil, errwrap.Wrapf("client tls setup failed for ZK: {{err}}", err) + return nil, fmt.Errorf("client tls setup failed for ZK: %w", err) } tlsClientConfig.Certificates = []tls.Certificate{tlsCert} @@ -281,7 +280,7 @@ func customTLSDial(conf map[string]string, machines string) zk.Dialer { data, err := ioutil.ReadFile(tlsCaFile) if err != nil { - return nil, errwrap.Wrapf("failed to read ZK CA file: {{err}}", err) + return nil, fmt.Errorf("failed to read ZK CA file: %w", err) } if !caPool.AppendCertsFromPEM(data) { @@ -346,7 +345,7 @@ func (c *ZooKeeperBackend) cleanupLogicalPath(path string) error { _, stat, err := c.client.Exists(fullPath) if err != nil { - return errwrap.Wrapf("failed to acquire node data: {{err}}", err) + return fmt.Errorf("failed to acquire node data: %w", err) } if stat.DataLength > 0 && stat.NumChildren > 0 { @@ -358,7 +357,7 @@ func (c *ZooKeeperBackend) cleanupLogicalPath(path string) error { } else { // Empty node, lets clean it up! if err := c.client.Delete(fullPath, -1); err != nil && err != zk.ErrNoNode { - return errwrap.Wrapf(fmt.Sprintf("removal of node %q failed: {{err}}", fullPath), err) + return fmt.Errorf("removal of node %q failed: %w", fullPath, err) } } } @@ -426,7 +425,7 @@ func (c *ZooKeeperBackend) Delete(ctx context.Context, key string) error { // Mask if the node does not exist if err != nil && err != zk.ErrNoNode { - return errwrap.Wrapf(fmt.Sprintf("failed to remove %q: {{err}}", fullPath), err) + return fmt.Errorf("failed to remove %q: %w", fullPath, err) } err = c.cleanupLogicalPath(key) @@ -545,7 +544,7 @@ func (i *ZooKeeperHALock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) // Watch for Events which could result in loss of our zkLock and close(i.leaderCh) currentVal, _, lockeventCh, err := i.in.client.GetW(lockpath) if err != nil { - return nil, errwrap.Wrapf("unable to watch HA lock: {{err}}", err) + return nil, fmt.Errorf("unable to watch HA lock: %w", err) } if i.value != string(currentVal) { return nil, fmt.Errorf("lost HA lock immediately before watch")
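Not part of the patch: the hunks above are mechanical, but the point of moving from errwrap's "{{err}}" templates to fmt.Errorf with %w is that the wrapped error stays in the standard error chain, so callers can match it with errors.Is and errors.As. A minimal, self-contained sketch of that behavior follows; loadConfig and the config path are hypothetical and used only for illustration.

package main

import (
	"errors"
	"fmt"
	"os"
)

// loadConfig is a stand-in for the backend constructors above: it adds context
// to the underlying error with %w, the same pattern used throughout the diff.
func loadConfig(path string) error {
	f, err := os.Open(path)
	if err != nil {
		// %w keeps err in the chain instead of flattening it into a string.
		return fmt.Errorf("failed to open config %q: %w", path, err)
	}
	defer f.Close()
	return nil
}

func main() {
	// Path assumed not to exist, so loadConfig returns a wrapped error.
	err := loadConfig("/nonexistent/vault.hcl")

	// errors.Is walks the chain created by %w, so the underlying sentinel
	// is still detectable through the added context.
	fmt.Println(errors.Is(err, os.ErrNotExist)) // true

	// errors.As recovers the concrete wrapped type when callers need details.
	var pathErr *os.PathError
	if errors.As(err, &pathErr) {
		fmt.Println("underlying op:", pathErr.Op) // "open"
	}
}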