From d6a2fe02d395a59d952d74bcf77da02bc53ca32c Mon Sep 17 00:00:00 2001
From: Klaus Post
Date: Fri, 9 Jul 2021 11:29:16 -0700
Subject: [PATCH] Add admin file inspector (#12635)

Download files from *any* bucket/path as an encrypted zip file.
The key is included in the response but can be separated so the zip
and the key don't have to be sent on the same channel.

Requires https://github.com/minio/pkg/pull/6
---
 cmd/admin-handlers.go           | 104 ++++++++++++++++++++++++++++++
 cmd/admin-router.go             |   1 +
 cmd/erasure-server-pool.go      |  33 +++++++++
 cmd/fs-v1.go                    |  17 +++++
 cmd/naughty-disk_test.go        |   7 +++
 cmd/storage-interface.go        |   1 +
 cmd/storage-rest-client.go      |  17 +++++
 cmd/storage-rest-common.go      |   1 +
 cmd/storage-rest-server.go      |  19 ++++++
 cmd/storagemetric_string.go     |   7 ++-
 cmd/xl-storage-disk-id-check.go |  17 +++++
 cmd/xl-storage.go               |  29 +++++++++
 docs/debugging/inspect/main.go  | 108 ++++++++++++++++++++++++++++++++
 go.mod                          |   2 +-
 go.sum                          |   3 +-
 15 files changed, 361 insertions(+), 5 deletions(-)
 create mode 100644 docs/debugging/inspect/main.go

diff --git a/cmd/admin-handlers.go b/cmd/admin-handlers.go
index 0732b609b..9f5977dd4 100644
--- a/cmd/admin-handlers.go
+++ b/cmd/admin-handlers.go
@@ -19,6 +19,7 @@ package cmd
 
 import (
 	"context"
+	crand "crypto/rand"
 	"crypto/subtle"
 	"crypto/tls"
 	"encoding/json"
@@ -37,6 +38,7 @@ import (
 	"time"
 
 	"github.com/gorilla/mux"
+	"github.com/klauspost/compress/zip"
 	"github.com/minio/kes"
 	"github.com/minio/madmin-go"
 	"github.com/minio/minio/internal/auth"
@@ -49,6 +51,7 @@ import (
 	"github.com/minio/minio/internal/logger/message/log"
 	iampolicy "github.com/minio/pkg/iam/policy"
 	xnet "github.com/minio/pkg/net"
+	"github.com/secure-io/sio-go"
 )
 
 const (
@@ -1951,3 +1954,104 @@ func checkConnection(endpointStr string, timeout time.Duration) error {
 	defer xhttp.DrainBody(resp.Body)
 	return nil
 }
+
+// getRawDataer provides an interface for getting raw FS files.
+type getRawDataer interface {
+	GetRawData(ctx context.Context, volume, file string, fn func(r io.Reader, host string, disk string, filename string, size int64, modtime time.Time) error) error
+}
+
+// InspectDataHandler - GET /minio/admin/v3/inspect-data
+// ----------
+// Download file from all nodes in a zip format
+func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Request) {
+	ctx := newContext(r, w, "InspectData")
+
+	// Validate request signature.
+	_, adminAPIErr := checkAdminRequestAuth(ctx, r, iampolicy.InspectDataAction, "")
+	if adminAPIErr != ErrNone {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
+		return
+	}
+	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
+
+	o, ok := newObjectLayerFn().(getRawDataer)
+	if !ok {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
+		return
+	}
+
+	volume := r.URL.Query().Get("volume")
+	file := r.URL.Query().Get("file")
+	if len(volume) == 0 {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidBucketName), r.URL)
+		return
+	}
+	if len(file) == 0 {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
+		return
+	}
+
+	var key [32]byte
+	// MUST use crypto/rand
+	n, err := crand.Read(key[:])
+	if err != nil || n != len(key) {
+		logger.LogIf(ctx, err)
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), r.URL)
+		return
+	}
+	stream, err := sio.AES_256_GCM.Stream(key[:])
+	if err != nil {
+		logger.LogIf(ctx, err)
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), r.URL)
+		return
+	}
+	// Zero nonce, we only use each key once, and 32 bytes is plenty.
+	nonce := make([]byte, stream.NonceSize())
+	encw := stream.EncryptWriter(w, nonce, nil)
+
+	defer encw.Close()
+
+	// Write a version for making *incompatible* changes.
+	// The AdminClient will reject any version it does not know.
+	w.Write([]byte{1})
+
+	// Write key first (without encryption)
+	_, err = w.Write(key[:])
+	if err != nil {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), r.URL)
+		return
+	}
+
+	// Initialize a zip writer which will provide a zipped content
+	// of profiling data of all nodes
+	zipWriter := zip.NewWriter(encw)
+	defer zipWriter.Close()
+
+	err = o.GetRawData(ctx, volume, file, func(r io.Reader, host, disk, filename string, size int64, modtime time.Time) error {
+		// Prefix host+disk
+		filename = path.Join(host, disk, filename)
+		header, zerr := zip.FileInfoHeader(dummyFileInfo{
+			name:    filename,
+			size:    size,
+			mode:    0600,
+			modTime: modtime,
+			isDir:   false,
+			sys:     nil,
+		})
+		if zerr != nil {
+			logger.LogIf(ctx, zerr)
+			return nil
+		}
+		header.Method = zip.Deflate
+		zwriter, zerr := zipWriter.CreateHeader(header)
+		if zerr != nil {
+			logger.LogIf(ctx, zerr)
+			return nil
+		}
+		if _, err = io.Copy(zwriter, r); err != nil {
+			logger.LogIf(ctx, err)
+		}
+		return nil
+	})
+	logger.LogIf(ctx, err)
+}
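
The handler above fixes the wire layout of an inspect-data response: one plaintext version
byte, the 32-byte key in the clear, then a sio AES-256-GCM stream (zero nonce, single-use
key) carrying the zip archive. The sketch below is editorial, not part of the patch: a
minimal standalone program showing how a consumer that already holds the raw response body
could split off the key and decrypt the archive. The program and the output filename are
hypothetical; in practice the madmin AdminClient is the intended consumer.

// Editorial sketch, not part of the patch. Assumes the response layout written
// by InspectDataHandler above: 1 version byte, 32-byte key in the clear, then a
// sio AES-256-GCM stream with an all-zero nonce carrying the zip.
package main

import (
	"errors"
	"io"
	"log"
	"os"

	"github.com/secure-io/sio-go"
)

// decryptInspectResponse is a hypothetical helper name, not MinIO API.
func decryptInspectResponse(body io.Reader, out io.Writer) error {
	var hdr [1 + 32]byte // version byte followed by the 256-bit key
	if _, err := io.ReadFull(body, hdr[:]); err != nil {
		return err
	}
	if hdr[0] != 1 {
		return errors.New("unknown inspect-data response version")
	}
	stream, err := sio.AES_256_GCM.Stream(hdr[1:])
	if err != nil {
		return err
	}
	// The server encrypts with an all-zero nonce because the key is single-use.
	nonce := make([]byte, stream.NonceSize())
	_, err = io.Copy(out, stream.DecryptReader(body, nonce, nil))
	return err
}

func main() {
	// Reads the raw response body from stdin, writes the decrypted zip to disk.
	out, err := os.Create("inspect.zip") // hypothetical output name
	if err != nil {
		log.Fatalln(err)
	}
	defer out.Close()
	if err := decryptInspectResponse(os.Stdin, out); err != nil {
		log.Fatalln(err)
	}
}
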
diff --git a/cmd/admin-router.go b/cmd/admin-router.go
index 7013cddde..84a76c68e 100644
--- a/cmd/admin-router.go
+++ b/cmd/admin-router.go
@@ -54,6 +54,7 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
 
 		// Info operations
 		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/info").HandlerFunc(httpTraceAll(adminAPI.ServerInfoHandler))
+		adminRouter.Methods(http.MethodGet).Path(adminVersion+"/inspect-data").HandlerFunc(httpTraceHdrs(adminAPI.InspectDataHandler)).Queries("volume", "{volume:.*}", "file", "{file:.*}")
 
 		// StorageInfo operations
 		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/storageinfo").HandlerFunc(httpTraceAll(adminAPI.StorageInfoHandler))
diff --git a/cmd/erasure-server-pool.go b/cmd/erasure-server-pool.go
index 5179466bd..0dbacc336 100644
--- a/cmd/erasure-server-pool.go
+++ b/cmd/erasure-server-pool.go
@@ -147,6 +147,39 @@ func (z *erasureServerPools) GetDisksID(ids ...string) []StorageAPI {
 	return res
 }
 
+// GetRawData will return all files with a given raw path to the callback.
+// Errors are ignored, only errors from the callback are returned.
+// For now only direct file paths are supported.
+func (z *erasureServerPools) GetRawData(ctx context.Context, volume, file string, fn func(r io.Reader, host string, disk string, filename string, size int64, modtime time.Time) error) error {
+	for _, s := range z.serverPools {
+		for _, disks := range s.erasureDisks {
+			for i, disk := range disks {
+				if disk == OfflineDisk {
+					continue
+				}
+				si, err := disk.StatInfoFile(ctx, volume, file)
+				if err != nil {
+					continue
+				}
+				r, err := disk.ReadFileStream(ctx, volume, file, 0, si.Size)
+				if err != nil {
+					continue
+				}
+				defer r.Close()
+				did, err := disk.GetDiskID()
+				if err != nil {
+					did = fmt.Sprintf("disk-%d", i)
+				}
+				err = fn(r, disk.Hostname(), did, pathJoin(volume, file), si.Size, si.ModTime)
+				if err != nil {
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
+
 func (z *erasureServerPools) SetDriveCounts() []int {
 	setDriveCounts := make([]int, len(z.serverPools))
 	for i := range z.serverPools {
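
GetRawData above visits every pool, every erasure set, and every online disk, and invokes
the callback once per copy of the requested file it finds; InspectDataHandler turns each
callback into one zip entry named host/disk/path. The sketch below is editorial and
hypothetical: a function that could sit alongside the handler in package cmd (it relies on
the unexported getRawDataer interface and other cmd types) and merely inventories where
copies live instead of zipping them.

// Editorial sketch, not part of the patch. listRawCopies is a hypothetical
// helper that would live in package cmd; it prints one line per copy of the
// file instead of archiving it.
func listRawCopies(ctx context.Context, objAPI ObjectLayer, volume, file string) error {
	o, ok := objAPI.(getRawDataer)
	if !ok {
		return NotImplemented{}
	}
	return o.GetRawData(ctx, volume, file,
		func(r io.Reader, host, disk, filename string, size int64, modtime time.Time) error {
			fmt.Printf("%s/%s/%s: %d bytes, modified %s\n", host, disk, filename, size, modtime)
			// Drain the per-copy stream so the caller can move on to the next disk.
			_, err := io.Copy(io.Discard, r)
			return err
		})
}
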
diff --git a/cmd/fs-v1.go b/cmd/fs-v1.go
index b0c82ca1d..d295f57eb 100644
--- a/cmd/fs-v1.go
+++ b/cmd/fs-v1.go
@@ -27,6 +27,7 @@ import (
 	"os"
 	"os/user"
 	"path"
+	"path/filepath"
 	"sort"
 	"strings"
 	"sync"
@@ -1509,3 +1510,19 @@ func (fs *FSObjects) TransitionObject(ctx context.Context, bucket, object string
 func (fs *FSObjects) RestoreTransitionedObject(ctx context.Context, bucket, object string, opts ObjectOptions) error {
 	return NotImplemented{}
 }
+
+// GetRawData returns raw file data to the callback.
+// Errors are ignored, only errors from the callback are returned.
+// For now only direct file paths are supported.
+func (fs *FSObjects) GetRawData(ctx context.Context, volume, file string, fn func(r io.Reader, host string, disk string, filename string, size int64, modtime time.Time) error) error {
+	f, err := os.Open(filepath.Join(fs.fsPath, volume, file))
+	if err != nil {
+		return nil
+	}
+	defer f.Close()
+	st, err := f.Stat()
+	if err != nil || st.IsDir() {
+		return nil
+	}
+	return fn(f, "fs", fs.fsUUID, file, st.Size(), st.ModTime())
+}
diff --git a/cmd/naughty-disk_test.go b/cmd/naughty-disk_test.go
index 9ce2f377a..a7be10a1c 100644
--- a/cmd/naughty-disk_test.go
+++ b/cmd/naughty-disk_test.go
@@ -291,3 +291,10 @@ func (d *naughtyDisk) VerifyFile(ctx context.Context, volume, path string, fi Fi
 	}
 	return d.disk.VerifyFile(ctx, volume, path, fi)
 }
+
+func (d *naughtyDisk) StatInfoFile(ctx context.Context, volume, path string) (stat StatInfo, err error) {
+	if err := d.calcError(); err != nil {
+		return stat, err
+	}
+	return d.disk.StatInfoFile(ctx, volume, path)
+}
diff --git a/cmd/storage-interface.go b/cmd/storage-interface.go
index ac2fdf48d..eb0b30167 100644
--- a/cmd/storage-interface.go
+++ b/cmd/storage-interface.go
@@ -74,6 +74,7 @@ type StorageAPI interface {
 	CheckFile(ctx context.Context, volume string, path string) (err error)
 	Delete(ctx context.Context, volume string, path string, recursive bool) (err error)
 	VerifyFile(ctx context.Context, volume, path string, fi FileInfo) error
+	StatInfoFile(ctx context.Context, volume, path string) (stat StatInfo, err error)
 
 	// Write all data, syncs the data to disk.
 	// Should be used for smaller payloads.
diff --git a/cmd/storage-rest-client.go b/cmd/storage-rest-client.go
index de927d794..55f511176 100644
--- a/cmd/storage-rest-client.go
+++ b/cmd/storage-rest-client.go
@@ -684,6 +684,23 @@ func (client *storageRESTClient) VerifyFile(ctx context.Context, volume, path st
 	return toStorageErr(verifyResp.Err)
 }
 
+func (client *storageRESTClient) StatInfoFile(ctx context.Context, volume, path string) (stat StatInfo, err error) {
+	values := make(url.Values)
+	values.Set(storageRESTVolume, volume)
+	values.Set(storageRESTFilePath, path)
+	respBody, err := client.call(ctx, storageRESTMethodStatInfoFile, values, nil, -1)
+	if err != nil {
+		return stat, err
+	}
+	defer xhttp.DrainBody(respBody)
+	respReader, err := waitForHTTPResponse(respBody)
+	if err != nil {
+		return stat, err
+	}
+	err = stat.DecodeMsg(msgpNewReader(respReader))
+	return stat, err
+}
+
 // Close - marks the client as closed.
 func (client *storageRESTClient) Close() error {
 	client.restClient.Close()
diff --git a/cmd/storage-rest-common.go b/cmd/storage-rest-common.go
index f05a556a9..7e0cb3943 100644
--- a/cmd/storage-rest-common.go
+++ b/cmd/storage-rest-common.go
@@ -52,6 +52,7 @@ const (
 	storageRESTMethodRenameFile   = "/renamefile"
 	storageRESTMethodVerifyFile   = "/verifyfile"
 	storageRESTMethodWalkDir      = "/walkdir"
+	storageRESTMethodStatInfoFile = "/statfile"
 )
 
 const (
diff --git a/cmd/storage-rest-server.go b/cmd/storage-rest-server.go
index ba71454d7..0ab761b80 100644
--- a/cmd/storage-rest-server.go
+++ b/cmd/storage-rest-server.go
@@ -1038,6 +1038,23 @@ func logFatalErrs(err error, endpoint Endpoint, exit bool) {
 	}
 }
 
+// StatInfoFile returns file stat info.
+func (s *storageRESTServer) StatInfoFile(w http.ResponseWriter, r *http.Request) {
+	if !s.IsValid(w, r) {
+		return
+	}
+	vars := mux.Vars(r)
+	volume := vars[storageRESTVolume]
+	filePath := vars[storageRESTFilePath]
+	done := keepHTTPResponseAlive(w)
+	si, err := s.storage.StatInfoFile(r.Context(), volume, filePath)
+	done(err)
+	if err != nil {
+		return
+	}
+	msgp.Encode(w, &si)
+}
+
 // registerStorageRPCRouter - register storage rpc router.
 func registerStorageRESTHandlers(router *mux.Router, endpointServerPools EndpointServerPools) {
 	storageDisks := make([][]*xlStorage, len(endpointServerPools))
@@ -1129,6 +1146,8 @@ func registerStorageRESTHandlers(router *mux.Router, endpointServerPools Endpoin
 				Queries(restQueries(storageRESTVolume, storageRESTFilePath)...)
 			subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodWalkDir).HandlerFunc(httpTraceHdrs(server.WalkDirHandler)).
 				Queries(restQueries(storageRESTVolume, storageRESTDirPath, storageRESTRecursive)...)
+			subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodStatInfoFile).HandlerFunc(httpTraceHdrs(server.StatInfoFile)).
+				Queries(restQueries(storageRESTVolume, storageRESTFilePath)...)
 		}
 	}
 }
diff --git a/cmd/storagemetric_string.go b/cmd/storagemetric_string.go
index 8026aabef..1acc69441 100644
--- a/cmd/storagemetric_string.go
+++ b/cmd/storagemetric_string.go
@@ -32,12 +32,13 @@ func _() {
 	_ = x[storageMetricUpdateMetadata-21]
 	_ = x[storageMetricReadVersion-22]
 	_ = x[storageMetricReadAll-23]
-	_ = x[storageMetricLast-24]
+	_ = x[storageStatInfoFile-24]
+	_ = x[storageMetricLast-25]
 }
 
-const _storageMetric_name = "MakeVolBulkMakeVolListVolsStatVolDeleteVolWalkDirListDirReadFileAppendFileCreateFileReadFileStreamRenameFileRenameDataCheckPartsCheckFileDeleteDeleteVersionsVerifyFileWriteAllDeleteVersionWriteMetadataUpdateMetadataReadVersionReadAllLast"
+const _storageMetric_name = "MakeVolBulkMakeVolListVolsStatVolDeleteVolWalkDirListDirReadFileAppendFileCreateFileReadFileStreamRenameFileRenameDataCheckPartsCheckFileDeleteDeleteVersionsVerifyFileWriteAllDeleteVersionWriteMetadataUpdateMetadataReadVersionReadAllstorageStatInfoFileLast"
 
-var _storageMetric_index = [...]uint8{0, 11, 18, 26, 33, 42, 49, 56, 64, 74, 84, 98, 108, 118, 128, 137, 143, 157, 167, 175, 188, 201, 215, 226, 233, 237}
+var _storageMetric_index = [...]uint16{0, 11, 18, 26, 33, 42, 49, 56, 64, 74, 84, 98, 108, 118, 128, 137, 143, 157, 167, 175, 188, 201, 215, 226, 233, 252, 256}
 
 func (i storageMetric) String() string {
 	if i >= storageMetric(len(_storageMetric_index)-1) {
diff --git a/cmd/xl-storage-disk-id-check.go b/cmd/xl-storage-disk-id-check.go
index 46fb035ca..36c3447bf 100644
--- a/cmd/xl-storage-disk-id-check.go
+++ b/cmd/xl-storage-disk-id-check.go
@@ -58,6 +58,7 @@ const (
 	storageMetricUpdateMetadata
 	storageMetricReadVersion
 	storageMetricReadAll
+	storageStatInfoFile
 
 	// .... add more
 
@@ -611,6 +612,22 @@ func (p *xlStorageDiskIDCheck) ReadAll(ctx context.Context, volume string, path
 	return p.storage.ReadAll(ctx, volume, path)
 }
 
+func (p *xlStorageDiskIDCheck) StatInfoFile(ctx context.Context, volume, path string) (stat StatInfo, err error) {
+	defer p.updateStorageMetrics(storageStatInfoFile, volume, path)()
+
+	select {
+	case <-ctx.Done():
+		return StatInfo{}, ctx.Err()
+	default:
+	}
+
+	if err = p.checkDiskStale(); err != nil {
+		return StatInfo{}, err
+	}
+
+	return p.storage.StatInfoFile(ctx, volume, path)
+}
+
 func storageTrace(s storageMetric, startTime time.Time, duration time.Duration, path string) madmin.TraceInfo {
 	return madmin.TraceInfo{
 		TraceType: madmin.TraceStorage,
diff --git a/cmd/xl-storage.go b/cmd/xl-storage.go
index c0d69cc06..7cf3d41a3 100644
--- a/cmd/xl-storage.go
+++ b/cmd/xl-storage.go
@@ -2284,3 +2284,32 @@ func (s *xlStorage) VerifyFile(ctx context.Context, volume, path string, fi File
 
 	return nil
 }
+
+func (s *xlStorage) StatInfoFile(ctx context.Context, volume, path string) (stat StatInfo, err error) {
+	volumeDir, err := s.getVolDir(volume)
+	if err != nil {
+		return stat, err
+	}
+
+	// Stat a volume entry.
+	if err = Access(volumeDir); err != nil {
+		if osIsNotExist(err) {
+			return stat, errVolumeNotFound
+		} else if isSysErrIO(err) {
+			return stat, errFaultyDisk
+		} else if osIsPermission(err) {
+			return stat, errVolumeAccessDenied
+		}
+		return stat, err
+	}
+	filePath := pathJoin(volumeDir, path)
+	if err := checkPathLength(filePath); err != nil {
+		return stat, err
+	}
+	st, _ := Lstat(filePath)
+	if st == nil {
+		return stat, errPathNotFound
+	}
+
+	return StatInfo{ModTime: st.ModTime(), Size: st.Size()}, nil
+}
diff --git a/docs/debugging/inspect/main.go b/docs/debugging/inspect/main.go
new file mode 100644
index 000000000..0c53257dd
--- /dev/null
+++ b/docs/debugging/inspect/main.go
@@ -0,0 +1,108 @@
+// Copyright (c) 2015-2021 MinIO, Inc.
+//
+// This file is part of MinIO Object Storage stack
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package main
+
+import (
+	"bufio"
+	"encoding/binary"
+	"encoding/hex"
+	"flag"
+	"fmt"
+	"hash/crc32"
+	"io"
+	"log"
+	"os"
+	"strings"
+
+	"github.com/secure-io/sio-go"
+)
+
+var (
+	key = flag.String("key", "", "decryption string")
+	//js = flag.Bool("json", false, "expect json input")
+)
+
+func main() {
+	flag.Parse()
+	args := flag.Args()
+	switch len(flag.Args()) {
+	case 0:
+		// Read from stdin, write to stdout.
+		decrypt(*key, os.Stdin, os.Stdout)
+		return
+	case 1:
+		r, err := os.Open(args[0])
+		fatalErr(err)
+		defer r.Close()
+		dstName := strings.TrimSuffix(args[0], ".enc") + ".zip"
+		w, err := os.Create(dstName)
+		fatalErr(err)
+		defer w.Close()
+		if len(*key) == 0 {
+			reader := bufio.NewReader(os.Stdin)
+			fmt.Print("Enter Decryption Key: ")
+
+			text, _ := reader.ReadString('\n')
+			// convert CRLF to LF
+			*key = strings.Replace(text, "\n", "", -1)
+		}
+		decrypt(*key, r, w)
+		fmt.Println("Output decrypted to", dstName)
+		return
+	default:
+		fatalIf(true, "Only 1 file can be decrypted")
+		os.Exit(1)
+	}
+}
+
+func decrypt(keyHex string, r io.Reader, w io.Writer) {
+	keyHex = strings.TrimSpace(keyHex)
+	fatalIf(len(keyHex) != 72, "Unexpected key length: %d, want 72", len(keyHex))
+	id, err := hex.DecodeString(keyHex[:8])
+	fatalErr(err)
+	key, err := hex.DecodeString(keyHex[8:])
+	fatalErr(err)
+
+	// Verify that CRC is ok.
+	want := binary.LittleEndian.Uint32(id)
+	got := crc32.ChecksumIEEE(key)
+	fatalIf(want != got, "Invalid key checksum, want %x, got %x", want, got)
+
+	stream, err := sio.AES_256_GCM.Stream(key)
+	fatalErr(err)
+
+	// Zero nonce, we only use each key once, and 32 bytes is plenty.
+	nonce := make([]byte, stream.NonceSize())
+	encr := stream.DecryptReader(r, nonce, nil)
+	_, err = io.Copy(w, encr)
+	fatalErr(err)
+}
+
+func fatalErr(err error) {
+	if err == nil {
+		return
+	}
+	log.Fatalln(err)
+}
+
+func fatalIf(b bool, msg string, v ...interface{}) {
+	if !b {
+		return
+	}
+	log.Fatalf(msg, v...)
+}
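
decrypt() above expects the key as 72 hex characters: 8 hex digits holding a little-endian
CRC32 (IEEE) checksum of the key, followed by 64 hex digits of the 32-byte AES key itself,
presumably assembled by the admin client from the raw key in the response (see the
minio/pkg pull request referenced in the commit message). The sketch below is editorial,
uses a hypothetical helper name, and only illustrates how such a string is composed and
why the length check is 72.

// Editorial sketch, not part of the patch. encodeInspectKey is a hypothetical
// helper: it prepends a little-endian CRC32 (IEEE) of the 32-byte key and
// hex-encodes both halves, yielding the 72-character string decrypt() accepts.
package main

import (
	"crypto/rand"
	"encoding/binary"
	"encoding/hex"
	"fmt"
	"hash/crc32"
	"log"
)

func encodeInspectKey(key [32]byte) string {
	id := make([]byte, 4)
	binary.LittleEndian.PutUint32(id, crc32.ChecksumIEEE(key[:]))
	return hex.EncodeToString(id) + hex.EncodeToString(key[:]) // 8 + 64 = 72 hex chars
}

func main() {
	// In practice the 32 bytes come from the inspect-data response; here they are random.
	var key [32]byte
	if _, err := rand.Read(key[:]); err != nil {
		log.Fatalln(err)
	}
	fmt.Println(encodeInspectKey(key))
}
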
diff --git a/go.mod b/go.mod
index 0a62d9342..f4372476b 100644
--- a/go.mod
+++ b/go.mod
@@ -48,7 +48,7 @@ require (
 	github.com/minio/madmin-go v1.0.13
 	github.com/minio/minio-go/v7 v7.0.13-0.20210706013812-337aa536abe2
 	github.com/minio/parquet-go v1.0.0
-	github.com/minio/pkg v1.0.8
+	github.com/minio/pkg v1.0.10
 	github.com/minio/rpc v1.0.0
 	github.com/minio/selfupdate v0.3.1
 	github.com/minio/sha256-simd v1.0.0
diff --git a/go.sum b/go.sum
index 0a81ddb3f..d3946e126 100644
--- a/go.sum
+++ b/go.sum
@@ -1034,8 +1034,9 @@ github.com/minio/parquet-go v1.0.0 h1:fcWsEvub04Nsl/4hiRBDWlbqd6jhacQieV07a+nhiI
 github.com/minio/parquet-go v1.0.0/go.mod h1:aQlkSOfOq2AtQKkuou3mosNVMwNokd+faTacxxk/oHA=
 github.com/minio/pkg v1.0.3/go.mod h1:obU54TZ9QlMv0TRaDgQ/JTzf11ZSXxnSfLrm4tMtBP8=
 github.com/minio/pkg v1.0.4/go.mod h1:obU54TZ9QlMv0TRaDgQ/JTzf11ZSXxnSfLrm4tMtBP8=
-github.com/minio/pkg v1.0.8 h1:lWQwHSeYlvnRoPpO+wS0I4mL6c00ABxBgbGjSmjwOi4=
 github.com/minio/pkg v1.0.8/go.mod h1:32x/3OmGB0EOi1N+3ggnp+B5VFkSBBB9svPMVfpnf14=
+github.com/minio/pkg v1.0.10 h1:fohpAm/0ttQFf4BzmzH5r6A9JUIfg63AyGCPM0f9/9U=
+github.com/minio/pkg v1.0.10/go.mod h1:32x/3OmGB0EOi1N+3ggnp+B5VFkSBBB9svPMVfpnf14=
 github.com/minio/rpc v1.0.0 h1:tJCHyLfQF6k6HlMQFpKy2FO/7lc2WP8gLDGMZp18E70=
 github.com/minio/rpc v1.0.0/go.mod h1:b9xqF7J0xeMXr0cM4pnBlP7Te7PDsG5JrRxl5dG6Ldk=
 github.com/minio/selfupdate v0.3.1 h1:BWEFSNnrZVMUWXbXIgLDNDjbejkmpAmZvy/nCz1HlEs=