diff --git a/cmd/bitrot-streaming.go b/cmd/bitrot-streaming.go
index d707c70e2..7c1a313b7 100644
--- a/cmd/bitrot-streaming.go
+++ b/cmd/bitrot-streaming.go
@@ -20,6 +20,7 @@ package cmd
 import (
 	"bytes"
 	"context"
+	"errors"
 	"hash"
 	"io"
 	"sync"
@@ -37,12 +38,22 @@ type streamingBitrotWriter struct {
 	shardSize int64
 	canClose  *sync.WaitGroup
 	byteBuf   []byte
+	finished  bool
 }
 
 func (b *streamingBitrotWriter) Write(p []byte) (int, error) {
 	if len(p) == 0 {
 		return 0, nil
 	}
+	if b.finished {
+		return 0, errors.New("bitrot write not allowed")
+	}
+	if int64(len(p)) > b.shardSize {
+		return 0, errors.New("unexpected bitrot buffer size")
+	}
+	if int64(len(p)) < b.shardSize {
+		b.finished = true
+	}
 	b.h.Reset()
 	b.h.Write(p)
 	hashBytes := b.h.Sum(nil)
diff --git a/cmd/erasure-multipart.go b/cmd/erasure-multipart.go
index f8a3bcdf4..a0d47849f 100644
--- a/cmd/erasure-multipart.go
+++ b/cmd/erasure-multipart.go
@@ -626,17 +626,13 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
 	switch size := data.Size(); {
 	case size == 0:
 		buffer = make([]byte, 1) // Allocate at least a byte to reach EOF
-	case size == -1:
-		if size := data.ActualSize(); size > 0 && size < fi.Erasure.BlockSize {
-			// Account for padding and forced compression overhead and encryption.
-			buffer = make([]byte, data.ActualSize()+256+32+32, data.ActualSize()*2+512)
+	case size >= fi.Erasure.BlockSize || size == -1:
+		if int64(globalBytePoolCap.Load().Width()) < fi.Erasure.BlockSize {
+			buffer = make([]byte, fi.Erasure.BlockSize, 2*fi.Erasure.BlockSize)
 		} else {
 			buffer = globalBytePoolCap.Load().Get()
 			defer globalBytePoolCap.Load().Put(buffer)
 		}
-	case size >= fi.Erasure.BlockSize:
-		buffer = globalBytePoolCap.Load().Get()
-		defer globalBytePoolCap.Load().Put(buffer)
 	case size < fi.Erasure.BlockSize:
 		// No need to allocate fully fi.Erasure.BlockSize buffer if the incoming data is smaller.
 		buffer = make([]byte, size, 2*size+int64(fi.Erasure.ParityBlocks+fi.Erasure.DataBlocks-1))
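
Note: the following is a minimal, self-contained sketch of the invariant the new Write guards appear to enforce: each shard write must be at most shardSize bytes, a shorter write can only be the final one, and any write after that is rejected. The shardWriter type below is a hypothetical stand-in used only for illustration; it is not MinIO's streamingBitrotWriter and omits hashing and I/O entirely.

package main

import (
	"errors"
	"fmt"
)

// shardWriter is a simplified, hypothetical stand-in that models only the
// size checks added in the diff above, not bitrot hashing or disk writes.
type shardWriter struct {
	shardSize int64
	finished  bool
}

func (w *shardWriter) Write(p []byte) (int, error) {
	if len(p) == 0 {
		return 0, nil
	}
	// Once a short (final) shard has been written, further writes are rejected.
	if w.finished {
		return 0, errors.New("bitrot write not allowed")
	}
	// A shard larger than shardSize is never expected from the erasure layer.
	if int64(len(p)) > w.shardSize {
		return 0, errors.New("unexpected bitrot buffer size")
	}
	// A shard smaller than shardSize can only be the last one of the stream.
	if int64(len(p)) < w.shardSize {
		w.finished = true
	}
	return len(p), nil
}

func main() {
	w := &shardWriter{shardSize: 4}
	for _, chunk := range [][]byte{
		[]byte("abcd"), // full shard: accepted
		[]byte("ab"),   // short shard: accepted, marks the stream finished
		[]byte("cd"),   // rejected: no writes allowed after a short shard
	} {
		n, err := w.Write(chunk)
		fmt.Println(n, err)
	}
}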