talos/pkg/chunker/file/file_test.go
Andrey Smirnov a9766d31bc refactor: implement LoggingManager as central log flow processor
Using this `LoggingManager` all the log flows (reading and writing) were
refactored. Interface of `LoggingManager` should now be generic enough to
replace log handling with almost any implementation - log rotation,
sending logs to remote destination, keeping logs in memory, etc.

There should be no functional changes.

As part of changes, `follow.Reader` was implemented which makes an
appended-to file feel like a stream. `file.NewChunker` was refactored to
use `follow.Reader` and `stream.NewChunker` to do the actual work. So
basically now we have only a single instance of chunker - stream
chunker, as everything is represented as a stream.

Signed-off-by: Andrey Smirnov <smirnov.andrey@gmail.com>
2020-06-10 14:30:36 -07:00

212 lines
5.0 KiB
Go

// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
package file_test
import (
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
"github.com/stretchr/testify/suite"
"github.com/talos-systems/talos/pkg/chunker/file"
)
// FileChunkerSuite exercises file.NewChunker against real files on disk.
// Each test works on its own log file with separate read and write handles.
type FileChunkerSuite struct {
suite.Suite
// tmpDir is the temporary directory holding the per-test log files.
tmpDir string
// no is a per-test counter used to derive a unique log file name.
no int
// reader and writer are two independent handles to the current test's log file:
// writer appends data, reader is handed to the chunker under test.
reader, writer *os.File
}
// SetupSuite creates the temporary directory shared by every test in the suite.
func (suite *FileChunkerSuite) SetupSuite() {
	tmpDir, err := ioutil.TempDir("", "talos")
	suite.Require().NoError(err)

	suite.tmpDir = tmpDir
}
// SetupTest creates a fresh, uniquely-named log file for the test and opens
// two handles to it: one for writing and one for reading.
func (suite *FileChunkerSuite) SetupTest() {
	suite.no++

	name := filepath.Join(suite.tmpDir, fmt.Sprintf("%d.log", suite.no))

	w, err := os.Create(name)
	suite.Require().NoError(err)

	suite.writer = w

	r, err := os.Open(name)
	suite.Require().NoError(err)

	suite.reader = r
}
// TearDownTest closes both handles opened in SetupTest.
func (suite *FileChunkerSuite) TearDownTest() {
	suite.Require().NoError(suite.writer.Close())

	// close error deliberately ignored: the reader may already have been
	// closed as a side effect of the test (e.g. file removal).
	_ = suite.reader.Close()
}
// TearDownSuite removes the temporary directory and everything in it.
func (suite *FileChunkerSuite) TearDownSuite() {
	err := os.RemoveAll(suite.tmpDir)
	suite.Require().NoError(err)
}
// collectChunks concatenates, in order, every chunk received from chunksCh
// and returns a channel that delivers the combined result once chunksCh is
// closed. If chunksCh closes without delivering anything, the result is nil.
func collectChunks(chunksCh <-chan []byte) <-chan []byte {
	combinedCh := make(chan []byte)

	go func() {
		var combined []byte

		for chunk := range chunksCh {
			combined = append(combined, chunk...)
		}

		combinedCh <- combined
	}()

	return combinedCh
}
// TestStreaming verifies that a following chunker delivers data written both
// before and after a pause, and stops when the context is canceled.
func (suite *FileChunkerSuite) TestStreaming() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	chunker := file.NewChunker(ctx, suite.reader, file.WithFollow())

	combinedCh := collectChunks(chunker.Read())

	for _, s := range []string{"abc", "def", "ghi"} {
		_, _ = suite.writer.WriteString(s) //nolint: errcheck
	}

	time.Sleep(50 * time.Millisecond)

	for _, s := range []string{"jkl", "mno"} {
		_, _ = suite.writer.WriteString(s) //nolint: errcheck
	}

	time.Sleep(50 * time.Millisecond)

	cancel()

	suite.Require().Equal([]byte("abcdefghijklmno"), <-combinedCh)
}
// TestStreamingWithSomeHead verifies that data written before Read() is called
// is still delivered, followed by data written while streaming.
func (suite *FileChunkerSuite) TestStreamingWithSomeHead() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	chunker := file.NewChunker(ctx, suite.reader, file.WithFollow())

	// some content is already in the file before streaming starts
	_, _ = suite.writer.WriteString("abc") //nolint: errcheck
	_, _ = suite.writer.WriteString("def") //nolint: errcheck

	combinedCh := collectChunks(chunker.Read())

	for _, s := range []string{"ghi", "jkl", "mno"} {
		_, _ = suite.writer.WriteString(s) //nolint: errcheck

		time.Sleep(50 * time.Millisecond)
	}

	cancel()

	suite.Require().Equal([]byte("abcdefghijklmno"), <-combinedCh)
}
// TestStreamingSmallBuffer streams with a 1-byte chunk size to make sure that
// chunk boundaries don't affect the combined result, and that an unrelated
// file appearing in the watched directory doesn't confuse the follower.
func (suite *FileChunkerSuite) TestStreamingSmallBuffer() {
	ctx, ctxCancel := context.WithCancel(context.Background())
	defer ctxCancel()

	chunker := file.NewChunker(ctx, suite.reader, file.WithSize(1), file.WithFollow())

	chunksCh := chunker.Read()
	combinedCh := collectChunks(chunksCh)

	// nolint: errcheck
	suite.writer.WriteString("abc")
	// nolint: errcheck
	suite.writer.WriteString("def")
	// nolint: errcheck
	suite.writer.WriteString("ghi")

	time.Sleep(50 * time.Millisecond)

	// nolint: errcheck
	suite.writer.WriteString("jkl")
	// nolint: errcheck
	suite.writer.WriteString("mno")

	// create extra file to try to confuse watch; close it right away — the
	// handle was previously leaked, the file only needs to exist on disk
	extra, err := os.Create(filepath.Join(suite.tmpDir, "x.log"))
	suite.Require().NoError(err)
	suite.Require().NoError(extra.Close())

	time.Sleep(50 * time.Millisecond)

	ctxCancel()

	suite.Require().Equal([]byte("abcdefghijklmno"), <-combinedCh)
}
// TestStreamingDeleted verifies that the chunker terminates on its own (and
// delivers everything written so far) when the followed file is removed.
func (suite *FileChunkerSuite) TestStreamingDeleted() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	chunker := file.NewChunker(ctx, suite.reader, file.WithFollow())

	combinedCh := collectChunks(chunker.Read())

	for _, s := range []string{"abc", "def", "ghi"} {
		_, _ = suite.writer.WriteString(s) //nolint: errcheck
	}

	time.Sleep(50 * time.Millisecond)

	for _, s := range []string{"jkl", "mno"} {
		_, _ = suite.writer.WriteString(s) //nolint: errcheck
	}

	time.Sleep(50 * time.Millisecond)

	// chunker should terminate when file is removed
	suite.Require().NoError(os.Remove(suite.writer.Name()))

	suite.Require().Equal([]byte("abcdefghijklmno"), <-combinedCh)
}
// TestNoFollow verifies that without WithFollow() the chunker reads the
// current file contents and then finishes instead of waiting for more data.
func (suite *FileChunkerSuite) TestNoFollow() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	chunker := file.NewChunker(ctx, suite.reader)

	for _, s := range []string{"abc", "def", "ghi"} {
		_, _ = suite.writer.WriteString(s) //nolint: errcheck
	}

	time.Sleep(50 * time.Millisecond)

	combinedCh := collectChunks(chunker.Read())

	suite.Require().Equal([]byte("abcdefghi"), <-combinedCh)
}
// TestFileChunkerSuite runs the file chunker test suite.
func TestFileChunkerSuite(t *testing.T) {
	suite.Run(t, &FileChunkerSuite{})
}