diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 54f099a541db1228e3b1b6a00ac71a5b04854dac..82d21887b7d64958a3d14a1aa7a669b5b8bcf2cd 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -16,7 +16,7 @@ stages: - build include: - - component: $CI_SERVER_FQDN/adirelle/pre-commit-check/check@v1.0.0 + - component: $CI_SERVER_FQDN/adirelle/pre-commit-check/check@v1.1.0 inputs: stage: lint diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index caa20be93eb5623312c822fc4a52ccb80f3fb862..b18bc0001341ea5a7a6e4d5da19dd11c87370b0f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -11,3 +11,18 @@ repos: rev: v3.2.1 hooks: - id: editorconfig-checker + + - repo: https://github.com/python-jsonschema/check-jsonschema + rev: 0.31.3 + hooks: + - id: check-gitlab-ci + - id: check-taskfile + + - repo: local + hooks: + - id: go-mod-tidy + name: Check go.sum + entry: go mod tidy -diff + language: golang + files: '(\.go|go\.(mod|sum))$' + pass_filenames: false diff --git a/Taskfile.yml b/Taskfile.yml index c26ae01a1f219b3b0933d9ffe6aeefa18ff5f429..5fdb72bd81ef8750316897bed7c4806c0e92b5c7 100644 --- a/Taskfile.yml +++ b/Taskfile.yml @@ -4,6 +4,10 @@ env: GOPATH: "{{ .ROOT_DIR }}/.go" tasks: + lint: + cmds: + - pre-commit run -a + build: deps: [deps] sources: @@ -18,8 +22,33 @@ tasks: test: deps: [deps] + sources: + - lib/**/* + cmds: + - go test {{.CLI_ARGS}} ./lib/... + + bench: + deps: [deps] + sources: + - lib/**/* + cmds: + - go test -v -bench=. -run=^# ./lib/... + + test-lib: + deps: [deps] + sources: + - lib/{{ .CLI_ARGS }}/**/* cmds: - - go test {{.CLI_ARGS}} ./cmd/... ./lib/... + - go test -v ./lib/{{ .CLI_ARGS }}/... 
+ + run: + deps: [deps] + sources: + - cmd/**/* + - lib/**/* + - exclude: "**/*_test.go" + cmds: + - go run ./cmd/monitor {{.CLI_ARGS}} clear: run: always @@ -37,10 +66,10 @@ tasks: sources: - cmd/**/*.go - lib/**/*.go + generates: - go.mod - go.sum - generates: - - '{{ .GOPATH }}/**/*' + - "{{ .GOPATH }}/**/*" cmds: - mkdir -p "{{ .GOPATH }}" - go mod tidy -v diff --git a/cmd/monitor/main.go b/cmd/monitor/main.go index 083c94cde9a62f1639f5c562b095fa5fbb48a129..8527061956650146d16ea7fe1ee5adf61bf3cb50 100644 --- a/cmd/monitor/main.go +++ b/cmd/monitor/main.go @@ -10,30 +10,52 @@ import ( "time" "github.com/docker/docker/api/types/container" - "github.com/lmittmann/tint" - "github.com/mattn/go-isatty" flag "github.com/spf13/pflag" + "gitlab.com/adirelle/docker-stats/lib/logging" ) const MIN_DOCKER_API_VERSION = "v1.44" func main() { - var verbose = flag.CountP("verbose", "v", "show more messages (cumulative)") - var quiet = flag.CountP("quiet", "q", "only show error messages") + if err := mainWithError(); err != nil { + slog.Error("fatal error", "err", err) + os.Exit(1) + } +} + +func mainWithError() error { + loggingConfig := logging.DefaultConfig() + flag.CountVarP(&loggingConfig.Verbose, "verbose", "v", "show more messages (cumulative)") + flag.CountVarP(&loggingConfig.Quiet, "quiet", "q", "only show error messages") + flag.VarP(&loggingConfig.MetricsFilePath, "output", "o", "append metrics to file") + flag.VarP(&loggingConfig.LogFilePath, "log-file", "l", "append logs to file") + flag.VarP(&loggingConfig.Rotation.Period, "rotate-time", "r", "rotate files at the given time interval") + flag.Var(&loggingConfig.Rotation.Size, "rotate-max-size", "rotate files bigger that the given size") + flag.Var(&loggingConfig.Rotation.MaxAge, "rotate-max-age", "delete rotated files older than the given age") + flag.UintVar(&loggingConfig.Rotation.MaxCount, "rotate-keep", 0, "delete rotateds files when their number exceeds the given count") + 
flag.BoolVar(&loggingConfig.Rotation.Compress, "rotate-compress", false, "compress files after rotation") + var catchUp = flag.BoolP("catch-up", "c", false, "catch up on running containers") var period = flag.DurationP("period", "p", time.Minute, "period of time between intermediate metrics; 0 to disable") var filter = PRESET_ALL flag.VarP(&filter, "filter", "f", "container and label filter preset") - var outputPath = flag.StringP("output", "o", "-", "write metrics to target file") - flag.Parse() - slog.SetDefault(slog.New(consoleHandler(*verbose - *quiet))) + if logger, err := loggingConfig.CreateLogLogger(); err == nil { + slog.SetDefault(logger) + } else { + return err + } + + output, err := loggingConfig.CreateMetricsLogger() + if err != nil { + return err + } f := FilteringContainerMonitorFactory{ - MetricsLogger: slog.New(metricsHandler(*outputPath)), + MetricsLogger: output, ContainerFilter: filter.ContainerFilter(), LabelFilter: filter.LabelFilter(), Period: *period, @@ -44,8 +66,7 @@ func main() { m, err := NewEngineMonitor(MIN_DOCKER_API_VERSION, f, *catchUp) if err != nil { - slog.Error("could not start main client", "err", err) - os.Exit(1) + return fmt.Errorf("could not start main client: %w", err) } if *period > 0 { @@ -57,41 +78,7 @@ func main() { slog.Debug("filtering containers and labels", "preset", filter) } - if err := m.Run(ctx); err != nil { - slog.Error("fatal error", "err", err) - os.Exit(1) - } -} - -func consoleHandler(verbosity int) slog.Handler { - loggerOptions := tint.Options{ - TimeFormat: time.DateTime, - Level: slog.LevelWarn, - NoColor: len(os.Getenv("NO_COLOR")) > 0 || (!isatty.IsTerminal(os.Stderr.Fd()) && len(os.Getenv("FORCE_COLOR")) == 0), - } - if verbosity < 0 { - loggerOptions.Level = slog.LevelError - } else if verbosity == 1 { - loggerOptions.Level = slog.LevelInfo - } else if verbosity > 1 { - loggerOptions.Level = slog.LevelDebug - } - return tint.NewHandler(os.Stderr, &loggerOptions) -} - -func 
metricsHandler(outputPath string) slog.Handler { - output := os.Stdout - if len(outputPath) > 0 && outputPath != "-" { - var err error - output, err = os.OpenFile(outputPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o777) - if err != nil { - slog.Error("could open output file", "path", outputPath, "err", err) - os.Exit(1) - } - slog.Debug("writing metrics to file", "path", output.Name()) - } - - return slog.NewJSONHandler(output, nil) + return m.Run(ctx) } // ============================== FilterPreset ============================== diff --git a/go.mod b/go.mod index 178f0420c4458b9f8897c4f3f2f99e53bddd551f..4bcd919f5241a3772997245100b655ae5fd19ffb 100644 --- a/go.mod +++ b/go.mod @@ -4,11 +4,18 @@ go 1.24.0 require ( github.com/docker/docker v28.0.1+incompatible + github.com/gookit/slog v0.5.8 github.com/lmittmann/tint v1.0.7 github.com/mattn/go-isatty v0.0.20 github.com/stretchr/testify v1.10.0 ) +require ( + github.com/gookit/goutil v0.6.18 // indirect + golang.org/x/sync v0.11.0 // indirect + golang.org/x/text v0.22.0 // indirect +) + require ( github.com/Microsoft/go-winio v0.6.2 // indirect github.com/containerd/log v0.1.0 // indirect diff --git a/go.sum b/go.sum index a418aa886ba932d01fbff8657aa857ac0761e4bc..2620d2e552a1e9629a04fc6db92cde0cec715d00 100644 --- a/go.sum +++ b/go.sum @@ -29,6 +29,12 @@ github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gookit/color v1.5.4 h1:FZmqs7XOyGgCAxmWyPslpiok1k05wmY3SJTytgvYFs0= +github.com/gookit/color v1.5.4/go.mod h1:pZJOeOS8DM43rXbp4AZo1n9zCU2qjpcRko0b6/QJi9w= +github.com/gookit/goutil v0.6.18 h1:MUVj0G16flubWT8zYVicIuisUiHdgirPAkmnfD2kKgw= +github.com/gookit/goutil v0.6.18/go.mod h1:AY/5sAwKe7Xck+mEbuxj0n/bc3qwrGNe3Oeulln7zBA= 
+github.com/gookit/slog v0.5.8 h1:XZCeHLQvvOZWcSUDZcqxXITsL9+d1ESsKZoASBmK1lI= +github.com/gookit/slog v0.5.8/go.mod h1:s0ViFOY/IgUuT4MDPF0l9x5/npcciy8pL4xwWZadnoc= github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 h1:e9Rjr40Z98/clHv5Yg79Is0NtosR5LXRvdr7o/6NwbA= github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1/go.mod h1:tIxuGz/9mpox++sgp9fJjHO0+q1X9/UOWd798aAm22M= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= @@ -63,6 +69,8 @@ github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no= +github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= @@ -99,12 +107,16 @@ golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= +golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU= +golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= diff --git a/lib/logging/logging.go b/lib/logging/logging.go new file mode 100644 index 0000000000000000000000000000000000000000..df503ee6a02979893a609eaf269da541aa5f5e87 --- /dev/null +++ b/lib/logging/logging.go @@ -0,0 +1,210 @@ +package logging + +import ( + "fmt" + "io" + "log/slog" + "os" + "path" + "syscall" + "time" + + "github.com/gookit/slog/rotatefile" + "github.com/lmittmann/tint" + "github.com/mattn/go-isatty" + "github.com/spf13/pflag" +) + +// ============================== Config ============================== + +type Config struct { + Verbose int + Quiet int + MetricsFilePath WriteFilePath + LogFilePath WriteFilePath + Rotation RotationConfig +} + +func DefaultConfig() Config { + return Config{ + MetricsFilePath: WriteFilePath("/dev/stdout"), + LogFilePath: WriteFilePath("/dev/stderr"), + } +} + +func (l Config) CreateMetricsLogger() (*slog.Logger, error) { + var err error + var writer io.Writer + if l.MetricsFilePath.IsStdfile() { + writer = l.MetricsFilePath.StdFile() + } else { + writer, err = l.Rotation.Open(l.MetricsFilePath.Path()) + if err != nil { + return nil, fmt.Errorf("could not open output for appending: %w", err) + } + } + return 
slog.New(slog.NewJSONHandler(writer, nil)), nil +} + +func (l Config) CreateLogLogger() (*slog.Logger, error) { + if handler, err := l.createLogHandler(); err == nil { + return slog.New(handler), nil + } else { + return nil, err + } +} + +func (l Config) createLogHandler() (slog.Handler, error) { + if l.LogFilePath.IsStdfile() { + return l.createConsoleHandler(l.LogFilePath.StdFile()), nil + } + + writer, err := l.Rotation.Open(l.LogFilePath.Path()) + if err != nil { + return nil, fmt.Errorf("could not open log file for appending: %w", err) + } + + var fileHandler slog.Handler + opts := slog.HandlerOptions{Level: l.logLevel(), AddSource: l.addSource()} + if l.LogFilePath.IsJSON() { + fileHandler = slog.NewJSONHandler(writer, &opts) + } else { + fileHandler = slog.NewTextHandler(writer, &opts) + } + return fileHandler, nil + // return multi.Failover()(fileHandler, l.createConsoleHandler(os.Stderr)), nil +} + +func (l Config) logLevel() slog.Leveler { + verbosity := l.Verbose - l.Quiet + if verbosity < 0 { + return slog.LevelError + + } else if verbosity == 1 { + return slog.LevelInfo + } else if verbosity > 1 { + return slog.LevelDebug + } + return slog.LevelWarn +} + +func (l Config) addSource() bool { + return l.Verbose-l.Quiet > 2 +} + +func (l Config) createConsoleHandler(console *os.File) slog.Handler { + return tint.NewHandler(console, &tint.Options{ + TimeFormat: time.DateTime, + Level: l.logLevel(), + AddSource: l.addSource(), + NoColor: len(os.Getenv("NO_COLOR")) > 0 || (!isatty.IsTerminal(console.Fd()) && len(os.Getenv("FORCE_COLOR")) == 0), + }) +} + +// ============================== WriteFilePath ============================== + +type WriteFilePath string + +var _ pflag.Value = (*WriteFilePath)(nil) + +// Set implements flag.Value. 
+func (l *WriteFilePath) Set(filePath string) error { + switch filePath { + case "", "-", "/dev/stdout", "/dev/fd/1": + *l = "/dev/stdout" + return nil + case "/dev/stderr", "/dev/fd/2": + *l = "/dev/stderr" + return nil + default: + info, err := os.Stat(filePath) + if os.IsNotExist(err) { + parent := path.Dir(filePath) + err = syscall.Access(parent, 0x2 /* W_OK */) + if err != nil { + return fmt.Errorf("cannot create %s in %s: %w", path.Base(filePath), parent, err) + } + } else { + if err != nil { + return err + } + if info.IsDir() { + return fmt.Errorf("%s is a directory", filePath) + } + err = syscall.Access(filePath, 0x2 /* W_OK */) + if err != nil { + return fmt.Errorf("cannot write to %s: %w", filePath, err) + } + } + *l = WriteFilePath(filePath) + return nil + } +} + +// String implements flag.Value. +func (l *WriteFilePath) String() string { + return string(*l) +} + +// Type implements pflag.Value. +func (l *WriteFilePath) Type() string { + return "file-path" +} + +func (l WriteFilePath) Path() string { + return string(l) +} + +func (l WriteFilePath) IsStdfile() bool { + return l == "/dev/stdout" || l == "/dev/stderr" +} + +func (l WriteFilePath) StdFile() *os.File { + switch l { + case "/dev/stdout": + return os.Stdout + case "/dev/stderr": + return os.Stderr + default: + panic(fmt.Sprintf("WriteFilePath.StdFile() called on %s", l)) + } +} + +func (l WriteFilePath) IsJSON() bool { + return path.Ext(string(l)) == ".json" +} + +// ============================== RotationConfig ============================== + +type RotationConfig struct { + Period Period + Size FileSize + MaxAge Age + MaxCount uint + Compress bool +} + +func (c *RotationConfig) IsEnabled() bool { + return !(c.Period.IsNever() && c.Size.IsInfinite()) +} + +func (c *RotationConfig) Open(path string) (io.Writer, error) { + if !c.IsEnabled() { + return os.OpenFile(path, os.O_APPEND|os.O_WRONLY|os.O_CREATE, rotatefile.DefaultFilePerm) + } + + cf := rotatefile.EmptyConfigWith() + cf.Filepath = path 
+	if !c.Period.IsNever() {
+		cf.RotateTime = c.Period.RotateTime()
+	}
+	cf.DebugMode = c.Period.IsDebug()
+	cf.MaxSize = c.Size.Bytes()
+	// cf.Compress = c.Compress
+	cf.BackupTime = c.MaxAge.Hours()
+	cf.BackupNum = c.MaxCount
+	// cf.CloseLock = true
+	slog.Debug("using rotation config", "config", fmt.Sprintf("%#v", cf))
+
+	return rotatefile.NewWriter(cf)
+}
diff --git a/lib/logging/units.go b/lib/logging/units.go
new file mode 100644
index 0000000000000000000000000000000000000000..ae0cdd178d8ce5dfa0b1106076782457792ddabf
--- /dev/null
+++ b/lib/logging/units.go
@@ -0,0 +1,212 @@
+package logging
+
+import (
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+
+	"github.com/gookit/slog/rotatefile"
+	"github.com/spf13/pflag"
+)
+
+// ============================== Period ==============================
+
+type Period string
+
+var _ pflag.Value = (*Period)(nil)
+
+const (
+	PERIOD_HOURLY  Period = "hourly"
+	PERIOD_DAILY   Period = "daily"
+	PERIOD_WEEKLY  Period = "weekly"
+	PERIOD_MONTHLY Period = "monthly"
+	PERIOD_DEBUG   Period = "debug"
+)
+
+// Set implements pflag.Value.
+func (p *Period) Set(value string) error {
+	switch value {
+	case string(PERIOD_HOURLY), "h":
+		*p = PERIOD_HOURLY
+	case string(PERIOD_DAILY), "d":
+		*p = PERIOD_DAILY
+	case string(PERIOD_WEEKLY), "w":
+		*p = PERIOD_WEEKLY
+	case string(PERIOD_MONTHLY), "m":
+		*p = PERIOD_MONTHLY
+	case string(PERIOD_DEBUG):
+		*p = PERIOD_DEBUG
+	default:
+		return fmt.Errorf("invalid period: %v", value)
+	}
+	return nil
+}
+
+// String implements pflag.Value.
+func (p *Period) String() string {
+	return string(*p)
+}
+
+// Type implements pflag.Value.
+func (p *Period) Type() string {
+	return "period"
+}
+
+func (p Period) IsNever() bool {
+	return string(p) == ""
+}
+
+func (p Period) IsDebug() bool {
+	return p == PERIOD_DEBUG
+}
+
+func (p Period) RotateTime() rotatefile.RotateTime {
+	switch p {
+	case PERIOD_HOURLY:
+		return rotatefile.EveryHour
+	case PERIOD_DAILY, "":
+		return rotatefile.EveryDay
+	case PERIOD_WEEKLY:
+		return rotatefile.EveryDay * 7
+	case PERIOD_MONTHLY:
+		return rotatefile.EveryMonth
+	case PERIOD_DEBUG:
+		return rotatefile.EverySecond
+	default:
+		panic(fmt.Sprintf("unknown period: %v", string(p)))
+	}
+}
+
+// ============================== FileSize ==============================
+
+type FileSize uint64
+
+var _ pflag.Value = (*FileSize)(nil)
+
+var revSizeSuffixes = []string{"G", "M", "K"}
+
+// Set implements pflag.Value.
+func (f *FileSize) Set(size string) error {
+	size = strings.ToUpper(size)
+	var err error
+	for i, suffix := range revSizeSuffixes {
+		if prefix, hasSuffix := stripSuffix(size, suffix); hasSuffix {
+			if value, err := strconv.ParseUint(prefix, 10, 64); err == nil {
+				*f = FileSize(value << (10 * (len(revSizeSuffixes) - i)))
+				return nil
+			} else {
+				return err
+			}
+		}
+	}
+	value, err := strconv.ParseUint(size, 10, 64)
+	if err != nil {
+		return err
+	}
+	*f = FileSize(value)
+	return nil
+}
+
+func stripSuffix(s, suffix string) (string, bool) {
+	if strings.HasSuffix(s, suffix) {
+		return s[:len(s)-len(suffix)], true
+	}
+	return s, false
+}
+
+var sizeSuffixes = []string{"", "K", "M"}
+
+// String implements pflag.Value.
+func (f *FileSize) String() string {
+	value := uint64(*f)
+	if value == 0 {
+		return "0"
+	}
+	for _, suffix := range sizeSuffixes {
+		if value&0x3ff != 0 {
+			return fmt.Sprintf("%d%s", value, suffix)
+		}
+		value >>= 10
+	}
+	return fmt.Sprintf("%dG", value)
+}
+
+// Type implements pflag.Value.
+func (f *FileSize) Type() string { + return "size" +} + +func (f FileSize) IsInfinite() bool { + return f == 0 +} + +func (f FileSize) Bytes() uint64 { + return uint64(f) +} + +// ============================== Age ============================== + +type Age uint + +var _ pflag.Value = (*Age)(nil) + +var ageUnits = []struct { + suffix string + value uint +}{ + {"week", 7 * 24}, + {"w", 7 * 24}, + {"weeks", 7 * 24}, + {"day", 24}, + {"d", 24}, + {"days", 24}, + {"hour", 1}, + {"h", 1}, + {"hours", 1}, + {"", 1}, +} + +// Set implements pflag.Value. +func (a *Age) Set(age string) error { + for _, unit := range ageUnits { + if strings.HasSuffix(age, unit.suffix) { + value, err := strconv.ParseUint(strings.Trim(age[:len(age)-len(unit.suffix)], " "), 10, 32) + *a = Age(uint(value) * unit.value) + return err + } + } + panic("unreachable") +} + +// String implements pflag.Value. +func (a *Age) String() string { + value := uint(*a) + if value == 0 { + return "0" + } + for _, unit := range ageUnits { + if value >= unit.value && math.Mod(float64(value), float64(unit.value)) == 0 { + value /= unit.value + if value != 1 { + return fmt.Sprintf("%d %ss", value, unit.suffix) + } else { + return fmt.Sprintf("%d %s", value, unit.suffix) + } + } + } + panic("unreachable") +} + +// Type implements pflag.Value. 
+func (a *Age) Type() string { + return "age" +} + +func (a Age) Hours() uint { + return uint(a) +} + +func (a Age) IsInfinite() bool { + return a == 0 +} diff --git a/lib/logging/units_test.go b/lib/logging/units_test.go new file mode 100644 index 0000000000000000000000000000000000000000..8d040f912da213f389b44c44c28d1955bdc718e0 --- /dev/null +++ b/lib/logging/units_test.go @@ -0,0 +1,184 @@ +package logging_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "gitlab.com/adirelle/docker-stats/lib/logging" +) + +func TestPeriodSet(t *testing.T) { + t.Parallel() + t.Helper() + + testCases := map[string]logging.Period{ + "hourly": logging.PERIOD_HOURLY, + "h": logging.PERIOD_HOURLY, + "daily": logging.PERIOD_DAILY, + "d": logging.PERIOD_DAILY, + "weekly": logging.PERIOD_WEEKLY, + "w": logging.PERIOD_WEEKLY, + "monthly": logging.PERIOD_MONTHLY, + "m": logging.PERIOD_MONTHLY, + "debug": logging.PERIOD_DEBUG, + } + + for input, expected := range testCases { + t.Run(input, func(t *testing.T) { + var actual logging.Period + assert.NoError(t, actual.Set(input)) + assert.Equal(t, expected, actual) + }) + } +} + +func TestPeriodSetInvalid(t *testing.T) { + t.Parallel() + t.Helper() + + testCases := []string{ + "hourl", + "foo", + "9d", + } + + for _, input := range testCases { + t.Run(input, func(t *testing.T) { + var actual logging.Period + assert.Error(t, actual.Set(input)) + }) + } +} + +func TestPeriodString(t *testing.T) { + t.Parallel() + t.Helper() + + testCases := map[logging.Period]string{ + logging.PERIOD_HOURLY: "hourly", + logging.PERIOD_DAILY: "daily", + logging.PERIOD_WEEKLY: "weekly", + logging.PERIOD_MONTHLY: "monthly", + } + + for input, expected := range testCases { + t.Run(expected, func(t *testing.T) { + assert.Equal(t, expected, input.String()) + }) + } +} + +func TestFileSizeSet(t *testing.T) { + t.Parallel() + t.Helper() + + testCases := map[string]logging.FileSize{ + "0": 0, + "15": 15, + "0K": 0, + "15K": 15360, + "15k": 15360, + 
"15M": 15728640,
+		"15m": 15728640,
+		"15G": 16106127360,
+		"15g": 16106127360,
+	}
+
+	for input, expected := range testCases {
+		t.Run(input, func(t *testing.T) {
+			var actual logging.FileSize
+			assert.NoError(t, actual.Set(input))
+			assert.Equal(t, expected, actual)
+		})
+	}
+}
+
+func TestFileSizeSetInvalid(t *testing.T) {
+	t.Parallel()
+	t.Helper()
+
+	testCases := []string{"15o", "15.0k", "pof"}
+
+	for _, input := range testCases {
+		t.Run(input, func(t *testing.T) {
+			var size logging.FileSize
+			assert.Error(t, size.Set(input))
+		})
+	}
+}
+
+func TestFileSizeString(t *testing.T) {
+	t.Parallel()
+	t.Helper()
+
+	testCases := map[logging.FileSize]string{
+		0:              "0",
+		1:              "1",
+		15:             "15",
+		1039:           "1039",
+		1024:           "1K",
+		15360:          "15K",
+		1063936:        "1039K",
+		1048576:        "1M",
+		15728640:       "15M",
+		1073741824:     "1G",
+		16106127360:    "15G",
+		16492674416640: "15360G",
+	}
+
+	for input, expected := range testCases {
+		t.Run(expected, func(t *testing.T) {
+			assert.Equal(t, expected, input.String())
+		})
+	}
+}
+
+func TestAgeSet(t *testing.T) {
+	t.Parallel()
+	t.Helper()
+
+	testCases := map[string]logging.Age{
+		"0":       0,
+		"1":       1,
+		"1h":      1,
+		"1 hour":  1,
+		"1hour":   1,
+		"2 hours": 2,
+		"1d":      24,
+		"1 day":   24,
+		"2 days":  2 * 24,
+		"1w":      7 * 24,
+		"1week":   7 * 24,
+		"4 weeks": 28 * 24,
+	}
+
+	for input, expected := range testCases {
+		t.Run(input, func(t *testing.T) {
+			var actual logging.Age
+			assert.NoError(t, actual.Set(input))
+			assert.Equal(t, expected, actual)
+		})
+	}
+}
+
+func TestAgeString(t *testing.T) {
+	t.Parallel()
+	t.Helper()
+
+	testCases := map[logging.Age]string{
+		0:       "0",
+		1:       "1 hour",
+		2:       "2 hours",
+		27:      "27 hours",
+		24:      "1 day",
+		7 * 24:  "1 week",
+		14 * 24: "2 weeks",
+		15 * 24: "15 days",
+	}
+
+	for input, expected := range testCases {
+		t.Run(expected, func(t *testing.T) {
+			assert.Equal(t, expected, input.String())
+		})
+	}
+}
diff --git a/lib/logging/writer/background.go
b/lib/logging/writer/background.go new file mode 100644 index 0000000000000000000000000000000000000000..f0173c206b2d4f81efa37338eafdbcba3ec5d688 --- /dev/null +++ b/lib/logging/writer/background.go @@ -0,0 +1,202 @@ +package writer + +import ( + "errors" + "fmt" + "io" + "log/slog" + "os" + "time" +) + +type BackgroundWriter struct { + path string + pathPrefix string + pathExt string + + file *os.File + size uint64 + lastModTime time.Time + + rotation RotationStrategy + archived chan<- string + + log *slog.Logger + clock Clock +} + +func NewBackgroundWriter(c Config, archived chan<- string) *BackgroundWriter { + prefix, ext := c.splitPath() + return &BackgroundWriter{ + path: c.Path, + pathPrefix: prefix, + pathExt: ext, + rotation: c.Rotation, + archived: archived, + log: c.logger(), + clock: c.clock, + } +} + +func (w *BackgroundWriter) run(reqs <-chan *writeRequest) (err error) { + defer func() { + if w.archived != nil { + close(w.archived) + } + err = errors.Join(err, w.closeFile()) + w.log.Debug("background writer stopped") + }() + + triggerR, hasTimer := w.rotation.(RotationStrategyTrigger) + var trigger <-chan time.Time + w.log.Debug("background writer started") + + for { + if hasTimer && trigger == nil { + now, next := w.clock.Now(), triggerR.Next(w.lastModTime) + if next.After(now) { + trigger = w.clock.After(next.Sub(now)) + } + } + + select { + case <-trigger: + trigger = nil + if w.shouldRotate(0) { + if err := w.rotate(); err != nil { + return err + } + } + case req, open := <-reqs: + if !open { + return nil + } + written, err := w.write(req.data) + req.reply(written, err) + w.size += written + w.lastModTime = w.clock.Now() + + if err != nil { + return err + } + } + } +} + +func (w *BackgroundWriter) write(data []byte) (written uint64, err error) { + defer func() { + if p := recover(); p != nil { + err = errors.Join(err, fmt.Errorf("recovered panic: %v", p)) + } + }() + + toWrite := uint64(len(data)) + if w.shouldRotate(toWrite) { + if err = 
w.rotate(); err != nil {
+			return 0, err
+		}
+	}
+
+	if toWrite == 0 {
+		return 0, nil
+	}
+
+	if err = w.openFile(); err != nil {
+		return 0, err
+	}
+
+	for err == nil && written < toWrite {
+		var n int
+		n, err = w.file.Write(data[written:])
+		written += uint64(n)
+	}
+
+	return written, err
+}
+
+func (w *BackgroundWriter) openFile() error {
+	if w.file != nil {
+		return nil
+	}
+
+	var err error
+	w.file, err = os.OpenFile(w.path, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o777)
+	if err != nil {
+		w.log.Error("could not open file", "err", err)
+		return err
+	}
+
+	if pos, err := w.file.Seek(0, io.SeekCurrent); err == nil {
+		w.size = uint64(pos)
+	} else {
+		return err
+	}
+	w.lastModTime = w.clock.Now()
+
+	w.log.Debug("opened file for writing", "size", w.size, "modTime", w.lastModTime)
+	return err
+}
+
+func (w *BackgroundWriter) closeFile() error {
+	if w.file == nil {
+		return nil
+	}
+
+	if err := w.file.Sync(); err != nil {
+		w.log.Warn("error syncing file", "path", w.path, "err", err)
+	}
+
+	if err := w.file.Close(); err != nil {
+		w.log.Warn("error closing file", "path", w.path, "err", err)
+	} else {
+		w.log.Debug("closed file", "path", w.path, "size", w.size)
+	}
+
+	w.file = nil
+	w.size = 0
+	return nil
+}
+
+func (w *BackgroundWriter) shouldRotate(toWrite uint64) bool {
+	return w.rotation.ShouldRotate(w.lastModTime, w.clock.Now(), w.size, w.size+toWrite)
+}
+
+func (w *BackgroundWriter) rotate() error {
+	if w.file == nil {
+		return nil
+	}
+
+	if err := w.closeFile(); err != nil {
+		return err
+	}
+
+	refTime := w.lastModTime
+	archivePath, err := w.newArchivePath(refTime)
+	if err != nil {
+		return err
+	}
+
+	if err = os.Rename(w.path, archivePath); err != nil {
+		return fmt.Errorf("could not archive log file: %w", err)
+	}
+	if err = os.Chtimes(archivePath, refTime, refTime); err != nil {
+		return fmt.Errorf("could not set archive time: %w", err)
+	}
+	w.log.Debug("archived file", "archivePath", archivePath)
+
+	if w.archived != nil {
w.archived <- archivePath + } + + return nil +} + +func (w *BackgroundWriter) newArchivePath(when time.Time) (string, error) { + if suffix, err := w.rotation.PathSuffix(w.pathPrefix, w.pathExt, when); err != nil { + return "", err + } else if suffix != "" { + return w.pathPrefix + "." + suffix + w.pathExt, nil + } else { + return w.pathPrefix + w.pathExt, nil + } +} diff --git a/lib/logging/writer/cleaner.go b/lib/logging/writer/cleaner.go new file mode 100644 index 0000000000000000000000000000000000000000..f659d5aa8bcb0aedee8c90eb7e178e42bb2a2e49 --- /dev/null +++ b/lib/logging/writer/cleaner.go @@ -0,0 +1,133 @@ +package writer + +import ( + "fmt" + "log/slog" + "os" + "path/filepath" + "slices" + "strings" + "time" +) + +// ============================== Cleaner ============================== + +type cleaner struct { + archiveGlob string + + compression Compression + pruning PruningStrategy + + log *slog.Logger + clock Clock +} + +func NewCleaner(c Config) *cleaner { + return &cleaner{ + archiveGlob: c.archiveGlob(), + compression: c.Compression, + pruning: c.Pruning, + log: c.logger(), + clock: c.clock, + } +} + +func (c *cleaner) run(archived <-chan string) error { + defer c.log.Debug("background cleaner stopped") + c.log.Debug("background cleaner started") + + for archive := range archived { + if c.compression.IsEnabled() { + if err := c.compression.Compress(archive); err != nil { + return fmt.Errorf("could not compress archived file: %w", err) + } else { + c.log.Info("compressed archived file") + } + } + + if c.pruning.IsEnabled() { + if archives, err := c.listArchives(); err != nil { + return err + } else if len(archives) > 0 { + c.log.Debug("considering archived files for pruning", "paths", archives) + if err := c.prune(archives); err != nil { + return fmt.Errorf("could not prune archived files: %w", err) + } + } + } + } + + return nil +} + +func (c *cleaner) listArchives() ([]ArchivedFile, error) { + paths, err := filepath.Glob(c.archiveGlob) + if err != 
nil { + return nil, fmt.Errorf("could not list archived files: %w", err) + } + + entries := make([]ArchivedFile, len(paths)) + for i, p := range paths { + if entries[i], err = newArchivedFile(p, c.clock); err != nil { + return nil, err + } + } + + slices.SortStableFunc(entries, func(a, b ArchivedFile) int { + if delta := b.ModTime.Compare(a.ModTime); delta != 0 { + return delta + } else { + return strings.Compare(b.Path, a.Path) + } + }) + + return entries, nil +} + +func (c *cleaner) prune(archives []ArchivedFile) error { + var totalSize uint64 + for i, entry := range archives { + totalSize += uint64(entry.Size) + if c.pruning.ShouldPrune(i, entry, totalSize) { + if err := entry.Remove(); err != nil { + return fmt.Errorf("could not remove archived logfile: %w", err) + } else { + c.log.Info("removed archived file", "archive", entry.Path) + } + } + } + return nil +} + +// ============================== ArchivedFile ============================== + +type ArchivedFile struct { + Path string + Size uint64 + ModTime time.Time + Age time.Duration +} + +var _ fmt.Stringer = ArchivedFile{} + +func newArchivedFile(p string, clock Clock) (ArchivedFile, error) { + info, err := os.Stat(p) + if err != nil { + return ArchivedFile{}, err + } + return ArchivedFile{ + Path: p, + Size: uint64(info.Size()), + ModTime: info.ModTime(), + Age: clock.Now().Sub(info.ModTime()), + }, nil +} + +func (f ArchivedFile) Remove() error { + return os.Remove(f.Path) +} + +// LogValue implements slog.LogValuer. 
+func (f ArchivedFile) String() string {
+	return fmt.Sprintf("%s(%d bytes, %s)", f.Path, f.Size, f.ModTime.Format(time.RFC3339))
+}
diff --git a/lib/logging/writer/clock.go b/lib/logging/writer/clock.go
new file mode 100644
index 0000000000000000000000000000000000000000..76d1fb6f607cab6b65cf99ae45206b2e0093e923
--- /dev/null
+++ b/lib/logging/writer/clock.go
@@ -0,0 +1,26 @@
+package writer
+
+import "time"
+
+// ============================== Clock ==============================
+
+type Clock interface {
+	Now() time.Time
+	After(time.Duration) <-chan time.Time
+}
+
+type timeClock struct {
+	location *time.Location
+}
+
+var DefaultClock Clock = timeClock{time.Local}
+
+// Now implements Clock.
+func (s timeClock) Now() time.Time {
+	return time.Now().In(s.location)
+}
+
+// After implements Clock.
+func (s timeClock) After(d time.Duration) <-chan time.Time {
+	return time.After(d)
+}
diff --git a/lib/logging/writer/compression.go b/lib/logging/writer/compression.go
new file mode 100644
index 0000000000000000000000000000000000000000..aede3ae1b72a7220a9eeadce78b9c5b96398ffbf
--- /dev/null
+++ b/lib/logging/writer/compression.go
@@ -0,0 +1,92 @@
+package writer
+
+import (
+	"compress/gzip"
+	"fmt"
+	"io"
+	"os"
+)
+
+// ============================== Compression ==============================
+
+type Compression interface {
+	Compress(string) error
+	FileGlob() string
+	IsEnabled() bool
+}
+
+// ============================== noCompression ==============================
+
+type noCompression int
+
+var NoCompression Compression = noCompression(0)
+
+// Compress implements Compression.
+func (noCompression) Compress(string) error {
+	return nil
+}
+
+// FileGlob implements Compression.
+func (noCompression) FileGlob() string {
+	return ""
+}
+
+// IsEnabled implements Compression.
+func (noCompression) IsEnabled() bool {
+	return false
+}
+
+// ============================== gzipCompression ==============================
+
+type GzipCompression int
+
+var _ Compression = GzipCompression(gzip.DefaultCompression)
+
+// Compress implements Compression.
+func (c GzipCompression) Compress(path string) error {
+	src, err := os.Open(path)
+	if err != nil {
+		return fmt.Errorf("could not open file to compress: %w", err)
+	}
+	defer src.Close()
+
+	dest, err := os.OpenFile(path+".gz", os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0o777)
+	if err != nil {
+		return fmt.Errorf("could not open compressed file: %w", err)
+	}
+	defer dest.Close()
+
+	compressor, err := gzip.NewWriterLevel(dest, int(c))
+	if err != nil {
+		return fmt.Errorf("could not initialize compressor: %w", err)
+	}
+
+	if _, err = io.Copy(compressor, src); err != nil {
+		return fmt.Errorf("error while compressing: %w", err)
+	}
+
+	// Closing the gzip writer flushes buffered data and writes the gzip footer.
+	if err = compressor.Close(); err != nil {
+		return fmt.Errorf("could not finalize compressed file: %w", err)
+	}
+
+	if err = src.Close(); err != nil {
+		return fmt.Errorf("error closing source file: %w", err)
+	}
+
+	if err = os.Remove(path); err != nil {
+		return fmt.Errorf("could not remove the source of the compressed file: %w", err)
+	}
+
+	return nil
+}
+
+// FileGlob implements Compression.
+func (GzipCompression) FileGlob() string {
+	return ".gz"
+}
+
+// IsEnabled implements Compression.
+func (GzipCompression) IsEnabled() bool { + return true +} diff --git a/lib/logging/writer/config.go b/lib/logging/writer/config.go new file mode 100644 index 0000000000000000000000000000000000000000..a937124533d13c33d133d242b99022b4571417ef --- /dev/null +++ b/lib/logging/writer/config.go @@ -0,0 +1,131 @@ +package writer + +import ( + "compress/gzip" + "log/slog" + "path" + "time" +) + +// ============================== Config ============================== + +type Config struct { + Path string + Rotation RotationStrategy + Compression Compression + Pruning PruningStrategy + + log *slog.Logger + clock Clock +} + +func DefaultConfig(path string, options ...Option) Config { + c := Config{ + Path: path, + Rotation: NoRotation, + Compression: NoCompression, + Pruning: NoPruning, + log: DefaultLogger, + clock: DefaultClock, + } + c.Apply(options...) + return c +} + +func (c *Config) Apply(options ...Option) { + for _, option := range options { + option(c) + } +} + +func (c Config) needCleaner() bool { + return c.Compression.IsEnabled() || c.Pruning.IsEnabled() +} + +func (c Config) splitPath() (prefix, ext string) { + ext = path.Ext(c.Path) + return c.Path[:len(c.Path)-len(ext)], ext +} + +func (c Config) archiveGlob() string { + prefix, ext := c.splitPath() + if glob := c.Rotation.FileGlob(); glob != "" { + prefix += "." 
+ glob + } + if glob := c.Compression.FileGlob(); glob != "" { + ext += glob + } + return prefix + ext +} + +func (c Config) logger() *slog.Logger { + return c.log.With("logfile", c.Path) +} + +// ============================== Option ============================== + +type Option func(c *Config) + +func WithLogger(logger *slog.Logger) Option { + if logger == nil { + panic("logger is nil") + } + return func(w *Config) { + w.log = logger + } +} + +func WithRotation(rotation RotationStrategy) Option { + return func(w *Config) { + w.Rotation = ComposeRotationStrategy(w.Rotation, rotation) + } +} + +func WithMaxSize(maxSize uint64) Option { + if maxSize == 0 { + panic("file size limit is zero") + } + return WithRotation(NewSizeRotation(maxSize)) +} + +func WithPruning(p PruningStrategy) Option { + return func(w *Config) { + w.Pruning = ComposePruningStrategy(w.Pruning, p) + } +} + +func WithKeepMaxCount(maxCount uint) Option { + if maxCount == 0 { + panic("max count limit is zero") + } + return WithPruning(MaxCountPruning(maxCount)) +} + +func WithKeepMaxAge(maxAge time.Duration) Option { + if maxAge <= 0 { + panic("max age limit is zero or negative") + } + return WithPruning(MaxAgePruning(maxAge)) +} + +func WithKeepMaxSize(maxSize uint64) Option { + if maxSize == 0 { + panic("total size limit is zero") + } + return WithPruning(MaxSizePruning(maxSize)) + +} + +func WithCompression(c Compression) Option { + return func(w *Config) { + w.Compression = c + } +} + +func WithGZipCompression(levels ...int) Option { + level := gzip.DefaultCompression + if len(levels) > 0 { + level = levels[0] + } + return WithCompression(GzipCompression(level)) +} diff --git a/lib/logging/writer/pruning.go b/lib/logging/writer/pruning.go new file mode 100644 index 0000000000000000000000000000000000000000..5db5ccf70b36e902a6f5b4eb40828f2e0544f049 --- /dev/null +++ b/lib/logging/writer/pruning.go @@ -0,0 +1,118 @@ +package writer + +import ( + "time" +) + +// ============================== 
PruningStrategy ==============================

// PruningStrategy decides which archived log files get deleted.
type PruningStrategy interface {
	// ShouldPrune reports whether the archive at the given rank should be
	// removed; index is the entry's position in the newest-first listing
	// and totalSize the cumulative size up to and including this entry
	// (see cleaner.prune).
	ShouldPrune(index int, entry ArchivedFile, totalSize uint64) bool
	// IsEnabled reports whether this strategy can ever prune anything.
	IsEnabled() bool
}

// ============================== NoPruning ==============================

// noPruning never removes anything; it is the default strategy.
type noPruning int

var NoPruning PruningStrategy = noPruning(0)

// ShouldPrune implements PruningStrategy.
func (noPruning) ShouldPrune(int, ArchivedFile, uint64) bool {
	return false
}

// IsEnabled implements PruningStrategy.
func (noPruning) IsEnabled() bool {
	return false
}

// ============================== MaxCountPruning ==============================

// MaxCountPruning keeps at most its value's worth of archives (newest first).
type MaxCountPruning uint

var _ PruningStrategy = MaxCountPruning(0)

// ShouldPrune implements PruningStrategy.
func (p MaxCountPruning) ShouldPrune(index int, _ ArchivedFile, _ uint64) bool {
	return index >= int(p)
}

// IsEnabled implements PruningStrategy.
func (MaxCountPruning) IsEnabled() bool {
	return true
}

// ============================== MaxAgePruning ==============================

// MaxAgePruning removes archives older than its value.
type MaxAgePruning time.Duration

var _ PruningStrategy = MaxAgePruning(0)

// ShouldPrune implements PruningStrategy.
func (p MaxAgePruning) ShouldPrune(_ int, entry ArchivedFile, _ uint64) bool {
	return entry.Age > time.Duration(p)
}

// IsEnabled implements PruningStrategy.
func (MaxAgePruning) IsEnabled() bool {
	return true
}

// ============================== MaxSizePruning ==============================

// MaxSizePruning removes the oldest archives once the cumulative size of
// the kept archives exceeds its value in bytes.
type MaxSizePruning uint64

var _ PruningStrategy = MaxSizePruning(0)

// ShouldPrune implements PruningStrategy.
func (p MaxSizePruning) ShouldPrune(_ int, _ ArchivedFile, totalSize uint64) bool {
	return totalSize > uint64(p)
}

// IsEnabled implements PruningStrategy.
func (MaxSizePruning) IsEnabled() bool {
	return true
}

// ============================== CompoundPruningStrategy ==============================

// CompoundPruningStrategy prunes an entry when any sub-strategy does.
type CompoundPruningStrategy []PruningStrategy

var _ PruningStrategy = CompoundPruningStrategy(nil)

// ShouldPrune implements PruningStrategy.
+func (s CompoundPruningStrategy) ShouldPrune(index int, entry ArchivedFile, totalSize uint64) bool { + for _, sub := range s { + if sub.ShouldPrune(index, entry, totalSize) { + return true + } + } + return false +} + +func ComposePruningStrategy(strategies ...PruningStrategy) PruningStrategy { + s := make([]PruningStrategy, 0, len(strategies)) + for _, sub := range strategies { + if sub != nil && sub.IsEnabled() { + s = append(s, sub) + } + } + if len(s) == 1 { + return s[0] + } else if len(s) > 1 { + return CompoundPruningStrategy(s) + } else { + return NoPruning + } +} + +// ShouldPrune implements Pruning. +func (c CompoundPruningStrategy) IsEnabled() bool { + for _, sub := range c { + if sub.IsEnabled() { + return true + } + } + return false +} diff --git a/lib/logging/writer/rotatiion_test.go b/lib/logging/writer/rotatiion_test.go new file mode 100644 index 0000000000000000000000000000000000000000..0fc315492e0568279a17c45f76a2b4a3bf3d81cf --- /dev/null +++ b/lib/logging/writer/rotatiion_test.go @@ -0,0 +1,201 @@ +package writer + +import ( + "fmt" + "os" + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSizeRotationShouldRotate(t *testing.T) { + t.Parallel() + assert := assert.New(t) + + r := NewSizeRotation(50) + + assert.False(r.ShouldRotate(time.Now(), time.Now(), 1, 2)) + assert.False(r.ShouldRotate(time.Now(), time.Now(), 0, 52)) + assert.True(r.ShouldRotate(time.Now(), time.Now(), 1, 52)) + assert.True(r.ShouldRotate(time.Now(), time.Now(), 52, 102)) +} + +func TestSizeRotationFileSuffix(t *testing.T) { + t.Parallel() + require := require.New(t) + + root := t.TempDir() + r := NewSizeRotation(50) + + for j := range 4 { + prefix := fmt.Sprintf("%s/log-%d", root, j) + for i := range 5 { + suffix, err := r.PathSuffix(prefix, ".txt", time.Now()) + require.NoError(err) + require.True(filepath.Match(r.FileGlob(), suffix)) + require.Equal(fmt.Sprintf("%05d", i+1), suffix) + } + 
} +} + +func TestSizeRotationFileSuffixExist(t *testing.T) { + t.Parallel() + require := require.New(t) + + root := t.TempDir() + r := NewSizeRotation(50) + + require.NoError(os.WriteFile(root+"/log.00052.txt", nil, 0o777)) + + s, err := r.PathSuffix(root+"/log", ".txt", time.Now()) + require.NoError(err) + require.Equal("00053", s) + + require.NoError(os.Remove(root + "/log.00052.txt")) + + s, err = r.PathSuffix(root+"/log", ".txt", time.Now()) + require.NoError(err) + require.Equal("00054", s) +} + +func TestDailyRotationPathSuffix(t *testing.T) { + t.Parallel() + require := require.New(t) + + r := DailyRotation + + now := time.Date(2000, 1, 1, 0, 0, 0, 0, time.Local) + + suffix, err := r.PathSuffix("log", ".txt", now) + require.NoError(err) + require.True(filepath.Match(r.FileGlob(), suffix)) + require.Equal("2000-01-01", suffix) +} + +func TestDailyRotationShouldRotate(t *testing.T) { + t.Parallel() + + r := DailyRotation + now := time.Date(2000, 4, 1, 0, 5, 0, 0, time.UTC) + + positiveCases := []time.Time{ + now.Add(24 * time.Hour), + now.Add(48 * time.Hour), + time.Date(2000, 4, 2, 0, 0, 0, 0, time.UTC), + time.Date(2000, 5, 1, 0, 0, 0, 0, time.UTC), + time.Date(2001, 4, 1, 0, 0, 0, 0, time.UTC), + } + negativeCases := []time.Time{ + now.Add(time.Second), + now.Add(time.Minute), + now.Add(time.Hour), + now.Add(-time.Second), + now.Add(-time.Minute), + now.Add(-time.Hour), + now.Add(-24 * time.Hour), + now.Add(-48 * time.Hour), + time.Date(2000, 4, 1, 0, 0, 0, 0, time.UTC), + time.Date(1999, 4, 1, 0, 5, 0, 0, time.UTC), + time.Date(2000, 3, 1, 0, 5, 0, 0, time.UTC), + } + + for _, date := range positiveCases { + t.Run("allow/"+date.Sub(now).String(), func(t *testing.T) { + assert.Truef(t, r.ShouldRotate(now, date, 0, 0), "should rotate between %v and %v", now, date) + }) + } + for _, date := range negativeCases { + t.Run("deny/"+date.Sub(now).String(), func(t *testing.T) { + assert.Falsef(t, r.ShouldRotate(now, date, 0, 0), "should not rotate between %v and 
%v", now, date) + }) + } +} + +func TestDailyRotationNext(t *testing.T) { + t.Parallel() + assert := assert.New(t) + + r := DailyRotation + + assert.True(time.Date(2000, 1, 2, 0, 0, 0, 0, time.UTC).Equal( + r.Next(time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)), + )) + assert.True(time.Date(2000, 1, 2, 0, 0, 0, 0, time.UTC).Equal( + r.Next(time.Date(2000, 1, 1, 23, 59, 59, 0, time.UTC)), + )) + assert.True(time.Date(2000, 1, 2, 0, 0, 0, 0, time.UTC).Equal( + r.Next(time.Date(2000, 1, 1, 0, 5, 0, 0, time.UTC)), + )) + assert.True(time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC).Equal( + r.Next(time.Date(2000, 12, 31, 0, 5, 0, 0, time.UTC)), + )) +} + +func TestHourlyRotationPathSuffix(t *testing.T) { + t.Parallel() + require := require.New(t) + + r := HourlyRotation + now := time.Date(2000, 1, 1, 0, 0, 0, 0, time.Local) + + suffix, err := r.PathSuffix("log", ".txt", now) + require.NoError(err) + require.True(filepath.Match(r.FileGlob(), suffix)) + require.Equal("2000-01-01.00-00-00", suffix) +} + +func TestHourlyRotationShouldRotate(t *testing.T) { + t.Parallel() + + r := HourlyRotation + now := time.Date(2000, 4, 1, 5, 55, 0, 0, time.UTC) + + positiveCases := []time.Time{ + now.Add(time.Hour), + now.Add(5 * time.Minute), + now.Add(24 * time.Hour), + time.Date(2000, 4, 2, 0, 5, 55, 0, time.UTC), + time.Date(2000, 5, 1, 0, 5, 55, 0, time.UTC), + time.Date(2001, 4, 1, 0, 5, 55, 0, time.UTC), + } + negativeCases := []time.Time{ + now.Add(time.Second), + now.Add(time.Minute), + now.Add(-time.Hour), + now.Add(-time.Second), + } + + for _, date := range positiveCases { + t.Run("allow/"+date.Sub(now).String(), func(t *testing.T) { + assert.True(t, r.ShouldRotate(now, date, 0, 0)) + }) + } + for _, date := range negativeCases { + t.Run("deny/"+date.Sub(now).String(), func(t *testing.T) { + assert.False(t, r.ShouldRotate(now, date, 0, 0)) + }) + } +} + +func TestHourlyRotationNext(t *testing.T) { + t.Parallel() + assert := assert.New(t) + + r := HourlyRotation + + 
assert.True(time.Date(2000, 1, 1, 1, 0, 0, 0, time.UTC).Equal( + r.Next(time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)), + )) + assert.True(time.Date(2000, 1, 1, 6, 0, 0, 0, time.UTC).Equal( + r.Next(time.Date(2000, 1, 1, 5, 55, 0, 0, time.UTC)), + )) + assert.True(time.Date(2000, 1, 2, 0, 00, 0, 0, time.UTC).Equal( + r.Next(time.Date(2000, 1, 1, 23, 20, 0, 0, time.UTC)), + )) + assert.True(time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC).Equal( + r.Next(time.Date(2000, 12, 31, 23, 0, 0, 0, time.UTC)), + )) +} diff --git a/lib/logging/writer/rotation.go b/lib/logging/writer/rotation.go new file mode 100644 index 0000000000000000000000000000000000000000..6cb38474516dd4f4e3a9d756a02514b725a66127 --- /dev/null +++ b/lib/logging/writer/rotation.go @@ -0,0 +1,307 @@ +package writer + +import ( + "fmt" + "math" + "path/filepath" + "slices" + "strconv" + "strings" + "time" +) + +// ============================== RotationStrategy ============================== + +type RotationStrategy interface { + PathSuffix(prefix, ext string, now time.Time) (string, error) + FileGlob() string + ShouldRotate(previous, now time.Time, prevSize, newSize uint64) bool + IsEnabled() bool + Priority() int +} + +type RotationStrategyTrigger interface { + RotationStrategy + Next(previous time.Time) time.Time +} + +// ============================== noRotation ============================== + +type noRotation int + +var NoRotation RotationStrategy = noRotation(0) + +// FileGlob implements RotationStrategy. +func (noRotation) FileGlob() string { + return "" +} + +// IsEnabled implements RotationStrategy. +func (noRotation) IsEnabled() bool { + return false +} + +// PathSuffix implements RotationStrategy. +func (noRotation) PathSuffix(string, string, time.Time) (string, error) { + return "", nil +} + +// ShouldRotate implements RotationStrategy. +func (noRotation) ShouldRotate(time.Time, time.Time, uint64, uint64) bool { + return false +} + +// Priority implements RotationStrategy. 
func (noRotation) Priority() int {
	return math.MinInt
}

// ============================== SizeRotation ==============================

// SizeRotation rotates the logfile once writing would push it past a fixed
// byte limit, suffixing each archive with an increasing 5-digit index.
// It caches the last index per prefix, hence the pointer receiver.
type SizeRotation struct {
	maxSize    uint64
	lastIndex  int
	lastPrefix string
}

var _ RotationStrategy = (*SizeRotation)(nil)

// NewSizeRotation returns a SizeRotation with the given byte limit.
// It panics when maxSize is zero (programmer error, caught at setup).
func NewSizeRotation(maxSize uint64) *SizeRotation {
	if maxSize == 0 {
		panic("null maximum size")
	}
	return &SizeRotation{maxSize: maxSize}
}

// ShouldRotate implements RotationStrategy: rotate only when the file is
// non-empty and the next write would exceed the limit.
func (s *SizeRotation) ShouldRotate(_, _ time.Time, current, next uint64) bool {
	return current > 0 && next > s.maxSize
}

// PathSuffix implements RotationStrategy: returns the next zero-padded
// index, scanning existing archives the first time a new prefix is seen.
func (s *SizeRotation) PathSuffix(prefix, ext string, _ time.Time) (suffix string, err error) {
	if prefix != s.lastPrefix {
		s.lastPrefix = prefix
		if s.lastIndex, err = s.findLastIndex(prefix, ext); err != nil {
			return "", err
		}
	}

	s.lastIndex += 1
	return fmt.Sprintf("%05d", s.lastIndex), nil
}

// findLastIndex scans for existing "<prefix>.NNNNN<ext>" archives and
// returns the highest index found (0 when none).
// NOTE(review): the glob does not include any compression suffix, so
// already-compressed archives (e.g. ".txt.gz") are invisible here and the
// numbering could collide with them after a restart — confirm intended.
func (s *SizeRotation) findLastIndex(prefix, ext string) (last int, err error) {
	existing, err := filepath.Glob(prefix + "." + s.FileGlob() + ext)
	if err != nil || len(existing) == 0 {
		return 0, err
	}

	// The index digits sit between "<prefix>." and "<ext>".
	start, endOffset := len(prefix)+1, len(ext)
	for _, path := range existing {
		if idx, err := strconv.Atoi(path[start : len(path)-endOffset]); err != nil {
			return 0, err
		} else if int(idx) > last {
			last = int(idx)
		}
	}

	return last, nil
}

// FileGlob implements RotationStrategy: matches the 5-digit index suffix.
func (s *SizeRotation) FileGlob() string {
	return "[0-9][0-9][0-9][0-9][0-9]"
}

// IsEnabled implements RotationStrategy.
func (s *SizeRotation) IsEnabled() bool {
	return true
}

// Priority implements RotationStrategy.
func (*SizeRotation) Priority() int {
	return 100
}

// ============================== CompoundRotationStrategy ==============================

// CompoundRotationStrategy combines several strategies: it rotates when any
// sub-strategy asks for it and joins their path suffixes with ".".
type CompoundRotationStrategy []RotationStrategy

var _ RotationStrategyTrigger = CompoundRotationStrategy(nil)

// ComposeRotationStrategy drops nil/disabled strategies and returns
// NoRotation, the single survivor, or a compound sorted by descending
// Priority (so higher-priority suffixes come first in file names).
func ComposeRotationStrategy(strategies ...RotationStrategy) RotationStrategy {
	s := make([]RotationStrategy, 0, len(strategies))
	for _, sub := range strategies {
		if sub != nil && sub.IsEnabled() {
			s = append(s, sub)
		}
	}
	if len(s) == 1 {
		return s[0]
	} else if len(s) > 1 {
		slices.SortFunc(s, func(a, b RotationStrategy) int {
			return b.Priority() - a.Priority()
		})
		return CompoundRotationStrategy(s)
	} else {
		return NoRotation
	}
}

// FileGlob implements RotationStrategy: the non-empty sub-globs joined by ".".
func (c CompoundRotationStrategy) FileGlob() string {
	globs := make([]string, 0, len(c))
	for _, s := range c {
		if glob := s.FileGlob(); glob != "" {
			globs = append(globs, glob)
		}

	}
	return strings.Join(globs, ".")
}

// IsEnabled implements RotationStrategy: a compound is only built from
// enabled strategies (see ComposeRotationStrategy), so always true.
func (c CompoundRotationStrategy) IsEnabled() bool {
	return true
}

// Next implements RotationStrategyTrigger: the Next() of the first
// (highest-priority, per the Compose sort) time-triggered sub-strategy
// that yields a non-zero deadline.
func (c CompoundRotationStrategy) Next(previous time.Time) time.Time {
	for _, s := range c {
		if s, hasTimer := s.(RotationStrategyTrigger); hasTimer {
			next := s.Next(previous)
			if !next.IsZero() {
				return next
			}
		}
	}
	return time.Time{}
}

// PathSuffix implements RotationStrategy.
+func (c CompoundRotationStrategy) PathSuffix(prefix string, ext string, now time.Time) (string, error) { + suffixes := make([]string, 0, len(c)) + for _, s := range c { + if suffix, err := s.PathSuffix(prefix, ext, now); err != nil { + return "", err + } else if suffix != "" { + suffixes = append(suffixes, suffix) + } + + } + return strings.Join(suffixes, "."), nil +} + +// Priority implements RotationStrategy. +func (c CompoundRotationStrategy) Priority() int { + p := 0 + for _, s := range c { + if s.Priority() > p { + p = s.Priority() + } + } + return p +} + +// ShouldRotate implements RotationStrategy. +func (c CompoundRotationStrategy) ShouldRotate(previous time.Time, now time.Time, prevSize uint64, newSize uint64) bool { + for _, s := range c { + if s.ShouldRotate(previous, now, prevSize, newSize) { + return true + } + } + return false +} + +// ============================== startPeriodRotation ============================== + +type periodStartRotation struct { + timeFormat string + glob string + period time.Duration + truncate func(time.Time) time.Time +} + +func makePeriodStartRotation(period time.Duration, truncate func(time.Time) time.Time) periodStartRotation { + if period == 0 { + panic("null period") + } + timeFormat := "2006-01-02" + glob := "[0-9][0-9][0-9][0-9]-[0-1][0-9]-[0-3][0-9]" + if period < 24*time.Hour { + timeFormat += ".15-04-05" + glob += ".[0-2][0-9]-[0-5][0-9]-[0-5][0-9]" + } + return periodStartRotation{ + timeFormat: timeFormat, + glob: glob, + period: period, + truncate: truncate, + } +} + +var ( + FifteenMinuteRotation RotationStrategyTrigger = makePeriodStartRotation( + 15*time.Minute, + func(t time.Time) time.Time { + y, m, d := t.Date() + h, mi, _ := t.Clock() + mi -= mi % 15 + return time.Date(y, m, d, h, mi, 0, 0, t.Location()) + }, + ) + DailyRotation RotationStrategyTrigger = makePeriodStartRotation( + 24*time.Hour, + func(t time.Time) time.Time { + y, m, d := t.Date() + return time.Date(y, m, d, 0, 0, 0, 0, t.Location()) + 
}, + ) + HourlyRotation RotationStrategyTrigger = makePeriodStartRotation( + time.Hour, + func(t time.Time) time.Time { + y, m, d := t.Date() + return time.Date(y, m, d, t.Hour(), 0, 0, 0, t.Location()) + }, + ) + WeeklyRotation RotationStrategyTrigger = makePeriodStartRotation( + 7*24*time.Hour, + func(t time.Time) time.Time { + y, m, d := t.Date() + wk := t.Weekday() + return time.Date(y, m, d-int(wk), 0, 0, 0, 0, t.Location()) + }, + ) + MonthlyRotation RotationStrategyTrigger = makePeriodStartRotation( + 31*24*time.Hour, + func(t time.Time) time.Time { + y, m, _ := t.Date() + return time.Date(y, m, 1, 0, 0, 0, 0, t.Location()) + }, + ) +) + +// FileGlob implements RotationStrategy. +func (r periodStartRotation) FileGlob() string { + return r.glob +} + +// IsEnabled implements RotationStrategy. +func (r periodStartRotation) IsEnabled() bool { + return true +} + +// Next implements RotationStrategyTrigger. +func (r periodStartRotation) Next(previous time.Time) time.Time { + return r.truncate(previous.Add(r.period)) +} + +// PathSuffix implements RotationStrategy. +func (r periodStartRotation) PathSuffix(_ string, _ string, now time.Time) (string, error) { + return now.Format(r.timeFormat), nil +} + +// ShouldRotate implements RotationStrategy. 
func (r periodStartRotation) ShouldRotate(previous time.Time, now time.Time, _ uint64, _ uint64) bool {
	return !now.Before(r.Next(previous))
}

// Priority implements RotationStrategy.
func (r periodStartRotation) Priority() int {
	return 150
}
diff --git a/lib/logging/writer/writer.go b/lib/logging/writer/writer.go
new file mode 100644
index 0000000000000000000000000000000000000000..901a0d64b4ef73373a7d7a9b5b690152afe6837f
--- /dev/null
+++ b/lib/logging/writer/writer.go
@@ -0,0 +1,142 @@
package writer

import (
	"fmt"
	"io"
	"log/slog"
	"os"
	"sync"
	"sync/atomic"
)

// ============================== Writer ==============================

// Writer is an asynchronous io.WriteCloser with rotating capabilities
type Writer struct {
	reqs   chan<- *writeRequest // write requests, consumed by the background writer
	wg     sync.WaitGroup       // tracks background tasks (writer + optional cleaner)
	closed atomic.Bool          // set exactly once by close()
	err    error                // first close cause; read back by Close
	log    *slog.Logger
}

var _ io.WriteCloser = (*Writer)(nil)

var DefaultLogger = slog.New(slog.DiscardHandler)

// NewWriter opens an asynchronous rotating writer for path. It spawns a
// background writer goroutine, plus a cleaner goroutine when compression
// or pruning is configured; both end when the Writer is closed.
func NewWriter(path string, options ...Option) (*Writer, error) {
	c := DefaultConfig(path, options...)

	reqs := make(chan *writeRequest)
	w := &Writer{
		reqs: reqs,
		log:  c.logger(),
	}

	var archived chan string
	if c.needCleaner() {
		archived = make(chan string, 20)
		w.runBackgroundTask(func() error {
			return NewCleaner(c).run(archived)
		})
	}

	w.runBackgroundTask(func() error {
		return NewBackgroundWriter(c, archived).run(reqs)
	})

	return w, nil
}

// runBackgroundTask runs task in a goroutine; when the task returns (or
// panics) the Writer is closed with the task's error as the cause.
func (w *Writer) runBackgroundTask(task func() error) {
	w.wg.Add(1)
	go func() {
		defer func() {
			if p := recover(); p != nil {
				w.close(fmt.Errorf("recovered panic: %v", p))
			}
			w.wg.Done()
		}()
		w.close(task())
	}()
}

// close records the first close cause and closes the request channel;
// it returns true only for the first caller (CAS on w.closed).
// NOTE(review): close(w.reqs) can race with a concurrent Write that passed
// its closed.Load() check but has not yet sent — sending on a closed
// channel panics. Confirm callers never Write concurrently with Close.
func (w *Writer) close(cause error) bool {
	if w.closed.CompareAndSwap(false, true) {
		w.log.Debug("closing")
		w.err = cause
		close(w.reqs)
		return true
	}
	return false
}

// Write implements io.WriteCloser.
func (w *Writer) Write(data []byte) (n int, err error) {
	if w.closed.Load() {
		w.log.Debug("already closed")
		return 0, os.ErrClosed
	}
	return makeWriteRequest(data).send(w.reqs)
}

// Close implements io.WriteCloser. Safe to call multiple times; only the
// first call waits for the background tasks to finish.
// NOTE(review): later callers return w.err without waiting on wg, so they
// may observe the error before background shutdown completes — confirm.
func (w *Writer) Close() error {
	if w.close(nil) {
		w.log.Debug("waiting for background tasks to end")
		w.wg.Wait()
	}
	return w.err
}

// ============================== writeRequest ==============================

// writeRequest carries one Write payload to the background writer, which
// replies on the buffered response channel. Requests are pooled.
type writeRequest struct {
	data     []byte
	response chan *writeResponse
}

var requestPool = sync.Pool{New: func() any {
	return &writeRequest{
		data:     make([]byte, 0, 256),
		response: make(chan *writeResponse, 1),
	}
}}

// makeWriteRequest takes a pooled request and copies data into it, so the
// caller's buffer may be reused as soon as Write returns.
func makeWriteRequest(data []byte) *writeRequest {
	req := requestPool.Get().(*writeRequest)
	req.data = append(req.data[:0], data...)
	return req
}

// send submits the request, blocks for the reply, then recycles both the
// request and the response into their pools.
func (w *writeRequest) send(reqs chan<- *writeRequest) (int, error) {
	reqs <- w
	resp := <-w.response
	n, err := int(resp.written), resp.err
	responsePool.Put(resp)
	requestPool.Put(w)
	return n, err
}

// reply is called by the background writer to answer a request.
func (w *writeRequest) reply(written uint64, err error) {
	w.response <- makeWriteResponse(written, err)
}

// ============================== writeResponse ==============================

// writeResponse is the pooled reply to a writeRequest.
type writeResponse struct {
	written uint64
	err     error
}

var responsePool = sync.Pool{New: func() any {
	return &writeResponse{}
}}

// makeWriteResponse fills a pooled response with the outcome of one write.
func makeWriteResponse(written uint64, err error) *writeResponse {
	r := responsePool.Get().(*writeResponse)
	r.written = written
	r.err = err
	return r
}
diff --git a/lib/logging/writer/writer_test.go b/lib/logging/writer/writer_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..71e3254f0dc5cf43df6bad35b6452c53256e258f
--- /dev/null
+++ b/lib/logging/writer/writer_test.go
@@ -0,0 +1,415 @@
package writer

import (
	"fmt"
	"io"
	"log/slog"
	"os"
	"runtime"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/lmittmann/tint"
"github.com/stretchr/testify/assert" +) + +func TestSimpleWrite(t *testing.T) { + t.Parallel() + assert := NewFileAssert(t) + root := t.TempDir() + filePath := root + "/logfile.txt" + + w, err := NewWriter(filePath, withDebug()) + assert.NoError(err) + + assert.WriteOk(w, "foo") + assert.WriteOk(w, "bar") + + assert.NoError(w.Close()) + + assert.FileContent(filePath, "foobar") +} + +func TestWriteSizeRotaton(t *testing.T) { + t.Parallel() + assert := NewFileAssert(t) + root := t.TempDir() + + w, err := NewWriter(root+"/logfile.txt", WithMaxSize(10), withDebug()) + assert.NoError(err) + + assert.WriteOk(w, "foo") + assert.WriteOk(w, "bar1\n") + assert.WriteOk(w, "foobar2\n") + assert.WriteOk(w, "foobar3\n") + + assert.NoError(w.Close()) + + assert.FileContent(root+"/logfile.txt", "foobar3\n") + assert.FileContent(root+"/logfile.00001.txt", "foobar1\n") + assert.FileContent(root+"/logfile.00002.txt", "foobar2\n") +} + +func TestWriteTimeRotation(t *testing.T) { + t.Parallel() + assert := NewFileAssert(t) + root := t.TempDir() + + c := newTestClock(t) + dtr := debugTimeRotation(0) + + w, err := NewWriter(root+"/logfile.txt", WithRotation(&dtr), withDebug(), withClock(c)) + assert.NoError(err) + + assert.WriteOk(w, "foobar1\n") + c.Advance(time.Second) + + assert.WriteOk(w, "foobar2\n") + c.Advance(2 * time.Second) + + assert.WriteOk(w, "foobar3\n") + c.Advance(time.Second) + + assert.WriteOk(w, "foobar4\n") + + assert.NoError(w.Close()) + + assert.FileContent(root+"/logfile.0001-01-01.00-00-00.txt", "foobar1\n") + assert.FileContent(root+"/logfile.0001-01-01.00-00-01.txt", "foobar2\n") + assert.FileContent(root+"/logfile.0001-01-01.00-00-03.txt", "foobar3\n") + assert.FileContent(root+"/logfile.txt", "foobar4\n") +} + +func TestKeepMaxCount(t *testing.T) { + t.Parallel() + assert := NewFileAssert(t) + root := t.TempDir() + + w, err := NewWriter(root+"/logfile.txt", WithMaxSize(5), WithKeepMaxCount(2), withDebug()) + assert.NoError(err) + + for range 6 { + 
assert.WriteOk(w, "foobar\n") + } + + assert.NoError(w.Close()) + + assert.FileExists(root + "/logfile.txt") + assert.NoFileExists(root + "/logfile.00001.txt") + assert.NoFileExists(root + "/logfile.00002.txt") + assert.NoFileExists(root + "/logfile.00003.txt") + assert.FileExists(root + "/logfile.00004.txt") + assert.FileExists(root + "/logfile.00005.txt") +} + +func TestKeepMaxAge(t *testing.T) { + t.Parallel() + assert := NewFileAssert(t) + root := t.TempDir() + + c := newTestClock(t) + c.now = time.Date(2000, 01, 01, 0, 0, 0, 0, time.Local) + dtr := debugTimeRotation(0) + + w, err := NewWriter(root+"/logfile.txt", WithRotation(&dtr), WithKeepMaxAge(2*time.Second), withDebug(), withClock(c)) + assert.NoError(err) + + for range 4 { + assert.WriteOk(w, "foobar\n") + c.Advance(time.Second) + } + + assert.NoError(w.Close()) + + assert.NoFileExists(root + "/logfile.2000-01-01.00-00-00.txt") + assert.NoFileExists(root + "/logfile.2000-01-01.00-00-01.txt") + assert.FileExists(root + "/logfile.2000-01-01.00-00-02.txt") + assert.FileExists(root + "/logfile.2000-01-01.00-00-03.txt") +} + +func TestKeepMaxSize(t *testing.T) { + t.Parallel() + assert := NewFileAssert(t) + root := t.TempDir() + + w, err := NewWriter(root+"/logfile.txt", WithMaxSize(5), WithKeepMaxSize(10), withDebug()) + assert.NoError(err) + + for range 4 { + assert.WriteOk(w, "foobar") + } + + assert.NoError(w.Close()) + + assert.NoFileExists(root + "/logfile.00001.txt") + assert.NoFileExists(root + "/logfile.00002.txt") + assert.FileExists(root + "/logfile.00003.txt") + assert.FileExists(root + "/logfile.txt") +} + +func TestWriteCompression(t *testing.T) { + t.Parallel() + assert := NewFileAssert(t) + root := t.TempDir() + + w, err := NewWriter(root+"/logfile.txt", WithMaxSize(5), WithGZipCompression(), withDebug()) + assert.NoError(err) + + assert.WriteOk(w, "foobar1\n") + assert.WriteOk(w, "foobar2\n") + assert.WriteOk(w, "foobar3\n") + + assert.NoError(w.Close()) + + 
assert.FileContent(root+"/logfile.txt", "foobar3\n") + assert.FileExists(root + "/logfile.00001.txt.gz") + assert.FileExists(root + "/logfile.00002.txt.gz") +} + +func TestMultiSyncClose(t *testing.T) { + t.Parallel() + assert := NewFileAssert(t) + root := t.TempDir() + + w, err := NewWriter(root+"/logfile.txt", withDebug()) + assert.NoError(err) + + for range 4 { + assert.NotPanics(func() { + assert.NoError(w.Close()) + }) + } +} + +func TestMultiAsyncClose(t *testing.T) { + t.Parallel() + assert := NewFileAssert(t) + root := t.TempDir() + + w, err := NewWriter(root+"/logfile.txt", withDebug()) + assert.NoError(err) + + var wg sync.WaitGroup + start := make(chan bool) + + for range 4 { + wg.Add(1) + go func() { + defer wg.Done() + <-start + assert.NotPanics(func() { + assert.NoError(w.Close()) + }) + }() + } + + close(start) + wg.Wait() +} +func BenchmarkDefaultWriter(b *testing.B) { + for exp := 0; exp < 10; exp += 1 { + var data = []byte(strings.Repeat("0123456789ABCDEF", 1< 0 { + t.timers = t.timers[:i] + } else { + t.timers = nil + } +} + +func (t *testClock) Deadline(deadline time.Time) <-chan time.Time { + t.t.Helper() + t.l.Lock() + defer t.l.Unlock() + expired := make(chan time.Time, 1) + t.timers = append(t.timers, testTimer{deadline, expired}) + t.t.Logf("created timer at %s", deadline) + return expired +} + +func (t *testClock) After(d time.Duration) <-chan time.Time { + t.t.Helper() + return t.Deadline(t.now.Add(d)) +} + +func withDebug() Option { + return WithLogger(slog.New(tint.NewHandler(os.Stdout, &tint.Options{Level: slog.LevelDebug, TimeFormat: time.TimeOnly, AddSource: true}))) +} + +func withClock(c Clock) Option { + return func(w *Config) { + w.clock = c + } +} + +// ============================== debugTimeRotation ============================== + +type debugTimeRotation int + +var _ RotationStrategyTrigger = debugTimeRotation(0) + +// FileGlob implements RotationStrategy. 
+func (debugTimeRotation) FileGlob() string { + return "[0-9][0-9][0-9][0-9]-[0-1][0-9]-[0-3][0-9].[0-2][0-9]-[0-5][0-9]-[0-5][0-9]" +} + +// IsEnabled implements RotationStrategy. +func (debugTimeRotation) IsEnabled() bool { + return true +} + +// Next implements RotationStrategy. +func (debugTimeRotation) Next(previous time.Time) time.Time { + return previous.Add(time.Second) +} + +// PathSuffix implements RotationStrategy. +func (debugTimeRotation) PathSuffix(_ string, _ string, now time.Time) (string, error) { + return now.Format("2006-01-02.15-04-05"), nil +} + +// Priority implements RotationStrategy. +func (debugTimeRotation) Priority() int { + return 0 +} + +// ShouldRotate implements RotationStrategy. +func (debugTimeRotation) ShouldRotate(previous time.Time, now time.Time, prevSize uint64, newSize uint64) bool { + return now.Second() > previous.Second() || + now.Minute() > previous.Minute() || + now.Hour() > previous.Hour() || + now.YearDay() > previous.YearDay() || + now.Year() > previous.Year() +}