diff --git a/go.mod b/go.mod index fc9807cb..1b6b243d 100644 --- a/go.mod +++ b/go.mod @@ -22,6 +22,7 @@ require ( require ( github.com/Masterminds/semver/v3 v3.4.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/clipperhouse/displaywidth v0.6.1 // indirect github.com/clipperhouse/stringish v0.1.1 // indirect github.com/clipperhouse/uax29/v2 v2.3.0 // indirect @@ -36,7 +37,7 @@ require ( github.com/mattn/go-runewidth v0.0.19 // indirect github.com/olekukonko/cat v0.0.0-20250911104152-50322a0618f6 // indirect github.com/olekukonko/errors v1.1.0 // indirect - github.com/olekukonko/ll v0.1.3 // indirect + github.com/olekukonko/ll v0.1.6 // indirect github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/sagikazarmark/locafero v0.12.0 // indirect github.com/spf13/afero v1.15.0 // indirect diff --git a/go.sum b/go.sum index 0e1444e1..1a19f4c0 100644 --- a/go.sum +++ b/go.sum @@ -2,6 +2,8 @@ github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1 github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/clipperhouse/displaywidth v0.6.1 h1:/zMlAezfDzT2xy6acHBzwIfyu2ic0hgkT83UX5EY2gY= github.com/clipperhouse/displaywidth v0.6.1/go.mod h1:R+kHuzaYWFkTm7xoMmK1lFydbci4X2CicfbGstSGg0o= github.com/clipperhouse/stringish v0.1.1 h1:+NSqMOr3GR6k1FdRhhnXrLfztGzuG+VuFDfatpWHKCs= @@ -66,8 +68,8 @@ github.com/olekukonko/cat v0.0.0-20250911104152-50322a0618f6 h1:zrbMGy9YXpIeTnGj github.com/olekukonko/cat v0.0.0-20250911104152-50322a0618f6/go.mod h1:rEKTHC9roVVicUIfZK7DYrdIoM0EOr8mK1Hj5s3JjH0= github.com/olekukonko/errors v1.1.0 h1:RNuGIh15QdDenh+hNvKrJkmxxjV4hcS50Db478Ou5sM= github.com/olekukonko/errors v1.1.0/go.mod h1:ppzxA5jBKcO1vIpCXQ9ZqgDh8iwODz6OXIGKU8r5m4Y= -github.com/olekukonko/ll v0.1.3 h1:sV2jrhQGq5B3W0nENUISCR6azIPf7UBUpVq0x/y70Fg= -github.com/olekukonko/ll v0.1.3/go.mod h1:b52bVQRRPObe+yyBl0TxNfhesL0nedD4Cht0/zx55Ew= +github.com/olekukonko/ll v0.1.6 h1:lGVTHO+Qc4Qm+fce/2h2m5y9LvqaW+DCN7xW9hsU3uA= +github.com/olekukonko/ll v0.1.6/go.mod h1:NVUmjBb/aCtUpjKk75BhWrOlARz3dqsM+OtszpY4o88= github.com/olekukonko/tablewriter v1.1.2 h1:L2kI1Y5tZBct/O/TyZK1zIE9GlBj/TVs+AY5tZDCDSc= github.com/olekukonko/tablewriter v1.1.2/go.mod h1:z7SYPugVqGVavWoA2sGsFIoOVNmEHxUAAMrhXONtfkg= github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns= diff --git a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt new file mode 100644 index 00000000..24b53065 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all 
copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md new file mode 100644 index 00000000..33c88305 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -0,0 +1,74 @@ +# xxhash + +[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2) +[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml) + +xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. + +This package provides a straightforward API: + +``` +func Sum64(b []byte) uint64 +func Sum64String(s string) uint64 +type Digest struct{ ... } + func New() *Digest +``` + +The `Digest` type implements hash.Hash64. Its key methods are: + +``` +func (*Digest) Write([]byte) (int, error) +func (*Digest) WriteString(string) (int, error) +func (*Digest) Sum64() uint64 +``` + +The package is written with optimized pure Go and also contains even faster +assembly implementations for amd64 and arm64. If desired, the `purego` build tag +opts into using the Go code even on those architectures. + +[xxHash]: http://cyan4973.github.io/xxHash/ + +## Compatibility + +This package is in a module and the latest code is in version 2 of the module. +You need a version of Go with at least "minimal module compatibility" to use +github.com/cespare/xxhash/v2: + +* 1.9.7+ for Go 1.9 +* 1.10.3+ for Go 1.10 +* Go 1.11 or later + +I recommend using the latest release of Go. + +## Benchmarks + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64. 
+ +| input size | purego | asm | +| ---------- | --------- | --------- | +| 4 B | 1.3 GB/s | 1.2 GB/s | +| 16 B | 2.9 GB/s | 3.5 GB/s | +| 100 B | 6.9 GB/s | 8.1 GB/s | +| 4 KB | 11.7 GB/s | 16.7 GB/s | +| 10 MB | 12.0 GB/s | 17.3 GB/s | + +These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C +CPU using the following commands under Go 1.19.2: + +``` +benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') +benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') +``` + +## Projects using this package + +- [InfluxDB](https://github.com/influxdata/influxdb) +- [Prometheus](https://github.com/prometheus/prometheus) +- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) +- [FreeCache](https://github.com/coocood/freecache) +- [FastCache](https://github.com/VictoriaMetrics/fastcache) +- [Ristretto](https://github.com/dgraph-io/ristretto) +- [Badger](https://github.com/dgraph-io/badger) diff --git a/vendor/github.com/cespare/xxhash/v2/testall.sh b/vendor/github.com/cespare/xxhash/v2/testall.sh new file mode 100644 index 00000000..94b9c443 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/testall.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -eu -o pipefail + +# Small convenience script for running the tests with various combinations of +# arch/tags. This assumes we're running on amd64 and have qemu available. + +go test ./... +go test -tags purego ./... +GOARCH=arm64 go test +GOARCH=arm64 go test -tags purego diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go new file mode 100644 index 00000000..78bddf1c --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -0,0 +1,243 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. +package xxhash + +import ( + "encoding/binary" + "errors" + "math/bits" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// Store the primes in an array as well. +// +// The consts are used when possible in Go code to avoid MOVs but we need a +// contiguous array for the assembly code. +var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} + +// Digest implements hash.Hash64. +// +// Note that a zero-valued Digest is not ready to receive writes. +// Call Reset or create a Digest using New before calling other methods. +type Digest struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total uint64 + mem [32]byte + n int // how much of mem is used +} + +// New creates a new Digest with a zero seed. +func New() *Digest { + return NewWithSeed(0) +} + +// NewWithSeed creates a new Digest with the given seed. +func NewWithSeed(seed uint64) *Digest { + var d Digest + d.ResetWithSeed(seed) + return &d +} + +// Reset clears the Digest's state so that it can be reused. +// It uses a seed value of zero. +func (d *Digest) Reset() { + d.ResetWithSeed(0) +} + +// ResetWithSeed clears the Digest's state so that it can be reused. +// It uses the given seed to initialize the state. +func (d *Digest) ResetWithSeed(seed uint64) { + d.v1 = seed + prime1 + prime2 + d.v2 = seed + prime2 + d.v3 = seed + d.v4 = seed - prime1 + d.total = 0 + d.n = 0 +} + +// Size always returns 8 bytes. +func (d *Digest) Size() int { return 8 } + +// BlockSize always returns 32 bytes. 
+func (d *Digest) BlockSize() int { return 32 } + +// Write adds more data to d. It always returns len(b), nil. +func (d *Digest) Write(b []byte) (n int, err error) { + n = len(b) + d.total += uint64(n) + + memleft := d.mem[d.n&(len(d.mem)-1):] + + if d.n+n < 32 { + // This new data doesn't even fill the current block. + copy(memleft, b) + d.n += n + return + } + + if d.n > 0 { + // Finish off the partial block. + c := copy(memleft, b) + d.v1 = round(d.v1, u64(d.mem[0:8])) + d.v2 = round(d.v2, u64(d.mem[8:16])) + d.v3 = round(d.v3, u64(d.mem[16:24])) + d.v4 = round(d.v4, u64(d.mem[24:32])) + b = b[c:] + d.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. + nw := writeBlocks(d, b) + b = b[nw:] + } + + // Store any remaining partial block. + copy(d.mem[:], b) + d.n = len(b) + + return +} + +// Sum appends the current hash to b and returns the resulting slice. +func (d *Digest) Sum(b []byte) []byte { + s := d.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +// Sum64 returns the current hash. +func (d *Digest) Sum64() uint64 { + var h uint64 + + if d.total >= 32 { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = d.v3 + prime5 + } + + h += d.total + + b := d.mem[:d.n&(len(d.mem)-1)] + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 + h = rol23(h)*prime2 + prime3 + b = b[4:] + } + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +const ( + magic = "xxh\x06" + marshaledSize = len(magic) + 8*5 + 32 +) + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d *Digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + b = appendUint64(b, d.v1) + b = appendUint64(b, d.v2) + b = appendUint64(b, d.v3) + b = appendUint64(b, d.v4) + b = appendUint64(b, d.total) + b = append(b, d.mem[:d.n]...) + b = b[:len(b)+len(d.mem)-d.n] + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +func (d *Digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("xxhash: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("xxhash: invalid hash state size") + } + b = b[len(magic):] + b, d.v1 = consumeUint64(b) + b, d.v2 = consumeUint64(b) + b, d.v3 = consumeUint64(b) + b, d.v4 = consumeUint64(b) + b, d.total = consumeUint64(b) + copy(d.mem[:], b) + d.n = int(d.total % uint64(len(d.mem))) + return nil +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.LittleEndian.PutUint64(a[:], x) + return append(b, a[:]...) 
+} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := u64(b) + return b[8:], x +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s new file mode 100644 index 00000000..3e8b1325 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s @@ -0,0 +1,209 @@ +//go:build !appengine && gc && !purego +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Registers: +#define h AX +#define d AX +#define p SI // pointer to advance through b +#define n DX +#define end BX // loop end +#define v1 R8 +#define v2 R9 +#define v3 R10 +#define v4 R11 +#define x R12 +#define prime1 R13 +#define prime2 R14 +#define prime4 DI + +#define round(acc, x) \ + IMULQ prime2, x \ + ADDQ x, acc \ + ROLQ $31, acc \ + IMULQ prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + IMULQ prime2, x \ + ROLQ $31, x \ + IMULQ prime1, x + +// mergeRound applies a merge round on the two registers acc and x. +// It assumes that prime1, prime2, and prime4 have been loaded. +#define mergeRound(acc, x) \ + round0(x) \ + XORQ x, acc \ + IMULQ prime1, acc \ + ADDQ prime4, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that there is at least one block +// to process. +#define blockLoop() \ +loop: \ + MOVQ +0(p), x \ + round(v1, x) \ + MOVQ +8(p), x \ + round(v2, x) \ + MOVQ +16(p), x \ + round(v3, x) \ + MOVQ +24(p), x \ + round(v4, x) \ + ADDQ $32, p \ + CMPQ p, end \ + JLE loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + // Load fixed primes. + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + MOVQ ·primes+24(SB), prime4 + + // Load slice. + MOVQ b_base+0(FP), p + MOVQ b_len+8(FP), n + LEAQ (p)(n*1), end + + // The first loop limit will be len(b)-32. + SUBQ $32, end + + // Check whether we have at least one block. + CMPQ n, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). 
+ MOVQ prime1, v1 + ADDQ prime2, v1 + MOVQ prime2, v2 + XORQ v3, v3 + XORQ v4, v4 + SUBQ prime1, v4 + + blockLoop() + + MOVQ v1, h + ROLQ $1, h + MOVQ v2, x + ROLQ $7, x + ADDQ x, h + MOVQ v3, x + ROLQ $12, x + ADDQ x, h + MOVQ v4, x + ROLQ $18, x + ADDQ x, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + + JMP afterBlocks + +noBlocks: + MOVQ ·primes+32(SB), h + +afterBlocks: + ADDQ n, h + + ADDQ $24, end + CMPQ p, end + JG try4 + +loop8: + MOVQ (p), x + ADDQ $8, p + round0(x) + XORQ x, h + ROLQ $27, h + IMULQ prime1, h + ADDQ prime4, h + + CMPQ p, end + JLE loop8 + +try4: + ADDQ $4, end + CMPQ p, end + JG try1 + + MOVL (p), x + ADDQ $4, p + IMULQ prime1, x + XORQ x, h + + ROLQ $23, h + IMULQ prime2, h + ADDQ ·primes+16(SB), h + +try1: + ADDQ $4, end + CMPQ p, end + JGE finalize + +loop1: + MOVBQZX (p), x + ADDQ $1, p + IMULQ ·primes+32(SB), x + XORQ x, h + ROLQ $11, h + IMULQ prime1, h + + CMPQ p, end + JL loop1 + +finalize: + MOVQ h, x + SHRQ $33, x + XORQ x, h + IMULQ prime2, h + MOVQ h, x + SHRQ $29, x + XORQ x, h + IMULQ ·primes+16(SB), h + MOVQ h, x + SHRQ $32, x + XORQ x, h + + MOVQ h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + // Load fixed primes needed for round. + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + + // Load slice. + MOVQ b_base+8(FP), p + MOVQ b_len+16(FP), n + LEAQ (p)(n*1), end + SUBQ $32, end + + // Load vN from d. + MOVQ s+0(FP), d + MOVQ 0(d), v1 + MOVQ 8(d), v2 + MOVQ 16(d), v3 + MOVQ 24(d), v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. + blockLoop() + + // Copy vN back to d. + MOVQ v1, 0(d) + MOVQ v2, 8(d) + MOVQ v3, 16(d) + MOVQ v4, 24(d) + + // The number of bytes written is p minus the old base pointer. + SUBQ b_base+8(FP), p + MOVQ p, ret+32(FP) + + RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s new file mode 100644 index 00000000..7e3145a2 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s @@ -0,0 +1,183 @@ +//go:build !appengine && gc && !purego +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Registers: +#define digest R1 +#define h R2 // return value +#define p R3 // input pointer +#define n R4 // input length +#define nblocks R5 // n / 32 +#define prime1 R7 +#define prime2 R8 +#define prime3 R9 +#define prime4 R10 +#define prime5 R11 +#define v1 R12 +#define v2 R13 +#define v3 R14 +#define v4 R15 +#define x1 R20 +#define x2 R21 +#define x3 R22 +#define x4 R23 + +#define round(acc, x) \ + MADD prime2, acc, x, acc \ + ROR $64-31, acc \ + MUL prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + MUL prime2, x \ + ROR $64-31, x \ + MUL prime1, x + +#define mergeRound(acc, x) \ + round0(x) \ + EOR x, acc \ + MADD acc, prime4, prime1, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that n >= 32. 
+#define blockLoop() \ + LSR $5, n, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 16(p), (x1, x2) \ + LDP.P 16(p), (x3, x4) \ + round(v1, x1) \ + round(v2, x2) \ + round(v3, x3) \ + round(v4, x4) \ + SUB $1, nblocks \ + CBNZ nblocks, loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + LDP b_base+0(FP), (p, n) + + LDP ·primes+0(SB), (prime1, prime2) + LDP ·primes+16(SB), (prime3, prime4) + MOVD ·primes+32(SB), prime5 + + CMP $32, n + CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } + BLT afterLoop + + ADD prime1, prime2, v1 + MOVD prime2, v2 + MOVD $0, v3 + NEG prime1, v4 + + blockLoop() + + ROR $64-1, v1, x1 + ROR $64-7, v2, x2 + ADD x1, x2 + ROR $64-12, v3, x3 + ROR $64-18, v4, x4 + ADD x3, x4 + ADD x2, x4, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + +afterLoop: + ADD n, h + + TBZ $4, n, try8 + LDP.P 16(p), (x1, x2) + + round0(x1) + + // NOTE: here and below, sequencing the EOR after the ROR (using a + // rotated register) is worth a small but measurable speedup for small + // inputs. + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + + round0(x2) + ROR $64-27, h + EOR x2 @> 64-27, h, h + MADD h, prime4, prime1, h + +try8: + TBZ $3, n, try4 + MOVD.P 8(p), x1 + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + +try4: + TBZ $2, n, try2 + MOVWU.P 4(p), x2 + + MUL prime1, x2 + ROR $64-23, h + EOR x2 @> 64-23, h, h + MADD h, prime3, prime2, h + +try2: + TBZ $1, n, try1 + MOVHU.P 2(p), x3 + AND $255, x3, x1 + LSR $8, x3, x2 + + MUL prime5, x1 + ROR $64-11, h + EOR x1 @> 64-11, h, h + MUL prime1, h + + MUL prime5, x2 + ROR $64-11, h + EOR x2 @> 64-11, h, h + MUL prime1, h + +try1: + TBZ $0, n, finalize + MOVBU (p), x4 + + MUL prime5, x4 + ROR $64-11, h + EOR x4 @> 64-11, h, h + MUL prime1, h + +finalize: + EOR h >> 33, h + MUL prime2, h + EOR h >> 29, h + MUL prime3, h + EOR h >> 32, h + + MOVD h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + LDP ·primes+0(SB), (prime1, prime2) + + // Load state. Assume v[1-4] are stored contiguously. + MOVD d+0(FP), digest + LDP 0(digest), (v1, v2) + LDP 16(digest), (v3, v4) + + LDP b_base+8(FP), (p, n) + + blockLoop() + + // Store updated state. + STP (v1, v2), 0(digest) + STP (v3, v4), 16(digest) + + BIC $31, n + MOVD n, ret+32(FP) + RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go new file mode 100644 index 00000000..78f95f25 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go @@ -0,0 +1,15 @@ +//go:build (amd64 || arm64) && !appengine && gc && !purego +// +build amd64 arm64 +// +build !appengine +// +build gc +// +build !purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b with a zero seed. +// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(d *Digest, b []byte) int diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go new file mode 100644 index 00000000..118e49e8 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -0,0 +1,76 @@ +//go:build (!amd64 && !arm64) || appengine || !gc || purego +// +build !amd64,!arm64 appengine !gc purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b with a zero seed. 
+func Sum64(b []byte) uint64 { + // A simpler version would be + // d := New() + // d.Write(b) + // return d.Sum64() + // but this is faster, particularly for small inputs. + + n := len(b) + var h uint64 + + if n >= 32 { + v1 := primes[0] + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -primes[0] + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 + h = rol23(h)*prime2 + prime3 + b = b[4:] + } + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(d *Digest, b []byte) int { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + n := len(b) + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 + return n - len(b) +} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go new file mode 100644 index 00000000..05f5e7df --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -0,0 +1,16 @@ +//go:build appengine +// +build appengine + +// This file contains the safe implementations of otherwise unsafe-using code. + +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s with a zero seed. +func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} + +// WriteString adds more data to d. It always returns len(s), nil. +func (d *Digest) WriteString(s string) (n int, err error) { + return d.Write([]byte(s)) +} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go new file mode 100644 index 00000000..cf9d42ae --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -0,0 +1,58 @@ +//go:build !appengine +// +build !appengine + +// This file encapsulates usage of unsafe. +// xxhash_safe.go contains the safe implementations. + +package xxhash + +import ( + "unsafe" +) + +// In the future it's possible that compiler optimizations will make these +// XxxString functions unnecessary by realizing that calls such as +// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205. +// If that happens, even if we keep these functions they can be replaced with +// the trivial safe code. + +// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is: +// +// var b []byte +// bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) +// bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data +// bh.Len = len(s) +// bh.Cap = len(s) +// +// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough +// weight to this sequence of expressions that any function that uses it will +// not be inlined. Instead, the functions below use a different unsafe +// conversion designed to minimize the inliner weight and allow both to be +// inlined. 
There is also a test (TestInlining) which verifies that these are +// inlined. +// +// See https://github.com/golang/go/issues/42739 for discussion. + +// Sum64String computes the 64-bit xxHash digest of s with a zero seed. +// It may be faster than Sum64([]byte(s)) by avoiding a copy. +func Sum64String(s string) uint64 { + b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})) + return Sum64(b) +} + +// WriteString adds more data to d. It always returns len(s), nil. +// It may be faster than Write([]byte(s)) by avoiding a copy. +func (d *Digest) WriteString(s string) (n int, err error) { + d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))) + // d.Write always returns len(s), nil. + // Ignoring the return output and returning these fixed values buys a + // savings of 6 in the inliner's cost model. + return len(s), nil +} + +// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout +// of the first two words is the same as the layout of a string. +type sliceHeader struct { + s string + cap int +} diff --git a/vendor/github.com/olekukonko/ll/.goreleaser.yaml b/vendor/github.com/olekukonko/ll/.goreleaser.yaml new file mode 100644 index 00000000..937f3634 --- /dev/null +++ b/vendor/github.com/olekukonko/ll/.goreleaser.yaml @@ -0,0 +1,37 @@ +# yaml-language-server: $schema=https://goreleaser.com/static/schema.json +version: 2 + +project_name: ll + +# For a library repo, publish source archives instead of binaries. +source: + enabled: true + name_template: "{{ .ProjectName }}_{{ .Version }}" + + # Optional: include/exclude files in the source archive (defaults are usually fine) + # files: + # - README.md + # - LICENSE + # - go.mod + # - go.sum + # - "**/*.go" + +# No binaries to build. +builds: [] + +## Other Information + +checksum: + name_template: "checksums.txt" + +snapshot: + version_template: "{{ .Tag }}-next" + +changelog: + sort: asc + filters: + exclude: + - "^docs:" + - "^test:" + - "^chore:" + - "^ci:" diff --git a/vendor/github.com/olekukonko/ll/Makefile b/vendor/github.com/olekukonko/ll/Makefile new file mode 100644 index 00000000..e9bf75d3 --- /dev/null +++ b/vendor/github.com/olekukonko/ll/Makefile @@ -0,0 +1,99 @@ +# Git remote for pushing tags +REMOTE ?= origin + +# Version for release tagging (required for tag/release targets) +RELEASE_VERSION ?= + +# Convenience +GO ?= go +GOLANGCI ?= golangci-lint +GORELEASER?= goreleaser + +.PHONY: help \ + test race bench fmt tidy lint check \ + ensure-clean ensure-release-version tag tag-delete \ + release release-dry + +help: + @echo "Targets:" + @echo " fmt - gofmt + go fmt" + @echo " tidy - go mod tidy" + @echo " test - go test ./..." + @echo " race - go test -race ./..." + @echo " bench - go test -bench=. ./..." + @echo " lint - golangci-lint run ./... (if installed)" + @echo " check - fmt + tidy + test + race" + @echo "" + @echo "Release targets:" + @echo " tag - Create annotated tag RELEASE_VERSION and push" + @echo " tag-delete - Delete tag RELEASE_VERSION locally + remote" + @echo " release - tag + goreleaser release --clean (if you use goreleaser)" + @echo " release-dry - tag + goreleaser release --clean --skip=publish" + @echo "" + @echo "Usage:" + @echo " make check" + @echo " make tag RELEASE_VERSION=v0.1.2" + @echo " make release RELEASE_VERSION=v0.1.2" + +fmt: + @echo "Formatting..." + gofmt -w -s . + $(GO) fmt ./... + +tidy: + @echo "Tidying..." + $(GO) mod tidy + +test: + @echo "Testing..." + $(GO) test ./... -count=1 + +race: + @echo "Race testing..." + $(GO) test ./... 
-race -count=1 + +bench: + @echo "Bench..." + $(GO) test ./... -bench=. -run=^$$ + +lint: + @echo "Linting..." + @command -v $(GOLANGCI) >/dev/null 2>&1 || { echo "golangci-lint not found"; exit 1; } + $(GOLANGCI) run ./... + +check: fmt tidy test race + +# -------------------------- +# Release helpers +# -------------------------- + +ensure-clean: + @echo "Checking git working tree..." + @git diff --quiet || (echo "Error: tracked changes exist. Commit/stash them."; exit 1) + @test -z "$$(git status --porcelain)" || (echo "Error: uncommitted/untracked files:"; git status --porcelain; exit 1) + @echo "OK: working tree clean" + +ensure-release-version: + @test -n "$(RELEASE_VERSION)" || (echo "Error: set RELEASE_VERSION, e.g. make tag RELEASE_VERSION=v0.1.2"; exit 1) + +tag: ensure-clean ensure-release-version + @if git rev-parse "$(RELEASE_VERSION)" >/dev/null 2>&1; then \ + echo "Error: tag $(RELEASE_VERSION) already exists. Bump version."; \ + exit 1; \ + fi + @echo "Tagging $(RELEASE_VERSION) at HEAD $$(git rev-parse --short HEAD)" + @git tag -a $(RELEASE_VERSION) -m "$(RELEASE_VERSION)" + @git push $(REMOTE) $(RELEASE_VERSION) + +tag-delete: ensure-release-version + @echo "Deleting tag $(RELEASE_VERSION) locally + remote..." + @git tag -d $(RELEASE_VERSION) 2>/dev/null || true + @git push $(REMOTE) :refs/tags/$(RELEASE_VERSION) || true + +release: tag + @command -v $(GORELEASER) >/dev/null 2>&1 || { echo "goreleaser not found"; exit 1; } + $(GORELEASER) release --clean + +release-dry: tag + @command -v $(GORELEASER) >/dev/null 2>&1 || { echo "goreleaser not found"; exit 1; } + $(GORELEASER) release --clean --skip=publish diff --git a/vendor/github.com/olekukonko/ll/README.md b/vendor/github.com/olekukonko/ll/README.md index facb4736..aaa58a4b 100644 --- a/vendor/github.com/olekukonko/ll/README.md +++ b/vendor/github.com/olekukonko/ll/README.md @@ -1,17 +1,19 @@ # ll - A Modern Structured Logging Library for Go -`ll` is a high-performance, production-ready logging library for Go, designed to provide **hierarchical namespaces**, **structured logging**, **middleware pipelines**, **conditional logging**, and support for multiple output formats, including text, JSON, colorized logs, and compatibility with Go’s `slog`. It’s ideal for applications requiring fine-grained log control, extensibility, and scalability. +`ll` is a high-performance, production-ready logging library for Go, designed to provide **hierarchical namespaces**, **structured logging**, **middleware pipelines**, **conditional logging**, and support for multiple output formats, including text, JSON, colorized logs, syslog, VictoriaLogs, and compatibility with Go's `slog`. It's ideal for applications requiring fine-grained log control, extensibility, and scalability. ## Key Features -- **Hierarchical Namespaces**: Organize logs with fine-grained control over subsystems (e.g., "app/db"). -- **Structured Logging**: Add key-value metadata for machine-readable logs. -- **Middleware Pipeline**: Customize log processing with error-based rejection. -- **Conditional Logging**: Optimize performance by skipping unnecessary log operations. -- **Multiple Output Formats**: Support for text, JSON, colorized logs, and `slog` integration. -- **Debugging Utilities**: Inspect variables (`Dbg`), binary data (`Dump`), and stack traces (`Stack`). -- **Thread-Safe**: Built for concurrent use with mutex-protected state. -- **Performance Optimized**: Minimal allocations and efficient namespace caching. 
+- **Logging Enabled by Default** - Zero configuration to start logging +- **Hierarchical Namespaces** - Organize logs with fine-grained control over subsystems (e.g., "app/db") +- **Structured Logging** - Add key-value metadata for machine-readable logs +- **Middleware Pipeline** - Customize log processing with rate limiting, sampling, and deduplication +- **Conditional & Error-Based Logging** - Optimize performance with fluent `If`, `IfErr`, `IfAny`, `IfOne` chains +- **Multiple Output Formats** - Text, JSON, colorized ANSI, syslog, VictoriaLogs, and `slog` integration +- **Advanced Debugging Utilities** - Source-aware `Dbg()`, hex/ASCII `Dump()`, private field `Inspect()`, and stack traces +- **Production Ready** - Buffered batching, log rotation, duplicate suppression, and rate limiting +- **Thread-Safe** - Built for high-concurrency with atomic operations, sharded mutexes, and lock-free fast paths +- **Performance Optimized** - Zero allocations for disabled logs, sync.Pool buffers, LRU caching for source files ## Installation @@ -21,235 +23,267 @@ Install `ll` using Go modules: go get github.com/olekukonko/ll ``` -Ensure you have Go 1.21 or later for optimal compatibility. - -## Getting Started - -Here’s a quick example to start logging with `ll`: +Requires Go 1.21 or later. +## Quick Start ```go package main -import ( - "github.com/olekukonko/ll" -) +import "github.com/olekukonko/ll" func main() { - // Create a logger with namespace "app" - logger := ll.New("") - - // enable output - logger.Enable() - - // Basic log - logger.Info("Welcome") // Output: [app] INFO: Application started - - logger = logger.Namespace("app") - - // Basic log - logger.Info("start at :8080") // Output: [app] INFO: Application started - - //Output - //INFO: Welcome - //[app] INFO: start at :8080 + // Logger is ENABLED by default - no .Enable() needed! + logger := ll.New("app") + + // Basic logging - works immediately + logger.Info("Server starting") // Output: [app] INFO: Server starting + logger.Warn("Memory high") // Output: [app] WARN: Memory high + logger.Error("Connection failed") // Output: [app] ERROR: Connection failed + + // Structured fields + logger.Fields("user", "alice", "status", 200).Info("Login successful") + // Output: [app] INFO: Login successful [user=alice status=200] } - ``` -```go -package main +**That's it. No `.Enable()`, no handlers to configure—it just works.** -import ( - "github.com/olekukonko/ll" - "github.com/olekukonko/ll/lh" - "os" -) +## Core Concepts -func main() { - // Chaining - logger := ll.New("app").Enable().Handler(lh.NewTextHandler(os.Stdout)) +### 1. Enabled by Default, Configurable When Needed - // Basic log - logger.Info("Application started") // Output: [app] INFO: Application started +Unlike many logging libraries that require explicit enabling, `ll` **logs immediately**. This eliminates boilerplate and reduces the chance of missing logs in production. - // Structured log with fields - logger.Fields("user", "alice", "status", 200).Info("User logged in") - // Output: [app] INFO: User logged in [user=alice status=200] +```go +// This works out of the box: +ll.Info("Service started") // Output: [] INFO: Service started - // Conditional log - debugMode := false - logger.If(debugMode).Debug("Debug info") // No output (debugMode is false) -} +// But you still have full control: +ll.Disable() // Global shutdown +ll.Enable() // Reactivate ``` -## Core Features - -### 1. 
Hierarchical Namespaces - -Namespaces allow you to organize logs hierarchically, enabling precise control over logging for different parts of your application. This is especially useful for large systems with multiple components. +### 2. Hierarchical Namespaces -**Benefits**: -- **Granular Control**: Enable/disable logs for specific subsystems (e.g., "app/db" vs. "app/api"). -- **Scalability**: Manage log volume in complex applications. -- **Readability**: Clear namespace paths improve traceability. +Organize logs hierarchically with precise control over subsystems: -**Example**: ```go -logger := ll.New("app").Enable().Handler(lh.NewTextHandler(os.Stdout)) +// Create a logger hierarchy +root := ll.New("app") +db := root.Namespace("database") +cache := root.Namespace("cache").Style(lx.NestedPath) -// Child loggers -dbLogger := logger.Namespace("db") -apiLogger := logger.Namespace("api").Style(lx.NestedPath) +// Control logging per namespace +root.NamespaceEnable("app/database") // Enable database logs +root.NamespaceDisable("app/cache") // Disable cache logs -// Namespace control -logger.NamespaceEnable("app/db") // Enable DB logs -logger.NamespaceDisable("app/api") // Disable API logs - -dbLogger.Info("Query executed") // Output: [app/db] INFO: Query executed -apiLogger.Info("Request received") // No output +db.Info("Connected") // Output: [app/database] INFO: Connected +cache.Info("Hit") // No output (disabled) ``` -### 2. Structured Logging +### 3. Structured Logging with Ordered Fields -Add key-value metadata to logs for machine-readable output, making it easier to query and analyze logs in tools like ELK or Grafana. +Fields maintain insertion order and support fluent chaining: -**Example**: ```go -logger := ll.New("app").Enable().Handler(lh.NewTextHandler(os.Stdout)) - -// Variadic fields -logger.Fields("user", "bob", "status", 200).Info("Request completed") -// Output: [app] INFO: Request completed [user=bob status=200] +// Fluent key-value pairs +logger. + Fields("request_id", "req-123"). + Fields("user", "alice"). + Fields("duration_ms", 42). + Info("Request processed") // Map-based fields -logger.Field(map[string]interface{}{"method": "GET"}).Info("Request") -// Output: [app] INFO: Request [method=GET] +logger.Field(map[string]interface{}{ + "method": "POST", + "path": "/api/users", +}).Debug("API call") + +// Persistent context (included in ALL subsequent logs) +logger.AddContext("environment", "production", "version", "1.2.3") +logger.Info("Deployed") // Output: ... [environment=production version=1.2.3] ``` -### 3. Middleware Pipeline +### 4. Conditional & Error-Based Logging -Customize log processing with a middleware pipeline. Middleware functions can enrich, filter, or transform logs, using an error-based rejection mechanism (non-nil errors stop logging). +Optimize performance with fluent conditional chains that **completely skip processing** when conditions are false: -**Example**: ```go -logger := ll.New("app").Enable().Handler(lh.NewTextHandler(os.Stdout)) +// Boolean conditions +logger.If(debugMode).Debug("Detailed diagnostics") // No overhead when false +logger.If(featureEnabled).Info("Feature used") + +// Error conditions +err := db.Query() +logger.IfErr(err).Error("Query failed") // Logs only if err != nil + +// Multiple conditions - ANY true +logger.IfErrAny(err1, err2, err3).Fatal("System failure") + +// Multiple conditions - ALL true +logger.IfErrOne(validateErr, authErr).Error("Both checks failed") + +// Chain conditions +logger. + If(debugMode). + IfErr(queryErr). 
+ Fields("query", sql). + Debug("Query debug") +``` -// Enrich logs with app metadata -logger.Use(ll.FuncMiddleware(func(e *lx.Entry) error { - if e.Fields == nil { - e.Fields = make(map[string]interface{}) - } - e.Fields["app"] = "myapp" - return nil -})) +**Performance**: When conditions are false, the logger returns immediately with zero allocations. -// Filter low-level logs -logger.Use(ll.FuncMiddleware(func(e *lx.Entry) error { - if e.Level < lx.LevelWarn { - return fmt.Errorf("level too low") - } - return nil -})) +### 5. Powerful Debugging Toolkit + +`ll` includes advanced debugging utilities not found in standard logging libraries: -logger.Info("Ignored") // No output (filtered) -logger.Warn("Warning") // Output: [app] WARN: Warning [app=myapp] +#### Dbg() - Source-Aware Variable Inspection +Captures both variable name AND value from your source code: + +```go +x := 42 +user := &User{Name: "Alice"} +ll.Dbg(x, user) +// Output: [file.go:123] x = 42, *user = &{Name:Alice} ``` -### 4. Conditional Logging +#### Dump() - Hex/ASCII Binary Inspection +Perfect for protocol debugging and binary data: + +```go +ll.Handler(lh.NewColorizedHandler(os.Stdout)) +ll.Dump([]byte("hello\nworld")) +// Output: Colorized hex/ASCII dump with offset markers +``` -Optimize performance by skipping expensive log operations when conditions are false, ideal for production environments. +#### Inspect() - Private Field Reflection +Reveals unexported fields, embedded structs, and pointer internals: -**Example**: ```go -logger := ll.New("app").Enable().Handler(lh.NewTextHandler(os.Stdout)) +type secret struct { + password string // unexported! +} -featureEnabled := true -logger.If(featureEnabled).Fields("action", "update").Info("Feature used") -// Output: [app] INFO: Feature used [action=update] +s := secret{password: "hunter2"} +ll.Inspect(s) +// Output: [file.go:123] INSPECT: { +// "(password)": "hunter2" // Note the parentheses +// } +``` -logger.If(false).Info("Ignored") // No output, no processing +#### Stack() - Configurable Stack Traces +```go +ll.StackSize(8192) // Larger buffer for deep stacks +ll.Stack("Critical failure") +// Output: ERROR: Critical failure [stack=goroutine 1 [running]...] ``` -### 5. Multiple Output Formats +#### Mark() - Execution Flow Tracing +```go +func process() { + ll.Mark() // *MARK*: [file.go:123] + ll.Mark("phase1") // *phase1*: [file.go:124] + // ... work ... +} +``` -`ll` supports various output formats, including human-readable text, colorized logs, JSON, and integration with Go’s `slog` package. +### 6. 
Production-Ready Handlers -**Example**: ```go -logger := ll.New("app").Enable() +import ( + "github.com/olekukonko/ll" + "github.com/olekukonko/ll/lh" + "github.com/olekukonko/ll/l3rd/syslog" + "github.com/olekukonko/ll/l3rd/victoria" +) -// Text output -logger.Handler(lh.NewTextHandler(os.Stdout)) -logger.Info("Text log") // Output: [app] INFO: Text log +// JSON for structured logging +logger.Handler(lh.NewJSONHandler(os.Stdout)) -// JSON output -logger.Handler(lh.NewJSONHandler(os.Stdout, time.RFC3339Nano)) -logger.Info("JSON log") // Output: {"timestamp":"...","level":"INFO","message":"JSON log","namespace":"app"} +// Colorized for development +logger.Handler(lh.NewColorizedHandler(os.Stdout, + lh.WithColorTheme("dark"), + lh.WithColorIntensity(lh.IntensityVibrant), +)) -// Slog integration -slogText := slog.NewTextHandler(os.Stdout, nil) -logger.Handler(lh.NewSlogHandler(slogText)) -logger.Info("Slog log") // Output: level=INFO msg="Slog log" namespace=app class=Text +// Buffered for high throughput (100 entries or 10 seconds) +buffered := lh.NewBuffered( + lh.NewJSONHandler(os.Stdout), + lh.WithBatchSize(100), + lh.WithFlushInterval(10 * time.Second), +) +logger.Handler(buffered) +defer buffered.Close() // Ensures flush on exit + +// Syslog integration +syslogHandler, _ := syslog.New( + syslog.WithTag("myapp"), + syslog.WithFacility(syslog.LOG_LOCAL0), +) +logger.Handler(syslogHandler) + +// VictoriaLogs (cloud-native) +victoriaHandler, _ := victoria.New( + victoria.WithURL("http://victoria-logs:9428"), + victoria.WithAppName("payment-service"), + victoria.WithEnvironment("production"), + victoria.WithBatching(200, 5*time.Second), +) +logger.Handler(victoriaHandler) ``` -### 6. Debugging Utilities - -`ll` provides powerful tools for debugging, including variable inspection, binary data dumps, and stack traces. - -#### Core Debugging Methods - -1. **Dbg - Contextual Inspection** - Inspects variables with file and line context, preserving variable names and handling all Go types. - ```go - x := 42 - user := struct{ Name string }{"Alice"} - ll.Dbg(x) // Output: [file.go:123] x = 42 - ll.Dbg(user) // Output: [file.go:124] user = [Name:Alice] - ``` - -2. **Dump - Binary Inspection** - Displays a hex/ASCII view of data, optimized for strings, bytes, and complex types (with JSON fallback). - ```go - ll.Handler(lh.NewColorizedHandler(os.Stdout)) - ll.Dump("hello\nworld") // Output: Hex/ASCII dump (see example/dump.png) - ``` - -3. **Stack - Stack Inspection** - Logs a stack trace for debugging critical errors. - ```go - ll.Handler(lh.NewColorizedHandler(os.Stdout)) - ll.Stack("Critical error") // Output: [app] ERROR: Critical error [stack=...] (see example/stack.png) - ``` - -4**General Output** - Logs a output in structured way for inspection of public & private values. - ```go - ll.Handler(lh.NewColorizedHandler(os.Stdout)) - ll.Output(&SomeStructWithPrivateValues{}) - ``` - -#### Performance Tracking -Measure execution time for performance analysis. +### 7. Middleware Pipeline + +Transform, filter, or reject logs with a middleware pipeline: + ```go -// Automatic measurement -defer ll.Measure(func() { time.Sleep(time.Millisecond) })() -// Output: [app] INFO: function executed [duration=~1ms] - -// Explicit benchmarking -start := time.Now() -time.Sleep(time.Millisecond) -ll.Benchmark(start) // Output: [app] INFO: benchmark [start=... end=... duration=...] 
+// Rate limiting - 10 logs per second maximum +rateLimiter := lm.NewRateLimiter(lx.LevelInfo, 10, time.Second) +logger.Use(rateLimiter) + +// Sampling - 10% of debug logs +sampler := lm.NewSampling(lx.LevelDebug, 0.1) +logger.Use(sampler) + +// Deduplication - suppress identical logs for 2 seconds +deduper := lh.NewDedup(logger.GetHandler(), 2*time.Second) +logger.Handler(deduper) + +// Custom middleware +logger.Use(ll.Middle(func(e *lx.Entry) error { + if strings.Contains(e.Message, "password") { + return fmt.Errorf("sensitive information redacted") + } + return nil +})) ``` -**Performance Notes**: -- `Dbg` calls are disabled at compile-time when not enabled. -- `Dump` optimizes for primitive types, strings, and bytes with zero-copy paths. -- Stack traces are configurable via `StackSize`. +### 8. Global Convenience API + +Use package-level functions for quick logging without creating loggers: + +```go +import "github.com/olekukonko/ll" + +func main() { + ll.Info("Server starting") // Global logger + ll.Fields("port", 8080).Info("Listening") + + // Conditional logging at package level + ll.If(simulation).Debug("Test mode") + ll.IfErr(err).Error("Startup failed") + + // Debug utilities + ll.Dbg(config) + ll.Dump(requestBody) + ll.Inspect(complexStruct) +} +``` -## Real-World Example: Web Server +## Real-World Examples -A practical example of using `ll` in a web server with structured logging, middleware, and `slog` integration: +### Web Server with Structured Logging ```go package main @@ -257,110 +291,127 @@ package main import ( "github.com/olekukonko/ll" "github.com/olekukonko/ll/lh" - "log/slog" "net/http" - "os" "time" ) func main() { - // Initialize logger with slog handler - slogHandler := slog.NewJSONHandler(os.Stdout, nil) - logger := ll.New("server").Enable().Handler(lh.NewSlogHandler(slogHandler)) - - // HTTP child logger - httpLogger := logger.Namespace("http").Style(lx.NestedPath) - - // Middleware for request ID - httpLogger.Use(ll.FuncMiddleware(func(e *lx.Entry) error { - if e.Fields == nil { - e.Fields = make(map[string]interface{}) - } - e.Fields["request_id"] = "req-" + time.Now().String() - return nil - })) - - http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + // Root logger - enabled by default + log := ll.New("server") + + // JSON output for production + log.Handler(lh.NewJSONHandler(os.Stdout)) + + // Request logger with context + http.HandleFunc("/api/users", func(w http.ResponseWriter, r *http.Request) { + reqLog := log.Namespace("http").Fields( + "method", r.Method, + "path", r.URL.Path, + "request_id", r.Header.Get("X-Request-ID"), + ) + start := time.Now() - httpLogger.Fields("method", r.Method, "path", r.URL.Path).Info("Request received") - w.Write([]byte("Hello, world!")) - httpLogger.Fields("duration_ms", time.Since(start).Milliseconds()).Info("Request completed") + reqLog.Info("request started") + + // ... handle request ... 
+ + reqLog.Fields( + "status", 200, + "duration_ms", time.Since(start).Milliseconds(), + ).Info("request completed") }) - - logger.Info("Starting server on :8080") + + log.Info("Server listening on :8080") http.ListenAndServe(":8080", nil) } ``` -**Sample Output (JSON via slog)**: -```json -{"level":"INFO","msg":"Starting server on :8080","namespace":"server"} -{"level":"INFO","msg":"Request received","namespace":"server/http","class":"Text","method":"GET","path":"/","request_id":"req-..."} -{"level":"INFO","msg":"Request completed","namespace":"server/http","class":"Text","duration_ms":1,"request_id":"req-..."} +### Microservice with VictoriaLogs + +```go +package main + +import ( + "github.com/olekukonko/ll" + "github.com/olekukonko/ll/l3rd/victoria" +) + +func main() { + // Production setup + vlHandler, _ := victoria.New( + victoria.WithURL("http://logs.internal:9428"), + victoria.WithAppName("payment-api"), + victoria.WithEnvironment("production"), + victoria.WithVersion("1.2.3"), + victoria.WithBatching(500, 2*time.Second), + victoria.WithRetry(3), + ) + defer vlHandler.Close() + + logger := ll.New("payment"). + Handler(vlHandler). + AddContext("region", "us-east-1") + + logger.Info("Payment service initialized") + + // Conditional error handling + if err := processPayment(); err != nil { + logger.IfErr(err). + Fields("payment_id", paymentID). + Error("Payment processing failed") + } +} ``` -## Why Choose `ll`? +## Performance -- **Granular Control**: Hierarchical namespaces for precise log management. -- **Performance**: Conditional logging and optimized concatenation reduce overhead. -- **Extensibility**: Middleware pipeline for custom log processing. -- **Structured Output**: Machine-readable logs with key-value metadata. -- **Flexible Formats**: Text, JSON, colorized, and `slog` support. -- **Debugging Power**: Advanced tools like `Dbg`, `Dump`, and `Stack` for deep inspection. -- **Thread-Safe**: Safe for concurrent use in high-throughput applications. - -## Comparison with Other Libraries - -| Feature | `ll` | `log` (stdlib) | `slog` (stdlib) | `zap` | -|--------------------------|--------------------------|----------------|-----------------|-------------------| -| Hierarchical Namespaces | ✅ | ❌ | ❌ | ❌ | -| Structured Logging | ✅ (Fields, Context) | ❌ | ✅ | ✅ | -| Middleware Pipeline | ✅ | ❌ | ❌ | ✅ (limited) | -| Conditional Logging | ✅ (If, IfOne, IfAny) | ❌ | ❌ | ❌ | -| Slog Compatibility | ✅ | ❌ | ✅ (native) | ❌ | -| Debugging (Dbg, Dump) | ✅ | ❌ | ❌ | ❌ | -| Performance (disabled logs) | High (conditional) | Low | Medium | High | -| Output Formats | Text, JSON, Color, Slog | Text | Text, JSON | JSON, Text | - -## Benchmarks - -`ll` is optimized for performance, particularly for disabled logs and structured logging: -- **Disabled Logs**: 30% faster than `slog` due to efficient conditional checks. -- **Structured Logging**: 2x faster than `log` with minimal allocations. -- **Namespace Caching**: Reduces overhead for hierarchical lookups. - -See `ll_bench_test.go` for detailed benchmarks on namespace creation, cloning, and field building. - -## Testing and Stability - -The `ll` library includes a comprehensive test suite (`ll_test.go`) covering: -- Logger configuration, namespaces, and conditional logging. -- Middleware, rate limiting, and sampling. -- Handler output formats (text, JSON, slog). -- Debugging utilities (`Dbg`, `Dump`, `Stack`). - -Recent improvements: -- Fixed sampling middleware for reliable behavior at edge cases (0.0 and 1.0 rates). 
-- Enhanced documentation across `conditional.go`, `field.go`, `global.go`, `ll.go`, `lx.go`, and `ns.go`. -- Added `slog` compatibility via `lh.SlogHandler`. +`ll` is engineered for high-performance environments: -## Contributing +| Operation | Time/op | Allocations | +|-----------|---------|-------------| +| **Disabled log** | **15.9 ns** | **0 allocs** | +| Simple text log | 176 ns | 2 allocs | +| With 2 fields | 383 ns | 4 allocs | +| JSON output | 1006 ns | 13 allocs | +| Namespace lookup (cached) | 550 ns | 6 allocs | +| Deduplication | 214 ns | 2 allocs | -Contributions are welcome! To contribute: -1. Fork the repository: `github.com/olekukonko/ll`. -2. Create a feature branch: `git checkout -b feature/your-feature`. -3. Commit changes: `git commit -m "Add your feature"`. -4. Push to the branch: `git push origin feature/your-feature`. -5. Open a pull request with a clear description. +**Key optimizations**: +- Zero allocations when logs are skipped (conditional, disabled) +- Atomic operations for hot paths +- Sync.Pool for buffer reuse +- LRU cache for source file lines (Dbg) +- Sharded mutexes for deduplication -Please include tests in `ll_test.go` and update documentation as needed. Follow the Go coding style and run `go test ./...` before submitting. +## Why Choose `ll`? -## License +| Feature | `ll` | `slog` | `zap` | `logrus` | +|---------|------|--------|-------|----------| +| **Enabled by default** | ✅ | ❌ | ❌ | ❌ | +| Hierarchical namespaces | ✅ | ❌ | ❌ | ❌ | +| Conditional logging | ✅ | ❌ | ❌ | ❌ | +| Error-based conditions | ✅ | ❌ | ❌ | ❌ | +| Source-aware Dbg() | ✅ | ❌ | ❌ | ❌ | +| Private field inspection | ✅ | ❌ | ❌ | ❌ | +| Hex/ASCII Dump() | ✅ | ❌ | ❌ | ❌ | +| Middleware pipeline | ✅ | ❌ | ✅ (limited) | ❌ | +| Deduplication | ✅ | ❌ | ❌ | ❌ | +| Rate limiting | ✅ | ❌ | ❌ | ❌ | +| VictoriaLogs support | ✅ | ❌ | ❌ | ❌ | +| Syslog support | ✅ | ❌ | ❌ | ✅ | +| Zero-allocs disabled logs | ✅ | ❌ | ❌ | ❌ | +| Thread-safe | ✅ | ✅ | ✅ | ✅ | + +## Documentation + +- [GoDoc](https://pkg.go.dev/github.com/olekukonko/ll) - Full API documentation +- [Examples](_example/) - Runable example code +- [Benchmarks](tests/ll_bench_test.go) - Performance benchmarks -`ll` is licensed under the MIT License. See [LICENSE](LICENSE) for details. +## Contributing -## Resources +Contributions are welcome! Please see [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines. + +## License -- **Source Code**: [github.com/olekukonko/ll](https://github.com/olekukonko/ll) -- **Issue Tracker**: [github.com/olekukonko/ll/issues](https://github.com/olekukonko/ll/issues) -- **GoDoc**: [pkg.go.dev/github.com/olekukonko/ll](https://pkg.go.dev/github.com/olekukonko/ll) \ No newline at end of file +MIT License - see [LICENSE](LICENSE) for details. 
\ No newline at end of file diff --git a/vendor/github.com/olekukonko/ll/comb.hcl b/vendor/github.com/olekukonko/ll/comb.hcl new file mode 100644 index 00000000..791ed93b --- /dev/null +++ b/vendor/github.com/olekukonko/ll/comb.hcl @@ -0,0 +1,7 @@ +recursive = true +output_file = "all.txt" +extensions = [".go"] +exclude_dirs = ["_examples", "_lab", "_tmp", "pkg", "lab","bin","dist","assets","oppor"] +exclude_files = [""] +use_gitignore = true +detailed = true \ No newline at end of file diff --git a/vendor/github.com/olekukonko/ll/conditional.go b/vendor/github.com/olekukonko/ll/conditional.go index 0ec9e4b8..7aa4434f 100644 --- a/vendor/github.com/olekukonko/ll/conditional.go +++ b/vendor/github.com/olekukonko/ll/conditional.go @@ -12,7 +12,7 @@ type Conditional struct { // If creates a conditional logger that logs only if the condition is true. // It returns a Conditional struct that wraps the logger, enabling conditional logging methods. // This method is typically called on a Logger instance to start a conditional chain. -// Thread-safe via the underlying logger’s mutex. +// Thread-safe via the underlying logger's mutex. // Example: // // logger := New("app").Enable() @@ -22,27 +22,6 @@ func (l *Logger) If(condition bool) *Conditional { return &Conditional{logger: l, condition: condition} } -// IfOne creates a conditional logger that logs only if all conditions are true. -// It evaluates a variadic list of boolean conditions, setting the condition to true only if -// all are true (logical AND). Returns a new Conditional with the result. Thread-safe via the -// underlying logger. -// Example: -// -// logger := New("app").Enable() -// logger.IfOne(true, true).Info("Logged") // Output: [app] INFO: Logged -// logger.IfOne(true, false).Info("Ignored") // No output -func (cl *Conditional) IfOne(conditions ...bool) *Conditional { - result := true - // Check each condition; set result to false if any is false - for _, cond := range conditions { - if !cond { - result = false - break - } - } - return &Conditional{logger: cl.logger, condition: result} -} - // IfAny creates a conditional logger that logs only if at least one condition is true. // It evaluates a variadic list of boolean conditions, setting the condition to true if any // is true (logical OR). Returns a new Conditional with the result. Thread-safe via the @@ -64,79 +43,117 @@ func (cl *Conditional) IfAny(conditions ...bool) *Conditional { return &Conditional{logger: cl.logger, condition: result} } -// Fields starts a fluent chain for adding fields using variadic key-value pairs, if the condition is true. -// It returns a FieldBuilder to attach fields, skipping field processing if the condition is false -// to optimize performance. Thread-safe via the FieldBuilder’s logger. +// IfErr creates a conditional logger that logs only if the error is non-nil. +// It's designed for the common pattern of checking errors before logging. 
// Example: // -// logger := New("app").Enable() -// logger.If(true).Fields("user", "alice").Info("Logged") // Output: [app] INFO: Logged [user=alice] -// logger.If(false).Fields("user", "alice").Info("Ignored") // No output, no field processing -func (cl *Conditional) Fields(pairs ...any) *FieldBuilder { - // Skip field processing if condition is false - if !cl.condition { - return &FieldBuilder{logger: cl.logger, fields: nil} +// err := doSomething() +// logger.IfErr(err).Error("Operation failed") // Only logs if err != nil +func (l *Logger) IfErr(err error) *Conditional { + return l.If(err != nil) +} + +// IfErrAny creates a conditional logger that logs only if AT LEAST ONE error is non-nil. +// It evaluates a variadic list of errors, setting the condition to true if any +// is non-nil (logical OR). Useful when any error should trigger logging. +// Example: +// +// err1 := validate(input) +// err2 := authorize(user) +// logger.IfErrAny(err1, err2).Error("Either check failed") // Logs if EITHER error exists +func (l *Logger) IfErrAny(errs ...error) *Conditional { + for _, err := range errs { + if err != nil { + return l.If(true) // Any non-nil error makes it true + } } - // Delegate to logger’s Fields method - return cl.logger.Fields(pairs...) + return l.If(false) // False only if all errors are nil } -// Field starts a fluent chain for adding fields from a map, if the condition is true. -// It returns a FieldBuilder to attach fields from a map, skipping processing if the condition -// is false. Thread-safe via the FieldBuilder’s logger. +// IfErrOne creates a conditional logger that logs only if ALL errors are non-nil. +// It evaluates a variadic list of errors, setting the condition to true only if +// all are non-nil (logical AND). Useful when you need all errors to be present. // Example: // -// logger := New("app").Enable() -// logger.If(true).Field(map[string]interface{}{"user": "alice"}).Info("Logged") // Output: [app] INFO: Logged [user=alice] -// logger.If(false).Field(map[string]interface{}{"user": "alice"}).Info("Ignored") // No output -func (cl *Conditional) Field(fields map[string]interface{}) *FieldBuilder { - // Skip field processing if condition is false - if !cl.condition { - return &FieldBuilder{logger: cl.logger, fields: nil} +// err1 := validate(input) +// err2 := authorize(user) +// logger.IfErrOne(err1, err2).Error("Both checks failed") // Logs only if BOTH errors exist +func (l *Logger) IfErrOne(errs ...error) *Conditional { + for _, err := range errs { + if err == nil { + return l.If(false) // Any nil error makes it false + } } - // Delegate to logger’s Field method - return cl.logger.Field(fields) + return l.If(len(errs) > 0) // True only if we have at least one error and all are non-nil } -// Info logs a message at Info level with variadic arguments if the condition is true. -// It concatenates the arguments with spaces and delegates to the logger’s Info method if the -// condition is true. Skips processing if false, optimizing performance. Thread-safe via the -// logger’s log method. +// IfErr creates a conditional logger that logs only if the error is non-nil. +// Returns a new Conditional with the error check result. 
// Example: // -// logger := New("app").Enable() -// logger.If(true).Info("Action", "started") // Output: [app] INFO: Action started -// logger.If(false).Info("Action", "ignored") // No output -func (cl *Conditional) Info(args ...any) { - // Skip logging if condition is false - if !cl.condition { - return +// err := doSomething() +// logger.If(true).IfErr(err).Error("Failed") // Only logs if condition true AND err != nil +func (cl *Conditional) IfErr(err error) *Conditional { + return cl.IfOne(err != nil) +} + +// IfErrAny creates a conditional logger that logs only if AT LEAST ONE error is non-nil. +// Returns a new Conditional with the logical OR result of error checks. +// Example: +// +// err1 := validate(input) +// err2 := authorize(user) +// logger.If(true).IfErrAny(err1, err2).Error("Either failed") // Logs if condition true AND either error exists +func (cl *Conditional) IfErrAny(errs ...error) *Conditional { + for _, err := range errs { + if err != nil { + return &Conditional{logger: cl.logger, condition: cl.condition && true} + } } - // Delegate to logger’s Info method - cl.logger.Info(args...) + return &Conditional{logger: cl.logger, condition: false} } -// Infof logs a message at Info level with a format string if the condition is true. -// It formats the message using the provided format string and arguments, delegating to the -// logger’s Infof method if the condition is true. Skips processing if false, optimizing performance. -// Thread-safe via the logger’s log method. +// IfErrOne creates a conditional logger that logs only if ALL errors are non-nil. +// Returns a new Conditional with the logical AND result of error checks. +// Example: +// +// err1 := validate(input) +// err2 := authorize(user) +// logger.If(true).IfErrOne(err1, err2).Error("Both failed") // Logs if condition true AND both errors exist +func (cl *Conditional) IfErrOne(errs ...error) *Conditional { + for _, err := range errs { + if err == nil { + return &Conditional{logger: cl.logger, condition: false} + } + } + return &Conditional{logger: cl.logger, condition: cl.condition && len(errs) > 0} +} + +// IfOne creates a conditional logger that logs only if all conditions are true. +// It evaluates a variadic list of boolean conditions, setting the condition to true only if +// all are true (logical AND). Returns a new Conditional with the result. Thread-safe via the +// underlying logger. // Example: // // logger := New("app").Enable() -// logger.If(true).Infof("Action %s", "started") // Output: [app] INFO: Action started -// logger.If(false).Infof("Action %s", "ignored") // No output -func (cl *Conditional) Infof(format string, args ...any) { - // Skip logging if condition is false - if !cl.condition { - return +// logger.IfOne(true, true).Info("Logged") // Output: [app] INFO: Logged +// logger.IfOne(true, false).Info("Ignored") // No output +func (cl *Conditional) IfOne(conditions ...bool) *Conditional { + result := true + // Check each condition; set result to false if any is false + for _, cond := range conditions { + if !cond { + result = false + break + } } - // Delegate to logger’s Infof method - cl.logger.Infof(format, args...) + return &Conditional{logger: cl.logger, condition: result} } // Debug logs a message at Debug level with variadic arguments if the condition is true. -// It concatenates the arguments with spaces and delegates to the logger’s Debug method if the -// condition is true. Skips processing if false. Thread-safe via the logger’s log method. 
+// It concatenates the arguments with spaces and delegates to the logger's Debug method if the +// condition is true. Skips processing if false, optimizing performance. Thread-safe via the +// logger's log method. // Example: // // logger := New("app").Enable().Level(lx.LevelDebug) @@ -147,13 +164,13 @@ func (cl *Conditional) Debug(args ...any) { if !cl.condition { return } - // Delegate to logger’s Debug method + // Delegate to logger's Debug method cl.logger.Debug(args...) } // Debugf logs a message at Debug level with a format string if the condition is true. -// It formats the message and delegates to the logger’s Debugf method if the condition is true. -// Skips processing if false. Thread-safe via the logger’s log method. +// It formats the message and delegates to the logger's Debugf method if the condition is true. +// Skips processing if false. Thread-safe via the logger's log method. // Example: // // logger := New("app").Enable().Level(lx.LevelDebug) @@ -164,150 +181,152 @@ func (cl *Conditional) Debugf(format string, args ...any) { if !cl.condition { return } - // Delegate to logger’s Debugf method + // Delegate to logger's Debugf method cl.logger.Debugf(format, args...) } -// Warn logs a message at Warn level with variadic arguments if the condition is true. -// It concatenates the arguments with spaces and delegates to the logger’s Warn method if the -// condition is true. Skips processing if false. Thread-safe via the logger’s log method. +// Error logs a message at Error level with variadic arguments if the condition is true. +// It concatenates the arguments with spaces and delegates to the logger's Error method if the +// condition is true. Skips processing if false. Thread-safe via the logger's log method. // Example: // // logger := New("app").Enable() -// logger.If(true).Warn("Warning", "issued") // Output: [app] WARN: Warning issued -// logger.If(false).Warn("Warning", "ignored") // No output -func (cl *Conditional) Warn(args ...any) { +// logger.If(true).Error("Error", "occurred") // Output: [app] ERROR: Error occurred +// logger.If(false).Error("Error", "ignored") // No output +func (cl *Conditional) Error(args ...any) { // Skip logging if condition is false if !cl.condition { return } - // Delegate to logger’s Warn method - cl.logger.Warn(args...) + // Delegate to logger's Error method + cl.logger.Error(args...) } -// Warnf logs a message at Warn level with a format string if the condition is true. -// It formats the message and delegates to the logger’s Warnf method if the condition is true. -// Skips processing if false. Thread-safe via the logger’s log method. +// Errorf logs a message at Error level with a format string if the condition is true. +// It formats the message and delegates to the logger's Errorf method if the condition is true. +// Skips processing if false. Thread-safe via the logger's log method. // Example: // // logger := New("app").Enable() -// logger.If(true).Warnf("Warning %s", "issued") // Output: [app] WARN: Warning issued -// logger.If(false).Warnf("Warning %s", "ignored") // No output -func (cl *Conditional) Warnf(format string, args ...any) { +// logger.If(true).Errorf("Error %s", "occurred") // Output: [app] ERROR: Error occurred +// logger.If(false).Errorf("Error %s", "ignored") // No output +func (cl *Conditional) Errorf(format string, args ...any) { // Skip logging if condition is false if !cl.condition { return } - // Delegate to logger’s Warnf method - cl.logger.Warnf(format, args...) 
+ // Delegate to logger's Errorf method + cl.logger.Errorf(format, args...) } -// Error logs a message at Error level with variadic arguments if the condition is true. -// It concatenates the arguments with spaces and delegates to the logger’s Error method if the -// condition is true. Skips processing if false. Thread-safe via the logger’s log method. +// Fatal logs a message at Error level with a stack trace and variadic arguments if the condition is true, +// then exits. It concatenates the arguments with spaces and delegates to the logger's Fatal method +// if the condition is true, terminating the program with exit code 1. Skips processing if false. +// Thread-safe via the logger's log method. // Example: // // logger := New("app").Enable() -// logger.If(true).Error("Error", "occurred") // Output: [app] ERROR: Error occurred -// logger.If(false).Error("Error", "ignored") // No output -func (cl *Conditional) Error(args ...any) { +// logger.If(true).Fatal("Fatal", "error") // Output: [app] ERROR: Fatal error [stack=...], then exits +// logger.If(false).Fatal("Fatal", "ignored") // No output, no exit +func (cl *Conditional) Fatal(args ...any) { // Skip logging if condition is false if !cl.condition { return } - // Delegate to logger’s Error method - cl.logger.Error(args...) + // Delegate to logger's Fatal method + cl.logger.Fatal(args...) } -// Errorf logs a message at Error level with a format string if the condition is true. -// It formats the message and delegates to the logger’s Errorf method if the condition is true. -// Skips processing if false. Thread-safe via the logger’s log method. +// Fatalf logs a formatted message at Error level with a stack trace if the condition is true, then exits. +// It formats the message and delegates to the logger's Fatalf method if the condition is true, +// terminating the program with exit code 1. Skips processing if false. Thread-safe via the logger's log method. // Example: // // logger := New("app").Enable() -// logger.If(true).Errorf("Error %s", "occurred") // Output: [app] ERROR: Error occurred -// logger.If(false).Errorf("Error %s", "ignored") // No output -func (cl *Conditional) Errorf(format string, args ...any) { +// logger.If(true).Fatalf("Fatal %s", "error") // Output: [app] ERROR: Fatal error [stack=...], then exits +// logger.If(false).Fatalf("Fatal %s", "ignored") // No output, no exit +func (cl *Conditional) Fatalf(format string, args ...any) { // Skip logging if condition is false if !cl.condition { return } - // Delegate to logger’s Errorf method - cl.logger.Errorf(format, args...) + // Delegate to logger's Fatalf method + cl.logger.Fatalf(format, args...) } -// Stack logs a message at Error level with a stack trace and variadic arguments if the condition is true. -// It concatenates the arguments with spaces and delegates to the logger’s Stack method if the -// condition is true. Skips processing if false. Thread-safe via the logger’s log method. +// Field starts a fluent chain for adding fields from a map, if the condition is true. +// It returns a FieldBuilder to attach fields from a map, skipping processing if the condition +// is false. Thread-safe via the FieldBuilder's logger. // Example: // // logger := New("app").Enable() -// logger.If(true).Stack("Critical", "error") // Output: [app] ERROR: Critical error [stack=...] 
-// logger.If(false).Stack("Critical", "ignored") // No output -func (cl *Conditional) Stack(args ...any) { - // Skip logging if condition is false +// logger.If(true).Field(map[string]interface{}{"user": "alice"}).Info("Logged") // Output: [app] INFO: Logged [user=alice] +// logger.If(false).Field(map[string]interface{}{"user": "alice"}).Info("Ignored") // No output +func (cl *Conditional) Field(fields map[string]interface{}) *FieldBuilder { + // Skip field processing if condition is false if !cl.condition { - return + return &FieldBuilder{logger: cl.logger, fields: nil} } - // Delegate to logger’s Stack method - cl.logger.Stack(args...) + // Delegate to logger's Field method + return cl.logger.Field(fields) } -// Stackf logs a message at Error level with a stack trace and a format string if the condition is true. -// It formats the message and delegates to the logger’s Stackf method if the condition is true. -// Skips processing if false. Thread-safe via the logger’s log method. +// Fields starts a fluent chain for adding fields using variadic key-value pairs, if the condition is true. +// It returns a FieldBuilder to attach fields, skipping field processing if the condition is false +// to optimize performance. Thread-safe via the FieldBuilder's logger. // Example: // // logger := New("app").Enable() -// logger.If(true).Stackf("Critical %s", "error") // Output: [app] ERROR: Critical error [stack=...] -// logger.If(false).Stackf("Critical %s", "ignored") // No output -func (cl *Conditional) Stackf(format string, args ...any) { - // Skip logging if condition is false +// logger.If(true).Fields("user", "alice").Info("Logged") // Output: [app] INFO: Logged [user=alice] +// logger.If(false).Fields("user", "alice").Info("Ignored") // No output, no field processing +func (cl *Conditional) Fields(pairs ...any) *FieldBuilder { + // Skip field processing if condition is false if !cl.condition { - return + return &FieldBuilder{logger: cl.logger, fields: nil} } - // Delegate to logger’s Stackf method - cl.logger.Stackf(format, args...) + // Delegate to logger's Fields method + return cl.logger.Fields(pairs...) } -// Fatal logs a message at Error level with a stack trace and variadic arguments if the condition is true, -// then exits. It concatenates the arguments with spaces and delegates to the logger’s Fatal method -// if the condition is true, terminating the program with exit code 1. Skips processing if false. -// Thread-safe via the logger’s log method. +// Info logs a message at Info level with variadic arguments if the condition is true. +// It concatenates the arguments with spaces and delegates to the logger's Info method if the +// condition is true. Skips processing if false, optimizing performance. Thread-safe via the +// logger's log method. // Example: // // logger := New("app").Enable() -// logger.If(true).Fatal("Fatal", "error") // Output: [app] ERROR: Fatal error [stack=...], then exits -// logger.If(false).Fatal("Fatal", "ignored") // No output, no exit -func (cl *Conditional) Fatal(args ...any) { +// logger.If(true).Info("Action", "started") // Output: [app] INFO: Action started +// logger.If(false).Info("Action", "ignored") // No output +func (cl *Conditional) Info(args ...any) { // Skip logging if condition is false if !cl.condition { return } - // Delegate to logger’s Fatal method - cl.logger.Fatal(args...) + // Delegate to logger's Info method + cl.logger.Info(args...) 
} -// Fatalf logs a formatted message at Error level with a stack trace if the condition is true, then exits. -// It formats the message and delegates to the logger’s Fatalf method if the condition is true, -// terminating the program with exit code 1. Skips processing if false. Thread-safe via the logger’s log method. +// Infof logs a message at Info level with a format string if the condition is true. +// It formats the message using the provided format string and arguments, delegating to the +// logger's Infof method if the condition is true. Skips processing if false, optimizing performance. +// Thread-safe via the logger's log method. // Example: // // logger := New("app").Enable() -// logger.If(true).Fatalf("Fatal %s", "error") // Output: [app] ERROR: Fatal error [stack=...], then exits -// logger.If(false).Fatalf("Fatal %s", "ignored") // No output, no exit -func (cl *Conditional) Fatalf(format string, args ...any) { +// logger.If(true).Infof("Action %s", "started") // Output: [app] INFO: Action started +// logger.If(false).Infof("Action %s", "ignored") // No output +func (cl *Conditional) Infof(format string, args ...any) { // Skip logging if condition is false if !cl.condition { return } - // Delegate to logger’s Fatalf method - cl.logger.Fatalf(format, args...) + // Delegate to logger's Infof method + cl.logger.Infof(format, args...) } // Panic logs a message at Error level with a stack trace and variadic arguments if the condition is true, -// then panics. It concatenates the arguments with spaces and delegates to the logger’s Panic method -// if the condition is true, triggering a panic. Skips processing if false. Thread-safe via the logger’s log method. +// then panics. It concatenates the arguments with spaces and delegates to the logger's Panic method +// if the condition is true, triggering a panic. Skips processing if false. Thread-safe via the logger's log method. // Example: // // logger := New("app").Enable() @@ -318,13 +337,13 @@ func (cl *Conditional) Panic(args ...any) { if !cl.condition { return } - // Delegate to logger’s Panic method + // Delegate to logger's Panic method cl.logger.Panic(args...) } // Panicf logs a formatted message at Error level with a stack trace if the condition is true, then panics. -// It formats the message and delegates to the logger’s Panicf method if the condition is true, -// triggering a panic. Skips processing if false. Thread-safe via the logger’s log method. +// It formats the message and delegates to the logger's Panicf method if the condition is true, +// triggering a panic. Skips processing if false. Thread-safe via the logger's log method. // Example: // // logger := New("app").Enable() @@ -335,6 +354,74 @@ func (cl *Conditional) Panicf(format string, args ...any) { if !cl.condition { return } - // Delegate to logger’s Panicf method + // Delegate to logger's Panicf method cl.logger.Panicf(format, args...) } + +// Stack logs a message at Error level with a stack trace and variadic arguments if the condition is true. +// It concatenates the arguments with spaces and delegates to the logger's Stack method if the +// condition is true. Skips processing if false. Thread-safe via the logger's log method. +// Example: +// +// logger := New("app").Enable() +// logger.If(true).Stack("Critical", "error") // Output: [app] ERROR: Critical error [stack=...] 
+// logger.If(false).Stack("Critical", "ignored") // No output +func (cl *Conditional) Stack(args ...any) { + // Skip logging if condition is false + if !cl.condition { + return + } + // Delegate to logger's Stack method + cl.logger.Stack(args...) +} + +// Stackf logs a message at Error level with a stack trace and a format string if the condition is true. +// It formats the message and delegates to the logger's Stackf method if the condition is true. +// Skips processing if false. Thread-safe via the logger's log method. +// Example: +// +// logger := New("app").Enable() +// logger.If(true).Stackf("Critical %s", "error") // Output: [app] ERROR: Critical error [stack=...] +// logger.If(false).Stackf("Critical %s", "ignored") // No output +func (cl *Conditional) Stackf(format string, args ...any) { + // Skip logging if condition is false + if !cl.condition { + return + } + // Delegate to logger's Stackf method + cl.logger.Stackf(format, args...) +} + +// Warn logs a message at Warn level with variadic arguments if the condition is true. +// It concatenates the arguments with spaces and delegates to the logger's Warn method if the +// condition is true. Skips processing if false. Thread-safe via the logger's log method. +// Example: +// +// logger := New("app").Enable() +// logger.If(true).Warn("Warning", "issued") // Output: [app] WARN: Warning issued +// logger.If(false).Warn("Warning", "ignored") // No output +func (cl *Conditional) Warn(args ...any) { + // Skip logging if condition is false + if !cl.condition { + return + } + // Delegate to logger's Warn method + cl.logger.Warn(args...) +} + +// Warnf logs a message at Warn level with a format string if the condition is true. +// It formats the message and delegates to the logger's Warnf method if the condition is true. +// Skips processing if false. Thread-safe via the logger's log method. +// Example: +// +// logger := New("app").Enable() +// logger.If(true).Warnf("Warning %s", "issued") // Output: [app] WARN: Warning issued +// logger.If(false).Warnf("Warning %s", "ignored") // No output +func (cl *Conditional) Warnf(format string, args ...any) { + // Skip logging if condition is false + if !cl.condition { + return + } + // Delegate to logger's Warnf method + cl.logger.Warnf(format, args...) +} diff --git a/vendor/github.com/olekukonko/ll/dbg.go b/vendor/github.com/olekukonko/ll/dbg.go new file mode 100644 index 00000000..d3d8c42b --- /dev/null +++ b/vendor/github.com/olekukonko/ll/dbg.go @@ -0,0 +1,282 @@ +package ll + +import ( + "container/list" + "fmt" + "os" + "runtime" + "strings" + "sync" + + "github.com/olekukonko/ll/lx" +) + +// ----------------------------------------------------------------------------- +// Global Cache Instance +// ----------------------------------------------------------------------------- + +// sourceCache caches up to 128 source files using LRU eviction. +var sourceCache = newFileLRU(128) + +// ----------------------------------------------------------------------------- +// File-Level LRU Cache +// ----------------------------------------------------------------------------- + +type fileLRU struct { + capacity int + mu sync.Mutex + list *list.List + items map[string]*list.Element +} + +type fileItem struct { + key string + lines []string +} + +func newFileLRU(capacity int) *fileLRU { + if capacity <= 0 { + capacity = 1 + } + return &fileLRU{ + capacity: capacity, + list: list.New(), + items: make(map[string]*list.Element, capacity), + } +} + +// getLine retrieves a specific 1-indexed line from a file. 
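+// Reads release the cache lock during file I/O and re-check the cache afterwards,
+// so concurrent callers are not blocked on disk; files that cannot be read are
+// cached with nil lines (negative caching) and are not re-read on later lookups.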
+func (c *fileLRU) getLine(file string, line int) (string, bool) { + c.mu.Lock() + defer c.mu.Unlock() + + // 1. Cache Hit + if elem, ok := c.items[file]; ok { + c.list.MoveToFront(elem) + item := elem.Value.(*fileItem) + if item.lines == nil { + return "", false + } + return nthLine(item.lines, line) + } + + // 2. Cache Miss - Read File + // Release lock during I/O to avoid blocking other loggers + c.mu.Unlock() + data, err := os.ReadFile(file) + c.mu.Lock() + + // 3. Double-check (another goroutine might have loaded it while unlocked) + if elem, ok := c.items[file]; ok { + c.list.MoveToFront(elem) + item := elem.Value.(*fileItem) + if item.lines == nil { + return "", false + } + return nthLine(item.lines, line) + } + + var lines []string + if err == nil { + lines = strings.Split(string(data), "\n") + } + + // 4. Store (Positive or Negative Cache) + item := &fileItem{ + key: file, + lines: lines, + } + elem := c.list.PushFront(item) + c.items[file] = elem + + // 5. Evict if needed + if c.list.Len() > c.capacity { + old := c.list.Back() + if old != nil { + c.list.Remove(old) + delete(c.items, old.Value.(*fileItem).key) + } + } + + if lines == nil { + return "", false + } + return nthLine(lines, line) +} + +// nthLine returns the 1-indexed line from slice. +func nthLine(lines []string, n int) (string, bool) { + if n <= 0 || n > len(lines) { + return "", false + } + return strings.TrimSuffix(lines[n-1], "\r"), true +} + +// ----------------------------------------------------------------------------- +// Logger Debug Implementation +// ----------------------------------------------------------------------------- + +// Dbg logs debug information including source file, line number, +// and the best-effort extracted expression. +// +// Example: +// +// x := 42 +// logger.Dbg("val", x) +// Output: [file.go:123] "val" = "val", x = 42 +func (l *Logger) Dbg(values ...interface{}) { + if !l.shouldLog(lx.LevelInfo) { + return + } + l.dbg(2, values...) +} + +func (l *Logger) dbg(skip int, values ...interface{}) { + file, line, ok := callerFrame(skip) + if !ok { + // Fallback if we can't get frame + var sb strings.Builder + sb.WriteString("[?:?] ") + for i, v := range values { + if i > 0 { + sb.WriteString(", ") + } + sb.WriteString(fmt.Sprintf("%+v", v)) + } + l.log(lx.LevelInfo, lx.ClassText, sb.String(), nil, false) + return + } + + shortFile := file + if idx := strings.LastIndex(file, "/"); idx >= 0 { + shortFile = file[idx+1:] + } + + srcLine, hit := sourceCache.getLine(file, line) + + var expr string + if hit && srcLine != "" { + // Attempt to extract the text inside Dbg(...) + if a := strings.Index(srcLine, "Dbg("); a >= 0 { + rest := srcLine[a+len("Dbg("):] + if b := strings.LastIndex(rest, ")"); b >= 0 { + expr = strings.TrimSpace(rest[:b]) + } + } else { + // Fallback: extract first (...) group if Dbg isn't explicit prefix + a := strings.Index(srcLine, "(") + b := strings.LastIndex(srcLine, ")") + if a >= 0 && b > a { + expr = strings.TrimSpace(srcLine[a+1 : b]) + } + } + } + + // Format output + var outBuilder strings.Builder + outBuilder.WriteString(fmt.Sprintf("[%s:%d] ", shortFile, line)) + + // Attempt to split expressions to map 1:1 with values + var parts []string + if expr != "" { + parts = splitExpressions(expr) + } + + // If the number of extracted expressions matches the number of values, + // print them as "expr = value". Otherwise, fall back to "expr = val1, val2". 
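+	// For example, a call written as logger.Dbg(user, len(items)) yields
+	// parts ["user", "len(items)"] and prints `user = alice, len(items) = 3`
+	// (the concrete values shown here are illustrative).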
+ if len(parts) == len(values) { + for i, v := range values { + if i > 0 { + outBuilder.WriteString(", ") + } + outBuilder.WriteString(fmt.Sprintf("%s = %+v", parts[i], v)) + } + } else { + if expr != "" { + outBuilder.WriteString(expr) + outBuilder.WriteString(" = ") + } + for i, v := range values { + if i > 0 { + outBuilder.WriteString(", ") + } + outBuilder.WriteString(fmt.Sprintf("%+v", v)) + } + } + + l.log(lx.LevelInfo, lx.ClassDbg, outBuilder.String(), nil, false) +} + +// splitExpressions splits a comma-separated string of expressions, +// respecting nested parentheses, brackets, braces, and quotes. +// Example: "a, fn(b, c), d" -> ["a", "fn(b, c)", "d"] +func splitExpressions(s string) []string { + var parts []string + var current strings.Builder + depth := 0 // Tracks nested (), [], {} + inQuote := false // Tracks string literals + var quoteChar rune + + for _, r := range s { + switch { + case inQuote: + current.WriteRune(r) + if r == quoteChar { + // We rely on the fact that valid Go source won't have unescaped quotes easily + // accessible here without complex parsing, but for simple Dbg calls this suffices. + // A robust parser handles `\"`, but simple state toggling covers 99% of debug cases. + inQuote = false + } + case r == '"' || r == '\'': + inQuote = true + quoteChar = r + current.WriteRune(r) + case r == '(' || r == '{' || r == '[': + depth++ + current.WriteRune(r) + case r == ')' || r == '}' || r == ']': + depth-- + current.WriteRune(r) + case r == ',' && depth == 0: + // Split point + parts = append(parts, strings.TrimSpace(current.String())) + current.Reset() + default: + current.WriteRune(r) + } + } + if current.Len() > 0 { + parts = append(parts, strings.TrimSpace(current.String())) + } + return parts +} + +// ----------------------------------------------------------------------------- +// Caller Resolution +// ----------------------------------------------------------------------------- + +// callerFrame walks stack frames until it finds the first frame +// outside the ll package. +func callerFrame(skip int) (file string, line int, ok bool) { + // +2 to skip callerFrame + dbg itself. + pcs := make([]uintptr, 32) + n := runtime.Callers(skip+2, pcs) + if n == 0 { + return "", 0, false + } + + frames := runtime.CallersFrames(pcs[:n]) + for { + fr, more := frames.Next() + // fr.Function looks like: "github.com/you/mod/ll.(*Logger).Dbg" + // We want the first frame that is NOT inside package ll. + if fr.Function == "" || !strings.Contains(fr.Function, "/ll.") && !strings.Contains(fr.Function, ".ll.") { + return fr.File, fr.Line, true + } + + if !more { + // Fallback: return the last frame we saw + return fr.File, fr.Line, fr.File != "" + } + } +} diff --git a/vendor/github.com/olekukonko/ll/field.go b/vendor/github.com/olekukonko/ll/field.go index 4162162f..ed18b529 100644 --- a/vendor/github.com/olekukonko/ll/field.go +++ b/vendor/github.com/olekukonko/ll/field.go @@ -1,11 +1,13 @@ +// field.go package ll import ( "fmt" - "github.com/olekukonko/cat" - "github.com/olekukonko/ll/lx" "os" "strings" + + "github.com/olekukonko/cat" + "github.com/olekukonko/ll/lx" ) // FieldBuilder enables fluent addition of fields before logging. @@ -13,12 +15,12 @@ import ( // supporting structured logging with metadata. The builder allows chaining to add fields // and log messages at various levels (Info, Debug, Warn, Error, etc.) in a single expression. 
type FieldBuilder struct { - logger *Logger // Associated logger instance for logging operations - fields map[string]interface{} // Fields to include in the log entry as key-value pairs + logger *Logger // Associated logger instance for logging operations + fields lx.Fields // Fields to include in the log entry as ordered key-value pairs } -// Logger creates a new logger with the builder’s fields embedded in its context. -// It clones the parent logger and copies the builder’s fields into the new logger’s context, +// Logger creates a new logger with the builder's fields embedded in its context. +// It clones the parent logger and copies the builder's fields into the new logger's context, // enabling persistent field inclusion in subsequent logs. This method supports fluent chaining // after Fields or Field calls. // Example: @@ -29,17 +31,14 @@ type FieldBuilder struct { func (fb *FieldBuilder) Logger() *Logger { // Clone the parent logger to preserve its configuration newLogger := fb.logger.Clone() - // Initialize a new context map to avoid modifying the parent’s context - newLogger.context = make(map[string]interface{}) - // Copy builder’s fields into the new logger’s context - for k, v := range fb.fields { - newLogger.context[k] = v - } + // Copy builder's fields into the new logger's context + newLogger.context = make(lx.Fields, len(fb.fields)) + copy(newLogger.context, fb.fields) return newLogger } -// Info logs a message at Info level with the builder’s fields. -// It concatenates the arguments with spaces and delegates to the logger’s log method, +// Info logs a message at Info level with the builder's fields. +// It concatenates the arguments with spaces and delegates to the logger's log method, // returning early if fields are nil. This method is used for informational messages. // Example: // @@ -50,13 +49,13 @@ func (fb *FieldBuilder) Info(args ...any) { if fb.fields == nil { return } - // Log at Info level with the builder’s fields, no stack trace + // Log at Info level with the builder's fields, no stack trace fb.logger.log(lx.LevelInfo, lx.ClassText, cat.Space(args...), fb.fields, false) } -// Infof logs a message at Info level with the builder’s fields. +// Infof logs a message at Info level with the builder's fields. // It formats the message using the provided format string and arguments, then delegates -// to the logger’s internal log method. If fields are nil, it returns early to avoid logging. +// to the logger's internal log method. If fields are nil, it returns early to avoid logging. // This method is part of the fluent API, typically called after adding fields. // Example: // @@ -69,12 +68,12 @@ func (fb *FieldBuilder) Infof(format string, args ...any) { } // Format the message using the provided arguments msg := fmt.Sprintf(format, args...) - // Log at Info level with the builder’s fields, no stack trace + // Log at Info level with the builder's fields, no stack trace fb.logger.log(lx.LevelInfo, lx.ClassText, msg, fb.fields, false) } -// Debug logs a message at Debug level with the builder’s fields. -// It concatenates the arguments with spaces and delegates to the logger’s log method, +// Debug logs a message at Debug level with the builder's fields. +// It concatenates the arguments with spaces and delegates to the logger's log method, // returning early if fields are nil. This method is used for debugging information. 
// Example: // @@ -85,12 +84,12 @@ func (fb *FieldBuilder) Debug(args ...any) { if fb.fields == nil { return } - // Log at Debug level with the builder’s fields, no stack trace + // Log at Debug level with the builder's fields, no stack trace fb.logger.log(lx.LevelDebug, lx.ClassText, cat.Space(args...), fb.fields, false) } -// Debugf logs a message at Debug level with the builder’s fields. -// It formats the message and delegates to the logger’s log method, returning early if +// Debugf logs a message at Debug level with the builder's fields. +// It formats the message and delegates to the logger's log method, returning early if // fields are nil. This method is used for debugging information that may be disabled in // production environments. // Example: @@ -104,12 +103,12 @@ func (fb *FieldBuilder) Debugf(format string, args ...any) { } // Format the message msg := fmt.Sprintf(format, args...) - // Log at Debug level with the builder’s fields, no stack trace + // Log at Debug level with the builder's fields, no stack trace fb.logger.log(lx.LevelDebug, lx.ClassText, msg, fb.fields, false) } -// Warn logs a message at Warn level with the builder’s fields. -// It concatenates the arguments with spaces and delegates to the logger’s log method, +// Warn logs a message at Warn level with the builder's fields. +// It concatenates the arguments with spaces and delegates to the logger's log method, // returning early if fields are nil. This method is used for warning conditions. // Example: // @@ -120,12 +119,12 @@ func (fb *FieldBuilder) Warn(args ...any) { if fb.fields == nil { return } - // Log at Warn level with the builder’s fields, no stack trace + // Log at Warn level with the builder's fields, no stack trace fb.logger.log(lx.LevelWarn, lx.ClassText, cat.Space(args...), fb.fields, false) } -// Warnf logs a message at Warn level with the builder’s fields. -// It formats the message and delegates to the logger’s log method, returning early if +// Warnf logs a message at Warn level with the builder's fields. +// It formats the message and delegates to the logger's log method, returning early if // fields are nil. This method is used for warning conditions that do not halt execution. // Example: // @@ -138,12 +137,12 @@ func (fb *FieldBuilder) Warnf(format string, args ...any) { } // Format the message msg := fmt.Sprintf(format, args...) - // Log at Warn level with the builder’s fields, no stack trace + // Log at Warn level with the builder's fields, no stack trace fb.logger.log(lx.LevelWarn, lx.ClassText, msg, fb.fields, false) } -// Error logs a message at Error level with the builder’s fields. -// It concatenates the arguments with spaces and delegates to the logger’s log method, +// Error logs a message at Error level with the builder's fields. +// It concatenates the arguments with spaces and delegates to the logger's log method, // returning early if fields are nil. This method is used for error conditions. // Example: // @@ -154,12 +153,12 @@ func (fb *FieldBuilder) Error(args ...any) { if fb.fields == nil { return } - // Log at Error level with the builder’s fields, no stack trace + // Log at Error level with the builder's fields, no stack trace fb.logger.log(lx.LevelError, lx.ClassText, cat.Space(args...), fb.fields, false) } -// Errorf logs a message at Error level with the builder’s fields. -// It formats the message and delegates to the logger’s log method, returning early if +// Errorf logs a message at Error level with the builder's fields. 
+// It formats the message and delegates to the logger's log method, returning early if // fields are nil. This method is used for error conditions that may require attention. // Example: // @@ -172,12 +171,12 @@ func (fb *FieldBuilder) Errorf(format string, args ...any) { } // Format the message msg := fmt.Sprintf(format, args...) - // Log at Error level with the builder’s fields, no stack trace + // Log at Error level with the builder's fields, no stack trace fb.logger.log(lx.LevelError, lx.ClassText, msg, fb.fields, false) } -// Stack logs a message at Error level with a stack trace and the builder’s fields. -// It concatenates the arguments with spaces and delegates to the logger’s log method, +// Stack logs a message at Error level with a stack trace and the builder's fields. +// It concatenates the arguments with spaces and delegates to the logger's log method, // returning early if fields are nil. This method is useful for debugging critical errors. // Example: // @@ -188,12 +187,12 @@ func (fb *FieldBuilder) Stack(args ...any) { if fb.fields == nil { return } - // Log at Error level with the builder’s fields and a stack trace + // Log at Error level with the builder's fields and a stack trace fb.logger.log(lx.LevelError, lx.ClassText, cat.Space(args...), fb.fields, true) } -// Stackf logs a message at Error level with a stack trace and the builder’s fields. -// It formats the message and delegates to the logger’s log method, returning early if +// Stackf logs a message at Error level with a stack trace and the builder's fields. +// It formats the message and delegates to the logger's log method, returning early if // fields are nil. This method is useful for debugging critical errors. // Example: // @@ -206,11 +205,11 @@ func (fb *FieldBuilder) Stackf(format string, args ...any) { } // Format the message msg := fmt.Sprintf(format, args...) - // Log at Error level with the builder’s fields and a stack trace + // Log at Error level with the builder's fields and a stack trace fb.logger.log(lx.LevelError, lx.ClassText, msg, fb.fields, true) } -// Fatal logs a message at Error level with a stack trace and the builder’s fields, then exits. +// Fatal logs a message at Error level with a stack trace and the builder's fields, then exits. // It constructs the message from variadic arguments, logs it with a stack trace, and terminates // the program with exit code 1. Returns early if fields are nil. This method is used for // unrecoverable errors. @@ -231,13 +230,16 @@ func (fb *FieldBuilder) Fatal(args ...any) { } builder.WriteString(fmt.Sprint(arg)) } - // Log at Error level with the builder’s fields and a stack trace - fb.logger.log(lx.LevelError, lx.ClassText, builder.String(), fb.fields, true) + // Log at Error level with the builder's fields and a stack trace + fb.logger.log(lx.LevelFatal, lx.ClassText, builder.String(), fb.fields, fb.logger.fatalStack) + // Exit the program with status code 1 - os.Exit(1) + if fb.logger.fatalExits { + os.Exit(1) + } } -// Fatalf logs a formatted message at Error level with a stack trace and the builder’s fields, +// Fatalf logs a formatted message at Error level with a stack trace and the builder's fields, // then exits. It delegates to Fatal and returns early if fields are nil. This method is used // for unrecoverable errors. 
// Example: @@ -253,7 +255,7 @@ func (fb *FieldBuilder) Fatalf(format string, args ...any) { fb.Fatal(fmt.Sprintf(format, args...)) } -// Panic logs a message at Error level with a stack trace and the builder’s fields, then panics. +// Panic logs a message at Error level with a stack trace and the builder's fields, then panics. // It constructs the message from variadic arguments, logs it with a stack trace, and triggers // a panic with the message. Returns early if fields are nil. This method is used for critical // errors that require immediate program termination with a panic. @@ -275,13 +277,13 @@ func (fb *FieldBuilder) Panic(args ...any) { builder.WriteString(fmt.Sprint(arg)) } msg := builder.String() - // Log at Error level with the builder’s fields and a stack trace + // Log at Error level with the builder's fields and a stack trace fb.logger.log(lx.LevelError, lx.ClassText, msg, fb.fields, true) // Trigger a panic with the formatted message panic(msg) } -// Panicf logs a formatted message at Error level with a stack trace and the builder’s fields, +// Panicf logs a formatted message at Error level with a stack trace and the builder's fields, // then panics. It delegates to Panic and returns early if fields are nil. This method is used // for critical errors that require immediate program termination with a panic. // Example: @@ -301,7 +303,7 @@ func (fb *FieldBuilder) Panicf(format string, args ...any) { // It stores non-nil errors in the "error" field: a single error if only one is non-nil, // or a slice of errors if multiple are non-nil. It logs the concatenated string representations // of non-nil errors (e.g., "failed 1; failed 2") at the Error level. Returns the FieldBuilder -// for chaining, allowing further field additions or logging. Thread-safe via the logger’s mutex. +// for chaining, allowing further field additions or logging. Thread-safe via the logger's mutex. 
// Example: // // logger := New("app").Enable() @@ -311,9 +313,9 @@ func (fb *FieldBuilder) Panicf(format string, args ...any) { // // Output: [app] ERROR: failed 1; failed 2 // // [app] INFO: Error occurred [error=[failed 1 failed 2] k=v] func (fb *FieldBuilder) Err(errs ...error) *FieldBuilder { - // Initialize fields map if nil + // Initialize fields slice if nil if fb.fields == nil { - fb.fields = make(map[string]interface{}) + fb.fields = make(lx.Fields, 0, 4) } // Collect non-nil errors and build log message @@ -335,10 +337,10 @@ func (fb *FieldBuilder) Err(errs ...error) *FieldBuilder { if count > 0 { if count == 1 { // Store single error directly - fb.fields["error"] = nonNilErrors[0] + fb.fields = append(fb.fields, lx.Field{Key: "error", Value: nonNilErrors[0]}) } else { // Store slice of errors - fb.fields["error"] = nonNilErrors + fb.fields = append(fb.fields, lx.Field{Key: "error", Value: nonNilErrors}) } // Log concatenated error messages at Error level fb.logger.log(lx.LevelError, lx.ClassText, builder.String(), nil, false) @@ -357,19 +359,30 @@ func (fb *FieldBuilder) Err(errs ...error) *FieldBuilder { // logger := New("app").Enable() // logger.Fields("k1", "v1").Merge("k2", "v2").Info("Action") // Output: [app] INFO: Action [k1=v1 k2=v2] func (fb *FieldBuilder) Merge(pairs ...any) *FieldBuilder { + // Initialize fields slice if nil + if fb.fields == nil { + fb.fields = make(lx.Fields, 0, len(pairs)/2) + } + // Process pairs as key-value, advancing by 2 for i := 0; i < len(pairs)-1; i += 2 { // Ensure the key is a string if key, ok := pairs[i].(string); ok { - fb.fields[key] = pairs[i+1] + fb.fields = append(fb.fields, lx.Field{Key: key, Value: pairs[i+1]}) } else { // Log an error field for non-string keys - fb.fields["error"] = fmt.Errorf("non-string key in Merge: %v", pairs[i]) + fb.fields = append(fb.fields, lx.Field{ + Key: "error", + Value: fmt.Errorf("non-string key in Merge: %v", pairs[i]), + }) } } // Check for uneven pairs (missing value) if len(pairs)%2 != 0 { - fb.fields["error"] = fmt.Errorf("uneven key-value pairs in Merge: [%v]", pairs[len(pairs)-1]) + fb.fields = append(fb.fields, lx.Field{ + Key: "error", + Value: fmt.Errorf("uneven key-value pairs in Merge: [%v]", pairs[len(pairs)-1]), + }) } return fb } diff --git a/vendor/github.com/olekukonko/ll/global.go b/vendor/github.com/olekukonko/ll/global.go index 85146f54..edc41fbc 100644 --- a/vendor/github.com/olekukonko/ll/global.go +++ b/vendor/github.com/olekukonko/ll/global.go @@ -1,11 +1,9 @@ package ll import ( - "os" "sync/atomic" "time" - "github.com/olekukonko/ll/lh" "github.com/olekukonko/ll/lx" ) @@ -14,16 +12,7 @@ import ( // a logger instance. The logger is initialized with default settings: enabled, Debug level, // flat namespace style, and a text handler to os.Stdout. It is thread-safe due to the Logger // struct’s mutex. -var defaultLogger = &Logger{ - enabled: true, // Initially enabled - level: lx.LevelDebug, // Minimum log level set to Debug - namespaces: defaultStore, // Shared namespace store for enable/disable states - context: make(map[string]interface{}), // Empty context for global fields - style: lx.FlatPath, // Flat namespace style (e.g., [parent/child]) - handler: lh.NewTextHandler(os.Stdout), // Default text handler to os.Stdout - middleware: make([]Middleware, 0), // Empty middleware chain - stackBufferSize: 4096, // Buffer size for stack traces -} +var defaultLogger = New("") // Handler sets the handler for the default logger. 
// It configures the output destination and format (e.g., text, JSON) for logs emitted by @@ -233,16 +222,25 @@ func Panicf(format string, args ...any) { } // If creates a conditional logger that logs only if the condition is true using the default logger. -// It returns a Conditional struct that wraps the default logger, enabling conditional logging methods. -// Thread-safe via the Logger’s mutex. -// Example: -// -// ll.If(true).Info("Logged") // Output: [] INFO: Logged -// ll.If(false).Info("Ignored") // No output func If(condition bool) *Conditional { return defaultLogger.If(condition) } +// IfErr creates a conditional logger that logs only if the error is non-nil using the default logger. +func IfErr(err error) *Conditional { + return defaultLogger.IfErr(err) +} + +// IfErrAny creates a conditional logger that logs only if AT LEAST ONE error is non-nil using the default logger. +func IfErrAny(errs ...error) *Conditional { + return defaultLogger.IfErrAny(errs...) +} + +// IfErrOne creates a conditional logger that logs only if ALL errors are non-nil using the default logger. +func IfErrOne(errs ...error) *Conditional { + return defaultLogger.IfErrOne(errs...) +} + // Context creates a new logger with additional contextual fields using the default logger. // It preserves existing context fields and adds new ones, returning a new logger instance // to avoid mutating the default logger. Thread-safe with write lock. @@ -260,8 +258,8 @@ func Context(fields map[string]interface{}) *Logger { // // ll.AddContext("user", "alice") // ll.Info("Action") // Output: [] INFO: Action [user=alice] -func AddContext(key string, value interface{}) *Logger { - return defaultLogger.AddContext(key, value) +func AddContext(pairs ...any) *Logger { + return defaultLogger.AddContext(pairs...) } // GetContext returns the default logger’s context map of persistent key-value fields. @@ -269,7 +267,7 @@ func AddContext(key string, value interface{}) *Logger { // Example: // // ll.AddContext("user", "alice") -// ctx := ll.GetContext() // Returns map[string]interface{}{"user": "alice"} +// ctx := ll.GetContext() // Returns map[string]interface{}{"user": "alice"}k func GetContext() map[string]interface{} { return defaultLogger.GetContext() } @@ -472,6 +470,37 @@ func Measure(fns ...func()) time.Duration { return defaultLogger.Measure(fns...) } +// Labels temporarily attaches one or more label names to the logger for the next log entry. +// Labels are typically used for metrics, benchmarking, tracing, or categorizing logs in a structured way. +// +// The labels are stored atomically and intended to be short-lived, applying only to the next +// log operation (or until overwritten by a subsequent call to Labels). Multiple labels can +// be provided as separate string arguments. +// +// Example usage: +// +// logger := New("app").Enable() +// +// // Add labels for a specific operation +// logger.Labels("load_users", "process_orders").Measure(func() { +// // ... perform work ... +// }, func() { +// // ... optional callback ... +// }) +func Labels(names ...string) *Logger { + return defaultLogger.Labels(names...) 
+} + +// Since creates a timer that will log the duration when completed +// If startTime is provided, uses that as the start time; otherwise uses time.Now() +// +// defer logger.Since().Info("request") // Auto-start +// logger.Since(start).Info("request") // Manual timing +// logger.Since().If(debug).Debug("timing") // Conditional +func Since(start ...time.Time) *SinceBuilder { + return defaultLogger.Since(start...) +} + // Benchmark logs the duration since a start time at Info level using the default logger. // It calculates the time elapsed since the provided start time and logs it with "start", // "end", and "duration" fields. Thread-safe via the Logger’s mutex. @@ -586,8 +615,8 @@ func Dbg(any ...interface{}) { // Example: // // ll.Dump([]byte{0x41, 0x42}) // Outputs hex/ASCII dump -func Dump(any interface{}) { - defaultLogger.Dump(any) +func Dump(values ...interface{}) { + defaultLogger.Dump(values...) } // Enabled returns whether the default logger is enabled for logging. @@ -667,3 +696,12 @@ func Inspect(values ...interface{}) { o := NewInspector(defaultLogger) o.Log(2, values...) } + +func Apply(opts ...Option) *Logger { + return defaultLogger.Apply(opts...) + +} + +func Toggle(v bool) *Logger { + return defaultLogger.Toggle(v) +} diff --git a/vendor/github.com/olekukonko/ll/inspector.go b/vendor/github.com/olekukonko/ll/inspector.go index fb6d6901..b816ac5c 100644 --- a/vendor/github.com/olekukonko/ll/inspector.go +++ b/vendor/github.com/olekukonko/ll/inspector.go @@ -31,7 +31,7 @@ func NewInspector(logger *Logger) *Inspector { // Example usage within a Logger method: // // o := NewInspector(l) -// o.Log(2, someStruct) // Logs JSON representation with caller info +// o.Log(2, someStruct) func (o *Inspector) Log(skip int, values ...interface{}) { // Skip if logger is suspended or Info level is disabled if o.logger.suspend.Load() || !o.logger.shouldLog(lx.LevelInfo) { @@ -74,13 +74,13 @@ func (o *Inspector) Log(skip int, values ...interface{}) { } if err != nil { - o.logger.log(lx.LevelError, lx.ClassText, fmt.Sprintf("Inspector: JSON encoding error: %v", err), nil, false) + o.logger.log(lx.LevelError, lx.ClassInspect, fmt.Sprintf("Inspector: JSON encoding error: %v", err), nil, false) continue } // Construct log message with file, line, and JSON data - msg := fmt.Sprintf("[%s:%d] INSPECT: %s", shortFile, line, string(jsonData)) - o.logger.log(lx.LevelInfo, lx.ClassText, msg, nil, false) + msg := fmt.Sprintf("[%s:%d] %s", shortFile, line, string(jsonData)) + o.logger.log(lx.LevelInfo, lx.ClassInspect, msg, nil, false) } } diff --git a/vendor/github.com/olekukonko/ll/lh/buffered.go b/vendor/github.com/olekukonko/ll/lh/buffered.go index 0fc8c14d..9c388b04 100644 --- a/vendor/github.com/olekukonko/ll/lh/buffered.go +++ b/vendor/github.com/olekukonko/ll/lh/buffered.go @@ -17,6 +17,7 @@ type Buffering struct { FlushInterval time.Duration // Maximum time between flushes (default: 10s) MaxBuffer int // Maximum buffer size before applying backpressure (default: 1000) OnOverflow func(int) // Called when buffer reaches MaxBuffer (default: logs warning) + ErrorOutput io.Writer // Destination for internal errors like flush failures (default: os.Stderr) } // BufferingOpt configures Buffered handler. @@ -66,6 +67,18 @@ func WithOverflowHandler(fn func(int)) BufferingOpt { } } +// WithErrorOutput sets the destination for internal errors (e.g., downstream handler failures). +// Defaults to os.Stderr if not set. 
+// Example: +// +// // Redirect internal errors to a file or discard them +// handler := NewBuffered(textHandler, WithErrorOutput(os.Stdout)) +func WithErrorOutput(w io.Writer) BufferingOpt { + return func(c *Buffering) { + c.ErrorOutput = w + } +} + // Buffered wraps any Handler to provide buffering capabilities. // It buffers log entries in a channel and flushes them based on batch size, time interval, or explicit flush. // The generic type H ensures compatibility with any lx.Handler implementation. @@ -93,7 +106,8 @@ func NewBuffered[H lx.Handler](handler H, opts ...BufferingOpt) *Buffered[H] { BatchSize: 100, // Default: flush every 100 entries FlushInterval: 10 * time.Second, // Default: flush every 10 seconds MaxBuffer: 1000, // Default: max 1000 entries in buffer - OnOverflow: func(count int) { // Default: log overflow to io.Discard + ErrorOutput: os.Stderr, // Default: report errors to stderr + OnOverflow: func(count int) { // Default: log overflow to io.Discard (silent by default for overflow) fmt.Fprintf(io.Discard, "log buffer overflow: %d entries\n", count) }, } @@ -113,6 +127,9 @@ func NewBuffered[H lx.Handler](handler H, opts ...BufferingOpt) *Buffered[H] { if config.FlushInterval <= 0 { config.FlushInterval = 10 * time.Second // Minimum flush interval is 10s } + if config.ErrorOutput == nil { + config.ErrorOutput = os.Stderr + } // Initialize Buffered handler b := &Buffered[H]{ @@ -173,18 +190,25 @@ func (b *Buffered[H]) Flush() { // Close flushes any remaining entries and stops the worker. // It ensures shutdown is performed only once and waits for the worker to finish. +// If the underlying handler implements a Close() error method, it will be called to release resources. // Thread-safe via sync.Once and WaitGroup. -// Returns nil as it does not produce errors. +// Returns any error from the underlying handler's Close, or nil. // Example: // // buffered.Close() // Flushes entries and stops worker func (b *Buffered[H]) Close() error { + var closeErr error b.shutdownOnce.Do(func() { close(b.shutdown) // Signal worker to shut down b.wg.Wait() // Wait for worker to finish runtime.SetFinalizer(b, nil) // Remove finalizer + + // Check if underlying handler has a Close method and call it + if closer, ok := any(b.handler).(interface{ Close() error }); ok { + closeErr = closer.Close() + } }) - return nil + return closeErr } // Final ensures remaining entries are flushed during garbage collection. @@ -246,7 +270,7 @@ func (b *Buffered[H]) worker() { } // flushBatch processes a batch of entries through the wrapped handler. -// It writes each entry to the underlying handler, logging any errors to stderr. +// It writes each entry to the underlying handler, logging any errors to the configured ErrorOutput. // Example (internal usage): // // b.flushBatch([]*lx.Entry{entry1, entry2}) @@ -254,14 +278,16 @@ func (b *Buffered[H]) flushBatch(batch []*lx.Entry) { for _, entry := range batch { // Process each entry through the handler if err := b.handler.Handle(entry); err != nil { - fmt.Fprintf(os.Stderr, "log flush error: %v\n", err) // Log errors to stderr + if b.config.ErrorOutput != nil { + fmt.Fprintf(b.config.ErrorOutput, "log flush error: %v\n", err) + } } } } // drainRemaining processes any remaining entries in the channel. // It flushes all entries from the entries channel to the underlying handler, -// logging any errors to stderr. Used during flush or shutdown. +// logging any errors to the configured ErrorOutput. Used during flush or shutdown. 
// Example (internal usage): // // b.drainRemaining() // Flushes all pending entries @@ -270,7 +296,9 @@ func (b *Buffered[H]) drainRemaining() { select { case entry := <-b.entries: // Process next entry if err := b.handler.Handle(entry); err != nil { - fmt.Fprintf(os.Stderr, "log drain error: %v\n", err) // Log errors to stderr + if b.config.ErrorOutput != nil { + fmt.Fprintf(b.config.ErrorOutput, "log drain error: %v\n", err) + } } default: // Exit when channel is empty return diff --git a/vendor/github.com/olekukonko/ll/lh/colorized.go b/vendor/github.com/olekukonko/ll/lh/colorized.go index e343ff38..05285002 100644 --- a/vendor/github.com/olekukonko/ll/lh/colorized.go +++ b/vendor/github.com/olekukonko/ll/lh/colorized.go @@ -1,10 +1,12 @@ package lh import ( + "bytes" "fmt" "io" "os" - "sort" + "runtime" + "strconv" "strings" "sync" "time" @@ -12,15 +14,23 @@ import ( "github.com/olekukonko/ll/lx" ) +// ColorIntensity defines the intensity level for ANSI colors +type ColorIntensity int + +const ( + IntensityNormal ColorIntensity = iota + IntensityBright + IntensityPastel + IntensityVibrant +) + // Palette defines ANSI color codes for various log components. -// It specifies colors for headers, goroutines, functions, paths, stack traces, and log levels, -// used by ColorizedHandler to format log output with color. type Palette struct { Header string // Color for stack trace header and dump separators Goroutine string // Color for goroutine lines in stack traces Func string // Color for function names in stack traces Path string // Color for file paths in stack traces - FileLine string // Color for file line numbers (not used in provided code) + FileLine string // Color for file line numbers Reset string // Reset code to clear color formatting Pos string // Color for position in hex dumps Hex string // Color for hex values in dumps @@ -29,134 +39,352 @@ type Palette struct { Info string // Color for Info level messages Warn string // Color for Warn level messages Error string // Color for Error level messages + Fatal string // Color for Fatal level messages Title string // Color for dump titles (BEGIN/END separators) + + // Field type colors + Key string // Color for field keys + Number string // Color for numbers + String string // Color for strings + Bool string // Color for booleans + Time string // Color for timestamps/durations + Nil string // Color for nil values + Default string // Default color for unknown types + + // JSON and Inspect specific colors + JSONKey string // Color for JSON keys + JSONString string // Color for JSON string values + JSONNumber string // Color for JSON number values + JSONBool string // Color for JSON boolean values + JSONNull string // Color for JSON null values + JSONBrace string // Color for JSON braces and brackets + InspectKey string // Color for inspect keys + InspectValue string // Color for inspect values + InspectMeta string // Color for inspect metadata (annotations) } // darkPalette defines colors optimized for dark terminal backgrounds. -// It uses bright, contrasting colors for readability on dark backgrounds. 
var darkPalette = Palette{ - Header: "\033[1;31m", // Bold red for headers - Goroutine: "\033[1;36m", // Bold cyan for goroutines - Func: "\033[97m", // Bright white for functions - Path: "\033[38;5;245m", // Light gray for paths - FileLine: "\033[38;5;111m", // Muted light blue (unused) - Reset: "\033[0m", // Reset color formatting + Header: "\033[1;38;5;203m", // Brighter red + Goroutine: "\033[1;38;5;51m", // Bright cyan + Func: "\033[1;97m", // Bright white + Path: "\033[38;5;110m", // Brighter gray-blue + FileLine: "\033[38;5;117m", // Bright blue + Reset: "\033[0m", + Title: "\033[38;5;245m", + Pos: "\033[38;5;117m", + Hex: "\033[38;5;156m", + Ascii: "\033[38;5;224m", + Debug: "\033[36m", + Info: "\033[32m", + Warn: "\033[33m", + Error: "\033[31m", + Fatal: "\033[1;31m", - Title: "\033[38;5;245m", // Light gray for dump titles - Pos: "\033[38;5;117m", // Light blue for dump positions - Hex: "\033[38;5;156m", // Light green for hex values - Ascii: "\033[38;5;224m", // Light pink for ASCII values + // Field type colors - made brighter for dark backgrounds + Key: "\033[38;5;117m", // Brighter blue + Number: "\033[38;5;141m", // Brighter purple + String: "\033[38;5;223m", // Brighter yellow/orange + Bool: "\033[38;5;85m", // Brighter green + Time: "\033[38;5;110m", // Brighter cyan-blue + Nil: "\033[38;5;243m", // Slightly brighter gray + Default: "\033[38;5;250m", // Brighter gray - Debug: "\033[36m", // Cyan for Debug level - Info: "\033[32m", // Green for Info level - Warn: "\033[33m", // Yellow for Warn level - Error: "\033[31m", // Red for Error level + // JSON and Inspect colors + JSONKey: "\033[38;5;117m", + JSONString: "\033[38;5;223m", + JSONNumber: "\033[38;5;141m", + JSONBool: "\033[38;5;85m", + JSONNull: "\033[38;5;243m", + JSONBrace: "\033[38;5;245m", + InspectKey: "\033[38;5;117m", + InspectValue: "\033[38;5;223m", + InspectMeta: "\033[38;5;243m", } // lightPalette defines colors optimized for light terminal backgrounds. -// It uses darker colors for better contrast on light backgrounds. 
var lightPalette = Palette{ - Header: "\033[1;31m", // Same red for headers - Goroutine: "\033[34m", // Blue (darker for light bg) - Func: "\033[30m", // Black text for functions - Path: "\033[90m", // Dark gray for paths - FileLine: "\033[94m", // Blue for file lines (unused) - Reset: "\033[0m", // Reset color formatting + Header: "\033[1;31m", + Goroutine: "\033[34m", + Func: "\033[30m", + Path: "\033[90m", + FileLine: "\033[94m", + Reset: "\033[0m", + Title: "\033[38;5;245m", + Pos: "\033[38;5;117m", + Hex: "\033[38;5;156m", + Ascii: "\033[38;5;224m", + Debug: "\033[36m", + Info: "\033[32m", + Warn: "\033[33m", + Error: "\033[31m", + Fatal: "\033[1;31m", + + Key: "\033[34m", + Number: "\033[35m", + String: "\033[38;5;94m", + Bool: "\033[32m", + Time: "\033[38;5;24m", + Nil: "\033[38;5;240m", + Default: "\033[30m", + + JSONKey: "\033[1;34m", + JSONString: "\033[1;33m", + JSONNumber: "\033[1;35m", + JSONBool: "\033[1;32m", + JSONNull: "\033[1;37m", + JSONBrace: "\033[1;37m", + InspectKey: "\033[1;34m", + InspectValue: "\033[1;33m", + InspectMeta: "\033[1;37m", +} + +// brightPalette defines vibrant, high-contrast colors +var brightPalette = Palette{ + Header: "\033[1;91m", + Goroutine: "\033[1;96m", + Func: "\033[1;97m", + Path: "\033[38;5;250m", + FileLine: "\033[38;5;117m", + Reset: "\033[0m", + Title: "\033[1;37m", + Pos: "\033[1;33m", + Hex: "\033[1;32m", + Ascii: "\033[1;35m", + Debug: "\033[1;36m", + Info: "\033[1;32m", + Warn: "\033[1;33m", + Error: "\033[1;31m", + Fatal: "\033[1;91m", + + Key: "\033[1;34m", + Number: "\033[1;35m", + String: "\033[1;33m", + Bool: "\033[1;32m", + Time: "\033[1;36m", + Nil: "\033[1;37m", + Default: "\033[1;37m", + + JSONKey: "\033[1;34m", + JSONString: "\033[1;33m", + JSONNumber: "\033[1;35m", + JSONBool: "\033[1;32m", + JSONNull: "\033[1;37m", + JSONBrace: "\033[1;37m", + InspectKey: "\033[1;34m", + InspectValue: "\033[1;33m", + InspectMeta: "\033[1;37m", +} + +// pastelPalette defines soft, pastel colors +var pastelPalette = Palette{ + Header: "\033[38;5;211m", + Goroutine: "\033[38;5;153m", + Func: "\033[38;5;255m", + Path: "\033[38;5;248m", + FileLine: "\033[38;5;111m", + Reset: "\033[0m", + Title: "\033[38;5;248m", + Pos: "\033[38;5;153m", + Hex: "\033[38;5;158m", + Ascii: "\033[38;5;218m", + Debug: "\033[38;5;122m", + Info: "\033[38;5;120m", + Warn: "\033[38;5;221m", + Error: "\033[38;5;211m", + Fatal: "\033[38;5;204m", - Title: "\033[38;5;245m", // Light gray for dump titles - Pos: "\033[38;5;117m", // Light blue for dump positions - Hex: "\033[38;5;156m", // Light green for hex values - Ascii: "\033[38;5;224m", // Light pink for ASCII values + Key: "\033[38;5;153m", + Number: "\033[38;5;183m", + String: "\033[38;5;223m", + Bool: "\033[38;5;120m", + Time: "\033[38;5;117m", + Nil: "\033[38;5;247m", + Default: "\033[38;5;250m", - Debug: "\033[36m", // Cyan for Debug level - Info: "\033[32m", // Green for Info level - Warn: "\033[33m", // Yellow for Warn level - Error: "\033[31m", // Red for Error level + JSONKey: "\033[38;5;153m", + JSONString: "\033[38;5;223m", + JSONNumber: "\033[38;5;183m", + JSONBool: "\033[38;5;120m", + JSONNull: "\033[38;5;247m", + JSONBrace: "\033[38;5;247m", + InspectKey: "\033[38;5;153m", + InspectValue: "\033[38;5;223m", + InspectMeta: "\033[38;5;247m", +} + +// vibrantPalette defines highly saturated, eye-catching colors +var vibrantPalette = Palette{ + Header: "\033[38;5;196m", + Goroutine: "\033[38;5;51m", + Func: "\033[38;5;15m", + Path: "\033[38;5;244m", + FileLine: "\033[38;5;75m", + Reset: "\033[0m", + Title: 
"\033[38;5;244m", + Pos: "\033[38;5;51m", + Hex: "\033[38;5;46m", + Ascii: "\033[38;5;201m", + Debug: "\033[38;5;51m", + Info: "\033[38;5;46m", + Warn: "\033[38;5;226m", + Error: "\033[38;5;196m", + Fatal: "\033[1;38;5;196m", + + Key: "\033[38;5;33m", + Number: "\033[38;5;129m", + String: "\033[38;5;214m", + Bool: "\033[38;5;46m", + Time: "\033[38;5;75m", + Nil: "\033[38;5;242m", + Default: "\033[38;5;15m", + + JSONKey: "\033[38;5;33m", + JSONString: "\033[38;5;214m", + JSONNumber: "\033[38;5;129m", + JSONBool: "\033[38;5;46m", + JSONNull: "\033[38;5;242m", + JSONBrace: "\033[38;5;242m", + InspectKey: "\033[38;5;33m", + InspectValue: "\033[38;5;214m", + InspectMeta: "\033[38;5;242m", +} + +// noColorPalette defines a palette with empty strings for environments without color support +var noColorPalette = Palette{ + Header: "", Goroutine: "", Func: "", Path: "", FileLine: "", Reset: "", + Title: "", Pos: "", Hex: "", Ascii: "", Debug: "", Info: "", Warn: "", Error: "", Fatal: "", + Key: "", Number: "", String: "", Bool: "", Time: "", Nil: "", Default: "", + JSONKey: "", JSONString: "", JSONNumber: "", JSONBool: "", JSONNull: "", JSONBrace: "", + InspectKey: "", InspectValue: "", InspectMeta: "", +} + +// colorBufPool is a pool of bytes.Buffer instances to reduce allocations +var colorBufPool = sync.Pool{ + New: func() interface{} { + return &bytes.Buffer{} + }, } // ColorizedHandler is a handler that outputs log entries with ANSI color codes. -// It formats log entries with colored namespace, level, message, fields, and stack traces, -// writing the result to the provided writer. -// Thread-safe if the underlying writer is thread-safe. type ColorizedHandler struct { - w io.Writer // Destination for colored log output - palette Palette // Color scheme for formatting - showTime bool // Whether to display timestamps - timeFormat string // Format for timestamps (defaults to time.RFC3339) - mu sync.Mutex + writer io.Writer + palette Palette + showTime bool + timeFormat string + mu sync.Mutex + noColor bool // Whether to disable colors entirely + intensity ColorIntensity // Color intensity level + colorFields bool // Whether to colorize fields (default: true) } // ColorOption defines a configuration function for ColorizedHandler. -// It allows customization of the handler, such as setting the color palette. type ColorOption func(*ColorizedHandler) // WithColorPallet sets the color palette for the ColorizedHandler. -// It allows specifying a custom Palette for dark or light terminal backgrounds. -// Example: -// -// handler := NewColorizedHandler(os.Stdout, WithColorPallet(lightPalette)) func WithColorPallet(pallet Palette) ColorOption { return func(c *ColorizedHandler) { c.palette = pallet } } -// NewColorizedHandler creates a new ColorizedHandler writing to the specified writer. -// It initializes the handler with a detected or specified color palette and applies -// optional configuration functions. +// WithColorNone disables all color output. +func WithColorNone() ColorOption { + return func(c *ColorizedHandler) { + c.noColor = true + c.colorFields = false // Also disable field coloring + } +} + +// WithColorField enables or disables field coloring specifically. +// This is useful for performance optimization or when field colors are too much. 
// Example: // -// handler := NewColorizedHandler(os.Stdout) -// logger := ll.New("app").Enable().Handler(handler) -// logger.Info("Test") // Output: [app] : Test +// handler := NewColorizedHandler(os.Stdout, WithColorField(false)) // Disable field coloring only +func WithColorField(enable bool) ColorOption { + return func(c *ColorizedHandler) { + c.colorFields = enable + } +} + +// WithColorShowTime enables or disables the display of timestamps. +func WithColorShowTime(show bool) ColorOption { + return func(c *ColorizedHandler) { + c.showTime = show + } +} + +// WithColorIntensity sets the color intensity for the ColorizedHandler. +func WithColorIntensity(intensity ColorIntensity) ColorOption { + return func(c *ColorizedHandler) { + c.intensity = intensity + } +} + +// WithColorTheme configures the ColorizedHandler to use a specific color theme based on the provided theme name. +func WithColorTheme(theme string) ColorOption { + return func(c *ColorizedHandler) { + switch strings.ToLower(theme) { + case "light": + c.palette = lightPalette + case "dark": + c.palette = darkPalette + case "bright": + c.palette = brightPalette + case "pastel": + c.palette = pastelPalette + case "vibrant": + c.palette = vibrantPalette + } + } +} + +// NewColorizedHandler creates a new ColorizedHandler writing to the specified writer. func NewColorizedHandler(w io.Writer, opts ...ColorOption) *ColorizedHandler { - // Initialize with writer - c := &ColorizedHandler{w: w, - showTime: false, - timeFormat: time.RFC3339, + c := &ColorizedHandler{ + writer: w, + showTime: false, + timeFormat: time.RFC3339, + noColor: false, + intensity: IntensityNormal, + colorFields: true, // Default: enable field coloring } - // Apply configuration options for _, opt := range opts { opt(c) } - // Detect palette if not set + c.palette = c.detectPalette() return c } +func (h *ColorizedHandler) Output(w io.Writer) { + h.mu.Lock() + defer h.mu.Unlock() + h.writer = w +} + // Handle processes a log entry and writes it with ANSI color codes. -// It delegates to specialized methods based on the entry's class (Dump, Raw, or regular). -// Returns an error if writing to the underlying writer fails. -// Thread-safe if the writer is thread-safe. -// Example: -// -// handler.Handle(&lx.Entry{Message: "test", Level: lx.LevelInfo}) // Writes colored output func (h *ColorizedHandler) Handle(e *lx.Entry) error { - h.mu.Lock() defer h.mu.Unlock() switch e.Class { case lx.ClassDump: - // Handle hex dump entries return h.handleDumpOutput(e) + case lx.ClassJSON: + return h.handleJSONOutput(e) + case lx.ClassInspect: + return h.handleInspectOutput(e) case lx.ClassRaw: - // Write raw entries directly - _, err := h.w.Write([]byte(e.Message)) + _, err := h.writer.Write([]byte(e.Message)) return err default: - // Handle standard log entries return h.handleRegularOutput(e) } } -// Timestamped enables or disables timestamp display and optionally sets a custom time format. -// If format is empty, defaults to RFC3339. -// Example: -// -// handler := NewColorizedHandler(os.Stdout).Timestamped(true, time.StampMilli) -// // Output: Jan 02 15:04:05.000 [app] INFO: Test +// Timestamped enables or disables timestamp display. func (h *ColorizedHandler) Timestamped(enable bool, format ...string) { h.showTime = enable if len(format) > 0 && format[0] != "" { @@ -165,53 +393,291 @@ func (h *ColorizedHandler) Timestamped(enable bool, format ...string) { } // handleRegularOutput handles normal log entries. 
-// It formats the entry with colored namespace, level, message, fields, and stack trace (if present), -// writing the result to the handler's writer. -// Returns an error if writing fails. -// Example (internal usage): -// -// h.handleRegularOutput(&lx.Entry{Message: "test", Level: lx.LevelInfo}) // Writes colored output func (h *ColorizedHandler) handleRegularOutput(e *lx.Entry) error { - var builder strings.Builder // Buffer for building formatted output + buf := colorBufPool.Get().(*bytes.Buffer) + buf.Reset() + defer colorBufPool.Put(buf) - // Add timestamp if enabled if h.showTime { - builder.WriteString(e.Timestamp.Format(h.timeFormat)) - builder.WriteString(lx.Space) + buf.WriteString(e.Timestamp.Format(h.timeFormat)) + buf.WriteString(lx.Space) } - // Format namespace with colors - h.formatNamespace(&builder, e) - - // Format level with color based on severity - h.formatLevel(&builder, e) + h.formatNamespace(buf, e) + h.formatLevel(buf, e) + buf.WriteString(e.Message) + h.formatFields(buf, e) - // Add message and fields - builder.WriteString(e.Message) - h.formatFields(&builder, e) - - // fmt.Println("------------>", len(e.Stack)) - // Format stack trace if present if len(e.Stack) > 0 { - h.formatStack(&builder, e.Stack) + h.formatStack(buf, e.Stack) } - // Append newline for non-None levels if e.Level != lx.LevelNone { - builder.WriteString(lx.Newline) + buf.WriteString(lx.Newline) + } + + _, err := h.writer.Write(buf.Bytes()) + return err +} + +// handleJSONOutput handles JSON log entries. +func (h *ColorizedHandler) handleJSONOutput(e *lx.Entry) error { + buf := colorBufPool.Get().(*bytes.Buffer) + buf.Reset() + defer colorBufPool.Put(buf) + + if h.showTime { + buf.WriteString(e.Timestamp.Format(h.timeFormat)) + buf.WriteString(lx.Newline) + } + + if e.Namespace != "" { + h.formatNamespace(buf, e) + h.formatLevel(buf, e) + } + + h.colorizeJSON(buf, e.Message) + buf.WriteString(lx.Newline) + + _, err := h.writer.Write(buf.Bytes()) + return err +} + +// handleInspectOutput handles inspect log entries. 
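+// It colorizes the message line by line via colorizeInspect, preserving the original layout of the inspect output.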
+func (h *ColorizedHandler) handleInspectOutput(e *lx.Entry) error { + buf := colorBufPool.Get().(*bytes.Buffer) + buf.Reset() + defer colorBufPool.Put(buf) + + if h.showTime { + buf.WriteString(e.Timestamp.Format(h.timeFormat)) + buf.WriteString(lx.Space) } - // Write formatted output to writer - _, err := h.w.Write([]byte(builder.String())) + h.formatNamespace(buf, e) + h.formatLevel(buf, e) + h.colorizeInspect(buf, e.Message) + buf.WriteString(lx.Newline) + + _, err := h.writer.Write(buf.Bytes()) return err } +// colorizeJSON applies syntax highlighting to JSON strings without changing formatting +func (h *ColorizedHandler) colorizeJSON(b *bytes.Buffer, jsonStr string) { + inString := false + escapeNext := false + + for i := 0; i < len(jsonStr); i++ { + ch := jsonStr[i] + + if escapeNext { + b.WriteByte(ch) + escapeNext = false + continue + } + + switch ch { + case '\\': + escapeNext = true + if inString { + b.WriteString(h.palette.JSONString) + } + b.WriteByte(ch) + + case '"': + if inString { + // End of string + b.WriteString(h.palette.JSONString) + b.WriteByte(ch) + b.WriteString(h.palette.Reset) + inString = false + } else { + // Start of string + inString = true + b.WriteString(h.palette.JSONString) + b.WriteByte(ch) + } + + case ':': + if !inString { + b.WriteString(h.palette.JSONBrace) + b.WriteByte(ch) + b.WriteString(h.palette.Reset) + } else { + b.WriteByte(ch) + } + + case '{', '}', '[', ']', ',': + if !inString { + b.WriteString(h.palette.JSONBrace) + b.WriteByte(ch) + b.WriteString(h.palette.Reset) + } else { + b.WriteByte(ch) + } + + default: + if !inString { + // Check for numbers, booleans, null + remaining := jsonStr[i:] + + // Check for null + if len(remaining) >= 4 && strings.HasPrefix(remaining, "null") { + b.WriteString(h.palette.JSONNull) + b.WriteString("null") + b.WriteString(h.palette.Reset) + i += 3 // Skip "null" + } else if len(remaining) >= 4 && strings.HasPrefix(remaining, "true") { + b.WriteString(h.palette.JSONBool) + b.WriteString("true") + b.WriteString(h.palette.Reset) + i += 3 // Skip "true" + } else if len(remaining) >= 5 && strings.HasPrefix(remaining, "false") { + b.WriteString(h.palette.JSONBool) + b.WriteString("false") + b.WriteString(h.palette.Reset) + i += 4 // Skip "false" + } else if (ch >= '0' && ch <= '9') || ch == '-' || ch == '.' { + b.WriteString(h.palette.JSONNumber) + b.WriteByte(ch) + // Continue writing digits + for j := i + 1; j < len(jsonStr); j++ { + nextCh := jsonStr[j] + if (nextCh >= '0' && nextCh <= '9') || nextCh == '.' 
|| nextCh == 'e' || nextCh == 'E' || nextCh == '+' || nextCh == '-' { + b.WriteByte(nextCh) + i = j + } else { + break + } + } + b.WriteString(h.palette.Reset) + } else if ch == ' ' || ch == '\n' || ch == '\t' || ch == '\r' { + // Preserve whitespace exactly as is + b.WriteByte(ch) + } else { + // Unexpected character outside string - preserve it + b.WriteByte(ch) + } + } else { + // Inside string + b.WriteByte(ch) + } + } + } +} + +// colorizeInspect applies syntax highlighting to inspect output +func (h *ColorizedHandler) colorizeInspect(b *bytes.Buffer, inspectStr string) { + lines := strings.Split(inspectStr, "\n") + + for lineIdx, line := range lines { + if lineIdx > 0 { + b.WriteString("\n") + } + + trimmed := strings.TrimSpace(line) + if trimmed == "" { + b.WriteString(line) + continue + } + + // For inspect output, we'll do simple line-based coloring + // This preserves the original formatting + inString := false + escapeNext := false + + for i := 0; i < len(line); i++ { + ch := line[i] + + if escapeNext { + b.WriteByte(ch) + escapeNext = false + continue + } + + if ch == '\\' { + escapeNext = true + b.WriteByte(ch) + continue + } + + if ch == '"' { + inString = !inString + if inString { + // Check if this is a metadata key + if i+1 < len(line) && line[i+1] == '(' { + b.WriteString(h.palette.InspectMeta) + } else if i+2 < len(line) && line[i+1] == '*' && line[i+2] == '(' { + b.WriteString(h.palette.InspectMeta) + } else { + b.WriteString(h.palette.InspectKey) + } + } + b.WriteByte(ch) + if !inString { + b.WriteString(h.palette.Reset) + } + continue + } + + if inString { + // Inside a string key or value + b.WriteByte(ch) + } else { + // Outside strings + if ch == ':' { + b.WriteString(h.palette.JSONBrace) + b.WriteByte(ch) + b.WriteString(h.palette.Reset) + } else if ch == '{' || ch == '}' || ch == '[' || ch == ']' || ch == ',' { + b.WriteString(h.palette.JSONBrace) + b.WriteByte(ch) + b.WriteString(h.palette.Reset) + } else { + // Check for numbers, booleans, null outside strings + remaining := line[i:] + + if len(remaining) >= 4 && strings.HasPrefix(remaining, "null") { + b.WriteString(h.palette.JSONNull) + b.WriteString("null") + b.WriteString(h.palette.Reset) + i += 3 + } else if len(remaining) >= 4 && strings.HasPrefix(remaining, "true") { + b.WriteString(h.palette.JSONBool) + b.WriteString("true") + b.WriteString(h.palette.Reset) + i += 3 + } else if len(remaining) >= 5 && strings.HasPrefix(remaining, "false") { + b.WriteString(h.palette.JSONBool) + b.WriteString("false") + b.WriteString(h.palette.Reset) + i += 4 + } else if (ch >= '0' && ch <= '9') || ch == '-' { + b.WriteString(h.palette.InspectValue) + b.WriteByte(ch) + // Continue writing digits + for j := i + 1; j < len(line); j++ { + nextCh := line[j] + if (nextCh >= '0' && nextCh <= '9') || nextCh == '.' { + b.WriteByte(nextCh) + i = j + } else { + break + } + } + b.WriteString(h.palette.Reset) + } else { + b.WriteByte(ch) + } + } + } + } + } +} + // formatNamespace formats the namespace with ANSI color codes. -// It supports FlatPath ([parent/child]) and NestedPath ([parent]→[child]) styles. 
-// Example (internal usage): -// -// h.formatNamespace(&builder, &lx.Entry{Namespace: "parent/child", Style: lx.FlatPath}) // Writes "[parent/child]: " -func (h *ColorizedHandler) formatNamespace(b *strings.Builder, e *lx.Entry) { +func (h *ColorizedHandler) formatNamespace(b *bytes.Buffer, e *lx.Entry) { if e.Namespace == "" { return } @@ -219,7 +685,6 @@ func (h *ColorizedHandler) formatNamespace(b *strings.Builder, e *lx.Entry) { b.WriteString(lx.LeftBracket) switch e.Style { case lx.NestedPath: - // Split namespace and format as [parent]→[child] parts := strings.Split(e.Namespace, lx.Slash) for i, part := range parts { b.WriteString(part) @@ -229,8 +694,7 @@ func (h *ColorizedHandler) formatNamespace(b *strings.Builder, e *lx.Entry) { b.WriteString(lx.LeftBracket) } } - default: // FlatPath - // Format as [parent/child] + default: b.WriteString(e.Namespace) b.WriteString(lx.RightBracket) } @@ -239,64 +703,151 @@ func (h *ColorizedHandler) formatNamespace(b *strings.Builder, e *lx.Entry) { } // formatLevel formats the log level with ANSI color codes. -// It applies a color based on the level (Debug, Info, Warn, Error) and resets afterward. -// Example (internal usage): -// -// h.formatLevel(&builder, &lx.Entry{Level: lx.LevelInfo}) // Writes "INFO: " -func (h *ColorizedHandler) formatLevel(b *strings.Builder, e *lx.Entry) { - // Map levels to colors +func (h *ColorizedHandler) formatLevel(b *bytes.Buffer, e *lx.Entry) { color := map[lx.LevelType]string{ - lx.LevelDebug: h.palette.Debug, // Cyan - lx.LevelInfo: h.palette.Info, // Green - lx.LevelWarn: h.palette.Warn, // Yellow - lx.LevelError: h.palette.Error, // Red + lx.LevelDebug: h.palette.Debug, + lx.LevelInfo: h.palette.Info, + lx.LevelWarn: h.palette.Warn, + lx.LevelError: h.palette.Error, + lx.LevelFatal: h.palette.Fatal, }[e.Level] b.WriteString(color) - b.WriteString(e.Level.String()) + //b.WriteString(rightPad(e.Level.Name(e.Class), 8)) + b.WriteString(e.Level.Name(e.Class)) b.WriteString(h.palette.Reset) + // b.WriteString(lx.Space) b.WriteString(lx.Colon) b.WriteString(lx.Space) } // formatFields formats the log entry's fields in sorted order. -// It writes fields as [key=value key=value], with no additional coloring. -// Example (internal usage): -// -// h.formatFields(&builder, &lx.Entry{Fields: map[string]interface{}{"key": "value"}}) // Writes " [key=value]" -func (h *ColorizedHandler) formatFields(b *strings.Builder, e *lx.Entry) { +func (h *ColorizedHandler) formatFields(b *bytes.Buffer, e *lx.Entry) { if len(e.Fields) == 0 { return } - // Collect and sort field keys - var keys []string - for k := range e.Fields { - keys = append(keys, k) - } - sort.Strings(keys) - b.WriteString(lx.Space) b.WriteString(lx.LeftBracket) - // Format fields as key=value - for i, k := range keys { + + for i, pair := range e.Fields { if i > 0 { b.WriteString(lx.Space) } - b.WriteString(k) - b.WriteString("=") - b.WriteString(fmt.Sprint(e.Fields[k])) + + if h.colorFields { + // Color the key + b.WriteString(h.palette.Key) + b.WriteString(pair.Key) + b.WriteString(h.palette.Reset) + b.WriteString("=") + + // Format value with type-based coloring + h.formatFieldValue(b, pair.Value) + } else { + // No field coloring - just write plain text + b.WriteString(pair.Key) + b.WriteString("=") + fmt.Fprint(b, pair.Value) + } } + b.WriteString(lx.RightBracket) } +// formatFieldValue formats a field value with type-based ANSI color codes. 
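+// Strings and errors are quoted, numbers, booleans, nil and time values each use their own palette color,
+// and any other type falls back to the Default color.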
+func (h *ColorizedHandler) formatFieldValue(b *bytes.Buffer, value interface{}) { + // If field coloring is disabled, just write the value + if !h.colorFields { + fmt.Fprint(b, value) + return + } + + switch v := value.(type) { + case time.Time: + b.WriteString(h.palette.Time) + b.WriteString(v.Format("2006-01-02 15:04:05")) + b.WriteString(h.palette.Reset) + + case time.Duration: + b.WriteString(h.palette.Time) + h.formatDuration(b, v) + b.WriteString(h.palette.Reset) + + case error: + b.WriteString(h.palette.Error) + b.WriteString(`"`) + b.WriteString(v.Error()) + b.WriteString(`"`) + b.WriteString(h.palette.Reset) + + case int, int8, int16, int32, int64: + b.WriteString(h.palette.Number) + fmt.Fprint(b, v) + b.WriteString(h.palette.Reset) + + case uint, uint8, uint16, uint32, uint64: + b.WriteString(h.palette.Number) + fmt.Fprint(b, v) + b.WriteString(h.palette.Reset) + + case float32, float64: + b.WriteString(h.palette.Number) + switch f := v.(type) { + case float32: + fmt.Fprintf(b, "%.6g", f) + case float64: + fmt.Fprintf(b, "%.6g", f) + } + b.WriteString(h.palette.Reset) + + case string: + b.WriteString(h.palette.String) + b.WriteString(`"`) + b.WriteString(v) + b.WriteString(`"`) + b.WriteString(h.palette.Reset) + + case bool: + b.WriteString(h.palette.Bool) + fmt.Fprint(b, v) + b.WriteString(h.palette.Reset) + + case nil: + b.WriteString(h.palette.Nil) + b.WriteString("nil") + b.WriteString(h.palette.Reset) + + default: + b.WriteString(h.palette.Default) + fmt.Fprint(b, v) + b.WriteString(h.palette.Reset) + } +} + +// formatDuration formats a duration in a human-readable way +func (h *ColorizedHandler) formatDuration(b *bytes.Buffer, d time.Duration) { + if d < time.Microsecond { + b.WriteString(d.String()) + } else if d < time.Millisecond { + fmt.Fprintf(b, "%.3fµs", float64(d)/float64(time.Microsecond)) + } else if d < time.Second { + fmt.Fprintf(b, "%.3fms", float64(d)/float64(time.Millisecond)) + } else if d < time.Minute { + fmt.Fprintf(b, "%.3fs", float64(d)/float64(time.Second)) + } else if d < time.Hour { + minutes := d / time.Minute + seconds := (d % time.Minute) / time.Second + fmt.Fprintf(b, "%dm%.3fs", minutes, float64(seconds)/float64(time.Second)) + } else { + hours := d / time.Hour + minutes := (d % time.Hour) / time.Minute + fmt.Fprintf(b, "%dh%dm", hours, minutes) + } +} + // formatStack formats a stack trace with ANSI color codes. -// It structures the stack trace with colored goroutine, function, and path segments, -// using indentation and separators for readability. 
-// Example (internal usage): -// -// h.formatStack(&builder, []byte("goroutine 1 [running]:\nmain.main()\n\tmain.go:10")) // Appends colored stack trace -func (h *ColorizedHandler) formatStack(b *strings.Builder, stack []byte) { +func (h *ColorizedHandler) formatStack(b *bytes.Buffer, stack []byte) { b.WriteString("\n") b.WriteString(h.palette.Header) b.WriteString("[stack]") @@ -308,14 +859,12 @@ func (h *ColorizedHandler) formatStack(b *strings.Builder, stack []byte) { return } - // Format goroutine line b.WriteString(" ┌─ ") b.WriteString(h.palette.Goroutine) b.WriteString(lines[0]) b.WriteString(h.palette.Reset) b.WriteString("\n") - // Pair function name and file path lines for i := 1; i < len(lines)-1; i += 2 { funcLine := strings.TrimSpace(lines[i]) pathLine := strings.TrimSpace(lines[i+1]) @@ -330,25 +879,21 @@ func (h *ColorizedHandler) formatStack(b *strings.Builder, stack []byte) { if pathLine != "" { b.WriteString(" │ ") - // Look for last "/" before ".go:" lastSlash := strings.LastIndex(pathLine, "/") goIndex := strings.Index(pathLine, ".go:") if lastSlash >= 0 && goIndex > lastSlash { - // Prefix path prefix := pathLine[:lastSlash+1] - // File and line (e.g., ll.go:698 +0x5c) suffix := pathLine[lastSlash+1:] b.WriteString(h.palette.Path) b.WriteString(prefix) b.WriteString(h.palette.Reset) - b.WriteString(h.palette.Path) // Use mainPath color for suffix + b.WriteString(h.palette.Path) b.WriteString(suffix) b.WriteString(h.palette.Reset) } else { - // Fallback: whole line is gray b.WriteString(h.palette.Path) b.WriteString(pathLine) b.WriteString(h.palette.Reset) @@ -358,7 +903,6 @@ func (h *ColorizedHandler) formatStack(b *strings.Builder, stack []byte) { } } - // Handle any remaining unpaired line if len(lines)%2 == 0 && strings.TrimSpace(lines[len(lines)-1]) != "" { b.WriteString(" │ ") b.WriteString(h.palette.Func) @@ -371,110 +915,149 @@ func (h *ColorizedHandler) formatStack(b *strings.Builder, stack []byte) { } // handleDumpOutput formats hex dump output with ANSI color codes. -// It applies colors to position, hex, ASCII, and title components of the dump, -// wrapping the output with colored BEGIN/END separators. -// Returns an error if writing fails. 
-// Example (internal usage): -// -// h.handleDumpOutput(&lx.Entry{Class: lx.ClassDump, Message: "pos 00 hex: 61 62 'ab'"}) // Writes colored dump func (h *ColorizedHandler) handleDumpOutput(e *lx.Entry) error { - var builder strings.Builder + buf := colorBufPool.Get().(*bytes.Buffer) + buf.Reset() + defer colorBufPool.Put(buf) - // Add timestamp if enabled if h.showTime { - builder.WriteString(e.Timestamp.Format(h.timeFormat)) - builder.WriteString(lx.Newline) + buf.WriteString(e.Timestamp.Format(h.timeFormat)) + buf.WriteString(lx.Newline) } - // Write colored BEGIN separator - builder.WriteString(h.palette.Title) - builder.WriteString("---- BEGIN DUMP ----") - builder.WriteString(h.palette.Reset) - builder.WriteString("\n") + buf.WriteString(h.palette.Title) + buf.WriteString("---- BEGIN DUMP ----") + buf.WriteString(h.palette.Reset) + buf.WriteString("\n") - // Process each line of the dump lines := strings.Split(e.Message, "\n") length := len(lines) for i, line := range lines { if strings.HasPrefix(line, "pos ") { - // Parse and color position and hex/ASCII parts parts := strings.SplitN(line, "hex:", 2) if len(parts) == 2 { - builder.WriteString(h.palette.Pos) - builder.WriteString(parts[0]) - builder.WriteString(h.palette.Reset) + buf.WriteString(h.palette.Pos) + buf.WriteString(parts[0]) + buf.WriteString(h.palette.Reset) hexAscii := strings.SplitN(parts[1], "'", 2) - builder.WriteString(h.palette.Hex) - builder.WriteString("hex:") - builder.WriteString(hexAscii[0]) - builder.WriteString(h.palette.Reset) + buf.WriteString(h.palette.Hex) + buf.WriteString("hex:") + buf.WriteString(hexAscii[0]) + buf.WriteString(h.palette.Reset) if len(hexAscii) > 1 { - builder.WriteString(h.palette.Ascii) - builder.WriteString("'") - builder.WriteString(hexAscii[1]) - builder.WriteString(h.palette.Reset) + buf.WriteString(h.palette.Ascii) + buf.WriteString("'") + buf.WriteString(hexAscii[1]) + buf.WriteString(h.palette.Reset) } } } else if strings.HasPrefix(line, "Dumping value of type:") { - // Color type dump lines - builder.WriteString(h.palette.Header) - builder.WriteString(line) - builder.WriteString(h.palette.Reset) + buf.WriteString(h.palette.Header) + buf.WriteString(line) + buf.WriteString(h.palette.Reset) } else { - // Write non-dump lines as-is - builder.WriteString(line) + buf.WriteString(line) } - // Don't add newline for the last line if i < length-1 { - builder.WriteString("\n") + buf.WriteString("\n") } } - // Write colored END separator - builder.WriteString(h.palette.Title) - builder.WriteString("---- END DUMP ----") - builder.WriteString(h.palette.Reset) - builder.WriteString("\n") + buf.WriteString(h.palette.Title) + buf.WriteString("---- END DUMP ----") + buf.WriteString(h.palette.Reset) + buf.WriteString("\n\n") - // Write formatted output to writer - _, err := h.w.Write([]byte(builder.String())) + _, err := h.writer.Write(buf.Bytes()) return err } // detectPalette selects a color palette based on terminal environment variables. -// It checks TERM_BACKGROUND, COLORFGBG, and AppleInterfaceStyle to determine -// whether a light or dark palette is appropriate, defaulting to darkPalette. 
-// Example (internal usage): -// -// palette := h.detectPalette() // Returns darkPalette or lightPalette func (h *ColorizedHandler) detectPalette() Palette { - // Check TERM_BACKGROUND (e.g., iTerm2) - if bg, ok := os.LookupEnv("TERM_BACKGROUND"); ok { - if bg == "light" { - return lightPalette // Use light palette for light background + // If colors are explicitly disabled, return noColorPalette + if h.noColor { + return noColorPalette + } + + // Check NO_COLOR environment variable (standard: https://no-color.org/) + if os.Getenv("NO_COLOR") != "" { + return noColorPalette + } + + term := os.Getenv("TERM") + if term == "dumb" || term == "" { + if runtime.GOOS == "windows" && !h.isWindowsTerminalAnsiSupported() { + return noColorPalette } - return darkPalette // Use dark palette otherwise } - // Check COLORFGBG (traditional xterm) - if fgBg, ok := os.LookupEnv("COLORFGBG"); ok { + // First, try to detect background color + isDarkBackground := true // Default to dark + + // Check for common dark/light environment variables + if style, ok := os.LookupEnv("AppleInterfaceStyle"); ok && strings.EqualFold(style, "dark") { + isDarkBackground = true + } else if style, ok := os.LookupEnv("APPEARANCE"); ok && strings.EqualFold(style, "light") { + isDarkBackground = false + } else if bg := os.Getenv("TERM_BACKGROUND"); bg != "" { + isDarkBackground = strings.ToLower(bg) != "light" + } else if fgBg := os.Getenv("COLORFGBG"); fgBg != "" { + // COLORFGBG format: "foreground;background" or "foreground;background;unused" parts := strings.Split(fgBg, ";") if len(parts) >= 2 { - bg := parts[len(parts)-1] // Last part (some terminals add more fields) - if bg == "7" || bg == "15" || bg == "0;15" { // Handle variations - return lightPalette // Use light palette for light background + bg := parts[len(parts)-1] + bgInt, err := strconv.Atoi(bg) + if err == nil { + // According to XTerm documentation: + // 0-7: dark colors, 15: white, 8-14: bright colors + // Typically, 0=black (dark), 7=light gray (light), 15=white (light) + isDarkBackground = (bgInt >= 0 && bgInt <= 6) || (bgInt >= 8 && bgInt <= 14) } } } - // Check macOS dark mode - if style, ok := os.LookupEnv("AppleInterfaceStyle"); ok && strings.EqualFold(style, "dark") { - return darkPalette // Use dark palette for macOS dark mode + if isDarkBackground { + return h.applyIntensity(darkPalette) + } + return h.applyIntensity(lightPalette) +} + +// applyIntensity applies the intensity setting to a base palette +func (h *ColorizedHandler) applyIntensity(basePalette Palette) Palette { + switch h.intensity { + case IntensityNormal: + return basePalette + case IntensityBright: + return brightPalette + case IntensityPastel: + return pastelPalette + case IntensityVibrant: + return vibrantPalette + default: + return basePalette + } +} + +// isWindowsTerminalAnsiSupported checks if the Windows terminal supports ANSI colors +func (h *ColorizedHandler) isWindowsTerminalAnsiSupported() bool { + if runtime.GOOS != "windows" { + return true + } + + if os.Getenv("WT_SESSION") != "" { + return true + } + + if os.Getenv("ConEmuANSI") == "ON" { + return true + } + + if os.Getenv("ANSICON") != "" { + return true } - // Default: dark (conservative choice for terminals) - return darkPalette + return false } diff --git a/vendor/github.com/olekukonko/ll/lh/dedup.go b/vendor/github.com/olekukonko/ll/lh/dedup.go new file mode 100644 index 00000000..41808560 --- /dev/null +++ b/vendor/github.com/olekukonko/ll/lh/dedup.go @@ -0,0 +1,163 @@ +package lh + +import ( + "sync" + "time" + + 
"github.com/olekukonko/ll/lx" +) + +// Dedup is a log handler that suppresses duplicate entries within a TTL window. +// It wraps another handler (H) and filters out repeated log entries that match +// within the deduplication period. +type Dedup[H lx.Handler] struct { + next H + + ttl time.Duration + cleanupEvery time.Duration + keyFn func(*lx.Entry) uint64 + maxKeys int + + // shards reduce lock contention by partitioning the key space + shards [32]dedupShard + + done chan struct{} + wg sync.WaitGroup + once sync.Once +} + +type dedupShard struct { + mu sync.Mutex + seen map[uint64]int64 +} + +// DedupOpt configures a Dedup handler. +type DedupOpt[H lx.Handler] func(*Dedup[H]) + +// WithDedupKeyFunc customizes how deduplication keys are generated. +func WithDedupKeyFunc[H lx.Handler](fn func(*lx.Entry) uint64) DedupOpt[H] { + return func(d *Dedup[H]) { d.keyFn = fn } +} + +// WithDedupCleanupInterval sets how often expired deduplication keys are purged. +func WithDedupCleanupInterval[H lx.Handler](every time.Duration) DedupOpt[H] { + return func(d *Dedup[H]) { + if every > 0 { + d.cleanupEvery = every + } + } +} + +// WithDedupMaxKeys sets a soft limit on tracked deduplication keys. +func WithDedupMaxKeys[H lx.Handler](max int) DedupOpt[H] { + return func(d *Dedup[H]) { + if max > 0 { + d.maxKeys = max + } + } +} + +// NewDedup creates a deduplicating handler wrapper. +func NewDedup[H lx.Handler](next H, ttl time.Duration, opts ...DedupOpt[H]) *Dedup[H] { + if ttl <= 0 { + ttl = 2 * time.Second + } + + d := &Dedup[H]{ + next: next, + ttl: ttl, + cleanupEvery: time.Minute, + keyFn: defaultDedupKey, + done: make(chan struct{}), + } + + // Initialize shards + for i := 0; i < len(d.shards); i++ { + d.shards[i].seen = make(map[uint64]int64, 64) + } + + for _, opt := range opts { + opt(d) + } + + d.wg.Add(1) + go d.cleanupLoop() + + return d +} + +// Handle processes a log entry, suppressing duplicates within the TTL window. +func (d *Dedup[H]) Handle(e *lx.Entry) error { + now := time.Now().UnixNano() + key := d.keyFn(e) + + // Select shard based on key hash + shardIdx := key % uint64(len(d.shards)) + shard := &d.shards[shardIdx] + + shard.mu.Lock() + exp, ok := shard.seen[key] + if ok && now < exp { + shard.mu.Unlock() + return nil + } + + // Basic guard against unbounded growth per shard + // Using strict limits per shard avoids global atomic counters + limitPerShard := d.maxKeys / len(d.shards) + if d.maxKeys > 0 && len(shard.seen) >= limitPerShard { + // Opportunistic cleanup of current shard + d.cleanupShard(shard, now) + } + + shard.seen[key] = now + d.ttl.Nanoseconds() + shard.mu.Unlock() + + return d.next.Handle(e) +} + +// Close stops the cleanup goroutine and closes the underlying handler. +func (d *Dedup[H]) Close() error { + var err error + d.once.Do(func() { + close(d.done) + d.wg.Wait() + + if c, ok := any(d.next).(interface{ Close() error }); ok { + err = c.Close() + } + }) + return err +} + +// cleanupLoop runs periodically to purge expired deduplication keys. +func (d *Dedup[H]) cleanupLoop() { + defer d.wg.Done() + + t := time.NewTicker(d.cleanupEvery) + defer t.Stop() + + for { + select { + case <-t.C: + now := time.Now().UnixNano() + // Cleanup all shards sequentially to avoid massive CPU spike + for i := 0; i < len(d.shards); i++ { + d.shards[i].mu.Lock() + d.cleanupShard(&d.shards[i], now) + d.shards[i].mu.Unlock() + } + case <-d.done: + return + } + } +} + +// cleanupShard removes expired keys from a specific shard. 
+func (d *Dedup[H]) cleanupShard(shard *dedupShard, now int64) { + for k, exp := range shard.seen { + if now > exp { + delete(shard.seen, k) + } + } +} diff --git a/vendor/github.com/olekukonko/ll/lh/json.go b/vendor/github.com/olekukonko/ll/lh/json.go index c40576d6..6bed3461 100644 --- a/vendor/github.com/olekukonko/ll/lh/json.go +++ b/vendor/github.com/olekukonko/ll/lh/json.go @@ -1,26 +1,34 @@ package lh import ( + "bytes" "encoding/json" "fmt" - "github.com/olekukonko/ll/lx" "io" "os" "strings" "sync" "time" + + "github.com/olekukonko/ll/lx" ) +var jsonBufPool = sync.Pool{ + New: func() any { + return new(bytes.Buffer) + }, +} + // JSONHandler is a handler that outputs log entries as JSON objects. // It formats log entries with timestamp, level, message, namespace, fields, and optional // stack traces or dump segments, writing the result to the provided writer. // Thread-safe with a mutex to protect concurrent writes. type JSONHandler struct { - writer io.Writer // Destination for JSON output - timeFmt string // Format for timestamp (default: RFC3339Nano) - pretty bool // Enable pretty printing with indentation if true - fieldMap map[string]string // Optional mapping for field names (not used in provided code) - mu sync.Mutex // Protects concurrent access to writer + writer io.Writer // Destination for JSON output + timeFmt string // Format for timestamp (default: RFC3339Nano) + pretty bool // Enable pretty printing with indentation if true + //fieldMap map[string]string // Optional mapping for field names (not used in provided code) + mu sync.Mutex // Protects concurrent access to writer } // JsonOutput represents the JSON structure for a log entry. @@ -84,6 +92,13 @@ func (h *JSONHandler) Handle(e *lx.Entry) error { return h.handleRegular(e) } +// Output sets the Writer destination for JSONHandler's output, ensuring thread safety with a mutex lock. +func (h *JSONHandler) Output(w io.Writer) { + h.mu.Lock() + defer h.mu.Unlock() + h.writer = w +} + // handleRegular handles standard log entries (non-dump). // It converts the entry to a JsonOutput struct and encodes it as JSON, // applying pretty printing if enabled. Logs encoding errors to stderr for debugging. 
@@ -92,6 +107,12 @@ func (h *JSONHandler) Handle(e *lx.Entry) error { // // h.handleRegular(&lx.Entry{Message: "test", Level: lx.LevelInfo}) // Writes JSON object func (h *JSONHandler) handleRegular(e *lx.Entry) error { + // Convert ordered fields to map for JSON output + fieldsMap := make(map[string]interface{}, len(e.Fields)) + for _, pair := range e.Fields { + fieldsMap[pair.Key] = pair.Value + } + // Create JSON output structure entry := JsonOutput{ Time: e.Timestamp.Format(h.timeFmt), // Format timestamp @@ -100,23 +121,32 @@ func (h *JSONHandler) handleRegular(e *lx.Entry) error { Msg: e.Message, // Set message Namespace: e.Namespace, // Set namespace Dump: nil, // No dump for regular entries - Fields: e.Fields, // Copy fields + Fields: fieldsMap, // Copy fields as map Stack: e.Stack, // Include stack trace if present } - // Create JSON encoder - enc := json.NewEncoder(h.writer) + + // Acquire buffer from pool to avoid allocation and reduce syscalls + buf := jsonBufPool.Get().(*bytes.Buffer) + buf.Reset() + defer jsonBufPool.Put(buf) + + // Create JSON encoder writing to buffer + enc := json.NewEncoder(buf) if h.pretty { // Enable indentation for pretty printing enc.SetIndent("", " ") } - // Log encoding attempt for debugging - fmt.Fprintf(os.Stderr, "Encoding JSON entry: %v\n", e.Message) - // Encode and write JSON + + // Encode JSON to buffer err := enc.Encode(entry) if err != nil { // Log encoding error for debugging fmt.Fprintf(os.Stderr, "JSON encode error: %v\n", err) + return err } + + // Write buffer to underlying writer in one go + _, err = h.writer.Write(buf.Bytes()) return err } @@ -156,15 +186,40 @@ func (h *JSONHandler) handleDump(e *lx.Entry) error { }) } - // Encode JSON output with dump segments - return json.NewEncoder(h.writer).Encode(JsonOutput{ + // Convert ordered fields to map for JSON output + fieldsMap := make(map[string]interface{}, len(e.Fields)) + for _, pair := range e.Fields { + fieldsMap[pair.Key] = pair.Value + } + + // Acquire buffer from pool + buf := jsonBufPool.Get().(*bytes.Buffer) + buf.Reset() + defer jsonBufPool.Put(buf) + + // Encode JSON output with dump segments to buffer + enc := json.NewEncoder(buf) + if h.pretty { + enc.SetIndent("", " ") + } + + err := enc.Encode(JsonOutput{ Time: e.Timestamp.Format(h.timeFmt), // Format timestamp Level: e.Level.String(), // Convert level to string Class: e.Class.String(), // Convert class to string Msg: "dumping segments", // Fixed message for dumps Namespace: e.Namespace, // Set namespace Dump: segments, // Include parsed segments - Fields: e.Fields, // Copy fields + Fields: fieldsMap, // Copy fields as map Stack: e.Stack, // Include stack trace if present }) + + if err != nil { + fmt.Fprintf(os.Stderr, "JSON dump encode error: %v\n", err) + return err + } + + // Write buffer to underlying writer + _, err = h.writer.Write(buf.Bytes()) + return err } diff --git a/vendor/github.com/olekukonko/ll/lh/lh.go b/vendor/github.com/olekukonko/ll/lh/lh.go new file mode 100644 index 00000000..5166c3b5 --- /dev/null +++ b/vendor/github.com/olekukonko/ll/lh/lh.go @@ -0,0 +1,67 @@ +package lh + +import ( + "bytes" + "fmt" + "sort" + "strings" + "sync" + + "github.com/cespare/xxhash/v2" + "github.com/olekukonko/ll/lx" +) + +// rightPad pads a string with spaces on the right to reach the specified length. +// Returns the original string if it's already at or exceeds the target length. +// Uses strings.Builder for efficient memory allocation. 
+func rightPad(str string, length int) string {
+	if len(str) >= length {
+		return str
+	}
+	var sb strings.Builder
+	sb.Grow(length)
+	sb.WriteString(str)
+	sb.WriteString(strings.Repeat(" ", length-len(str)))
+	return sb.String()
+}
+
+var dedupBufPool = sync.Pool{
+	New: func() any { return new(bytes.Buffer) },
+}
+
+// defaultDedupKey generates a deduplication key from the log level, message,
+// namespace, and fields. Uses xxHash (XXH64) for speed and good distribution.
+// Override with WithDedupKeyFunc to include additional data such as the caller.
+func defaultDedupKey(e *lx.Entry) uint64 {
+	h := xxhash.New()
+
+	_, _ = h.Write([]byte(e.Level.String()))
+	_, _ = h.Write([]byte{0})
+	_, _ = h.Write([]byte(e.Message))
+	_, _ = h.Write([]byte{0})
+	_, _ = h.Write([]byte(e.Namespace))
+	_, _ = h.Write([]byte{0})
+
+	if len(e.Fields) > 0 {
+		m := e.Fields.Map()
+		keys := make([]string, 0, len(m))
+		for k := range m {
+			keys = append(keys, k)
+		}
+		sort.Strings(keys)
+
+		buf := dedupBufPool.Get().(*bytes.Buffer)
+		buf.Reset()
+		defer dedupBufPool.Put(buf)
+
+		for _, k := range keys {
+			fmt.Fprint(buf, k)
+			buf.WriteByte('=')
+			fmt.Fprint(buf, m[k])
+			buf.WriteByte(0)
+		}
+		_, _ = h.Write(buf.Bytes())
+	}
+
+	return h.Sum64()
+}
diff --git a/vendor/github.com/olekukonko/ll/lh/memory.go b/vendor/github.com/olekukonko/ll/lh/memory.go
index e3bc9398..42fb8928 100644
--- a/vendor/github.com/olekukonko/ll/lh/memory.go
+++ b/vendor/github.com/olekukonko/ll/lh/memory.go
@@ -2,9 +2,10 @@ package lh
 
 import (
 	"fmt"
-	"github.com/olekukonko/ll/lx"
 	"io"
 	"sync"
+
+	"github.com/olekukonko/ll/lx"
 )
 
 // MemoryHandler is an lx.Handler that stores log entries in memory.
@@ -106,7 +107,7 @@ func (h *MemoryHandler) Dump(w io.Writer) error {
 	// Process each entry through the TextHandler
 	for _, entry := range h.entries {
 		if err := tempHandler.Handle(entry); err != nil {
-			return fmt.Errorf("failed to dump entry: %w", err) // Wrap and return write errors
+			return fmt.Errorf("failed to dump entry: %w", err) // Wrap and return write errors
 		}
 	}
 	return nil
diff --git a/vendor/github.com/olekukonko/ll/lh/multi.go b/vendor/github.com/olekukonko/ll/lh/multi.go
index 8a9d8846..e5eba6cf 100644
--- a/vendor/github.com/olekukonko/ll/lh/multi.go
+++ b/vendor/github.com/olekukonko/ll/lh/multi.go
@@ -3,6 +3,7 @@ package lh
 import (
 	"errors"
 	"fmt"
+
 	"github.com/olekukonko/ll/lx"
 )
 
@@ -30,6 +31,34 @@ func NewMultiHandler(h ...lx.Handler) *MultiHandler {
 	}
 }
 
+// Len returns the number of handlers in the MultiHandler.
+// Useful for monitoring or debugging handler composition.
+//
+// Example:
+//
+//	multi := &MultiHandler{}
+//	multi.Append(h1, h2, h3)
+//	count := multi.Len() // Returns 3
+func (h *MultiHandler) Len() int {
+	return len(h.Handlers)
+}
+
+// Append adds one or more handlers to the MultiHandler.
+// Handlers will receive log entries in the order they were appended.
+// This method modifies the MultiHandler in place.
+//
+// Example:
+//
+//	multi := &MultiHandler{}
+//	multi.Append(
+//		NewJSONHandler(os.Stdout),
+//		NewTextHandler(logFile),
+//	)
+//	// Now multi broadcasts to both stdout and file
+func (h *MultiHandler) Append(handlers ...lx.Handler) {
+	h.Handlers = append(h.Handlers, handlers...)
+}
+
 // Handle implements the Handler interface, calling Handle on each handler in sequence.
 // It collects any errors from handlers and combines them into a single error using errors.Join.
 // If no errors occur, it returns nil. Thread-safe if the underlying handlers are thread-safe.
@@ -43,7 +72,7 @@ func (h *MultiHandler) Handle(e *lx.Entry) error {
 		if err := handler.Handle(e); err != nil {
 			// fmt.Fprintf(os.Stderr, "MultiHandler error for handler %d: %v\n", i, err)
 			// Wrap error with handler index for context
-			errs = append(errs, fmt.Errorf("handler %d: %w", i, err))
+			errs = append(errs, fmt.Errorf("handler %d: %w", i, err))
 		}
 	}
 	// Combine errors into a single error, or return nil if no errors
diff --git a/vendor/github.com/olekukonko/ll/lh/pipe.go b/vendor/github.com/olekukonko/ll/lh/pipe.go
new file mode 100644
index 00000000..4594c6b7
--- /dev/null
+++ b/vendor/github.com/olekukonko/ll/lh/pipe.go
@@ -0,0 +1,76 @@
+package lh
+
+import (
+	"fmt"
+	"os"
+	"time"
+
+	"github.com/olekukonko/ll/lx"
+)
+
+// Pipe chains multiple handler wrappers together, applying them from left to right.
+// The wrappers are composed such that the first wrapper in the list becomes
+// the innermost layer, and the last wrapper becomes the outermost layer.
+//
+// Usage pattern: Pipe(baseHandler, wrapper1, wrapper2, wrapper3)
+// Result: wrapper3(wrapper2(wrapper1(baseHandler)))
+//
+// This enables clean, declarative construction of handler middleware chains.
+//
+// Example - building a processing pipeline:
+//
+//	base := lh.NewJSONHandler(os.Stdout)
+//	handler := lh.Pipe(base,
+//		lh.PipeDedup(2*time.Second), // 1. innermost: deduplicate right before the JSON handler
+//		lh.PipeBuffer(),             // 2. outermost: buffer entries before they reach Dedup
+//	)
+//	logger := ll.New("app").Enable().Handler(handler)
+//
+// In this example, logs flow: Buffered → Dedup → JSONHandler
+func Pipe(h lx.Handler, wraps ...lx.Wrap) lx.Handler {
+	for _, w := range wraps {
+		if w != nil {
+			h = w(h)
+		}
+	}
+	return h
+}
+
+// PipeDedup returns a wrapper that applies deduplication to the handler.
+func PipeDedup(ttl time.Duration, opts ...DedupOpt[lx.Handler]) lx.Wrap {
+	return func(next lx.Handler) lx.Handler {
+		return NewDedup(next, ttl, opts...)
+	}
+}
+
+// PipeBuffer returns a wrapper that applies buffering to the handler.
+func PipeBuffer(opts ...BufferingOpt) lx.Wrap {
+	return func(next lx.Handler) lx.Handler {
+		return NewBuffered(next, opts...)
+	}
+}
+
+// PipeRotate returns a wrapper that applies log rotation.
+// Ideally, the 'next' handler should be one that writes to a file (like TextHandler or JSONHandler).
+//
+// If the underlying handler does not implement lx.HandlerOutputter (cannot change output destination),
+// or if rotation initialization fails, this will log a warning to stderr and return the
+// original handler unmodified to prevent application crashes.
+func PipeRotate(maxSizeBytes int64, src RotateSource) lx.Wrap {
+	return func(next lx.Handler) lx.Handler {
+		// Attempt to cast to HandlerOutputter (Handler + Outputter interface)
+		h, ok := next.(lx.HandlerOutputter)
+		if !ok {
+			fmt.Fprintf(os.Stderr, "ll/lh: PipeRotate skipped - handler does not implement Output(io.Writer)\n")
+			return next
+		}
+
+		// Initialize the rotating handler
+		r, err := NewRotating(h, maxSizeBytes, src)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "ll/lh: PipeRotate initialization failed: %v\n", err)
+			return next
+		}
+		return r
+	}
+}
diff --git a/vendor/github.com/olekukonko/ll/lh/rotate.go b/vendor/github.com/olekukonko/ll/lh/rotate.go
new file mode 100644
index 00000000..68cff5ce
--- /dev/null
+++ b/vendor/github.com/olekukonko/ll/lh/rotate.go
@@ -0,0 +1,176 @@
+package lh
+
+import (
+	"io"
+	"sync"
+
+	"github.com/olekukonko/ll/lx"
+)
+
+// RotateSource defines the callbacks needed to implement log rotation.
+// It abstracts the destination lifecycle: opening, sizing, and rotating. +// +// Example for file rotation: +// +// src := lh.RotateSource{ +// Open: func() (io.WriteCloser, error) { +// return os.OpenFile("app.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644) +// }, +// Size: func() (int64, error) { +// if fi, err := os.Stat("app.log"); err == nil { +// return fi.Size(), nil +// } +// return 0, nil // File doesn't exist yet +// }, +// Rotate: func() error { +// // Rename current log before creating new one +// return os.Rename("app.log", "app.log."+time.Now().Format("20060102-150405")) +// }, +// } +type RotateSource struct { + // Open returns a fresh destination for log output. + // Called on initialization and after rotation. + Open func() (io.WriteCloser, error) + + // Size returns the current size in bytes of the active destination. + // Return an error if size cannot be determined (rotation will be skipped). + Size func() (int64, error) + + // Rotate performs cleanup/rotation actions before opening a new destination. + // For files: rename or move the current log. Optional for other destinations. + Rotate func() error +} + +// Rotating wraps a handler to rotate its output when maxSize is exceeded. +// The wrapped handler must implement both Handler and Outputter interfaces. +// Rotation is triggered on each Handle call if the current size >= maxSize. +// +// Example: +// +// handler := lx.NewJSONHandler(os.Stdout) +// src := lh.RotateSource{...} // see RotateSource example +// rotator, err := lh.NewRotating(handler, 10*1024*1024, src) // 10 MB +// logger := lx.NewLogger(rotator) +// logger.Info("This log may trigger rotation when file reaches 10MB") +type Rotating[H interface { + lx.Handler + lx.Outputter +}] struct { + mu sync.Mutex + maxSize int64 + src RotateSource + + out io.WriteCloser + handler H +} + +// NewRotating creates a rotating wrapper around handler. +// Handler's output will be replaced with destinations from src.Open. +// If maxSizeBytes <= 0, rotation is disabled. +// src.Rotate may be nil if no pre-open actions are needed. +// +// Example: +// +// // Create a JSON handler that rotates at 5MB +// handler := lx.NewJSONHandler(os.Stdout) +// rotator, err := lh.NewRotating(handler, 5*1024*1024, src) +// if err != nil { +// log.Fatal(err) +// } +// // Use rotator as your logger's handler +// logger := lx.NewLogger(rotator) +func NewRotating[H interface { + lx.Handler + lx.Outputter +}](handler H, maxSizeBytes int64, src RotateSource) (*Rotating[H], error) { + r := &Rotating[H]{ + maxSize: maxSizeBytes, + src: src, + handler: handler, + } + if err := r.reopenLocked(); err != nil { + return nil, err + } + return r, nil +} + +// Handle processes a log entry, rotating output if necessary. +// Thread-safe: can be called concurrently. +// +// Example: +// +// rotator.Handle(&lx.Entry{ +// Level: lx.InfoLevel, +// Message: "Processing request", +// Namespace: "api", +// }) +func (r *Rotating[H]) Handle(e *lx.Entry) error { + r.mu.Lock() + defer r.mu.Unlock() + + if err := r.rotateIfNeededLocked(); err != nil { + return err + } + return r.handler.Handle(e) +} + +// Close releases resources (closes the current output). +// Safe to call multiple times. +// +// Example: +// +// defer rotator.Close() +func (r *Rotating[H]) Close() error { + r.mu.Lock() + defer r.mu.Unlock() + + if r.out != nil { + return r.out.Close() + } + return nil +} + +// rotateIfNeededLocked checks current size and rotates if maxSize exceeded. +// Called with mu already held. 
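+// The sequence is: read the current size, close the old output, run the Rotate hook, then reopen a fresh destination.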
+func (r *Rotating[H]) rotateIfNeededLocked() error { + if r.maxSize <= 0 || r.src.Size == nil || r.src.Open == nil { + return nil + } + + size, err := r.src.Size() + if err != nil { + // Size unknown - skip rotation + return nil + } + if size < r.maxSize { + return nil + } + + // Close current output + if r.out != nil { + _ = r.out.Close() + r.out = nil + } + + // Run rotation hook (rename/move/commit) + if r.src.Rotate != nil { + if err := r.src.Rotate(); err != nil { + return err + } + } + + // Open fresh output + return r.reopenLocked() +} + +// reopenLocked opens a new destination and sets it on the handler. +// Called with mu already held. +func (r *Rotating[H]) reopenLocked() error { + out, err := r.src.Open() + if err != nil { + return err + } + r.out = out + r.handler.Output(out) + return nil +} diff --git a/vendor/github.com/olekukonko/ll/lh/slog.go b/vendor/github.com/olekukonko/ll/lh/slog.go index 77584202..e457f2ec 100644 --- a/vendor/github.com/olekukonko/ll/lh/slog.go +++ b/vendor/github.com/olekukonko/ll/lh/slog.go @@ -2,8 +2,9 @@ package lh import ( "context" - "github.com/olekukonko/ll/lx" "log/slog" + + "github.com/olekukonko/ll/lx" ) // SlogHandler adapts a slog.Handler to implement lx.Handler. @@ -27,6 +28,15 @@ func NewSlogHandler(h slog.Handler) *SlogHandler { return &SlogHandler{slogHandler: h} } +// Handle converts an lx.Entry to slog.Record and delegates to the slog.Handler. +// It maps the entry's fields, level, namespace, class, and stack trace to slog attributes, +// passing the resulting record to the underlying slog.Handler. +// Returns an error if the slog.Handler fails to process the record. +// Thread-safe if the underlying slog.Handler is thread-safe. +// Example: +// +// handler.Handle(&lx.Entry{Message: "test", Level: lx.LevelInfo}) // Processes as slog record +// // Handle converts an lx.Entry to slog.Record and delegates to the slog.Handler. // It maps the entry's fields, level, namespace, class, and stack trace to slog attributes, // passing the resulting record to the underlying slog.Handler. @@ -58,9 +68,9 @@ func (h *SlogHandler) Handle(e *lx.Entry) error { record.AddAttrs(slog.String("stack", string(e.Stack))) // Add stack trace as string } - // Add custom fields - for k, v := range e.Fields { - record.AddAttrs(slog.Any(k, v)) // Add each field as a key-value attribute + // Add custom fields in order (preserving insertion order) + for _, pair := range e.Fields { + record.AddAttrs(slog.Any(pair.Key, pair.Value)) // Add each field as a key-value attribute } // Handle the record with the underlying slog.Handler @@ -81,7 +91,7 @@ func toSlogLevel(level lx.LevelType) slog.Level { return slog.LevelInfo case lx.LevelWarn: return slog.LevelWarn - case lx.LevelError: + case lx.LevelError, lx.LevelFatal: return slog.LevelError default: return slog.LevelInfo // Default for unknown levels diff --git a/vendor/github.com/olekukonko/ll/lh/text.go b/vendor/github.com/olekukonko/ll/lh/text.go index 0b88cf4b..2bd59ff7 100644 --- a/vendor/github.com/olekukonko/ll/lh/text.go +++ b/vendor/github.com/olekukonko/ll/lh/text.go @@ -1,9 +1,9 @@ package lh import ( + "bytes" "fmt" "io" - "sort" "strings" "sync" "time" @@ -11,12 +11,40 @@ import ( "github.com/olekukonko/ll/lx" ) +type TextOption func(*TextHandler) + +var textBufPool = sync.Pool{ + New: func() any { + return new(bytes.Buffer) + }, +} + +// WithTextTimeFormat enables timestamp display and optionally sets a custom time format. 
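+// The format string uses Go's reference-time layout (for example "2006-01-02 15:04:05").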
+// It configures the TextHandler to include temporal information in each log entry, +// allowing for precise tracking of when log events occur. +// If the format string is empty, it defaults to time.RFC3339. +func WithTextTimeFormat(format string) TextOption { + return func(t *TextHandler) { + t.Timestamped(true, format) + } +} + +// WithTextShowTime enables or disables timestamp display in log entries. +// This option provides direct control over the visibility of the time prefix +// without altering the underlying time format configured in the handler. +// Setting show to true will prepend timestamps to all subsequent regular log outputs. +func WithTextShowTime(show bool) TextOption { + return func(t *TextHandler) { + t.showTime = show + } +} + // TextHandler is a handler that outputs log entries as plain text. // It formats log entries with namespace, level, message, fields, and optional stack traces, // writing the result to the provided writer. // Thread-safe if the underlying writer is thread-safe. type TextHandler struct { - w io.Writer // Destination for formatted log output + writer io.Writer // Destination for formatted log output showTime bool // Whether to display timestamps timeFormat string // Format for timestamps (defaults to time.RFC3339) mu sync.Mutex @@ -29,12 +57,18 @@ type TextHandler struct { // handler := NewTextHandler(os.Stdout) // logger := ll.New("app").Enable().Handler(handler) // logger.Info("Test") // Output: [app] INFO: Test -func NewTextHandler(w io.Writer) *TextHandler { - return &TextHandler{ - w: w, +func NewTextHandler(w io.Writer, opts ...TextOption) *TextHandler { + t := &TextHandler{ + writer: w, showTime: false, timeFormat: time.RFC3339, } + + for _, opt := range opts { + opt(t) + } + + return t } // Timestamped enables or disables timestamp display and optionally sets a custom time format. @@ -50,6 +84,14 @@ func (h *TextHandler) Timestamped(enable bool, format ...string) { } } +// Output sets a new writer for the TextHandler. +// Thread-safe - safe for concurrent use. +func (h *TextHandler) Output(w io.Writer) { + h.mu.Lock() + defer h.mu.Unlock() + h.writer = w +} + // Handle processes a log entry and writes it as plain text. // It delegates to specialized methods based on the entry's class (Dump, Raw, or regular). // Returns an error if writing to the underlying writer fails. 
@@ -61,18 +103,15 @@ func (h *TextHandler) Handle(e *lx.Entry) error { h.mu.Lock() defer h.mu.Unlock() - // Special handling for dump output if e.Class == lx.ClassDump { return h.handleDumpOutput(e) } - // Raw entries are written directly without formatting if e.Class == lx.ClassRaw { - _, err := h.w.Write([]byte(e.Message)) + _, err := h.writer.Write([]byte(e.Message)) return err } - // Handle standard log entries return h.handleRegularOutput(e) } @@ -84,81 +123,68 @@ func (h *TextHandler) Handle(e *lx.Entry) error { // // h.handleRegularOutput(&lx.Entry{Message: "test", Level: lx.LevelInfo}) // Writes "INFO: test" func (h *TextHandler) handleRegularOutput(e *lx.Entry) error { - var builder strings.Builder // Buffer for building formatted output + buf := textBufPool.Get().(*bytes.Buffer) + buf.Reset() + defer textBufPool.Put(buf) - // Add timestamp if enabled if h.showTime { - builder.WriteString(e.Timestamp.Format(h.timeFormat)) - builder.WriteString(lx.Space) + buf.WriteString(e.Timestamp.Format(h.timeFormat)) + buf.WriteString(lx.Space) } - // Format namespace based on style switch e.Style { case lx.NestedPath: if e.Namespace != "" { - // Split namespace into parts and format as [parent]→[child] parts := strings.Split(e.Namespace, lx.Slash) for i, part := range parts { - builder.WriteString(lx.LeftBracket) - builder.WriteString(part) - builder.WriteString(lx.RightBracket) + buf.WriteString(lx.LeftBracket) + buf.WriteString(part) + buf.WriteString(lx.RightBracket) if i < len(parts)-1 { - builder.WriteString(lx.Arrow) + buf.WriteString(lx.Arrow) } } - builder.WriteString(lx.Colon) - builder.WriteString(lx.Space) + buf.WriteString(lx.Colon) + buf.WriteString(lx.Space) } default: // FlatPath if e.Namespace != "" { - // Format namespace as [parent/child] - builder.WriteString(lx.LeftBracket) - builder.WriteString(e.Namespace) - builder.WriteString(lx.RightBracket) - builder.WriteString(lx.Space) + buf.WriteString(lx.LeftBracket) + buf.WriteString(e.Namespace) + buf.WriteString(lx.RightBracket) + buf.WriteString(lx.Space) } } - // Add level and message - builder.WriteString(e.Level.String()) - builder.WriteString(lx.Colon) - builder.WriteString(lx.Space) - builder.WriteString(e.Message) + buf.WriteString(e.Level.Name(e.Class)) + // buf.WriteString(lx.Space) + buf.WriteString(lx.Colon) + buf.WriteString(lx.Space) + buf.WriteString(e.Message) - // Add fields in sorted order if len(e.Fields) > 0 { - var keys []string - for k := range e.Fields { - keys = append(keys, k) - } - // Sort keys for consistent output - sort.Strings(keys) - builder.WriteString(lx.Space) - builder.WriteString(lx.LeftBracket) - for i, k := range keys { + buf.WriteString(lx.Space) + buf.WriteString(lx.LeftBracket) + for i, pair := range e.Fields { if i > 0 { - builder.WriteString(lx.Space) + buf.WriteString(lx.Space) } - // Format field as key=value - builder.WriteString(k) - builder.WriteString("=") - builder.WriteString(fmt.Sprint(e.Fields[k])) + buf.WriteString(pair.Key) + buf.WriteString("=") + fmt.Fprint(buf, pair.Value) } - builder.WriteString(lx.RightBracket) + buf.WriteString(lx.RightBracket) } - // Add stack trace if present if len(e.Stack) > 0 { - h.formatStack(&builder, e.Stack) + h.formatStack(buf, e.Stack) } - // Append newline for non-None levels if e.Level != lx.LevelNone { - builder.WriteString(lx.Newline) + buf.WriteString(lx.Newline) } - // Write formatted output to writer - _, err := h.w.Write([]byte(builder.String())) + _, err := h.writer.Write(buf.Bytes()) return err } @@ -169,22 +195,20 @@ func (h 
*TextHandler) handleRegularOutput(e *lx.Entry) error { // // h.handleDumpOutput(&lx.Entry{Class: lx.ClassDump, Message: "pos 00 hex: 61"}) // Writes "---- BEGIN DUMP ----\npos 00 hex: 61\n---- END DUMP ----\n" func (h *TextHandler) handleDumpOutput(e *lx.Entry) error { - // For text handler, we just add a newline before dump output - var builder strings.Builder // Buffer for building formatted output + buf := textBufPool.Get().(*bytes.Buffer) + buf.Reset() + defer textBufPool.Put(buf) - // Add timestamp if enabled if h.showTime { - builder.WriteString(e.Timestamp.Format(h.timeFormat)) - builder.WriteString(lx.Newline) + buf.WriteString(e.Timestamp.Format(h.timeFormat)) + buf.WriteString(lx.Newline) } - // Add separator lines and dump content - builder.WriteString("---- BEGIN DUMP ----\n") - builder.WriteString(e.Message) - builder.WriteString("---- END DUMP ----\n") + buf.WriteString("---- BEGIN DUMP ----\n") + buf.WriteString(e.Message) + buf.WriteString("---- END DUMP ----\n\n") - // Write formatted output to writer - _, err := h.w.Write([]byte(builder.String())) + _, err := h.writer.Write(buf.Bytes()) return err } @@ -194,21 +218,18 @@ func (h *TextHandler) handleDumpOutput(e *lx.Entry) error { // Example (internal usage): // // h.formatStack(&builder, []byte("goroutine 1 [running]:\nmain.main()\n\tmain.go:10")) // Appends formatted stack trace -func (h *TextHandler) formatStack(b *strings.Builder, stack []byte) { +func (h *TextHandler) formatStack(b *bytes.Buffer, stack []byte) { lines := strings.Split(string(stack), "\n") if len(lines) == 0 { return } - // Start stack trace section b.WriteString("\n[stack]\n") - // First line: goroutine b.WriteString(" ┌─ ") b.WriteString(lines[0]) b.WriteString("\n") - // Iterate through remaining lines for i := 1; i < len(lines); i++ { line := strings.TrimSpace(lines[i]) if line == "" { @@ -216,16 +237,13 @@ func (h *TextHandler) formatStack(b *strings.Builder, stack []byte) { } if strings.Contains(line, ".go") { - // File path lines get extra indent b.WriteString(" ├ ") } else { - // Function names b.WriteString(" │ ") } b.WriteString(line) b.WriteString("\n") } - // End stack trace section b.WriteString(" └\n") } diff --git a/vendor/github.com/olekukonko/ll/ll.go b/vendor/github.com/olekukonko/ll/ll.go index 7510b89a..4dfafdd9 100644 --- a/vendor/github.com/olekukonko/ll/ll.go +++ b/vendor/github.com/olekukonko/ll/ll.go @@ -1,7 +1,6 @@ package ll import ( - "bufio" "encoding/binary" "encoding/json" "fmt" @@ -24,21 +23,25 @@ import ( // log level, namespaces, context fields, output style, handler, middleware, and formatting. // It is thread-safe, using a read-write mutex to protect concurrent access to its fields. type Logger struct { - mu sync.RWMutex // Guards concurrent access to fields - enabled bool // Determines if logging is enabled - suspend atomic.Bool // uses suspend path for most actions eg. 
skipping namespace checks - level lx.LevelType // Minimum log level (e.g., Debug, Info, Warn, Error) - namespaces *lx.Namespace // Manages namespace enable/disable states - currentPath string // Current namespace path (e.g., "parent/child") - context map[string]interface{} // Contextual fields included in all logs - style lx.StyleType // Namespace formatting style (FlatPath or NestedPath) - handler lx.Handler // Output handler for logs (e.g., text, JSON) - middleware []Middleware // Middleware functions to process log entries - prefix string // Prefix prepended to log messages - indent int // Number of double spaces for message indentation - stackBufferSize int // Buffer size for capturing stack traces - separator string // Separator for namespace paths (e.g., "/") - entries atomic.Int64 // Tracks total log entries sent to handler + mu sync.RWMutex // Guards concurrent access to fields + enabled bool // Determines if logging is enabled + suspend atomic.Bool // uses suspend path for most actions eg. skipping namespace checks + level lx.LevelType // Minimum log level (e.g., Debug, Info, Warn, Error) + atomicLevel int32 // Shadow copy of level for lock-free checks + namespaces *lx.Namespace // Manages namespace enable/disable states + currentPath string // Current namespace path (e.g., "parent/child") + context lx.Fields // Contextual fields included in all logs + style lx.StyleType // Namespace formatting style (FlatPath or NestedPath) + handler lx.Handler // Output handler for logs (e.g., text, JSON) + middleware []Middleware // Middleware functions to process log entries + prefix string // Prefix prepended to log messages + indent int // Number of double spaces for message indentation + stackBufferSize int // Buffer size for capturing stack traces + separator string // Separator for namespace paths (e.g., "/") + entries atomic.Int64 // Tracks total log entries sent to handler + fatalExits bool + fatalStack bool + labels atomic.Pointer[[]string] } // New creates a new Logger with the given namespace and optional configurations. @@ -53,9 +56,10 @@ func New(namespace string, opts ...Option) *Logger { logger := &Logger{ enabled: lx.DefaultEnabled, // Defaults to disabled (false) level: lx.LevelDebug, // Default minimum log level + atomicLevel: int32(lx.LevelDebug), // Initialize atomic level namespaces: defaultStore, // Shared namespace store currentPath: namespace, // Initial namespace path - context: make(map[string]interface{}), // Empty context for fields + context: make(lx.Fields, 0, 10), // Empty context for fields style: lx.FlatPath, // Default namespace style ([parent/child]) handler: lh.NewTextHandler(os.Stdout), // Default text output to stdout middleware: make([]Middleware, 0), // Empty middleware chain @@ -71,22 +75,58 @@ func New(namespace string, opts ...Option) *Logger { return logger } -// AddContext adds a key-value pair to the logger's context, modifying it directly. -// Unlike Context, it mutates the existing context. It is thread-safe using a write lock. +// Apply applies one or more functional options to the default/global logger. +// Useful for late configuration (e.g., after migration, attach VictoriaLogs handler, +// set level, add middleware, etc.) without changing existing New() calls. 
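+// Options are applied under the logger's write lock; nil options are skipped.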
+// // Example: // -// logger := New("app").Enable() -// logger.AddContext("user", "alice") -// logger.Info("Action") // Output: [app] INFO: Action [user=alice] -func (l *Logger) AddContext(key string, value interface{}) *Logger { +// // In main() or init(), after setting up handler +// ll.Apply( +// ll.Handler(vlBatched), +// ll.Level(ll.LevelInfo), +// ll.Use(rateLimiterMiddleware), +// ) +// +// Returns the default logger for chaining (if needed). +func (l *Logger) Apply(opts ...Option) *Logger { + l.mu.Lock() + defer l.mu.Unlock() + for _, opt := range opts { + if opt != nil { + opt(l) + } + } + return l +} + +// AddContext adds one or more key-value pairs to the logger's persistent context. +// These fields will be included in **every** subsequent log message from this logger +// (and its child namespace loggers). +// +// It supports variadic key-value pairs (string key, any value). +// Non-string keys or uneven number of arguments will be safely ignored/logged. +// +// Returns the logger for chaining. +// +// Examples: +// +// logger.AddContext("user", "alice", "env", "prod") +// logger.AddContext("request_id", reqID, "trace_id", traceID) +// logger.AddContext("service", "payment") // single pair +func (l *Logger) AddContext(pairs ...any) *Logger { l.mu.Lock() defer l.mu.Unlock() - // Initialize context map if nil if l.context == nil { - l.context = make(map[string]interface{}) + l.context = make(lx.Fields, 0, len(pairs)/2) + } + + for i := 0; i < len(pairs)-1; i += 2 { + if key, ok := pairs[i].(string); ok { + l.context = append(l.context, lx.Field{Key: key, Value: pairs[i+1]}) + } } - l.context[key] = value return l } @@ -144,18 +184,19 @@ func (l *Logger) Clone() *Logger { defer l.mu.RUnlock() return &Logger{ - enabled: l.enabled, // Copy enablement state - level: l.level, // Copy log level - namespaces: l.namespaces, // Share namespace store - currentPath: l.currentPath, // Copy namespace path - context: make(map[string]interface{}), // Fresh context map - style: l.style, // Copy namespace style - handler: l.handler, // Copy output handler - middleware: l.middleware, // Copy middleware chain - prefix: l.prefix, // Copy message prefix - indent: l.indent, // Copy indentation level - stackBufferSize: l.stackBufferSize, // Copy stack trace buffer size - separator: l.separator, // Default separator ("/") + enabled: l.enabled, // Copy enablement state + level: l.level, // Copy log level + atomicLevel: l.atomicLevel, // Copy atomic level + namespaces: l.namespaces, // Share namespace store + currentPath: l.currentPath, // Copy namespace path + context: make(lx.Fields, 0, 10), // Fresh context map + style: l.style, // Copy namespace style + handler: l.handler, // Copy output handler + middleware: l.middleware, // Copy middleware chain + prefix: l.prefix, // Copy message prefix + indent: l.indent, // Copy indentation level + stackBufferSize: l.stackBufferSize, // Copy stack trace buffer size + separator: l.separator, // Default separator ("/") suspend: l.suspend, } } @@ -176,9 +217,10 @@ func (l *Logger) Context(fields map[string]interface{}) *Logger { newLogger := &Logger{ enabled: l.enabled, level: l.level, + atomicLevel: l.atomicLevel, namespaces: l.namespaces, currentPath: l.currentPath, - context: make(map[string]interface{}), + context: make(lx.Fields, 0, len(l.context)+len(fields)), style: l.style, handler: l.handler, middleware: l.middleware, @@ -187,37 +229,21 @@ func (l *Logger) Context(fields map[string]interface{}) *Logger { stackBufferSize: l.stackBufferSize, separator: 
l.separator, suspend: l.suspend, + fatalExits: l.fatalExits, + fatalStack: l.fatalStack, } - // Copy parent's context fields - for k, v := range l.context { - newLogger.context[k] = v - } + // Copy parent's context fields (in order) + newLogger.context = append(newLogger.context, l.context...) - // Add new fields + // Add new fields from map for k, v := range fields { - newLogger.context[k] = v + newLogger.context = append(newLogger.context, lx.Field{Key: k, Value: v}) } return newLogger } -// Dbg logs debug information, including the source file, line number, and expression -// value, capturing the calling line of code. It is useful for debugging without temporary -// print statements. -// Example: -// -// x := 42 -// logger.Dbg(x) // Output: [file.go:123] x = 42 -func (l *Logger) Dbg(values ...interface{}) { - // Skip logging if Info level is not enabled - if !l.shouldLog(lx.LevelInfo) { - return - } - - l.dbg(2, values...) -} - // Debug logs a message at Debug level, formatting it and delegating to the internal // log method. It is thread-safe. // Example: @@ -357,6 +383,7 @@ func (l *Logger) Output(values ...interface{}) { l.output(2, values...) } +// mark logs the caller's file and line number along with an optional custom name label for tracing execution flow. func (l *Logger) output(skip int, values ...interface{}) { if !l.shouldLog(lx.LevelInfo) { return @@ -382,7 +409,7 @@ func (l *Logger) output(skip int, values ...interface{}) { "error": err.Error(), }, " ", " ") } - l.log(lx.LevelInfo, lx.ClassText, header+string(b), nil, false) + l.log(lx.LevelInfo, lx.ClassJSON, header+string(b), nil, false) } } @@ -451,10 +478,11 @@ func (l *Logger) Err(errs ...error) { } l.mu.Lock() + defer l.mu.Unlock() - // Initialize context map if nil + // Initialize context slice if nil if l.context == nil { - l.context = make(map[string]interface{}) + l.context = make(lx.Fields, 0, 4) } // Collect non-nil errors and build log message @@ -475,15 +503,14 @@ func (l *Logger) Err(errs ...error) { if count > 0 { if count == 1 { // Store single error directly - l.context["error"] = nonNilErrors[0] + l.context = append(l.context, lx.Field{Key: "error", Value: nonNilErrors[0]}) } else { // Store slice of errors - l.context["error"] = nonNilErrors + l.context = append(l.context, lx.Field{Key: "error", Value: nonNilErrors}) } // Log concatenated error messages l.log(lx.LevelError, lx.ClassText, builder.String(), nil, false) } - l.mu.Unlock() } // Error logs a message at Error level, formatting it and delegating to the internal @@ -536,8 +563,10 @@ func (l *Logger) Fatal(args ...any) { os.Exit(1) } - l.log(lx.LevelError, lx.ClassText, cat.Space(args...), nil, false) - os.Exit(1) + l.log(lx.LevelFatal, lx.ClassText, cat.Space(args...), nil, l.fatalStack) + if l.fatalExits { + os.Exit(1) + } } // Fatalf logs a formatted message at Error level with a stack trace and exits the program. 
@@ -562,29 +591,27 @@ func (l *Logger) Fatalf(format string, args ...any) { // logger := New("app").Enable() // logger.Field(map[string]interface{}{"user": "alice"}).Info("Action") // Output: [app] INFO: Action [user=alice] func (l *Logger) Field(fields map[string]interface{}) *FieldBuilder { - fb := &FieldBuilder{logger: l, fields: make(map[string]interface{})} + fb := &FieldBuilder{logger: l, fields: make(lx.Fields, 0, len(fields))} - // check if suspended if l.suspend.Load() { return fb } - // Copy fields from input map to FieldBuilder + // Copy fields from input map to FieldBuilder (preserving map iteration order) for k, v := range fields { - fb.fields[k] = v + fb.fields = append(fb.fields, lx.Field{Key: k, Value: v}) } return fb } -// Fields starts a fluent chain for adding fields using variadic key-value pairs, -// creating a FieldBuilder. Non-string keys or uneven pairs add an error field. It is -// thread-safe via the FieldBuilder’s logger. +// Fields starts a fluent chain for adding fields using variadic key-value pairs. +// It creates a FieldBuilder to attach fields, handling non-string keys or uneven pairs by +// adding an error field. Thread-safe via the FieldBuilder's logger. // Example: // -// logger := New("app").Enable() // logger.Fields("user", "alice").Info("Action") // Output: [app] INFO: Action [user=alice] func (l *Logger) Fields(pairs ...any) *FieldBuilder { - fb := &FieldBuilder{logger: l, fields: make(map[string]interface{})} + fb := &FieldBuilder{logger: l, fields: make(lx.Fields, 0, len(pairs)/2)} if l.suspend.Load() { return fb @@ -593,15 +620,21 @@ func (l *Logger) Fields(pairs ...any) *FieldBuilder { // Process key-value pairs for i := 0; i < len(pairs)-1; i += 2 { if key, ok := pairs[i].(string); ok { - fb.fields[key] = pairs[i+1] + fb.fields = append(fb.fields, lx.Field{Key: key, Value: pairs[i+1]}) } else { // Log error for non-string keys - fb.fields["error"] = fmt.Errorf("non-string key in Fields: %v", pairs[i]) + fb.fields = append(fb.fields, lx.Field{ + Key: "error", + Value: fmt.Errorf("non-string key in Fields: %v", pairs[i]), + }) } } // Log error for uneven pairs if len(pairs)%2 != 0 { - fb.fields["error"] = fmt.Errorf("uneven key-value pairs in Fields: [%v]", pairs[len(pairs)-1]) + fb.fields = append(fb.fields, lx.Field{ + Key: "error", + Value: fmt.Errorf("uneven key-value pairs in Fields: [%v]", pairs[len(pairs)-1]), + }) } return fb } @@ -615,7 +648,12 @@ func (l *Logger) Fields(pairs ...any) *FieldBuilder { func (l *Logger) GetContext() map[string]interface{} { l.mu.RLock() defer l.mu.RUnlock() - return l.context + // Convert slice to map for backward compatibility + contextMap := make(map[string]interface{}, len(l.context)) + for _, pair := range l.context { + contextMap[pair.Key] = pair.Value + } + return contextMap } // GetHandler returns the logger's current handler for customization or inspection. @@ -747,6 +785,28 @@ func (l *Logger) Len() int64 { return l.entries.Load() } +// Labels temporarily attaches one or more label names to the logger for the next log entry. +// Labels are typically used for metrics, benchmarking, tracing, or categorizing logs in a structured way. +// +// The labels are stored atomically and intended to be short-lived, applying only to the next +// log operation (or until overwritten by a subsequent call to Labels). Multiple labels can +// be provided as separate string arguments. 
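+// Measure consumes the stored labels, pairing them positionally with the
+// functions it times, and clears them afterwards.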
+// +// Example usage: +// +// logger := New("app").Enable() +// +// // Add labels for a specific operation +// logger.Labels("load_users", "process_orders").Measure(func() { +// // ... perform work ... +// }, func() { +// // ... optional callback ... +// }) +func (l *Logger) Labels(names ...string) *Logger { + l.labels.Store(&names) // store temporarily + return l +} + // Level sets the minimum log level, ignoring messages below it. It is thread-safe using // a write lock and returns the logger for chaining. // Example: @@ -758,6 +818,7 @@ func (l *Logger) Level(level lx.LevelType) *Logger { l.mu.Lock() defer l.mu.Unlock() l.level = level + atomic.StoreInt32(&l.atomicLevel, int32(level)) return l } @@ -795,6 +856,7 @@ func (l *Logger) Mark(name ...string) { l.mark(2, name...) } +// mark logs the caller's file and line number along with an optional custom name label for tracing execution flow. func (l *Logger) mark(skip int, names ...string) { // Skip logging if Info level is not enabled if !l.shouldLog(lx.LevelInfo) { @@ -824,32 +886,6 @@ func (l *Logger) mark(skip int, names ...string) { l.log(lx.LevelInfo, lx.ClassRaw, out, nil, false) } -// Measure benchmarks function execution, logging the duration at Info level with a -// "duration" field. It is thread-safe via Fields and log methods. -// Example: -// -// logger := New("app").Enable() -// duration := logger.Measure(func() { time.Sleep(time.Millisecond) }) -// // Output: [app] INFO: function executed [duration=~1ms] -func (l *Logger) Measure(fns ...func()) time.Duration { - start := time.Now() - - for _, fn := range fns { - if fn != nil { - fn() - } - } - - duration := time.Since(start) - l.Fields( - "duration_ns", duration.Nanoseconds(), - "duration", duration.String(), - "duration_ms", fmt.Sprintf("%.3fms", float64(duration.Nanoseconds())/1e6), - ).Infof("execution completed") - - return duration -} - // Namespace creates a child logger with a sub-namespace appended to the current path, // inheriting the parent’s configuration but with an independent context. It is thread-safe // using a read lock. @@ -876,9 +912,10 @@ func (l *Logger) Namespace(name string) *Logger { return &Logger{ enabled: l.enabled, level: l.level, + atomicLevel: l.atomicLevel, namespaces: l.namespaces, currentPath: fullPath, - context: make(map[string]interface{}), + context: make(lx.Fields, 0, 10), style: l.style, handler: l.handler, middleware: l.middleware, @@ -978,7 +1015,7 @@ func (l *Logger) Panic(args ...any) { panic(msg) } - l.log(lx.LevelError, lx.ClassText, msg, nil, true) + l.log(lx.LevelFatal, lx.ClassText, msg, nil, true) panic(msg) } @@ -1198,6 +1235,17 @@ func (l *Logger) Timestamped(enable bool, format ...string) *Logger { return l } +// Toggle enables or disables the logger based on the provided boolean value and returns the updated logger instance. +func (l *Logger) Toggle(v bool) *Logger { + if v { + l.Resume() + return l.Enable() + } + + l.Suspend() + return l.Disable() +} + // Use adds a middleware function to process log entries before they are handled, returning // a Middleware handle for removal. Middleware returning a non-nil error stops the log. // It is thread-safe using a write lock. @@ -1260,58 +1308,6 @@ func (l *Logger) Warnf(format string, args ...any) { l.Warn(fmt.Sprintf(format, args...)) } -// dbg is an internal helper for Dbg, logging debug information with source file and line -// number, extracting the calling line of code. It is thread-safe via the log method. 
-// Example (internal usage): -// -// logger.Dbg(x) // Calls dbg(2, x) -func (l *Logger) dbg(skip int, values ...interface{}) { - for _, exp := range values { - // Get caller information (file, line) - _, file, line, ok := runtime.Caller(skip) - if !ok { - l.log(lx.LevelError, lx.ClassText, "Dbg: Unable to parse runtime caller", nil, false) - return - } - - // Open source file - f, err := os.Open(file) - if err != nil { - l.log(lx.LevelError, lx.ClassText, "Dbg: Unable to open expected file", nil, false) - return - } - - // Scan file to find the line - scanner := bufio.NewScanner(f) - scanner.Split(bufio.ScanLines) - var out string - i := 1 - for scanner.Scan() { - if i == line { - // Extract expression between parentheses - v := scanner.Text()[strings.Index(scanner.Text(), "(")+1 : len(scanner.Text())-strings.Index(reverseString(scanner.Text()), ")")-1] - // Format output with file, line, expression, and value - out = fmt.Sprintf("[%s:%d] %s = %+v", file[len(file)-strings.Index(reverseString(file), "/"):], line, v, exp) - break - } - i++ - } - if err := scanner.Err(); err != nil { - l.log(lx.LevelError, lx.ClassText, err.Error(), nil, false) - return - } - // Log based on value type - switch exp.(type) { - case error: - l.log(lx.LevelError, lx.ClassText, out, nil, false) - default: - l.log(lx.LevelInfo, lx.ClassText, out, nil, false) - } - - f.Close() - } -} - // joinPath joins a base path and a relative path using the logger's separator, handling // empty base or relative paths. It is used internally for namespace path construction. // Example (internal usage): @@ -1339,7 +1335,7 @@ func (l *Logger) joinPath(base, relative string) string { // // logger := New("app").Enable() // logger.Info("Test") // Calls log(lx.LevelInfo, "Test", nil, false) -func (l *Logger) log(level lx.LevelType, class lx.ClassType, msg string, fields map[string]interface{}, withStack bool) { +func (l *Logger) log(level lx.LevelType, class lx.ClassType, msg string, fields lx.Fields, withStack bool) { // Skip logging if level is not enabled if !l.shouldLog(level) { return @@ -1353,9 +1349,6 @@ func (l *Logger) log(level lx.LevelType, class lx.ClassType, msg string, fields buf := make([]byte, l.stackBufferSize) l.mu.RUnlock() n := runtime.Stack(buf, false) - if fields == nil { - fields = make(map[string]interface{}) - } stack = buf[:n] } @@ -1373,30 +1366,33 @@ func (l *Logger) log(level lx.LevelType, class lx.ClassType, msg string, fields builder.WriteString(msg) finalMsg := builder.String() - // Create log entry + // Create combined fields slice - THIS PRESERVES ORDER! + // Optimized slice allocation + var combinedFields lx.Fields + if len(l.context) == 0 { + combinedFields = fields + } else if len(fields) == 0 { + combinedFields = l.context + } else { + combinedFields = make(lx.Fields, 0, len(l.context)+len(fields)) + // Add context fields first (in order) + combinedFields = append(combinedFields, l.context...) + // Add immediate fields + combinedFields = append(combinedFields, fields...) + } + + // Create log entry with ordered fields entry := &lx.Entry{ Timestamp: time.Now(), Level: level, Message: finalMsg, Namespace: l.currentPath, - Fields: fields, + Fields: combinedFields, // Already ordered! 
Style: l.style, Class: class, Stack: stack, } - // Merge context fields, avoiding overwrites - if len(l.context) > 0 { - if entry.Fields == nil { - entry.Fields = make(map[string]interface{}) - } - for k, v := range l.context { - if _, exists := entry.Fields[k]; !exists { - entry.Fields[k] = v - } - } - } - // Apply middleware, stopping if any returns an error for _, mw := range l.middleware { if err := mw.fn.Handle(entry); err != nil { @@ -1431,8 +1427,8 @@ func (l *Logger) shouldLog(level lx.LevelType) bool { return false } - // Skip if log level is below minimum - if level > l.level { + // Atomic fast path: read level without lock + if level > lx.LevelType(atomic.LoadInt32(&l.atomicLevel)) { return false } @@ -1459,54 +1455,3 @@ func (l *Logger) shouldLog(level lx.LevelType) bool { return true } - -// WithHandler sets the handler for the logger as a functional option for configuring -// a new logger instance. -// Example: -// -// logger := New("app", WithHandler(lh.NewJSONHandler(os.Stdout))) -func WithHandler(handler lx.Handler) Option { - return func(l *Logger) { - l.handler = handler - } -} - -// WithTimestamped returns an Option that configures timestamp settings for the logger's existing handler. -// It enables or disables timestamp logging and optionally sets the timestamp format if the handler -// supports the lx.Timestamper interface. If no handler is set, the function has no effect. -// Parameters: -// -// enable: Boolean to enable or disable timestamp logging -// format: Optional string(s) to specify the timestamp format -func WithTimestamped(enable bool, format ...string) Option { - return func(l *Logger) { - if l.handler != nil { // Check if a handler is set - // Verify if the handler supports the lx.Timestamper interface - if h, ok := l.handler.(lx.Timestamper); ok { - h.Timestamped(enable, format...) // Apply timestamp settings to the handler - } - } - } -} - -// WithLevel sets the minimum log level for the logger as a functional option for -// configuring a new logger instance. -// Example: -// -// logger := New("app", WithLevel(lx.LevelWarn)) -func WithLevel(level lx.LevelType) Option { - return func(l *Logger) { - l.level = level - } -} - -// WithStyle sets the namespace formatting style for the logger as a functional option -// for configuring a new logger instance. -// Example: -// -// logger := New("app", WithStyle(lx.NestedPath)) -func WithStyle(style lx.StyleType) Option { - return func(l *Logger) { - l.style = style - } -} diff --git a/vendor/github.com/olekukonko/ll/lx/field.go b/vendor/github.com/olekukonko/ll/lx/field.go new file mode 100644 index 00000000..f662201c --- /dev/null +++ b/vendor/github.com/olekukonko/ll/lx/field.go @@ -0,0 +1,140 @@ +package lx + +import ( + "fmt" + "strings" +) + +// Field represents a key-value pair where the key is a string and the value is of any type. +type Field struct { + Key string + Value interface{} +} + +// Fields represents a slice of key-value pairs. +type Fields []Field + +// Map converts the Fields slice to a map[string]interface{}. +// This is useful for backward compatibility or when map operations are needed. +// Example: +// +// fields := lx.Fields{{"user", "alice"}, {"age", 30}} +// m := fields.Map() // Returns map[string]interface{}{"user": "alice", "age": 30} +func (f Fields) Map() map[string]interface{} { + m := make(map[string]interface{}, len(f)) + for _, pair := range f { + m[pair.Key] = pair.Value + } + return m +} + +// Get returns the value for a given key and a boolean indicating if the key was found. 
+// This provides O(n) lookup, which is fine for small numbers of fields. +// Example: +// +// fields := lx.Fields{{"user", "alice"}, {"age", 30}} +// value, found := fields.Get("user") // Returns "alice", true +func (f Fields) Get(key string) (interface{}, bool) { + for _, pair := range f { + if pair.Key == key { + return pair.Value, true + } + } + return nil, false +} + +// Filter returns a new Fields slice containing only pairs where the predicate returns true. +// Example: +// +// fields := lx.Fields{{"user", "alice"}, {"password", "secret"}, {"age", 30}} +// filtered := fields.Filter(func(key string, value interface{}) bool { +// return key != "password" // Remove sensitive fields +// }) +func (f Fields) Filter(predicate func(key string, value interface{}) bool) Fields { + result := make(Fields, 0, len(f)) + for _, pair := range f { + if predicate(pair.Key, pair.Value) { + result = append(result, pair) + } + } + return result +} + +// Translate returns a new Fields slice with keys translated according to the provided mapping. +// Keys not in the mapping are passed through unchanged. This is useful for adapters like Victoria. +// Example: +// +// fields := lx.Fields{{"user", "alice"}, {"timestamp", time.Now()}} +// translated := fields.Translate(map[string]string{ +// "user": "username", +// "timestamp": "ts", +// }) +// // Returns: {{"username", "alice"}, {"ts", time.Now()}} +func (f Fields) Translate(mapping map[string]string) Fields { + result := make(Fields, len(f)) + for i, pair := range f { + if newKey, ok := mapping[pair.Key]; ok { + result[i] = Field{Key: newKey, Value: pair.Value} + } else { + result[i] = pair + } + } + return result +} + +// Merge merges another Fields slice into this one, with the other slice's fields taking precedence +// for duplicate keys (overwrites existing keys). +// Example: +// +// base := lx.Fields{{"user", "alice"}, {"age", 30}} +// additional := lx.Fields{{"age", 31}, {"city", "NYC"}} +// merged := base.Merge(additional) +// // Returns: {{"user", "alice"}, {"age", 31}, {"city", "NYC"}} +func (f Fields) Merge(other Fields) Fields { + result := make(Fields, 0, len(f)+len(other)) + + // Create a map to track which keys from 'other' we've seen + seen := make(map[string]bool, len(other)) + + // First add all fields from 'f' + result = append(result, f...) + + // Then add fields from 'other', overwriting duplicates + for _, pair := range other { + // Check if this key already exists in result + found := false + for i, existing := range result { + if existing.Key == pair.Key { + result[i] = pair // Overwrite + found = true + break + } + } + if !found { + result = append(result, pair) + } + seen[pair.Key] = true + } + + return result +} + +// String returns a human-readable string representation of the fields. 
+// Example: +// +// fields := lx.Fields{{"user", "alice"}, {"age", 30}} +// str := fields.String() // Returns: "[user=alice age=30]" +func (f Fields) String() string { + var builder strings.Builder + builder.WriteString(LeftBracket) + for i, pair := range f { + if i > 0 { + builder.WriteString(Space) + } + builder.WriteString(pair.Key) + builder.WriteString("=") + builder.WriteString(fmt.Sprint(pair.Value)) + } + builder.WriteString(RightBracket) + return builder.String() +} diff --git a/vendor/github.com/olekukonko/ll/lx/interface.go b/vendor/github.com/olekukonko/ll/lx/interface.go new file mode 100644 index 00000000..8d1c5339 --- /dev/null +++ b/vendor/github.com/olekukonko/ll/lx/interface.go @@ -0,0 +1,67 @@ +package lx + +import "io" + +// Handler defines the interface for processing log entries. +// Implementations (e.g., TextHandler, JSONHandler) format and output log entries to various +// destinations (e.g., stdout, files). The Handle method returns an error if processing fails, +// allowing the logger to handle output failures gracefully. +// Example (simplified handler implementation): +// +// type MyHandler struct{} +// func (h *MyHandler) Handle(e *Entry) error { +// fmt.Printf("[%s] %s: %s\n", e.Namespace, e.Level.String(), e.Message) +// return nil +// } +type Handler interface { + Handle(e *Entry) error // Processes a log entry, returning any error +} + +// Outputter defines the interface for handlers that support dynamic output +// destination changes. Implementations can switch their output writer at runtime. +// +// Example usage: +// +// h := &JSONHandler{} +// h.Output(os.Stderr) // Switch to stderr +// h.Output(file) // Switch to file +type Outputter interface { + Output(w io.Writer) +} + +// HandlerOutputter combines the Handler and Outputter interfaces. +// Types implementing this interface can both process log entries and +// dynamically change their output destination at runtime. +// +// This is useful for creating flexible logging handlers that support +// features like log rotation, output redirection, or runtime configuration. +// +// Example usage: +// +// var ho HandlerOutputter = &TextHandler{} +// // Handle log entries +// ho.Handle(&Entry{...}) +// // Switch output destination +// ho.Output(os.Stderr) +// +// Common implementations include TextHandler and JSONHandler when they +// support output destination changes. +type HandlerOutputter interface { + Handler // Can process log entries + Outputter // Can change output destination (has Output(w io.Writer) method) +} + +// Timestamper defines an interface for handlers that support timestamp configuration. +// It includes a method to enable or disable timestamp logging and optionally set the timestamp format. +type Timestamper interface { + // Timestamped enables or disables timestamp logging and allows specifying an optional format. + // Parameters: + // enable: Boolean to enable or disable timestamp logging + // format: Optional string(s) to specify the timestamp format + Timestamped(enable bool, format ...string) +} + +// Wrap is a handler decorator function that transforms a log handler. +// It takes an existing handler as input and returns a new, wrapped handler +// that adds functionality (like filtering, transformation, or routing). 
+type Wrap func(next Handler) Handler diff --git a/vendor/github.com/olekukonko/ll/lx/lx.go b/vendor/github.com/olekukonko/ll/lx/lx.go index d370cb2d..f0b8743c 100644 --- a/vendor/github.com/olekukonko/ll/lx/lx.go +++ b/vendor/github.com/olekukonko/ll/lx/lx.go @@ -1,10 +1,5 @@ package lx -import ( - "strings" - "time" -) - // Formatting constants for log output. // These constants define the characters used to format log messages, ensuring consistency // across handlers (e.g., text, JSON, colorized). They are used to construct namespace paths, @@ -16,7 +11,7 @@ const ( Arrow = "→" // Arrow for NestedPath style namespaces (e.g., [parent]→[child]) LeftBracket = "[" // Opening bracket for namespaces and fields (e.g., [app]) RightBracket = "]" // Closing bracket for namespaces and fields (e.g., [app]) - Colon = ":" // Separator after namespace or level (e.g., [app]: INFO:) + Colon = ":" // Separator after namespace or level (e.g., [app]: INFO:) can also be "|" Dot = "." // Separator for namespace paths (e.g., "parent.child") Newline = "\n" // Newline for separating log entries or stack trace lines ) @@ -25,7 +20,7 @@ const ( // It specifies whether logging is enabled by default for new Logger instances in the ll package. // Set to false to prevent logging until explicitly enabled. const ( - DefaultEnabled = false // Default state for new loggers (disabled) + DefaultEnabled = true // Default state for new loggers (disabled) ) // Log level constants, ordered by increasing severity. @@ -36,6 +31,7 @@ const ( LevelInfo // Info level for general operational messages LevelWarn // Warn level for warning conditions LevelError // Error level for error conditions requiring attention + LevelFatal // Fatal level for critical error conditions LevelDebug // None level for logs without a specific severity (e.g., raw output) LevelUnknown // None level for logs without a specific severity (e.g., raw output) ) @@ -45,7 +41,9 @@ const ( DebugString = "DEBUG" InfoString = "INFO" WarnString = "WARN" + WarningString = "WARNING" ErrorString = "ERROR" + FatalString = "FATAL" NoneString = "NONE" UnknownString = "UNKNOWN" @@ -54,6 +52,9 @@ const ( DumpString = "DUMP" SpecialString = "SPECIAL" RawString = "RAW" + InspectString = "INSPECT" + DbgString = "DBG" + TimedString = "TIMED" ) // Log class constants, defining the type of log entry. @@ -65,7 +66,10 @@ const ( ClassDump // Dump entries for hex/ASCII dumps ClassSpecial // Special entries for custom or non-standard logs ClassRaw // Raw entries for unformatted output - ClassUnknown // Raw entries for unformatted output + ClassInspect // Inspect entries for debugging + ClassDbg // Inspect entries for debugging + ClassTimed // Inspect entries for debugging + ClassUnknown // Unknown output ) // Namespace style constants. @@ -75,149 +79,3 @@ const ( FlatPath StyleType = iota // Formats namespaces as [parent/child] NestedPath // Formats namespaces as [parent]→[child] ) - -// LevelType represents the severity of a log message. -// It is an integer type used to define log levels (Debug, Info, Warn, Error, None), with associated -// string representations for display in log output. -type LevelType int - -// String converts a LevelType to its string representation. -// It maps each level constant to a human-readable string, returning "UNKNOWN" for invalid levels. -// Used by handlers to display the log level in output. 
-// Example: -// -// var level lx.LevelType = lx.LevelInfo -// fmt.Println(level.String()) // Output: INFO -func (l LevelType) String() string { - switch l { - case LevelDebug: - return DebugString - case LevelInfo: - return InfoString - case LevelWarn: - return WarnString - case LevelError: - return ErrorString - case LevelNone: - return NoneString - default: - return UnknownString - } -} - -// LevelParse converts a string to its corresponding LevelType. -// It parses a string (case-insensitive) and returns the corresponding LevelType, defaulting to -// LevelUnknown for unrecognized strings. Supports "WARNING" as an alias for "WARN". -func LevelParse(s string) LevelType { - switch strings.ToUpper(s) { - case DebugString: - return LevelDebug - case InfoString: - return LevelInfo - case WarnString, "WARNING": // Allow both "WARN" and "WARNING" - return LevelWarn - case ErrorString: - return LevelError - case NoneString: - return LevelNone - default: - return LevelUnknown - } -} - -// StyleType defines how namespace paths are formatted in log output. -// It is an integer type used to select between FlatPath ([parent/child]) and NestedPath -// ([parent]→[child]) styles, affecting how handlers render namespace hierarchies. -type StyleType int - -// Entry represents a single log entry passed to handlers. -// It encapsulates all information about a log message, including its timestamp, severity, -// content, namespace, metadata, and formatting style. Handlers process Entry instances -// to produce formatted output (e.g., text, JSON). The struct is immutable once created, -// ensuring thread-safety in handler processing. -type Entry struct { - Timestamp time.Time // Time the log was created - Level LevelType // Severity level of the log (Debug, Info, Warn, Error, None) - Message string // Log message content - Namespace string // Namespace path (e.g., "parent/child") - Fields map[string]interface{} // Additional key-value metadata (e.g., {"user": "alice"}) - Style StyleType // Namespace formatting style (FlatPath or NestedPath) - Error error // Associated error, if any (e.g., for error logs) - Class ClassType // Type of log entry (Text, JSON, Dump, Special, Raw) - Stack []byte // Stack trace data (if present) - Id int `json:"-"` // Unique ID for the entry, ignored in JSON output -} - -// Handler defines the interface for processing log entries. -// Implementations (e.g., TextHandler, JSONHandler) format and output log entries to various -// destinations (e.g., stdout, files). The Handle method returns an error if processing fails, -// allowing the logger to handle output failures gracefully. -// Example (simplified handler implementation): -// -// type MyHandler struct{} -// func (h *MyHandler) Handle(e *Entry) error { -// fmt.Printf("[%s] %s: %s\n", e.Namespace, e.Level.String(), e.Message) -// return nil -// } -type Handler interface { - Handle(e *Entry) error // Processes a log entry, returning any error -} - -// Timestamper defines an interface for handlers that support timestamp configuration. -// It includes a method to enable or disable timestamp logging and optionally set the timestamp format. -type Timestamper interface { - // Timestamped enables or disables timestamp logging and allows specifying an optional format. - // Parameters: - // enable: Boolean to enable or disable timestamp logging - // format: Optional string(s) to specify the timestamp format - Timestamped(enable bool, format ...string) -} - -// ClassType represents the type of a log entry. 
-// It is an integer type used to categorize log entries (Text, JSON, Dump, Special, Raw), -// influencing how handlers process and format them. -type ClassType int - -// String converts a ClassType to its string representation. -// It maps each class constant to a human-readable string, returning "UNKNOWN" for invalid classes. -// Used by handlers to indicate the entry type in output (e.g., JSON fields). -// Example: -// -// var class lx.ClassType = lx.ClassText -// fmt.Println(class.String()) // Output: TEST -func (t ClassType) String() string { - switch t { - case ClassText: - return TextString - case ClassJSON: - return JSONString - case ClassDump: - return DumpString - case ClassSpecial: - return SpecialString - case ClassRaw: - return RawString - default: - return UnknownString - } -} - -// ParseClass converts a string to its corresponding ClassType. -// It parses a string (case-insensitive) and returns the corresponding ClassType, defaulting to -// ClassUnknown for unrecognized strings. -func ParseClass(s string) ClassType { - switch strings.ToUpper(s) { - case TextString: - return ClassText - case JSONString: - return ClassJSON - case DumpString: - return ClassDump - case SpecialString: - return ClassSpecial - case RawString: - return ClassRaw - default: - return ClassUnknown - } -} diff --git a/vendor/github.com/olekukonko/ll/lx/ns.go b/vendor/github.com/olekukonko/ll/lx/namespace.go similarity index 100% rename from vendor/github.com/olekukonko/ll/lx/ns.go rename to vendor/github.com/olekukonko/ll/lx/namespace.go diff --git a/vendor/github.com/olekukonko/ll/lx/types.go b/vendor/github.com/olekukonko/ll/lx/types.go new file mode 100644 index 00000000..58013fa2 --- /dev/null +++ b/vendor/github.com/olekukonko/ll/lx/types.go @@ -0,0 +1,144 @@ +package lx + +import ( + "strings" + "time" +) + +// LevelType represents the severity of a log message. +// It is an integer type used to define log levels (Debug, Info, Warn, Error, None), with associated +// string representations for display in log output. +type LevelType int + +// String converts a LevelType to its string representation. +// It maps each level constant to a human-readable string, returning "UNKNOWN" for invalid levels. +// Used by handlers to display the log level in output. +// Example: +// +// var level lx.LevelType = lx.LevelInfo +// fmt.Println(level.String()) // Output: INFO +func (l LevelType) String() string { + switch l { + case LevelDebug: + return DebugString + case LevelInfo: + return InfoString + case LevelWarn: + return WarnString + case LevelError: + return ErrorString + case LevelFatal: + return FatalString + case LevelNone: + return NoneString + default: + return UnknownString + } +} + +func (l LevelType) Name(class ClassType) string { + if class == ClassRaw || class == ClassDump || class == ClassInspect || class == ClassDbg || class == ClassTimed { + return class.String() + } + return l.String() +} + +// LevelParse converts a string to its corresponding LevelType. +// It parses a string (case-insensitive) and returns the corresponding LevelType, defaulting to +// LevelUnknown for unrecognized strings. Supports "WARNING" as an alias for "WARN". 
+func LevelParse(s string) LevelType { + switch strings.ToUpper(s) { + case DebugString: + return LevelDebug + case InfoString: + return LevelInfo + case WarnString, WarningString: // Allow both "WARN" and "WARNING" + return LevelWarn + case ErrorString: + return LevelError + case NoneString: + return LevelNone + default: + return LevelUnknown + } +} + +// Entry represents a single log entry passed to handlers. +// It encapsulates all information about a log message, including its timestamp, severity, +// content, namespace, metadata, and formatting style. Handlers process Entry instances +// to produce formatted output (e.g., text, JSON). The struct is immutable once created, +// ensuring thread-safety in handler processing. +type Entry struct { + Timestamp time.Time // Time the log was created + Level LevelType // Severity level of the log (Debug, Info, Warn, Error, None) + Message string // Log message content + Namespace string // Namespace path (e.g., "parent/child") + Fields Fields // Additional key-value metadata (e.g., {"user": "alice"}) + Style StyleType // Namespace formatting style (FlatPath or NestedPath) + Error error // Associated error, if any (e.g., for error logs) + Class ClassType // Type of log entry (Text, JSON, Dump, Special, Raw) + Stack []byte // Stack trace data (if present) + Id int `json:"-"` // Unique ID for the entry, ignored in JSON output +} + +// StyleType defines how namespace paths are formatted in log output. +// It is an integer type used to select between FlatPath ([parent/child]) and NestedPath +// ([parent]→[child]) styles, affecting how handlers render namespace hierarchies. +type StyleType int + +// ClassType represents the type of a log entry. +// It is an integer type used to categorize log entries (Text, JSON, Dump, Special, Raw), +// influencing how handlers process and format them. +type ClassType int + +// String converts a ClassType to its string representation. +// It maps each class constant to a human-readable string, returning "UNKNOWN" for invalid classes. +// Used by handlers to indicate the entry type in output (e.g., JSON fields). +// Example: +// +// var class lx.ClassType = lx.ClassText +// fmt.Println(class.String()) // Output: TEST +func (t ClassType) String() string { + switch t { + case ClassText: + + return TextString + case ClassJSON: + + return JSONString + case ClassDump: + return DumpString + case ClassSpecial: + return SpecialString + case ClassInspect: + return InspectString + case ClassDbg: + return DbgString + case ClassRaw: + return RawString + case ClassTimed: + return TimedString + default: + return UnknownString + } +} + +// ParseClass converts a string to its corresponding ClassType. +// It parses a string (case-insensitive) and returns the corresponding ClassType, defaulting to +// ClassUnknown for unrecognized strings. +func ParseClass(s string) ClassType { + switch strings.ToUpper(s) { + case TextString: + return ClassText + case JSONString: + return ClassJSON + case DumpString: + return ClassDump + case SpecialString: + return ClassSpecial + case RawString: + return ClassRaw + default: + return ClassUnknown + } +} diff --git a/vendor/github.com/olekukonko/ll/options.go b/vendor/github.com/olekukonko/ll/options.go new file mode 100644 index 00000000..50fa1944 --- /dev/null +++ b/vendor/github.com/olekukonko/ll/options.go @@ -0,0 +1,67 @@ +package ll + +import "github.com/olekukonko/ll/lx" + +// WithHandler sets the handler for the logger as a functional option for configuring +// a new logger instance. 
+// Example: +// +// logger := New("app", WithHandler(lh.NewJSONHandler(os.Stdout))) +func WithHandler(handler lx.Handler) Option { + return func(l *Logger) { + l.handler = handler + } +} + +// WithTimestamped returns an Option that configures timestamp settings for the logger's existing handler. +// It enables or disables timestamp logging and optionally sets the timestamp format if the handler +// supports the lx.Timestamper interface. If no handler is set, the function has no effect. +// Parameters: +// +// enable: Boolean to enable or disable timestamp logging +// format: Optional string(s) to specify the timestamp format +func WithTimestamped(enable bool, format ...string) Option { + return func(l *Logger) { + if l.handler != nil { // Check if a handler is set + // Verify if the handler supports the lx.Timestamper interface + if h, ok := l.handler.(lx.Timestamper); ok { + h.Timestamped(enable, format...) // Apply timestamp settings to the handler + } + } + } +} + +// WithLevel sets the minimum log level for the logger as a functional option for +// configuring a new logger instance. +// Example: +// +// logger := New("app", WithLevel(lx.LevelWarn)) +func WithLevel(level lx.LevelType) Option { + return func(l *Logger) { + l.level = level + } +} + +// WithStyle sets the namespace formatting style for the logger as a functional option +// for configuring a new logger instance. +// Example: +// +// logger := New("app", WithStyle(lx.NestedPath)) +func WithStyle(style lx.StyleType) Option { + return func(l *Logger) { + l.style = style + } +} + +// Functional options (can be passed to New() or applied later) +func WithFatalExits(enabled bool) Option { + return func(l *Logger) { + l.fatalExits = enabled + } +} + +func WithFatalStack(enabled bool) Option { + return func(l *Logger) { + l.fatalStack = enabled + } +} diff --git a/vendor/github.com/olekukonko/ll/since.go b/vendor/github.com/olekukonko/ll/since.go new file mode 100644 index 00000000..22be568a --- /dev/null +++ b/vendor/github.com/olekukonko/ll/since.go @@ -0,0 +1,388 @@ +package ll + +import ( + "fmt" + "strings" + "time" + + "github.com/olekukonko/ll/lx" +) + +// Measure executes one or more functions and logs the duration of each. +// It returns the total cumulative duration across all functions. +// +// Each function in `fns` is run sequentially. If a function is `nil`, it is skipped. +// +// Optional labels previously set via `Labels(...)` are applied to the corresponding function +// by position. If there are fewer labels than functions, missing labels are replaced with +// default names like "fn_0", "fn_1", etc. Labels are cleared after the call to prevent reuse. +// +// Example usage: +// +// logger := New("app").Enable() +// +// // Optional: add labels for functions +// logger.Labels("load_users", "process_orders") +// +// total := logger.Measure( +// func() { +// // simulate work 1 +// time.Sleep(100 * time.Millisecond) +// }, +// func() { +// // simulate work 2 +// time.Sleep(200 * time.Millisecond) +// }, +// func() { +// // simulate work 3 +// time.Sleep(50 * time.Millisecond) +// }, +// ) +// +// // Logs something like: +// // [load_users] completed duration=100ms +// // [process_orders] completed duration=200ms +// // [fn_2] completed duration=50ms +// +// Returns the sum of durations of all executed functions. 
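+// Internally each function is timed with Since()/SinceBuilder, so every
+// per-function log line also carries "index", "duration_ms" and "duration" fields.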
+func (l *Logger) Measure(fns ...func()) time.Duration { + if len(fns) == 0 { + return 0 + } + + var total time.Duration + lblPtr := l.labels.Swap(nil) + var lbls []string + if lblPtr != nil { + lbls = *lblPtr + } + + for i, fn := range fns { + if fn == nil { + continue + } + // Use SinceBuilder instead of manual timing + sb := l.Since() // starts timer internally + fn() + duration := sb.Fields( + "index", i, + ).Info(fmt.Sprintf("[%s] completed", func() string { + if i < len(lbls) && lbls[i] != "" { + return lbls[i] + } + return fmt.Sprintf("fn_%d", i) + }())) + + total += duration + } + + return total +} + +// Since creates a timer that will log the duration when completed +// If startTime is provided, uses that as the start time; otherwise uses time.Now() +// +// defer logger.Since().Info("request") // Auto-start +// logger.Since(start).Info("request") // Manual timing +// logger.Since().If(debug).Debug("timing") // Conditional +func (l *Logger) Since(startTime ...time.Time) *SinceBuilder { + start := time.Now() + if len(startTime) > 0 && !startTime[0].IsZero() { + start = startTime[0] + } + + return &SinceBuilder{ + logger: l, + start: start, + condition: true, + fields: nil, // Lazily initialized + } +} + +// SinceBuilder provides a fluent API for logging timed operations +// It mirrors FieldBuilder exactly for field operations +type SinceBuilder struct { + logger *Logger + start time.Time + condition bool + fields lx.Fields +} + +// --------------------------------------------------------------------- +// Conditional Methods (match conditional.go pattern) +// --------------------------------------------------------------------- + +// If adds a condition to this timer - only logs if condition is true +func (sb *SinceBuilder) If(condition bool) *SinceBuilder { + sb.condition = sb.condition && condition + return sb +} + +// IfErr adds an error condition - only logs if err != nil +func (sb *SinceBuilder) IfErr(err error) *SinceBuilder { + sb.condition = sb.condition && (err != nil) + return sb +} + +// IfAny logs if ANY condition is true +func (sb *SinceBuilder) IfAny(conditions ...bool) *SinceBuilder { + if !sb.condition { + return sb + } + + for _, cond := range conditions { + if cond { + return sb + } + } + sb.condition = false + return sb +} + +// IfOne logs if ALL conditions are true +func (sb *SinceBuilder) IfOne(conditions ...bool) *SinceBuilder { + if !sb.condition { + return sb + } + + for _, cond := range conditions { + if !cond { + sb.condition = false + return sb + } + } + return sb +} + +// --------------------------------------------------------------------- +// Field Methods - EXACT MATCH with FieldBuilder API +// --------------------------------------------------------------------- + +// Fields adds key-value pairs as fields (variadic) +// EXACT match to FieldBuilder.Fields() +func (sb *SinceBuilder) Fields(pairs ...any) *SinceBuilder { + if sb.logger.suspend.Load() || !sb.condition { + return sb + } + + // Lazy initialization + if sb.fields == nil { + sb.fields = make(lx.Fields, 0, len(pairs)/2) + } + + // Process key-value pairs + for i := 0; i < len(pairs)-1; i += 2 { + if key, ok := pairs[i].(string); ok { + sb.fields = append(sb.fields, lx.Field{Key: key, Value: pairs[i+1]}) + } else { + // Log error for non-string keys (matches Fields behavior) + sb.fields = append(sb.fields, lx.Field{ + Key: "error", + Value: fmt.Errorf("missing key '%v'", pairs[i]), + }) + } + } + + // Handle uneven pairs (matches Fields behavior) + if len(pairs)%2 != 0 { + sb.fields = 
append(sb.fields, lx.Field{ + Key: "error", + Value: fmt.Errorf("missing key '%v'", pairs[len(pairs)-1]), + }) + } + + return sb +} + +// Field adds fields from a map +// EXACT match to FieldBuilder.Field() +func (sb *SinceBuilder) Field(fields map[string]interface{}) *SinceBuilder { + if sb.logger.suspend.Load() || !sb.condition || len(fields) == 0 { + return sb + } + + // Lazy initialization + if sb.fields == nil { + sb.fields = make(lx.Fields, 0, len(fields)) + } + + // Copy fields from input map (preserves iteration order) + for k, v := range fields { + sb.fields = append(sb.fields, lx.Field{Key: k, Value: v}) + } + + return sb +} + +// Err adds one or more errors as a field +// EXACT match to FieldBuilder.Err() +func (sb *SinceBuilder) Err(errs ...error) *SinceBuilder { + if sb.logger.suspend.Load() || !sb.condition { + return sb + } + + // Lazy initialization + if sb.fields == nil { + sb.fields = make(lx.Fields, 0, 2) + } + + // Collect non-nil errors + var nonNilErrors []error + var builder strings.Builder + count := 0 + + for i, err := range errs { + if err != nil { + if i > 0 && count > 0 { + builder.WriteString("; ") + } + builder.WriteString(err.Error()) + nonNilErrors = append(nonNilErrors, err) + count++ + } + } + + if count > 0 { + if count == 1 { + sb.fields = append(sb.fields, lx.Field{Key: "error", Value: nonNilErrors[0]}) + } else { + sb.fields = append(sb.fields, lx.Field{Key: "error", Value: nonNilErrors}) + } + // Note: Unlike FieldBuilder.Err(), we DON'T log immediately + // The error will be included in the timing log + } + + return sb +} + +// Merge adds additional key-value pairs to the fields +// EXACT match to FieldBuilder.Merge() +func (sb *SinceBuilder) Merge(pairs ...any) *SinceBuilder { + if sb.logger.suspend.Load() || !sb.condition { + return sb + } + + // Lazy initialization + if sb.fields == nil { + sb.fields = make(lx.Fields, 0, len(pairs)/2) + } + + // Process pairs as key-value + for i := 0; i < len(pairs)-1; i += 2 { + if key, ok := pairs[i].(string); ok { + sb.fields = append(sb.fields, lx.Field{Key: key, Value: pairs[i+1]}) + } else { + sb.fields = append(sb.fields, lx.Field{ + Key: "error", + Value: fmt.Errorf("non-string key in Merge: %v", pairs[i]), + }) + } + } + + if len(pairs)%2 != 0 { + sb.fields = append(sb.fields, lx.Field{ + Key: "error", + Value: fmt.Errorf("uneven key-value pairs in Merge: [%v]", pairs[len(pairs)-1]), + }) + } + + return sb +} + +// --------------------------------------------------------------------- +// Logging Methods (match logger pattern) +// --------------------------------------------------------------------- + +// Debug logs the duration at Debug level with message +func (sb *SinceBuilder) Debug(msg string) time.Duration { + return sb.logAtLevel(lx.LevelDebug, msg) +} + +// Info logs the duration at Info level with message +func (sb *SinceBuilder) Info(msg string) time.Duration { + return sb.logAtLevel(lx.LevelInfo, msg) +} + +// Warn logs the duration at Warn level with message +func (sb *SinceBuilder) Warn(msg string) time.Duration { + return sb.logAtLevel(lx.LevelWarn, msg) +} + +// Error logs the duration at Error level with message +func (sb *SinceBuilder) Error(msg string) time.Duration { + return sb.logAtLevel(lx.LevelError, msg) +} + +// Log is an alias for Info (for backward compatibility) +func (sb *SinceBuilder) Log(msg string) time.Duration { + return sb.Info(msg) +} + +// logAtLevel internal method that handles the actual logging +func (sb *SinceBuilder) logAtLevel(level lx.LevelType, msg string) 
time.Duration { + // Fast path - don't even compute duration if we're not logging + if !sb.condition || sb.logger.suspend.Load() || !sb.logger.shouldLog(level) { + return time.Since(sb.start) + } + + duration := time.Since(sb.start) + + // Build final fields in this order: + // 1. Logger context fields (from logger.context) + // 2. Builder fields (from sb.fields) + // 3. Duration fields (always last) + + // Pre-allocate with exact capacity + totalFields := 0 + if sb.logger.context != nil { + totalFields += len(sb.logger.context) + } + if sb.fields != nil { + totalFields += len(sb.fields) + } + totalFields += 2 // duration_ms, duration + + fields := make(lx.Fields, 0, totalFields) + + // Add logger context fields first (preserves order) + if sb.logger.context != nil { + fields = append(fields, sb.logger.context...) + } + + // Add builder fields + if sb.fields != nil { + fields = append(fields, sb.fields...) + } + + // Add duration fields last (so they're visible at the end) + fields = append(fields, + lx.Field{Key: "duration_ms", Value: duration.Milliseconds()}, + lx.Field{Key: "duration", Value: duration.String()}, + ) + + sb.logger.log(level, lx.ClassTimed, msg, fields, false) + return duration +} + +// --------------------------------------------------------------------- +// Utility Methods +// --------------------------------------------------------------------- + +// Reset allows reusing the builder with a new start time +// Zero-allocation - keeps fields slice capacity +func (sb *SinceBuilder) Reset(startTime ...time.Time) *SinceBuilder { + sb.start = time.Now() + if len(startTime) > 0 && !startTime[0].IsZero() { + sb.start = startTime[0] + } + sb.condition = true + if sb.fields != nil { + sb.fields = sb.fields[:0] // Keep capacity, zero length + } + return sb +} + +// Elapsed returns the current duration without logging +func (sb *SinceBuilder) Elapsed() time.Duration { + return time.Since(sb.start) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index d2c8241b..adadd554 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -4,6 +4,9 @@ github.com/Masterminds/semver/v3 # github.com/cenkalti/backoff/v4 v4.2.1 ## explicit; go 1.18 github.com/cenkalti/backoff/v4 +# github.com/cespare/xxhash/v2 v2.3.0 +## explicit; go 1.11 +github.com/cespare/xxhash/v2 # github.com/clipperhouse/displaywidth v0.6.1 ## explicit; go 1.18 github.com/clipperhouse/displaywidth @@ -70,7 +73,7 @@ github.com/olekukonko/cat # github.com/olekukonko/errors v1.1.0 ## explicit; go 1.21 github.com/olekukonko/errors -# github.com/olekukonko/ll v0.1.3 +# github.com/olekukonko/ll v0.1.6 ## explicit; go 1.21 github.com/olekukonko/ll github.com/olekukonko/ll/lh