chore(deps): update module github.com/rs/zerolog to v1.33.0
Some checks failed: Dev Version / Release (push) was cancelled

Renovate Bot 2024-09-14 01:21:06 +00:00
parent ea786500b5
commit 2cfe1d1831
22 changed files with 515 additions and 137 deletions

go.mod (2 changed lines)

@ -5,7 +5,7 @@ go 1.21
require (
github.com/aws/aws-sdk-go v1.45.25
github.com/pkg/sftp v1.13.6
github.com/rs/zerolog v1.31.0
github.com/rs/zerolog v1.33.0
github.com/stretchr/testify v1.8.4
github.com/urfave/cli/v2 v2.25.7
golang.org/x/crypto v0.14.0

go.sum (2 changed lines)

@ -26,6 +26,8 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.31.0 h1:FcTR3NnLWW+NnTwwhFWiJSZr4ECLpqCm6QsEnyvbV4A=
github.com/rs/zerolog v1.31.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8=
github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=


@ -60,7 +60,7 @@ func main() {
// Output: {"time":1516134303,"level":"debug","message":"hello world"}
```
> Note: By default log writes to `os.Stderr`
> Note: The default log level for `log.Print` is *debug*
> Note: The default log level for `log.Print` is *trace*
### Contextual Logging
@ -412,15 +412,7 @@ Equivalent of `Lshortfile`:
```go
zerolog.CallerMarshalFunc = func(pc uintptr, file string, line int) string {
short := file
for i := len(file) - 1; i > 0; i-- {
if file[i] == '/' {
short = file[i+1:]
break
}
}
file = short
return file + ":" + strconv.Itoa(line)
return filepath.Base(file) + ":" + strconv.Itoa(line)
}
log.Logger = log.With().Caller().Logger()
log.Info().Msg("hello world")
@ -547,7 +539,7 @@ and facilitates the unification of logging and tracing in some systems:
type TracingHook struct{}
func (h TracingHook) Run(e *zerolog.Event, level zerolog.Level, msg string) {
ctx := e.Ctx()
ctx := e.GetCtx()
spanId := getSpanIdFromContext(ctx) // as per your tracing framework
e.Str("span-id", spanId)
}
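A possible way to wire the hook up (a sketch, not part of the diff; `TracingHook` is the type defined just above, and the context is whatever your handler already carries):

```go
func handle(ctx context.Context) {
	// Attach ctx so the hook's e.GetCtx() call has something to read,
	// then register the hook on the logger.
	logger := log.With().Ctx(ctx).Logger().Hook(TracingHook{})
	logger.Info().Msg("request handled")
}
```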
@ -646,10 +638,14 @@ Some settings can be changed and will be applied to all loggers:
* `zerolog.LevelFieldName`: Can be set to customize level field name.
* `zerolog.MessageFieldName`: Can be set to customize message field name.
* `zerolog.ErrorFieldName`: Can be set to customize `Err` field name.
* `zerolog.TimeFieldFormat`: Can be set to customize `Time` field value formatting. If set with `zerolog.TimeFormatUnix`, `zerolog.TimeFormatUnixMs` or `zerolog.TimeFormatUnixMicro`, times are formated as UNIX timestamp.
* `zerolog.TimeFieldFormat`: Can be set to customize `Time` field value formatting. If set with `zerolog.TimeFormatUnix`, `zerolog.TimeFormatUnixMs` or `zerolog.TimeFormatUnixMicro`, times are formatted as UNIX timestamp.
* `zerolog.DurationFieldUnit`: Can be set to customize the unit for time.Duration type fields added by `Dur` (default: `time.Millisecond`).
* `zerolog.DurationFieldInteger`: If set to `true`, `Dur` fields are formatted as integers instead of floats (default: `false`).
* `zerolog.ErrorHandler`: Called whenever zerolog fails to write an event on its output. If not set, an error is printed on the stderr. This handler must be thread safe and non-blocking.
* `zerolog.FloatingPointPrecision`: If set to a value other than -1, controls the number
of digits when formatting float numbers in JSON. See
[strconv.FormatFloat](https://pkg.go.dev/strconv#FormatFloat)
for more details.
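A rough sketch (not part of the diff) of how these globals might be set once at startup; `FloatingPointPrecision` is the setting this update introduces, the others are long-standing:

```go
package main

import (
	"time"

	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
)

func main() {
	// Package-level settings apply to every logger created afterwards.
	zerolog.TimeFieldFormat = zerolog.TimeFormatUnixMs // times as UNIX milliseconds
	zerolog.DurationFieldUnit = time.Second            // Dur fields expressed in seconds
	zerolog.FloatingPointPrecision = 3                 // limit JSON floats to 3 digits

	log.Info().
		Float64("ratio", 0.123456).
		Dur("elapsed", 1500*time.Millisecond).
		Msg("tuned output")
}
```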
## Field Types


@ -183,13 +183,13 @@ func (a *Array) Uint64(i uint64) *Array {
// Float32 appends f as a float32 to the array.
func (a *Array) Float32(f float32) *Array {
a.buf = enc.AppendFloat32(enc.AppendArrayDelim(a.buf), f)
a.buf = enc.AppendFloat32(enc.AppendArrayDelim(a.buf), f, FloatingPointPrecision)
return a
}
// Float64 appends f as a float64 to the array.
func (a *Array) Float64(f float64) *Array {
a.buf = enc.AppendFloat64(enc.AppendArrayDelim(a.buf), f)
a.buf = enc.AppendFloat64(enc.AppendArrayDelim(a.buf), f, FloatingPointPrecision)
return a
}
@ -201,7 +201,7 @@ func (a *Array) Time(t time.Time) *Array {
// Dur appends d to the array.
func (a *Array) Dur(d time.Duration) *Array {
a.buf = enc.AppendDuration(enc.AppendArrayDelim(a.buf), d, DurationFieldUnit, DurationFieldInteger)
a.buf = enc.AppendDuration(enc.AppendArrayDelim(a.buf), d, DurationFieldUnit, DurationFieldInteger, FloatingPointPrecision)
return a
}
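For orientation (usage as in the README, not new in this diff), these Array methods are reached through `zerolog.Arr()`; the extra argument threaded through above is how array items pick up the package-level `FloatingPointPrecision`:

```go
log.Info().
	Array("samples", zerolog.Arr().Float64(1.5).Float64(20.25).Dur(time.Second)).
	Msg("floats and a duration in one array field")
```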


@ -28,6 +28,8 @@ const (
colorBold = 1
colorDarkGray = 90
unknownLevel = "???"
)
var (
@ -57,12 +59,21 @@ type ConsoleWriter struct {
// TimeFormat specifies the format for timestamp in output.
TimeFormat string
// TimeLocation tells ConsoleWriters default FormatTimestamp
// how to localize the time.
TimeLocation *time.Location
// PartsOrder defines the order of parts in output.
PartsOrder []string
// PartsExclude defines parts to not display in output.
PartsExclude []string
// FieldsOrder defines the order of contextual fields in output.
FieldsOrder []string
fieldIsOrdered map[string]int
// FieldsExclude defines contextual fields to not display in output.
FieldsExclude []string
@ -76,14 +87,16 @@ type ConsoleWriter struct {
FormatErrFieldValue Formatter
FormatExtra func(map[string]interface{}, *bytes.Buffer) error
FormatPrepare func(map[string]interface{}) error
}
// NewConsoleWriter creates and initializes a new ConsoleWriter.
func NewConsoleWriter(options ...func(w *ConsoleWriter)) ConsoleWriter {
w := ConsoleWriter{
Out: os.Stdout,
TimeFormat: consoleDefaultTimeFormat,
PartsOrder: consoleDefaultPartsOrder(),
Out: os.Stdout,
TimeFormat: consoleDefaultTimeFormat,
PartsOrder: consoleDefaultPartsOrder(),
}
for _, opt := range options {
@ -124,6 +137,13 @@ func (w ConsoleWriter) Write(p []byte) (n int, err error) {
return n, fmt.Errorf("cannot decode event: %s", err)
}
if w.FormatPrepare != nil {
err = w.FormatPrepare(evt)
if err != nil {
return n, err
}
}
for _, p := range w.PartsOrder {
w.writePart(buf, evt, p)
}
@ -146,6 +166,15 @@ func (w ConsoleWriter) Write(p []byte) (n int, err error) {
return len(p), err
}
// Call the underlying writer's Close method if it is an io.Closer. Otherwise
// does nothing.
func (w ConsoleWriter) Close() error {
if closer, ok := w.Out.(io.Closer); ok {
return closer.Close()
}
return nil
}
// writeFields appends formatted key-value pairs to buf.
func (w ConsoleWriter) writeFields(evt map[string]interface{}, buf *bytes.Buffer) {
var fields = make([]string, 0, len(evt))
@ -167,7 +196,12 @@ func (w ConsoleWriter) writeFields(evt map[string]interface{}, buf *bytes.Buffer
}
fields = append(fields, field)
}
sort.Strings(fields)
if len(w.FieldsOrder) > 0 {
w.orderFields(fields)
} else {
sort.Strings(fields)
}
// Write space only if something has already been written to the buffer, and if there are fields.
if buf.Len() > 0 && len(fields) > 0 {
@ -266,13 +300,13 @@ func (w ConsoleWriter) writePart(buf *bytes.Buffer, evt map[string]interface{},
}
case TimestampFieldName:
if w.FormatTimestamp == nil {
f = consoleDefaultFormatTimestamp(w.TimeFormat, w.NoColor)
f = consoleDefaultFormatTimestamp(w.TimeFormat, w.TimeLocation, w.NoColor)
} else {
f = w.FormatTimestamp
}
case MessageFieldName:
if w.FormatMessage == nil {
f = consoleDefaultFormatMessage
f = consoleDefaultFormatMessage(w.NoColor, evt[LevelFieldName])
} else {
f = w.FormatMessage
}
@ -300,6 +334,32 @@ func (w ConsoleWriter) writePart(buf *bytes.Buffer, evt map[string]interface{},
}
}
// orderFields takes an array of field names and an array representing field order
// and returns an array with any ordered fields at the beginning, in order,
// and the remaining fields after in their original order.
func (w ConsoleWriter) orderFields(fields []string) {
if w.fieldIsOrdered == nil {
w.fieldIsOrdered = make(map[string]int)
for i, fieldName := range w.FieldsOrder {
w.fieldIsOrdered[fieldName] = i
}
}
sort.Slice(fields, func(i, j int) bool {
ii, iOrdered := w.fieldIsOrdered[fields[i]]
jj, jOrdered := w.fieldIsOrdered[fields[j]]
if iOrdered && jOrdered {
return ii < jj
}
if iOrdered {
return true
}
if jOrdered {
return false
}
return fields[i] < fields[j]
})
}
// needsQuote returns true when the string s should be quoted in output.
func needsQuote(s string) bool {
for i := range s {
@ -310,10 +370,10 @@ func needsQuote(s string) bool {
return false
}
// colorize returns the string s wrapped in ANSI code c, unless disabled is true.
// colorize returns the string s wrapped in ANSI code c, unless disabled is true or c is 0.
func colorize(s interface{}, c int, disabled bool) string {
e := os.Getenv("NO_COLOR")
if e != "" {
if e != "" || c == 0 {
disabled = true
}
@ -334,19 +394,23 @@ func consoleDefaultPartsOrder() []string {
}
}
func consoleDefaultFormatTimestamp(timeFormat string, noColor bool) Formatter {
func consoleDefaultFormatTimestamp(timeFormat string, location *time.Location, noColor bool) Formatter {
if timeFormat == "" {
timeFormat = consoleDefaultTimeFormat
}
if location == nil {
location = time.Local
}
return func(i interface{}) string {
t := "<nil>"
switch tt := i.(type) {
case string:
ts, err := time.ParseInLocation(TimeFieldFormat, tt, time.Local)
ts, err := time.ParseInLocation(TimeFieldFormat, tt, location)
if err != nil {
t = tt
} else {
t = ts.Local().Format(timeFormat)
t = ts.In(location).Format(timeFormat)
}
case json.Number:
i, err := tt.Int64()
@ -367,43 +431,37 @@ func consoleDefaultFormatTimestamp(timeFormat string, noColor bool) Formatter {
}
ts := time.Unix(sec, nsec)
t = ts.Format(timeFormat)
t = ts.In(location).Format(timeFormat)
}
}
return colorize(t, colorDarkGray, noColor)
}
}
func stripLevel(ll string) string {
if len(ll) == 0 {
return unknownLevel
}
if len(ll) > 3 {
ll = ll[:3]
}
return strings.ToUpper(ll)
}
func consoleDefaultFormatLevel(noColor bool) Formatter {
return func(i interface{}) string {
var l string
if ll, ok := i.(string); ok {
switch ll {
case LevelTraceValue:
l = colorize("TRC", colorMagenta, noColor)
case LevelDebugValue:
l = colorize("DBG", colorYellow, noColor)
case LevelInfoValue:
l = colorize("INF", colorGreen, noColor)
case LevelWarnValue:
l = colorize("WRN", colorRed, noColor)
case LevelErrorValue:
l = colorize(colorize("ERR", colorRed, noColor), colorBold, noColor)
case LevelFatalValue:
l = colorize(colorize("FTL", colorRed, noColor), colorBold, noColor)
case LevelPanicValue:
l = colorize(colorize("PNC", colorRed, noColor), colorBold, noColor)
default:
l = colorize(ll, colorBold, noColor)
}
} else {
if i == nil {
l = colorize("???", colorBold, noColor)
} else {
l = strings.ToUpper(fmt.Sprintf("%s", i))[0:3]
level, _ := ParseLevel(ll)
fl, ok := FormattedLevels[level]
if ok {
return colorize(fl, LevelColors[level], noColor)
}
return stripLevel(ll)
}
return l
if i == nil {
return unknownLevel
}
return stripLevel(fmt.Sprintf("%s", i))
}
}
@ -425,11 +483,18 @@ func consoleDefaultFormatCaller(noColor bool) Formatter {
}
}
func consoleDefaultFormatMessage(i interface{}) string {
if i == nil {
return ""
func consoleDefaultFormatMessage(noColor bool, level interface{}) Formatter {
return func(i interface{}) string {
if i == nil || i == "" {
return ""
}
switch level {
case LevelInfoValue, LevelWarnValue, LevelErrorValue, LevelFatalValue, LevelPanicValue:
return colorize(fmt.Sprintf("%s", i), colorBold, noColor)
default:
return fmt.Sprintf("%s", i)
}
}
return fmt.Sprintf("%s", i)
}
func consoleDefaultFormatFieldName(noColor bool) Formatter {
@ -450,6 +515,6 @@ func consoleDefaultFormatErrFieldName(noColor bool) Formatter {
func consoleDefaultFormatErrFieldValue(noColor bool) Formatter {
return func(i interface{}) string {
return colorize(fmt.Sprintf("%s", i), colorRed, noColor)
return colorize(colorize(fmt.Sprintf("%s", i), colorBold, noColor), colorRed, noColor)
}
}
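Pulling the ConsoleWriter additions together, a usage sketch (field names and values are illustrative; `TimeLocation`, the `FieldsOrder` ordering, `FormatPrepare`, and the `FormattedLevels` override are the pieces touched by this diff):

```go
package main

import (
	"time"

	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
)

func main() {
	zerolog.FormattedLevels[zerolog.WarnLevel] = "WARN" // restyle the console level label

	w := zerolog.NewConsoleWriter(func(w *zerolog.ConsoleWriter) {
		w.TimeLocation = time.UTC                  // localize timestamps to UTC
		w.FieldsOrder = []string{"method", "path"} // print these fields first, the rest sorted
		w.FormatPrepare = func(evt map[string]interface{}) error {
			// Adjust the decoded event before any part is written, e.g. redact a field.
			if _, ok := evt["password"]; ok {
				evt["password"] = "[redacted]"
			}
			return nil
		}
	})

	log.Logger = log.Output(w)
	log.Warn().Str("method", "GET").Str("path", "/users").Str("password", "s3cret").Msg("request")
}
```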


@ -3,7 +3,7 @@ package zerolog
import (
"context"
"fmt"
"io/ioutil"
"io"
"math"
"net"
"time"
@ -23,7 +23,7 @@ func (c Context) Logger() Logger {
// Only map[string]interface{} and []interface{} are accepted. []interface{} must
// alternate string keys and arbitrary values, and extraneous ones are ignored.
func (c Context) Fields(fields interface{}) Context {
c.l.context = appendFields(c.l.context, fields)
c.l.context = appendFields(c.l.context, fields, c.l.stack)
return c
}
@ -57,7 +57,7 @@ func (c Context) Array(key string, arr LogArrayMarshaler) Context {
// Object marshals an object that implement the LogObjectMarshaler interface.
func (c Context) Object(key string, obj LogObjectMarshaler) Context {
e := newEvent(LevelWriterAdapter{ioutil.Discard}, 0)
e := newEvent(LevelWriterAdapter{io.Discard}, 0)
e.Object(key, obj)
c.l.context = enc.AppendObjectData(c.l.context, e.buf)
putEvent(e)
@ -66,7 +66,7 @@ func (c Context) Object(key string, obj LogObjectMarshaler) Context {
// EmbedObject marshals and Embeds an object that implement the LogObjectMarshaler interface.
func (c Context) EmbedObject(obj LogObjectMarshaler) Context {
e := newEvent(LevelWriterAdapter{ioutil.Discard}, 0)
e := newEvent(LevelWriterAdapter{io.Discard}, 0)
e.EmbedObject(obj)
c.l.context = enc.AppendObjectData(c.l.context, e.buf)
putEvent(e)
@ -163,6 +163,22 @@ func (c Context) Errs(key string, errs []error) Context {
// Err adds the field "error" with serialized err to the logger context.
func (c Context) Err(err error) Context {
if c.l.stack && ErrorStackMarshaler != nil {
switch m := ErrorStackMarshaler(err).(type) {
case nil:
case LogObjectMarshaler:
c = c.Object(ErrorStackFieldName, m)
case error:
if m != nil && !isNilValue(m) {
c = c.Str(ErrorStackFieldName, m.Error())
}
case string:
c = c.Str(ErrorStackFieldName, m)
default:
c = c.Interface(ErrorStackFieldName, m)
}
}
return c.AnErr(ErrorFieldName, err)
}
@ -309,25 +325,25 @@ func (c Context) Uints64(key string, i []uint64) Context {
// Float32 adds the field key with f as a float32 to the logger context.
func (c Context) Float32(key string, f float32) Context {
c.l.context = enc.AppendFloat32(enc.AppendKey(c.l.context, key), f)
c.l.context = enc.AppendFloat32(enc.AppendKey(c.l.context, key), f, FloatingPointPrecision)
return c
}
// Floats32 adds the field key with f as a []float32 to the logger context.
func (c Context) Floats32(key string, f []float32) Context {
c.l.context = enc.AppendFloats32(enc.AppendKey(c.l.context, key), f)
c.l.context = enc.AppendFloats32(enc.AppendKey(c.l.context, key), f, FloatingPointPrecision)
return c
}
// Float64 adds the field key with f as a float64 to the logger context.
func (c Context) Float64(key string, f float64) Context {
c.l.context = enc.AppendFloat64(enc.AppendKey(c.l.context, key), f)
c.l.context = enc.AppendFloat64(enc.AppendKey(c.l.context, key), f, FloatingPointPrecision)
return c
}
// Floats64 adds the field key with f as a []float64 to the logger context.
func (c Context) Floats64(key string, f []float64) Context {
c.l.context = enc.AppendFloats64(enc.AppendKey(c.l.context, key), f)
c.l.context = enc.AppendFloats64(enc.AppendKey(c.l.context, key), f, FloatingPointPrecision)
return c
}
@ -349,13 +365,13 @@ func (c Context) Timestamp() Context {
return c
}
// Time adds the field key with t formated as string using zerolog.TimeFieldFormat.
// Time adds the field key with t formatted as string using zerolog.TimeFieldFormat.
func (c Context) Time(key string, t time.Time) Context {
c.l.context = enc.AppendTime(enc.AppendKey(c.l.context, key), t, TimeFieldFormat)
return c
}
// Times adds the field key with t formated as string using zerolog.TimeFieldFormat.
// Times adds the field key with t formatted as string using zerolog.TimeFieldFormat.
func (c Context) Times(key string, t []time.Time) Context {
c.l.context = enc.AppendTimes(enc.AppendKey(c.l.context, key), t, TimeFieldFormat)
return c
@ -363,27 +379,42 @@ func (c Context) Times(key string, t []time.Time) Context {
// Dur adds the fields key with d divided by unit and stored as a float.
func (c Context) Dur(key string, d time.Duration) Context {
c.l.context = enc.AppendDuration(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger)
c.l.context = enc.AppendDuration(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger, FloatingPointPrecision)
return c
}
// Durs adds the fields key with d divided by unit and stored as a float.
func (c Context) Durs(key string, d []time.Duration) Context {
c.l.context = enc.AppendDurations(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger)
c.l.context = enc.AppendDurations(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger, FloatingPointPrecision)
return c
}
// Interface adds the field key with obj marshaled using reflection.
func (c Context) Interface(key string, i interface{}) Context {
if obj, ok := i.(LogObjectMarshaler); ok {
return c.Object(key, obj)
}
c.l.context = enc.AppendInterface(enc.AppendKey(c.l.context, key), i)
return c
}
// Type adds the field key with val's type using reflection.
func (c Context) Type(key string, val interface{}) Context {
c.l.context = enc.AppendType(enc.AppendKey(c.l.context, key), val)
return c
}
// Any is a wrapper around Context.Interface.
func (c Context) Any(key string, i interface{}) Context {
return c.Interface(key, i)
}
// Reset removes all the context fields.
func (c Context) Reset() Context {
c.l.context = enc.AppendBeginMarker(make([]byte, 0, 500))
return c
}
type callerHook struct {
callerSkipFrameCount int
}
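A compact sketch of the context additions visible above: `Any` forwards to `Interface`, and with an `ErrorStackMarshaler` set, `Stack()` now also applies to `Err` on the context. The stack marshaler and error package below are assumptions for the example (zerolog ships `pkgerrors.MarshalStack` for github.com/pkg/errors):

```go
package main

import (
	"os"

	"github.com/pkg/errors"
	"github.com/rs/zerolog"
	"github.com/rs/zerolog/pkgerrors"
)

func main() {
	zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack

	logger := zerolog.New(os.Stderr).With().
		Stack().                 // with a marshaler set, Context.Err also emits the stack field
		Err(errors.New("boom")). // pkg/errors records the stack at creation time
		Any("request_id", 42).   // Any is the new wrapper around Context.Interface
		Timestamp().
		Logger()

	logger.Error().Msg("context carries error, stack and request_id")
}
```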


@ -13,13 +13,13 @@ type encoder interface {
AppendBool(dst []byte, val bool) []byte
AppendBools(dst []byte, vals []bool) []byte
AppendBytes(dst, s []byte) []byte
AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte
AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte
AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool, precision int) []byte
AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool, precision int) []byte
AppendEndMarker(dst []byte) []byte
AppendFloat32(dst []byte, val float32) []byte
AppendFloat64(dst []byte, val float64) []byte
AppendFloats32(dst []byte, vals []float32) []byte
AppendFloats64(dst []byte, vals []float64) []byte
AppendFloat32(dst []byte, val float32, precision int) []byte
AppendFloat64(dst []byte, val float64, precision int) []byte
AppendFloats32(dst []byte, vals []float32, precision int) []byte
AppendFloats64(dst []byte, vals []float64, precision int) []byte
AppendHex(dst, s []byte) []byte
AppendIPAddr(dst []byte, ip net.IP) []byte
AppendIPPrefix(dst []byte, pfx net.IPNet) []byte


@ -164,7 +164,7 @@ func (e *Event) Fields(fields interface{}) *Event {
if e == nil {
return e
}
e.buf = appendFields(e.buf, fields)
e.buf = appendFields(e.buf, fields, e.stack)
return e
}
@ -644,7 +644,7 @@ func (e *Event) Float32(key string, f float32) *Event {
if e == nil {
return e
}
e.buf = enc.AppendFloat32(enc.AppendKey(e.buf, key), f)
e.buf = enc.AppendFloat32(enc.AppendKey(e.buf, key), f, FloatingPointPrecision)
return e
}
@ -653,7 +653,7 @@ func (e *Event) Floats32(key string, f []float32) *Event {
if e == nil {
return e
}
e.buf = enc.AppendFloats32(enc.AppendKey(e.buf, key), f)
e.buf = enc.AppendFloats32(enc.AppendKey(e.buf, key), f, FloatingPointPrecision)
return e
}
@ -662,7 +662,7 @@ func (e *Event) Float64(key string, f float64) *Event {
if e == nil {
return e
}
e.buf = enc.AppendFloat64(enc.AppendKey(e.buf, key), f)
e.buf = enc.AppendFloat64(enc.AppendKey(e.buf, key), f, FloatingPointPrecision)
return e
}
@ -671,7 +671,7 @@ func (e *Event) Floats64(key string, f []float64) *Event {
if e == nil {
return e
}
e.buf = enc.AppendFloats64(enc.AppendKey(e.buf, key), f)
e.buf = enc.AppendFloats64(enc.AppendKey(e.buf, key), f, FloatingPointPrecision)
return e
}
@ -713,7 +713,7 @@ func (e *Event) Dur(key string, d time.Duration) *Event {
if e == nil {
return e
}
e.buf = enc.AppendDuration(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger)
e.buf = enc.AppendDuration(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger, FloatingPointPrecision)
return e
}
@ -724,7 +724,7 @@ func (e *Event) Durs(key string, d []time.Duration) *Event {
if e == nil {
return e
}
e.buf = enc.AppendDurations(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger)
e.buf = enc.AppendDurations(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger, FloatingPointPrecision)
return e
}
@ -739,7 +739,7 @@ func (e *Event) TimeDiff(key string, t time.Time, start time.Time) *Event {
if t.After(start) {
d = t.Sub(start)
}
e.buf = enc.AppendDuration(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger)
e.buf = enc.AppendDuration(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger, FloatingPointPrecision)
return e
}

vendor/github.com/rs/zerolog/example.jsonl (generated, vendored, new file; 7 changed lines)

@ -0,0 +1,7 @@
{"time":"5:41PM","level":"info","message":"Starting listener","listen":":8080","pid":37556}
{"time":"5:41PM","level":"debug","message":"Access","database":"myapp","host":"localhost:4962","pid":37556}
{"time":"5:41PM","level":"info","message":"Access","method":"GET","path":"/users","pid":37556,"resp_time":23}
{"time":"5:41PM","level":"info","message":"Access","method":"POST","path":"/posts","pid":37556,"resp_time":532}
{"time":"5:41PM","level":"warn","message":"Slow request","method":"POST","path":"/posts","pid":37556,"resp_time":532}
{"time":"5:41PM","level":"info","message":"Access","method":"GET","path":"/users","pid":37556,"resp_time":10}
{"time":"5:41PM","level":"error","message":"Database connection lost","database":"myapp","pid":37556,"error":"connection reset by peer"}


@ -12,13 +12,13 @@ func isNilValue(i interface{}) bool {
return (*[2]uintptr)(unsafe.Pointer(&i))[1] == 0
}
func appendFields(dst []byte, fields interface{}) []byte {
func appendFields(dst []byte, fields interface{}, stack bool) []byte {
switch fields := fields.(type) {
case []interface{}:
if n := len(fields); n&0x1 == 1 { // odd number
fields = fields[:n-1]
}
dst = appendFieldList(dst, fields)
dst = appendFieldList(dst, fields, stack)
case map[string]interface{}:
keys := make([]string, 0, len(fields))
for key := range fields {
@ -28,13 +28,13 @@ func appendFields(dst []byte, fields interface{}) []byte {
kv := make([]interface{}, 2)
for _, key := range keys {
kv[0], kv[1] = key, fields[key]
dst = appendFieldList(dst, kv)
dst = appendFieldList(dst, kv, stack)
}
}
return dst
}
func appendFieldList(dst []byte, kvList []interface{}) []byte {
func appendFieldList(dst []byte, kvList []interface{}, stack bool) []byte {
for i, n := 0, len(kvList); i < n; i += 2 {
key, val := kvList[i], kvList[i+1]
if key, ok := key.(string); ok {
@ -74,6 +74,21 @@ func appendFieldList(dst []byte, kvList []interface{}) []byte {
default:
dst = enc.AppendInterface(dst, m)
}
if stack && ErrorStackMarshaler != nil {
dst = enc.AppendKey(dst, ErrorStackFieldName)
switch m := ErrorStackMarshaler(val).(type) {
case nil:
case error:
if m != nil && !isNilValue(m) {
dst = enc.AppendString(dst, m.Error())
}
case string:
dst = enc.AppendString(dst, m)
default:
dst = enc.AppendInterface(dst, m)
}
}
case []error:
dst = enc.AppendArrayStart(dst)
for i, err := range val {
@ -124,13 +139,13 @@ func appendFieldList(dst []byte, kvList []interface{}) []byte {
case uint64:
dst = enc.AppendUint64(dst, val)
case float32:
dst = enc.AppendFloat32(dst, val)
dst = enc.AppendFloat32(dst, val, FloatingPointPrecision)
case float64:
dst = enc.AppendFloat64(dst, val)
dst = enc.AppendFloat64(dst, val, FloatingPointPrecision)
case time.Time:
dst = enc.AppendTime(dst, val, TimeFieldFormat)
case time.Duration:
dst = enc.AppendDuration(dst, val, DurationFieldUnit, DurationFieldInteger)
dst = enc.AppendDuration(dst, val, DurationFieldUnit, DurationFieldInteger, FloatingPointPrecision)
case *string:
if val != nil {
dst = enc.AppendString(dst, *val)
@ -205,13 +220,13 @@ func appendFieldList(dst []byte, kvList []interface{}) []byte {
}
case *float32:
if val != nil {
dst = enc.AppendFloat32(dst, *val)
dst = enc.AppendFloat32(dst, *val, FloatingPointPrecision)
} else {
dst = enc.AppendNil(dst)
}
case *float64:
if val != nil {
dst = enc.AppendFloat64(dst, *val)
dst = enc.AppendFloat64(dst, *val, FloatingPointPrecision)
} else {
dst = enc.AppendNil(dst)
}
@ -223,7 +238,7 @@ func appendFieldList(dst []byte, kvList []interface{}) []byte {
}
case *time.Duration:
if val != nil {
dst = enc.AppendDuration(dst, *val, DurationFieldUnit, DurationFieldInteger)
dst = enc.AppendDuration(dst, *val, DurationFieldUnit, DurationFieldInteger, FloatingPointPrecision)
} else {
dst = enc.AppendNil(dst)
}
@ -252,13 +267,13 @@ func appendFieldList(dst []byte, kvList []interface{}) []byte {
case []uint64:
dst = enc.AppendUints64(dst, val)
case []float32:
dst = enc.AppendFloats32(dst, val)
dst = enc.AppendFloats32(dst, val, FloatingPointPrecision)
case []float64:
dst = enc.AppendFloats64(dst, val)
dst = enc.AppendFloats64(dst, val, FloatingPointPrecision)
case []time.Time:
dst = enc.AppendTimes(dst, val, TimeFieldFormat)
case []time.Duration:
dst = enc.AppendDurations(dst, val, DurationFieldUnit, DurationFieldInteger)
dst = enc.AppendDurations(dst, val, DurationFieldUnit, DurationFieldInteger, FloatingPointPrecision)
case nil:
dst = enc.AppendNil(dst)
case net.IP:


@ -1,6 +1,7 @@
package zerolog
import (
"bytes"
"encoding/json"
"strconv"
"sync/atomic"
@ -81,8 +82,22 @@ var (
}
// InterfaceMarshalFunc allows customization of interface marshaling.
// Default: "encoding/json.Marshal"
InterfaceMarshalFunc = json.Marshal
// Default: "encoding/json.Marshal" with disabled HTML escaping
InterfaceMarshalFunc = func(v interface{}) ([]byte, error) {
var buf bytes.Buffer
encoder := json.NewEncoder(&buf)
encoder.SetEscapeHTML(false)
err := encoder.Encode(v)
if err != nil {
return nil, err
}
b := buf.Bytes()
if len(b) > 0 {
// Remove trailing \n which is added by Encode.
return b[:len(b)-1], nil
}
return b, nil
}
// TimeFieldFormat defines the time format of the Time field type. If set to
// TimeFormatUnix, TimeFormatUnixMs, TimeFormatUnixMicro or TimeFormatUnixNano, the time is formatted as a UNIX
@ -108,6 +123,39 @@ var (
// DefaultContextLogger is returned from Ctx() if there is no logger associated
// with the context.
DefaultContextLogger *Logger
// LevelColors are used by ConsoleWriter's consoleDefaultFormatLevel to color
// log levels.
LevelColors = map[Level]int{
TraceLevel: colorBlue,
DebugLevel: 0,
InfoLevel: colorGreen,
WarnLevel: colorYellow,
ErrorLevel: colorRed,
FatalLevel: colorRed,
PanicLevel: colorRed,
}
// FormattedLevels are used by ConsoleWriter's consoleDefaultFormatLevel
// for a short level name.
FormattedLevels = map[Level]string{
TraceLevel: "TRC",
DebugLevel: "DBG",
InfoLevel: "INF",
WarnLevel: "WRN",
ErrorLevel: "ERR",
FatalLevel: "FTL",
PanicLevel: "PNC",
}
// TriggerLevelWriterBufferReuseLimit is a limit in bytes that a buffer is dropped
// from the TriggerLevelWriter buffer pool if the buffer grows above the limit.
TriggerLevelWriterBufferReuseLimit = 64 * 1024
// FloatingPointPrecision, if set to a value other than -1, controls the number
// of digits when formatting float numbers in JSON. See strconv.FormatFloat for
// more details.
FloatingPointPrecision = -1
)
var (


@ -95,7 +95,7 @@ func decodeFloat(src *bufio.Reader) (float64, int) {
switch minor {
case additionalTypeFloat16:
panic(fmt.Errorf("float16 is not suppported in decodeFloat"))
panic(fmt.Errorf("float16 is not supported in decodeFloat"))
case additionalTypeFloat32:
pb := readNBytes(src, 4)


@ -29,7 +29,7 @@ func (e Encoder) appendFloatTimestamp(dst []byte, t time.Time) []byte {
nanos := t.Nanosecond()
var val float64
val = float64(secs)*1.0 + float64(nanos)*1e-9
return e.AppendFloat64(dst, val)
return e.AppendFloat64(dst, val, -1)
}
// AppendTime encodes and adds a timestamp to the dst byte array.
@ -64,17 +64,17 @@ func (e Encoder) AppendTimes(dst []byte, vals []time.Time, unused string) []byte
// AppendDuration encodes and adds a duration to the dst byte array.
// useInt field indicates whether to store the duration as seconds (integer) or
// as seconds+nanoseconds (float).
func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte {
func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool, unused int) []byte {
if useInt {
return e.AppendInt64(dst, int64(d/unit))
}
return e.AppendFloat64(dst, float64(d)/float64(unit))
return e.AppendFloat64(dst, float64(d)/float64(unit), unused)
}
// AppendDurations encodes and adds an array of durations to the dst byte array.
// useInt field indicates whether to store the duration as seconds (integer) or
// as seconds+nanoseconds (float).
func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte {
func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool, unused int) []byte {
major := majorTypeArray
l := len(vals)
if l == 0 {
@ -87,7 +87,7 @@ func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Dur
dst = appendCborTypePrefix(dst, major, uint64(l))
}
for _, d := range vals {
dst = e.AppendDuration(dst, d, unit, useInt)
dst = e.AppendDuration(dst, d, unit, useInt, unused)
}
return dst
}


@ -352,7 +352,7 @@ func (e Encoder) AppendUints64(dst []byte, vals []uint64) []byte {
}
// AppendFloat32 encodes and inserts a single precision float value into the dst byte array.
func (Encoder) AppendFloat32(dst []byte, val float32) []byte {
func (Encoder) AppendFloat32(dst []byte, val float32, unused int) []byte {
switch {
case math.IsNaN(float64(val)):
return append(dst, "\xfa\x7f\xc0\x00\x00"...)
@ -372,7 +372,7 @@ func (Encoder) AppendFloat32(dst []byte, val float32) []byte {
}
// AppendFloats32 encodes and inserts an array of single precision float value into the dst byte array.
func (e Encoder) AppendFloats32(dst []byte, vals []float32) []byte {
func (e Encoder) AppendFloats32(dst []byte, vals []float32, unused int) []byte {
major := majorTypeArray
l := len(vals)
if l == 0 {
@ -385,13 +385,13 @@ func (e Encoder) AppendFloats32(dst []byte, vals []float32) []byte {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
for _, v := range vals {
dst = e.AppendFloat32(dst, v)
dst = e.AppendFloat32(dst, v, unused)
}
return dst
}
// AppendFloat64 encodes and inserts a double precision float value into the dst byte array.
func (Encoder) AppendFloat64(dst []byte, val float64) []byte {
func (Encoder) AppendFloat64(dst []byte, val float64, unused int) []byte {
switch {
case math.IsNaN(val):
return append(dst, "\xfb\x7f\xf8\x00\x00\x00\x00\x00\x00"...)
@ -412,7 +412,7 @@ func (Encoder) AppendFloat64(dst []byte, val float64) []byte {
}
// AppendFloats64 encodes and inserts an array of double precision float values into the dst byte array.
func (e Encoder) AppendFloats64(dst []byte, vals []float64) []byte {
func (e Encoder) AppendFloats64(dst []byte, vals []float64, unused int) []byte {
major := majorTypeArray
l := len(vals)
if l == 0 {
@ -425,7 +425,7 @@ func (e Encoder) AppendFloats64(dst []byte, vals []float64) []byte {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
for _, v := range vals {
dst = e.AppendFloat64(dst, v)
dst = e.AppendFloat64(dst, v, unused)
}
return dst
}


@ -88,24 +88,24 @@ func appendUnixNanoTimes(dst []byte, vals []time.Time, div int64) []byte {
// AppendDuration formats the input duration with the given unit & format
// and appends the encoded string to the input byte slice.
func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte {
func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool, precision int) []byte {
if useInt {
return strconv.AppendInt(dst, int64(d/unit), 10)
}
return e.AppendFloat64(dst, float64(d)/float64(unit))
return e.AppendFloat64(dst, float64(d)/float64(unit), precision)
}
// AppendDurations formats the input durations with the given unit & format
// and appends the encoded string list to the input byte slice.
func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte {
func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool, precision int) []byte {
if len(vals) == 0 {
return append(dst, '[', ']')
}
dst = append(dst, '[')
dst = e.AppendDuration(dst, vals[0], unit, useInt)
dst = e.AppendDuration(dst, vals[0], unit, useInt, precision)
if len(vals) > 1 {
for _, d := range vals[1:] {
dst = e.AppendDuration(append(dst, ','), d, unit, useInt)
dst = e.AppendDuration(append(dst, ','), d, unit, useInt, precision)
}
}
dst = append(dst, ']')


@ -299,7 +299,7 @@ func (Encoder) AppendUints64(dst []byte, vals []uint64) []byte {
return dst
}
func appendFloat(dst []byte, val float64, bitSize int) []byte {
func appendFloat(dst []byte, val float64, bitSize, precision int) []byte {
// JSON does not permit NaN or Infinity. A typical JSON encoder would fail
// with an error, but a logging library wants the data to get through so we
// make a tradeoff and store those types as string.
@ -311,26 +311,47 @@ func appendFloat(dst []byte, val float64, bitSize int) []byte {
case math.IsInf(val, -1):
return append(dst, `"-Inf"`...)
}
return strconv.AppendFloat(dst, val, 'f', -1, bitSize)
// convert as if by es6 number to string conversion
// see also https://cs.opensource.google/go/go/+/refs/tags/go1.20.3:src/encoding/json/encode.go;l=573
strFmt := byte('f')
// If precision is set to a value other than -1, we always just format the float using that precision.
if precision == -1 {
// Use float32 comparisons for underlying float32 value to get precise cutoffs right.
if abs := math.Abs(val); abs != 0 {
if bitSize == 64 && (abs < 1e-6 || abs >= 1e21) || bitSize == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) {
strFmt = 'e'
}
}
}
dst = strconv.AppendFloat(dst, val, strFmt, precision, bitSize)
if strFmt == 'e' {
// Clean up e-09 to e-9
n := len(dst)
if n >= 4 && dst[n-4] == 'e' && dst[n-3] == '-' && dst[n-2] == '0' {
dst[n-2] = dst[n-1]
dst = dst[:n-1]
}
}
return dst
}
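The block above encodes the ES6-style rule the comment references. As a standalone illustration (my own re-statement using `strconv` directly, not the library's encoder), the expected shapes for `precision == -1` look like this:

```go
package main

import (
	"fmt"
	"math"
	"strconv"
)

// format mirrors the rule above for 64-bit floats with precision -1:
// switch to exponent notation outside [1e-6, 1e21), then shorten "e-0N" to "e-N".
func format(val float64) string {
	f := byte('f')
	if abs := math.Abs(val); abs != 0 && (abs < 1e-6 || abs >= 1e21) {
		f = 'e'
	}
	s := strconv.FormatFloat(val, f, -1, 64)
	if f == 'e' && len(s) >= 4 && s[len(s)-4] == 'e' && s[len(s)-3] == '-' && s[len(s)-2] == '0' {
		s = s[:len(s)-2] + s[len(s)-1:]
	}
	return s
}

func main() {
	fmt.Println(format(123.456))   // 123.456
	fmt.Println(format(0.0000001)) // 1e-7 (not 1e-07)
	fmt.Println(format(1e21))      // 1e+21
}
```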
// AppendFloat32 converts the input float32 to a string and
// appends the encoded string to the input byte slice.
func (Encoder) AppendFloat32(dst []byte, val float32) []byte {
return appendFloat(dst, float64(val), 32)
func (Encoder) AppendFloat32(dst []byte, val float32, precision int) []byte {
return appendFloat(dst, float64(val), 32, precision)
}
// AppendFloats32 encodes the input float32s to json and
// appends the encoded string list to the input byte slice.
func (Encoder) AppendFloats32(dst []byte, vals []float32) []byte {
func (Encoder) AppendFloats32(dst []byte, vals []float32, precision int) []byte {
if len(vals) == 0 {
return append(dst, '[', ']')
}
dst = append(dst, '[')
dst = appendFloat(dst, float64(vals[0]), 32)
dst = appendFloat(dst, float64(vals[0]), 32, precision)
if len(vals) > 1 {
for _, val := range vals[1:] {
dst = appendFloat(append(dst, ','), float64(val), 32)
dst = appendFloat(append(dst, ','), float64(val), 32, precision)
}
}
dst = append(dst, ']')
@ -339,21 +360,21 @@ func (Encoder) AppendFloats32(dst []byte, vals []float32) []byte {
// AppendFloat64 converts the input float64 to a string and
// appends the encoded string to the input byte slice.
func (Encoder) AppendFloat64(dst []byte, val float64) []byte {
return appendFloat(dst, val, 64)
func (Encoder) AppendFloat64(dst []byte, val float64, precision int) []byte {
return appendFloat(dst, val, 64, precision)
}
// AppendFloats64 encodes the input float64s to json and
// appends the encoded string list to the input byte slice.
func (Encoder) AppendFloats64(dst []byte, vals []float64) []byte {
func (Encoder) AppendFloats64(dst []byte, vals []float64, precision int) []byte {
if len(vals) == 0 {
return append(dst, '[', ']')
}
dst = append(dst, '[')
dst = appendFloat(dst, vals[0], 64)
dst = appendFloat(dst, vals[0], 64, precision)
if len(vals) > 1 {
for _, val := range vals[1:] {
dst = appendFloat(append(dst, ','), val, 64)
dst = appendFloat(append(dst, ','), val, 64, precision)
}
}
dst = append(dst, ']')

vendor/github.com/rs/zerolog/log.go (generated, vendored; 34 changed lines)

@ -24,7 +24,7 @@
//
// Sub-loggers let you chain loggers with additional context:
//
// sublogger := log.With().Str("component": "foo").Logger()
// sublogger := log.With().Str("component", "foo").Logger()
// sublogger.Info().Msg("hello world")
// // Output: {"time":1494567715,"level":"info","message":"hello world","component":"foo"}
//
@ -118,7 +118,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"strconv"
"strings"
@ -246,7 +245,7 @@ type Logger struct {
// you may consider using sync wrapper.
func New(w io.Writer) Logger {
if w == nil {
w = ioutil.Discard
w = io.Discard
}
lw, ok := w.(LevelWriter)
if !ok {
@ -326,10 +325,13 @@ func (l Logger) Sample(s Sampler) Logger {
}
// Hook returns a logger with the h Hook.
func (l Logger) Hook(h Hook) Logger {
newHooks := make([]Hook, len(l.hooks), len(l.hooks)+1)
func (l Logger) Hook(hooks ...Hook) Logger {
if len(hooks) == 0 {
return l
}
newHooks := make([]Hook, len(l.hooks), len(l.hooks)+len(hooks))
copy(newHooks, l.hooks)
l.hooks = append(newHooks, h)
l.hooks = append(newHooks, hooks...)
return l
}
@ -385,7 +387,14 @@ func (l *Logger) Err(err error) *Event {
//
// You must call Msg on the returned event in order to send the event.
func (l *Logger) Fatal() *Event {
return l.newEvent(FatalLevel, func(msg string) { os.Exit(1) })
return l.newEvent(FatalLevel, func(msg string) {
if closer, ok := l.w.(io.Closer); ok {
// Close the writer to flush any buffered message. Otherwise the message
// will be lost as os.Exit() terminates the program immediately.
closer.Close()
}
os.Exit(1)
})
}
// Panic starts a new message with panic level. The panic() function
@ -450,6 +459,14 @@ func (l *Logger) Printf(format string, v ...interface{}) {
}
}
// Println sends a log event using debug level and no extra field.
// Arguments are handled in the manner of fmt.Println.
func (l *Logger) Println(v ...interface{}) {
if e := l.Debug(); e.Enabled() {
e.CallerSkipFrame(1).Msg(fmt.Sprintln(v...))
}
}
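A small sketch of the logger-level changes above (the hook types here are hypothetical, defined only for the example):

```go
package main

import (
	"os"

	"github.com/rs/zerolog"
)

// hostHook and appHook are illustrative hooks, not part of zerolog.
type hostHook struct{}

func (hostHook) Run(e *zerolog.Event, _ zerolog.Level, _ string) { e.Str("host", "web-1") }

type appHook struct{}

func (appHook) Run(e *zerolog.Event, _ zerolog.Level, _ string) { e.Str("app", "demo") }

func main() {
	// Hook is now variadic, so several hooks can be registered in one call.
	logger := zerolog.New(os.Stderr).Hook(hostHook{}, appHook{})

	// Println is new and mirrors the standard library: arguments are handled
	// like fmt.Println and, per the implementation above, logged at debug level.
	logger.Println("service starting")
}
```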
// Write implements the io.Writer interface. This is useful to set as a writer
// for the standard library log.
func (l Logger) Write(p []byte) (n int, err error) {
@ -488,6 +505,9 @@ func (l *Logger) newEvent(level Level, done func(string)) *Event {
// should returns true if the log event should be logged.
func (l *Logger) should(lvl Level) bool {
if l.w == nil {
return false
}
if lvl < l.level || lvl < GlobalLevel() {
return false
}

Binary file not shown (before: 82 KiB, after: 116 KiB).


@ -84,7 +84,7 @@ func (s *BurstSampler) Sample(lvl Level) bool {
}
func (s *BurstSampler) inc() uint32 {
now := time.Now().UnixNano()
now := TimestampFunc().UnixNano()
resetAt := atomic.LoadInt64(&s.resetAt)
var c uint32
if now > resetAt {
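For reference, a BurstSampler wired up as the README shows (assuming the usual `zerolog` and `zerolog/log` imports); the one-line change above simply makes the period window follow `zerolog.TimestampFunc` instead of calling `time.Now` directly:

```go
sampled := log.Sample(&zerolog.BurstSampler{
	Burst:       5,                             // allow 5 events...
	Period:      time.Second,                   // ...per second
	NextSampler: &zerolog.BasicSampler{N: 100}, // then pass 1 event in 100
})
sampled.Debug().Msg("burst-limited debug line")
```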


@ -78,3 +78,12 @@ func (sw syslogWriter) WriteLevel(level Level, p []byte) (n int, err error) {
n = len(p)
return
}
// Call the underlying writer's Close method if it is an io.Closer. Otherwise
// does nothing.
func (sw syslogWriter) Close() error {
if c, ok := sw.w.(io.Closer); ok {
return c.Close()
}
return nil
}


@ -27,6 +27,15 @@ func (lw LevelWriterAdapter) WriteLevel(l Level, p []byte) (n int, err error) {
return lw.Write(p)
}
// Call the underlying writer's Close method if it is an io.Closer. Otherwise
// does nothing.
func (lw LevelWriterAdapter) Close() error {
if closer, ok := lw.Writer.(io.Closer); ok {
return closer.Close()
}
return nil
}
type syncWriter struct {
mu sync.Mutex
lw LevelWriter
@ -57,6 +66,15 @@ func (s *syncWriter) WriteLevel(l Level, p []byte) (n int, err error) {
return s.lw.WriteLevel(l, p)
}
func (s *syncWriter) Close() error {
s.mu.Lock()
defer s.mu.Unlock()
if closer, ok := s.lw.(io.Closer); ok {
return closer.Close()
}
return nil
}
type multiLevelWriter struct {
writers []LevelWriter
}
@ -89,6 +107,20 @@ func (t multiLevelWriter) WriteLevel(l Level, p []byte) (n int, err error) {
return n, err
}
// Calls close on all the underlying writers that are io.Closers. If any of the
// Close methods return an error, the remainder of the closers are not closed
// and the error is returned.
func (t multiLevelWriter) Close() error {
for _, w := range t.writers {
if closer, ok := w.(io.Closer); ok {
if err := closer.Close(); err != nil {
return err
}
}
}
return nil
}
// MultiLevelWriter creates a writer that duplicates its writes to all the
// provided writers, similar to the Unix tee(1) command. If some writers
// implement LevelWriter, their WriteLevel method will be used instead of Write.
@ -180,3 +212,135 @@ func (w *FilteredLevelWriter) WriteLevel(level Level, p []byte) (int, error) {
}
return len(p), nil
}
var triggerWriterPool = &sync.Pool{
New: func() interface{} {
return bytes.NewBuffer(make([]byte, 0, 1024))
},
}
// TriggerLevelWriter buffers log lines at the ConditionalLevel or below
// until a trigger level (or higher) line is emitted. Log lines with level
// higher than ConditionalLevel are always written out to the destination
// writer. If trigger never happens, buffered log lines are never written out.
//
// It can be used to configure "log level per request".
type TriggerLevelWriter struct {
// Destination writer. If LevelWriter is provided (usually), its WriteLevel is used
// instead of Write.
io.Writer
// ConditionalLevel is the level (and below) at which lines are buffered until
// a trigger level (or higher) line is emitted. Usually this is set to DebugLevel.
ConditionalLevel Level
// TriggerLevel is the lowest level that triggers the sending of the conditional
// level lines. Usually this is set to ErrorLevel.
TriggerLevel Level
buf *bytes.Buffer
triggered bool
mu sync.Mutex
}
func (w *TriggerLevelWriter) WriteLevel(l Level, p []byte) (n int, err error) {
w.mu.Lock()
defer w.mu.Unlock()
// At first trigger level or above log line, we flush the buffer and change the
// trigger state to triggered.
if !w.triggered && l >= w.TriggerLevel {
err := w.trigger()
if err != nil {
return 0, err
}
}
// Unless triggered, we buffer everything at and below ConditionalLevel.
if !w.triggered && l <= w.ConditionalLevel {
if w.buf == nil {
w.buf = triggerWriterPool.Get().(*bytes.Buffer)
}
// We prefix each log line with a byte with the level.
// Hopefully we will never have a level value which equals a newline
// (which could interfere with reconstruction of log lines in the trigger method).
w.buf.WriteByte(byte(l))
w.buf.Write(p)
return len(p), nil
}
// Anything above ConditionalLevel is always passed through.
// Once triggered, everything is passed through.
if lw, ok := w.Writer.(LevelWriter); ok {
return lw.WriteLevel(l, p)
}
return w.Write(p)
}
// trigger expects lock to be held.
func (w *TriggerLevelWriter) trigger() error {
if w.triggered {
return nil
}
w.triggered = true
if w.buf == nil {
return nil
}
p := w.buf.Bytes()
for len(p) > 0 {
// We do not use bufio.Scanner here because we already have full buffer
// in the memory and we do not want extra copying from the buffer to
// scanner's token slice, nor we want to hit scanner's token size limit,
// and we also want to preserve newlines.
i := bytes.IndexByte(p, '\n')
line := p[0 : i+1]
p = p[i+1:]
// We prefixed each log line with a byte with the level.
level := Level(line[0])
line = line[1:]
var err error
if lw, ok := w.Writer.(LevelWriter); ok {
_, err = lw.WriteLevel(level, line)
} else {
_, err = w.Write(line)
}
if err != nil {
return err
}
}
return nil
}
// Trigger forces flushing the buffer and change the trigger state to
// triggered, if the writer has not already been triggered before.
func (w *TriggerLevelWriter) Trigger() error {
w.mu.Lock()
defer w.mu.Unlock()
return w.trigger()
}
// Close closes the writer and returns the buffer to the pool.
func (w *TriggerLevelWriter) Close() error {
w.mu.Lock()
defer w.mu.Unlock()
if w.buf == nil {
return nil
}
// We return the buffer only if it has not grown above the limit.
// This prevents accumulation of large buffers in the pool just
// because occasionally a large buffer might be needed.
if w.buf.Cap() <= TriggerLevelWriterBufferReuseLimit {
w.buf.Reset()
triggerWriterPool.Put(w.buf)
}
w.buf = nil
return nil
}
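A sketch of the "log level per request" pattern the doc comment above describes (the handler shape is illustrative; the writer fields are as defined in this diff):

```go
func handleRequest() {
	// Buffer debug lines; they are only written out if something at error
	// level (or above) happens during this request.
	w := &zerolog.TriggerLevelWriter{
		Writer:           os.Stderr,
		ConditionalLevel: zerolog.DebugLevel,
		TriggerLevel:     zerolog.ErrorLevel,
	}
	defer w.Close() // returns the internal buffer to the pool

	logger := zerolog.New(w).With().Timestamp().Logger()
	logger.Debug().Msg("parsed request")      // buffered for now
	logger.Error().Msg("backend unavailable") // trigger: buffered lines flush first
}
```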

vendor/modules.txt (vendored; 2 changed lines)

@ -76,7 +76,7 @@ github.com/pkg/sftp/internal/encoding/ssh/filexfer
# github.com/pmezard/go-difflib v1.0.0
## explicit
github.com/pmezard/go-difflib/difflib
# github.com/rs/zerolog v1.31.0
# github.com/rs/zerolog v1.33.0
## explicit; go 1.15
github.com/rs/zerolog
github.com/rs/zerolog/internal/cbor