New upstream release.
Debian Janitor
2 years ago
1 | 1 | All notable changes to this project will be documented in this file. |
2 | 2 | |
3 | 3 | This project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). |
4 | ||
5 | ## 1.21.0 (7 Feb 2022) | |
6 | ||
7 | Enhancements: | |
8 | * [#1047][]: Add `zapcore.ParseLevel` to parse a `Level` from a string. | |
9 | * [#1048][]: Add `zap.ParseAtomicLevel` to parse an `AtomicLevel` from a | |
10 | string. | |
11 | ||
12 | Bugfixes: | |
13 | * [#1058][]: Fix panic in JSON encoder when `EncodeLevel` is unset. | |
14 | ||
15 | Other changes: | |
16 | * [#1052][]: Improve encoding performance when the `AddCaller` and | |
17 | `AddStacktrace` options are used together. | |
18 | ||
19 | [#1047]: https://github.com/uber-go/zap/pull/1047 | |
20 | [#1048]: https://github.com/uber-go/zap/pull/1048 | |
21 | [#1052]: https://github.com/uber-go/zap/pull/1052 | |
22 | [#1058]: https://github.com/uber-go/zap/pull/1058 | |
23 | ||
24 | Thanks to @aerosol and @Techassi for their contributions to this release. | |
4 | 25 | |
5 | 26 | ## 1.20.0 (4 Jan 2022) |
6 | 27 |
41 | 41 | withLogger(t, DebugLevel, []Option{WithClock(clock)}, func(log *Logger, logs *observer.ObservedLogs) { |
42 | 42 | log.Info("") |
43 | 43 | require.Equal(t, 1, logs.Len(), "Expected only one log entry to be written.") |
44 | assert.Equal(t, date, logs.All()[0].Entry.Time, "Unexpected entry time.") | |
44 | assert.Equal(t, date, logs.All()[0].Time, "Unexpected entry time.") | |
45 | 45 | }) |
46 | 46 | } |
0 | golang-go.uber-zap (1.21.0-1) UNRELEASED; urgency=low | |
1 | ||
2 | * New upstream release. | |
3 | ||
4 | -- Debian Janitor <janitor@jelmer.uk> Sun, 15 May 2022 10:52:00 -0000 | |
5 | ||
0 | 6 | golang-go.uber-zap (1.20.0-1) unstable; urgency=medium |
1 | 7 | |
2 | 8 | [ Nobuhiro Iwamatsu ] |
170 | 170 | log.Print("redirected") |
171 | 171 | entries := logs.All() |
172 | 172 | require.Len(t, entries, 1, "Unexpected number of logs.") |
173 | assert.Contains(t, entries[0].Entry.Caller.File, "global_test.go", "Unexpected caller annotation.") | |
173 | assert.Contains(t, entries[0].Caller.File, "global_test.go", "Unexpected caller annotation.") | |
174 | 174 | }) |
175 | 175 | } |
176 | 176 | |
209 | 209 | log.Print("redirected") |
210 | 210 | entries := logs.All() |
211 | 211 | require.Len(t, entries, 1, "Unexpected number of logs.") |
212 | assert.Contains(t, entries[0].Entry.Caller.File, "global_test.go", "Unexpected caller annotation.") | |
212 | assert.Contains(t, entries[0].Caller.File, "global_test.go", "Unexpected caller annotation.") | |
213 | 213 | }) |
214 | 214 | } |
215 | 215 | } |
269 | 269 | require.Equal(t, 1, logs.Len(), "Expected exactly one entry to be logged") |
270 | 270 | entry := logs.AllUntimed()[0] |
271 | 271 | assert.Equal(t, []Field{}, entry.Context, "Unexpected entry context.") |
272 | assert.Equal(t, "redirected", entry.Entry.Message, "Unexpected entry message.") | |
272 | assert.Equal(t, "redirected", entry.Message, "Unexpected entry message.") | |
273 | 273 | assert.Regexp( |
274 | 274 | t, |
275 | 275 | `/global_test.go:\d+$`, |
276 | entry.Entry.Caller.String(), | |
276 | entry.Caller.String(), | |
277 | 277 | "Unexpected caller annotation.", |
278 | 278 | ) |
279 | 279 | } |
85 | 85 | return a |
86 | 86 | } |
87 | 87 | |
88 | // ParseAtomicLevel parses an AtomicLevel based on a lowercase or all-caps ASCII | |
89 | // representation of the log level. If the provided ASCII representation is | |
90 | // invalid an error is returned. | |
91 | // | |
92 | // This is particularly useful when dealing with text input to configure log | |
93 | // levels. | |
94 | func ParseAtomicLevel(text string) (AtomicLevel, error) { | |
95 | a := NewAtomicLevel() | |
96 | l, err := zapcore.ParseLevel(text) | |
97 | if err != nil { | |
98 | return a, err | |
99 | } | |
100 | ||
101 | a.SetLevel(l) | |
102 | return a, nil | |
103 | } | |
104 | ||
88 | 105 | // Enabled implements the zapcore.LevelEnabler interface, which allows the |
89 | 106 | // AtomicLevel to be used in place of traditional static levels. |
90 | 107 | func (lvl AtomicLevel) Enabled(l zapcore.Level) bool { |
54 | 54 | assert.Equal(t, ErrorLevel, lvl.Level(), "Unexpected level after SetLevel.") |
55 | 55 | lvl = NewAtomicLevelAt(WarnLevel) |
56 | 56 | assert.Equal(t, WarnLevel, lvl.Level(), "Unexpected level after SetLevel.") |
57 | } | |
58 | ||
59 | func TestParseAtomicLevel(t *testing.T) { | |
60 | tests := []struct { | |
61 | text string | |
62 | level AtomicLevel | |
63 | err string | |
64 | }{ | |
65 | {"info", NewAtomicLevel(), ""}, | |
66 | {"DEBUG", NewAtomicLevelAt(DebugLevel), ""}, | |
67 | {"FOO", NewAtomicLevel(), `unrecognized level: "FOO"`}, | |
68 | } | |
69 | ||
70 | for _, tt := range tests { | |
71 | parsedAtomicLevel, err := ParseAtomicLevel(tt.text) | |
72 | if len(tt.err) == 0 { | |
73 | assert.NoError(t, err) | |
74 | assert.Equal(t, tt.level, parsedAtomicLevel) | |
75 | } else { | |
76 | assert.Error(t, err) | |
77 | assert.Contains(t, err.Error(), tt.err) | |
78 | } | |
79 | } | |
57 | 80 | } |
58 | 81 | |
59 | 82 | func TestAtomicLevelMutation(t *testing.T) { |
23 | 23 | "fmt" |
24 | 24 | "io/ioutil" |
25 | 25 | "os" |
26 | "runtime" | |
27 | 26 | "strings" |
28 | 27 | |
28 | "go.uber.org/zap/internal/bufferpool" | |
29 | 29 | "go.uber.org/zap/zapcore" |
30 | 30 | ) |
31 | 31 | |
258 | 258 | } |
259 | 259 | |
260 | 260 | func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { |
261 | // check must always be called directly by a method in the Logger interface | |
262 | // (e.g., Check, Info, Fatal). | |
261 | // Logger.check must always be called directly by a method in the | |
262 | // Logger interface (e.g., Check, Info, Fatal). | |
263 | // This skips Logger.check and the Info/Fatal/Check/etc. method that | |
264 | // called it. | |
263 | 265 | const callerSkipOffset = 2 |
264 | 266 | |
265 | 267 | // Check the level first to reduce the cost of disabled log calls. |
306 | 308 | |
307 | 309 | // Thread the error output through to the CheckedEntry. |
308 | 310 | ce.ErrorOutput = log.errorOutput |
309 | if log.addCaller { | |
310 | frame, defined := getCallerFrame(log.callerSkip + callerSkipOffset) | |
311 | if !defined { | |
311 | ||
312 | addStack := log.addStack.Enabled(ce.Level) | |
313 | if !log.addCaller && !addStack { | |
314 | return ce | |
315 | } | |
316 | ||
317 | // Adding the caller or stack trace requires capturing the callers of | |
318 | // this function. We'll share information between these two. | |
319 | stackDepth := stacktraceFirst | |
320 | if addStack { | |
321 | stackDepth = stacktraceFull | |
322 | } | |
323 | stack := captureStacktrace(log.callerSkip+callerSkipOffset, stackDepth) | |
324 | defer stack.Free() | |
325 | ||
326 | if stack.Count() == 0 { | |
327 | if log.addCaller { | |
312 | 328 | fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC()) |
313 | 329 | log.errorOutput.Sync() |
314 | 330 | } |
315 | ||
316 | ce.Entry.Caller = zapcore.EntryCaller{ | |
317 | Defined: defined, | |
331 | return ce | |
332 | } | |
333 | ||
334 | frame, more := stack.Next() | |
335 | ||
336 | if log.addCaller { | |
337 | ce.Caller = zapcore.EntryCaller{ | |
338 | Defined: frame.PC != 0, | |
318 | 339 | PC: frame.PC, |
319 | 340 | File: frame.File, |
320 | 341 | Line: frame.Line, |
321 | 342 | Function: frame.Function, |
322 | 343 | } |
323 | 344 | } |
324 | if log.addStack.Enabled(ce.Entry.Level) { | |
325 | ce.Entry.Stack = StackSkip("", log.callerSkip+callerSkipOffset).String | |
345 | ||
346 | if addStack { | |
347 | buffer := bufferpool.Get() | |
348 | defer buffer.Free() | |
349 | ||
350 | stackfmt := newStackFormatter(buffer) | |
351 | ||
352 | // We've already extracted the first frame, so format that | |
353 | // separately and defer to stackfmt for the rest. | |
354 | stackfmt.FormatFrame(frame) | |
355 | if more { | |
356 | stackfmt.FormatStack(stack) | |
357 | } | |
358 | ce.Stack = buffer.String() | |
326 | 359 | } |
327 | 360 | |
328 | 361 | return ce |
329 | 362 | } |
330 | ||
331 | // getCallerFrame gets caller frame. The argument skip is the number of stack | |
332 | // frames to ascend, with 0 identifying the caller of getCallerFrame. The | |
333 | // boolean ok is false if it was not possible to recover the information. | |
334 | // | |
335 | // Note: This implementation is similar to runtime.Caller, but it returns the whole frame. | |
336 | func getCallerFrame(skip int) (frame runtime.Frame, ok bool) { | |
337 | const skipOffset = 2 // skip getCallerFrame and Callers | |
338 | ||
339 | pc := make([]uintptr, 1) | |
340 | numFrames := runtime.Callers(skip+skipOffset, pc) | |
341 | if numFrames < 1 { | |
342 | return | |
343 | } | |
344 | ||
345 | frame, _ = runtime.CallersFrames(pc).Next() | |
346 | return frame, frame.PC != 0 | |
347 | } |
178 | 178 | }) |
179 | 179 | } |
180 | 180 | |
// BenchmarkAddCallerAndStacktrace measures logging cost when both caller
// annotation and stack trace capture fire for the same entry: the logger is
// built with AddCaller and AddStacktrace(WarnLevel), and every logged entry
// is at Warn, so both code paths run on each iteration.
func BenchmarkAddCallerAndStacktrace(b *testing.B) {
	logger := New(
		zapcore.NewCore(
			zapcore.NewJSONEncoder(NewProductionConfig().EncoderConfig),
			&ztest.Discarder{}, // discard output; we only measure encoding/capture
			InfoLevel,
		),
		AddCaller(),
		AddStacktrace(WarnLevel),
	)
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			logger.Warn("Caller and stacktrace.")
		}
	})
}
198 | ||
181 | 199 | func Benchmark10Fields(b *testing.B) { |
182 | 200 | withBenchedLogger(b, func(log *Logger) { |
183 | 201 | log.Info("Ten fields, passed at the log site.", |
283 | 283 | } |
284 | 284 | log.Info("") |
285 | 285 | require.Equal(t, 1, logs.Len(), "Expected only one log entry to be written.") |
286 | assert.Equal(t, tt.expected, logs.AllUntimed()[0].Entry.LoggerName, "Unexpected logger name.") | |
286 | assert.Equal(t, tt.expected, logs.AllUntimed()[0].LoggerName, "Unexpected logger name.") | |
287 | 287 | }) |
288 | 288 | withSugar(t, DebugLevel, nil, func(log *SugaredLogger, logs *observer.ObservedLogs) { |
289 | 289 | for _, n := range tt.names { |
291 | 291 | } |
292 | 292 | log.Infow("") |
293 | 293 | require.Equal(t, 1, logs.Len(), "Expected only one log entry to be written.") |
294 | assert.Equal(t, tt.expected, logs.AllUntimed()[0].Entry.LoggerName, "Unexpected logger name.") | |
294 | assert.Equal(t, tt.expected, logs.AllUntimed()[0].LoggerName, "Unexpected logger name.") | |
295 | 295 | }) |
296 | 296 | } |
297 | 297 | } |
358 | 358 | assert.Regexp( |
359 | 359 | t, |
360 | 360 | tt.pat, |
361 | output[0].Entry.Caller, | |
361 | output[0].Caller, | |
362 | 362 | "Expected to find package name and file name in output.", |
363 | 363 | ) |
364 | 364 | }) |
431 | 431 | assert.Regexp( |
432 | 432 | t, |
433 | 433 | tt.loggerFunction, |
434 | entry.Entry.Caller.Function, | |
434 | entry.Caller.Function, | |
435 | 435 | "Expected to find function name in output.", |
436 | 436 | ) |
437 | 437 | } |
438 | 438 | assert.Regexp( |
439 | 439 | t, |
440 | 440 | tt.sugaredFunction, |
441 | entries[1].Entry.Caller.Function, | |
441 | entries[1].Caller.Function, | |
442 | 442 | "Expected to find function name in output.", |
443 | 443 | ) |
444 | 444 | }) |
457 | 457 | ) |
458 | 458 | assert.Equal( |
459 | 459 | t, |
460 | logs.AllUntimed()[0].Entry.Message, | |
460 | logs.AllUntimed()[0].Message, | |
461 | 461 | "Failure.", |
462 | 462 | "Expected original message to survive failures in runtime.Caller.") |
463 | 463 | assert.Equal( |
464 | 464 | t, |
465 | logs.AllUntimed()[0].Entry.Caller.Function, | |
465 | logs.AllUntimed()[0].Caller.Function, | |
466 | 466 | "", |
467 | 467 | "Expected function name to be empty string.") |
468 | 468 | }) |
488 | 488 | require.Equal(t, 2, logs.Len(), "expected only warn + error logs due to IncreaseLevel.") |
489 | 489 | assert.Equal( |
490 | 490 | t, |
491 | logs.AllUntimed()[0].Entry.Message, | |
491 | logs.AllUntimed()[0].Message, | |
492 | 492 | "logger.Warn", |
493 | 493 | "Expected first logged message to be warn level message", |
494 | 494 | ) |
23 | 23 | "runtime" |
24 | 24 | "sync" |
25 | 25 | |
26 | "go.uber.org/zap/buffer" | |
26 | 27 | "go.uber.org/zap/internal/bufferpool" |
27 | 28 | ) |
28 | 29 | |
29 | var ( | |
30 | _stacktracePool = sync.Pool{ | |
31 | New: func() interface{} { | |
32 | return newProgramCounters(64) | |
33 | }, | |
34 | } | |
// _stacktracePool pools stacktrace values so that capturing a stack trace
// does not allocate fresh program-counter storage on every call. New values
// start with room for 64 frames; captureStacktrace replaces (and keeps)
// larger storage when a deeper stack is encountered.
var _stacktracePool = sync.Pool{
	New: func() interface{} {
		return &stacktrace{
			storage: make([]uintptr, 64),
		}
	},
}
37 | ||
// stacktrace is a pooled capture of a call stack's program counters together
// with an iterator over the corresponding frames.
type stacktrace struct {
	pcs    []uintptr // program counters; always a subslice of storage
	frames *runtime.Frames

	// The size of pcs varies depending on requirements:
	// it will be one if only the first frame was requested,
	// and otherwise it will reflect the depth of the call stack.
	//
	// storage decouples the slice we need (pcs) from the slice we pool.
	// We will always allocate a reasonably large storage, but we'll use
	// only as much of it as we need.
	storage []uintptr
}
51 | ||
// stacktraceDepth specifies how deep of a stack trace should be captured
// by captureStacktrace.
type stacktraceDepth int

const (
	// stacktraceFirst captures only the first frame.
	stacktraceFirst stacktraceDepth = iota

	// stacktraceFull captures the entire call stack, allocating more
	// storage for it if needed.
	stacktraceFull
)
36 | 63 | |
// captureStacktrace captures a stack trace of the specified depth, skipping
// the provided number of frames. skip=0 identifies the caller of
// captureStacktrace.
//
// The caller must call Free on the returned stacktrace after using it.
func captureStacktrace(skip int, depth stacktraceDepth) *stacktrace {
	stack := _stacktracePool.Get().(*stacktrace)

	// Size pcs for the requested depth: a single slot when only the first
	// frame is wanted, or the full pooled storage otherwise.
	switch depth {
	case stacktraceFirst:
		stack.pcs = stack.storage[:1]
	case stacktraceFull:
		stack.pcs = stack.storage
	}

	// Unlike other "skip"-based APIs, skip=0 identifies runtime.Callers
	// itself. +2 to skip captureStacktrace and runtime.Callers.
	numFrames := runtime.Callers(
		skip+2,
		stack.pcs,
	)

	// runtime.Callers truncates the recorded stacktrace if there is no
	// room in the provided slice. For the full stack trace, keep expanding
	// storage until there are fewer frames than there is room.
	if depth == stacktraceFull {
		pcs := stack.pcs
		for numFrames == len(pcs) {
			pcs = make([]uintptr, len(pcs)*2)
			numFrames = runtime.Callers(skip+2, pcs)
		}

		// Discard old storage instead of returning it to the pool.
		// This will adjust the pool size over time if stack traces are
		// consistently very deep.
		stack.storage = pcs
		stack.pcs = pcs[:numFrames]
	} else {
		stack.pcs = stack.pcs[:numFrames]
	}

	stack.frames = runtime.CallersFrames(stack.pcs)
	return stack
}
108 | ||
109 | // Free releases resources associated with this stacktrace | |
110 | // and returns it back to the pool. | |
111 | func (st *stacktrace) Free() { | |
112 | st.frames = nil | |
113 | st.pcs = nil | |
114 | _stacktracePool.Put(st) | |
115 | } | |
116 | ||
// Count reports the total number of frames in this stacktrace.
// Count DOES NOT change as Next is called: it is derived from the captured
// program counters, while Next only advances the frames iterator.
func (st *stacktrace) Count() int {
	return len(st.pcs)
}
122 | ||
// Next returns the next frame in the stack trace,
// and a boolean indicating whether there are more after it.
// It delegates directly to the underlying runtime.Frames iterator.
func (st *stacktrace) Next() (_ runtime.Frame, more bool) {
	return st.frames.Next()
}
128 | ||
37 | 129 | func takeStacktrace(skip int) string { |
130 | stack := captureStacktrace(skip+1, stacktraceFull) | |
131 | defer stack.Free() | |
132 | ||
38 | 133 | buffer := bufferpool.Get() |
39 | 134 | defer buffer.Free() |
40 | programCounters := _stacktracePool.Get().(*programCounters) | |
41 | defer _stacktracePool.Put(programCounters) | |
42 | 135 | |
43 | var numFrames int | |
44 | for { | |
45 | // Skip the call to runtime.Callers and takeStacktrace so that the | |
46 | // program counters start at the caller of takeStacktrace. | |
47 | numFrames = runtime.Callers(skip+2, programCounters.pcs) | |
48 | if numFrames < len(programCounters.pcs) { | |
49 | break | |
50 | } | |
51 | // Don't put the too-short counter slice back into the pool; this lets | |
52 | // the pool adjust if we consistently take deep stacktraces. | |
53 | programCounters = newProgramCounters(len(programCounters.pcs) * 2) | |
54 | } | |
136 | stackfmt := newStackFormatter(buffer) | |
137 | stackfmt.FormatStack(stack) | |
138 | return buffer.String() | |
139 | } | |
55 | 140 | |
56 | i := 0 | |
57 | frames := runtime.CallersFrames(programCounters.pcs[:numFrames]) | |
// stackFormatter formats a stack trace into a readable string representation.
type stackFormatter struct {
	b        *buffer.Buffer
	nonEmpty bool // whether we've written at least one frame already
}
58 | 146 | |
// newStackFormatter builds a new stackFormatter that writes to the given
// buffer. nonEmpty starts false, so the first formatted frame is not
// preceded by a separating newline.
func newStackFormatter(b *buffer.Buffer) stackFormatter {
	return stackFormatter{b: b}
}
151 | ||
152 | // FormatStack formats all remaining frames in the provided stacktrace -- minus | |
153 | // the final runtime.main/runtime.goexit frame. | |
154 | func (sf *stackFormatter) FormatStack(stack *stacktrace) { | |
59 | 155 | // Note: On the last iteration, frames.Next() returns false, with a valid |
60 | 156 | // frame, but we ignore this frame. The last frame is a runtime frame which |
61 | 157 | // adds noise, since it's only either runtime.main or runtime.goexit. |
62 | for frame, more := frames.Next(); more; frame, more = frames.Next() { | |
63 | if i != 0 { | |
64 | buffer.AppendByte('\n') | |
65 | } | |
66 | i++ | |
67 | buffer.AppendString(frame.Function) | |
68 | buffer.AppendByte('\n') | |
69 | buffer.AppendByte('\t') | |
70 | buffer.AppendString(frame.File) | |
71 | buffer.AppendByte(':') | |
72 | buffer.AppendInt(int64(frame.Line)) | |
158 | for frame, more := stack.Next(); more; frame, more = stack.Next() { | |
159 | sf.FormatFrame(frame) | |
73 | 160 | } |
74 | ||
75 | return buffer.String() | |
76 | 161 | } |
77 | 162 | |
78 | type programCounters struct { | |
79 | pcs []uintptr | |
163 | // FormatFrame formats the given frame. | |
164 | func (sf *stackFormatter) FormatFrame(frame runtime.Frame) { | |
165 | if sf.nonEmpty { | |
166 | sf.b.AppendByte('\n') | |
167 | } | |
168 | sf.nonEmpty = true | |
169 | sf.b.AppendString(frame.Function) | |
170 | sf.b.AppendByte('\n') | |
171 | sf.b.AppendByte('\t') | |
172 | sf.b.AppendString(frame.File) | |
173 | sf.b.AppendByte(':') | |
174 | sf.b.AppendInt(int64(frame.Line)) | |
80 | 175 | } |
81 | ||
82 | func newProgramCounters(size int) *programCounters { | |
83 | return &programCounters{make([]uintptr, size)} | |
84 | } |
20 | 20 | package zap |
21 | 21 | |
22 | 22 | import ( |
23 | "bytes" | |
23 | 24 | "strings" |
24 | 25 | "testing" |
25 | 26 | |
66 | 67 | ) |
67 | 68 | } |
68 | 69 | |
// TestTakeStacktraceDeepStack verifies that takeStacktrace does not truncate
// deep call stacks: after recursing 500 levels via withStackDepth, every one
// of those recursion frames must be present in the captured trace.
func TestTakeStacktraceDeepStack(t *testing.T) {
	const (
		N                  = 500
		withStackDepthName = "go.uber.org/zap.withStackDepth"
	)
	withStackDepth(N, func() {
		trace := takeStacktrace(0)
		// Consume the trace one occurrence at a time so each of the N
		// expected frames is counted exactly once.
		for found := 0; found < N; found++ {
			i := strings.Index(trace, withStackDepthName)
			if i < 0 {
				t.Fatalf(`expected %v occurrences of %q, found %d`,
					N, withStackDepthName, found)
			}
			trace = trace[i+len(withStackDepthName):]
		}
	})
}
87 | ||
69 | 88 | func BenchmarkTakeStacktrace(b *testing.B) { |
70 | 89 | for i := 0; i < b.N; i++ { |
71 | 90 | takeStacktrace(0) |
72 | 91 | } |
73 | 92 | } |
93 | ||
// withStackDepth invokes f with the call stack deepened by depth extra
// frames. It recurses indirectly through bytes.Map, which keeps each level
// as a distinct frame on the stack (presumably to defeat inlining — the
// deep-stack tests rely on the frames actually existing).
func withStackDepth(depth int, f func()) {
	var descend func(rune) rune
	descend = func(r rune) rune {
		if r > 0 {
			bytes.Map(descend, []byte(string([]rune{r - 1})))
			return 0
		}
		f()
		return 0
	}
	descend(rune(depth))
}
343 | 343 | assert.Regexp( |
344 | 344 | t, |
345 | 345 | tt.pat, |
346 | output[0].Entry.Caller, | |
346 | output[0].Caller, | |
347 | 347 | "Expected to find package name and file name in output.", |
348 | 348 | ) |
349 | 349 | }) |
362 | 362 | ) |
363 | 363 | assert.Equal( |
364 | 364 | t, |
365 | logs.AllUntimed()[0].Entry.Message, | |
365 | logs.AllUntimed()[0].Message, | |
366 | 366 | "Failure.", |
367 | 367 | "Expected original message to survive failures in runtime.Caller.") |
368 | 368 | }) |
363 | 363 | final := enc.clone() |
364 | 364 | final.buf.AppendByte('{') |
365 | 365 | |
366 | if final.LevelKey != "" { | |
366 | if final.LevelKey != "" && final.EncodeLevel != nil { | |
367 | 367 | final.addKey(final.LevelKey) |
368 | 368 | cur := final.buf.Len() |
369 | 369 | final.EncodeLevel(ent.Level, final) |
135 | 135 | } |
136 | 136 | } |
137 | 137 | |
// TestNoEncodeLevelSupplied ensures that EncodeEntry tolerates a config that
// sets LevelKey but leaves EncodeLevel nil: encoding must complete without
// error (previously this panicked — see the guard on final.EncodeLevel).
func TestNoEncodeLevelSupplied(t *testing.T) {
	// EncodeLevel is deliberately omitted from this config.
	enc := zapcore.NewJSONEncoder(zapcore.EncoderConfig{
		MessageKey:     "M",
		LevelKey:       "L",
		TimeKey:        "T",
		NameKey:        "N",
		CallerKey:      "C",
		FunctionKey:    "F",
		StacktraceKey:  "S",
		EncodeTime:     zapcore.ISO8601TimeEncoder,
		EncodeDuration: zapcore.SecondsDurationEncoder,
		EncodeCaller:   zapcore.ShortCallerEncoder,
	})

	ent := zapcore.Entry{
		Level:      zapcore.InfoLevel,
		Time:       time.Date(2018, 6, 19, 16, 33, 42, 99, time.UTC),
		LoggerName: "bob",
		Message:    "lob law",
	}

	fields := []zapcore.Field{
		zap.Int("answer", 42),
	}

	_, err := enc.EncodeEntry(ent, fields)
	assert.NoError(t, err, "Unexpected JSON encoding error.")
}
166 | ||
138 | 167 | func TestJSONEmptyConfig(t *testing.T) { |
139 | 168 | tests := []struct { |
140 | 169 | name string |
53 | 53 | _minLevel = DebugLevel |
54 | 54 | _maxLevel = FatalLevel |
55 | 55 | ) |
56 | ||
57 | // ParseLevel parses a level based on the lower-case or all-caps ASCII | |
58 | // representation of the log level. If the provided ASCII representation is | |
59 | // invalid an error is returned. | |
60 | // | |
61 | // This is particularly useful when dealing with text input to configure log | |
62 | // levels. | |
63 | func ParseLevel(text string) (Level, error) { | |
64 | var level Level | |
65 | err := level.UnmarshalText([]byte(text)) | |
66 | return level, err | |
67 | } | |
56 | 68 | |
57 | 69 | // String returns a lower-case ASCII representation of the log level. |
58 | 70 | func (l Level) String() string { |
72 | 72 | err := unmarshaled.UnmarshalText([]byte(tt.text)) |
73 | 73 | assert.NoError(t, err, `Unexpected error unmarshaling text %q to level.`, tt.text) |
74 | 74 | assert.Equal(t, tt.level, unmarshaled, `Text %q unmarshaled to an unexpected level.`, tt.text) |
75 | } | |
76 | } | |
77 | ||
78 | func TestParseLevel(t *testing.T) { | |
79 | tests := []struct { | |
80 | text string | |
81 | level Level | |
82 | err string | |
83 | }{ | |
84 | {"info", InfoLevel, ""}, | |
85 | {"DEBUG", DebugLevel, ""}, | |
86 | {"FOO", 0, `unrecognized level: "FOO"`}, | |
87 | } | |
88 | for _, tt := range tests { | |
89 | parsedLevel, err := ParseLevel(tt.text) | |
90 | if len(tt.err) == 0 { | |
91 | assert.NoError(t, err) | |
92 | assert.Equal(t, tt.level, parsedLevel) | |
93 | } else { | |
94 | assert.Error(t, err) | |
95 | assert.Contains(t, err.Error(), tt.err) | |
96 | } | |
75 | 97 | } |
76 | 98 | } |
77 | 99 |