Merge tag 'upstream/0.10.0'
Upstream version 0.10.0
Agustin Henze
8 years ago
9 | 9 | - "3.3" |
10 | 10 | - "3.4" |
11 | 11 | - "pypy" |
12 | - "pypy3" | |
12 | 13 | |
13 | 14 | install: |
14 | 15 | # this fixes SemLock issues on travis |
31 | 32 | exclude: |
32 | 33 | - python: "pypy" |
33 | 34 | env: COMMAND="make cybuild test" |
35 | - python: "pypy3" | |
36 | env: COMMAND="make cybuild test" | |
34 | 37 | |
35 | 38 | notifications: |
36 | 39 | email: |
1 | 1 | ================= |
2 | 2 | |
3 | 3 | Here you can see the full list of changes between each Logbook release. |
4 | ||
5 | Version 0.10.0 | |
6 | -------------- | |
7 | ||
8 | Released on July 14th 2015 | |
9 | ||
10 | - Removed the global handler which was installed by default so far. The rationale is to ease adoption for 3rd party libraries who don't want output to go to stderr by default. The old behavior can still be restored by setting the LOGBOOK_INSTALL_DEFAULT_HANDLER environment variable | |
11 | - Fix small Python 3 compatibility issues | |
12 | ||
13 | Version 0.9.1 | |
14 | ------------- | |
15 | ||
16 | Released on May 4th 2015 | |
17 | ||
18 | - Minor fixes | |
19 | ||
20 | ||
21 | Version 0.9.0 | |
22 | ------------- | |
23 | ||
24 | Released on February 12th 2015 | |
25 | ||
26 | - Added frame correction option, to properly mark log record origins (Thanks Roey Darwish) | |
27 | - Fixed MailHandler infinite recursion bug (Thanks Tal Yalon) | |
28 | ||
29 | ||
30 | Version 0.8.1 | |
31 | ------------- | |
32 | ||
33 | Released on November 30th 2014 | |
34 | ||
35 | - Fix support for gevent, along with various other fixes (Thanks Tal Yalon) | |
36 | ||
37 | Version 0.8.0 | |
38 | ------------- | |
39 | ||
40 | Released on November 11th 2014. Codename "improve_some_more" | |
41 | ||
42 | - Added Pushover handler (thanks @owo) | |
43 | - Default logging format now includes seconds and microseconds (#110) | |
44 | - Several fixes regarding possible deadlocks in log formatting (thanks Guy Rozendorn) | |
45 | - Fixed packaging so that the speedups module now gets packaged by default (#122) | |
46 | - ThreadedWrapperHandler now supports maxsize (#121) (thanks Ken Jung) | |
47 | - Fixes in rotating handlers (thanks zhangliyong) | |
48 | - Added Pushover handler (thanks Ossama W. Obeid) | |
49 | - RedisHandler now supports lpush as an option (thanks Bruno Rocha) | |
50 | ||
4 | 51 | |
5 | 52 | Version 0.7.0 |
6 | 53 | ------------- |
8 | 8 | @python scripts/test_setup.py |
9 | 9 | |
10 | 10 | test: |
11 | @nosetests -w tests | |
11 | @py.test tests | |
12 | 12 | |
13 | 13 | toxtest: |
14 | 14 | @tox |
26 | 26 | docs: |
27 | 27 | make -C docs html SPHINXOPTS=-Aonline=1 |
28 | 28 | |
29 | release: upload-docs | |
29 | release: logbook/_speedups.so upload-docs | |
30 | 30 | python scripts/make-release.py |
31 | 31 | |
32 | 32 | logbook/_speedups.so: logbook/_speedups.pyx |
13 | 13 | |
14 | 14 | It should be easy to setup, use and configure and support web applications :) |
15 | 15 | |
16 | For more information look at http://logbook.pocoo.org/ | |
16 | For more information look at http://pythonhosted.org/Logbook |
27 | 27 | return result |
28 | 28 | |
29 | 29 | |
30 | def run_bench(name): | |
30 | def run_bench(name, use_gevent=False): | |
31 | 31 | sys.stdout.write('%-32s' % name) |
32 | 32 | sys.stdout.flush() |
33 | 33 | Popen([sys.executable, '-mtimeit', '-s', |
34 | 34 | 'from bench_%s import run' % name, |
35 | 'from logbook.concurrency import enable_gevent', | |
36 | 'enable_gevent()' if use_gevent else '', | |
35 | 37 | 'run()']).wait() |
36 | 38 | |
37 | 39 | |
38 | def main(): | |
40 | def bench_wrapper(use_gevent=False): | |
39 | 41 | print '=' * 80 |
40 | print 'Running benchmark with Logbook %s' % version | |
42 | print 'Running benchmark with Logbook %s (gevent enabled=%s)' % (version, use_gevent) | |
41 | 43 | print '-' * 80 |
42 | 44 | os.chdir(bench_directory) |
43 | 45 | for bench in list_benchmarks(): |
44 | run_bench(bench) | |
46 | run_bench(bench, use_gevent) | |
45 | 47 | print '-' * 80 |
48 | ||
49 | ||
50 | def main(): | |
51 | bench_wrapper(False) | |
52 | try: | |
53 | import gevent | |
54 | bench_wrapper(True) | |
55 | except ImportError: | |
56 | pass | |
46 | 57 | |
47 | 58 | |
48 | 59 | if __name__ == '__main__': |
19 | 19 | |
20 | 20 | .. module:: logbook.handlers |
21 | 21 | |
22 | .. autoclass:: RotatingFileHandlerBase | |
23 | :members: | |
24 | ||
25 | 22 | .. autoclass:: StringFormatterHandlerMixin |
26 | 23 | :members: |
32 | 32 | .. autoclass:: NotifoHandler |
33 | 33 | :members: |
34 | 34 | |
35 | .. autoclass:: PushoverHandler | |
36 | :members: | |
37 | ||
35 | 38 | Base Interface |
36 | 39 | -------------- |
37 | 40 |
47 | 47 | # built documents. |
48 | 48 | # |
49 | 49 | # The short X.Y version. |
50 | version = '0.7.0' | |
50 | version = '0.10.0' | |
51 | 51 | # The full version, including alpha/beta/rc tags. |
52 | release = '0.7.0' | |
52 | release = '0.10.0' | |
53 | 53 | |
54 | 54 | # The language for content autogenerated by Sphinx. Refer to documentation |
55 | 55 | # for a list of supported languages. |
21 | 21 | stack. Setting a handler does not replace existing handlers, but gives it |
22 | 22 | higher priority. Each handler has the ability to prevent records from |
23 | 23 | propagating to lower-priority handlers. |
24 | - Logbook comes with a useful default configuration that spits all the | |
25 | information to stderr in a useful manner. | |
24 | - Logbook comes with a quick optional configuration that spits all the | |
25 | information to stderr in a useful manner (by setting the LOGBOOK_INSTALL_DEFAULT_HANDLER | |
26 | environment variable). This is useful for webapps, for example. | |
26 | 27 | - All of the built-in handlers have a useful default configuration applied with |
27 | 28 | formatters that provide all the available information in a format that |
28 | 29 | makes the most sense for the given handler. For example, a default stream |
8 | 8 | >>> from logbook import Logger |
9 | 9 | >>> log = Logger('My Awesome Logger') |
10 | 10 | >>> log.warn('This is too cool for stdlib') |
11 | [2010-07-23 16:34] WARNING: My Awesome Logger: This is too cool for stdlib | |
11 | [2010-07-23 16:34:42.687111] WARNING: My Awesome Logger: This is too cool for stdlib | |
12 | 12 | |
13 | 13 | A logger is a so-called :class:`~logbook.base.RecordDispatcher`, which is |
14 | 14 | commonly referred to as a "logging channel". The name you give such a channel |
209 | 209 | only accepts loggers of a specific kind. You can also do that with a |
210 | 210 | filter function:: |
211 | 211 | |
212 | handler = MyHandler(filter=lambda r: r.channel == 'app.database') | |
212 | handler = MyHandler(filter=lambda r, h: r.channel == 'app.database') | |
213 | 213 | |
214 | 214 | Keep in mind that the channel is intended to be a human readable string |
215 | 215 | and is not necessarily unique. If you really need to keep loggers apart |
219 | 219 | You can also compare the dispatcher on the log record:: |
220 | 220 | |
221 | 221 | from yourapplication.yourmodule import logger |
222 | handler = MyHandler(filter=lambda r: r.dispatcher is logger) | |
222 | handler = MyHandler(filter=lambda r, h: r.dispatcher is logger) | |
223 | 223 | |
224 | 224 | This however has the disadvantage that the dispatcher entry on the log |
225 | 225 | record is a weak reference and might go away unexpectedly and will not be |
9 | 9 | :license: BSD, see LICENSE for more details. |
10 | 10 | """ |
11 | 11 | |
12 | ||
12 | import os | |
13 | 13 | from logbook.base import LogRecord, Logger, LoggerGroup, NestedSetup, \ |
14 | 14 | Processor, Flags, get_level_name, lookup_level, dispatch_record, \ |
15 | 15 | CRITICAL, ERROR, WARNING, NOTICE, INFO, DEBUG, NOTSET, \ |
22 | 22 | LimitingHandlerMixin, WrapperHandler, FingersCrossedHandler, \ |
23 | 23 | GroupHandler |
24 | 24 | |
25 | __version__ = '0.7.0' | |
25 | __version__ = '0.10.0' | |
26 | 26 | |
27 | 27 | # create an anonymous default logger and provide all important |
28 | 28 | # methods of that logger as global functions |
37 | 37 | exception = _default_logger.exception |
38 | 38 | catch_exceptions = _default_logger.catch_exceptions |
39 | 39 | critical = _default_logger.critical |
40 | exception = _default_logger.exception | |
41 | catch_exceptions = _default_logger.catch_exceptions | |
42 | 40 | log = _default_logger.log |
43 | 41 | del _default_logger |
44 | 42 | |
45 | 43 | |
46 | 44 | # install a default global handler |
47 | default_handler = StderrHandler() | |
48 | default_handler.push_application() | |
45 | if os.environ.get('LOGBOOK_INSTALL_DEFAULT_HANDLER'): | |
46 | default_handler = StderrHandler() | |
47 | default_handler.push_application() |
7 | 7 | :copyright: (c) 2010 by Armin Ronacher, Georg Brandl. |
8 | 8 | :license: BSD, see LICENSE for more details. |
9 | 9 | """ |
10 | import threading | |
11 | 10 | from itertools import count |
12 | try: | |
13 | from thread import get_ident as current_thread | |
14 | except ImportError: | |
15 | from _thread import get_ident as current_thread | |
16 | 11 | from logbook.helpers import get_iterator_next_method |
12 | from logbook.concurrency import (thread_get_ident, greenlet_get_ident, | |
13 | thread_local, greenlet_local, | |
14 | ThreadLock, GreenletRLock, is_gevent_enabled) | |
17 | 15 | |
18 | 16 | _missing = object() |
19 | 17 | _MAX_CONTEXT_OBJECT_CACHE = 256 |
18 | ||
20 | 19 | |
21 | 20 | def group_reflected_property(name, default, fallback=_missing): |
22 | 21 | """Returns a property for a given name that falls back to the |
57 | 56 | operations. |
58 | 57 | """ |
59 | 58 | |
59 | def push_greenlet(self): | |
60 | """Pushes the stacked object to the greenlet stack.""" | |
61 | raise NotImplementedError() | |
62 | ||
63 | def pop_greenlet(self): | |
64 | """Pops the stacked object from the greenlet stack.""" | |
65 | raise NotImplementedError() | |
66 | ||
60 | 67 | def push_thread(self): |
61 | 68 | """Pushes the stacked object to the thread stack.""" |
62 | 69 | raise NotImplementedError() |
74 | 81 | raise NotImplementedError() |
75 | 82 | |
76 | 83 | def __enter__(self): |
77 | self.push_thread() | |
84 | if is_gevent_enabled(): | |
85 | self.push_greenlet() | |
86 | else: | |
87 | self.push_thread() | |
78 | 88 | return self |
79 | 89 | |
80 | 90 | def __exit__(self, exc_type, exc_value, tb): |
81 | self.pop_thread() | |
91 | if is_gevent_enabled(): | |
92 | self.pop_greenlet() | |
93 | else: | |
94 | self.pop_thread() | |
95 | ||
96 | def greenletbound(self, _cls=_StackBound): | |
97 | """Can be used in combination with the `with` statement to | |
98 | execute code while the object is bound to the greenlet. | |
99 | """ | |
100 | return _cls(self, self.push_greenlet, self.pop_greenlet) | |
82 | 101 | |
83 | 102 | def threadbound(self, _cls=_StackBound): |
84 | 103 | """Can be used in combination with the `with` statement to |
100 | 119 | |
101 | 120 | def __init__(self): |
102 | 121 | self._global = [] |
103 | self._context_lock = threading.Lock() | |
104 | self._context = threading.local() | |
122 | self._thread_context_lock = ThreadLock() | |
123 | self._thread_context = thread_local() | |
124 | self._greenlet_context_lock = GreenletRLock() | |
125 | self._greenlet_context = greenlet_local() | |
105 | 126 | self._cache = {} |
106 | 127 | self._stackop = get_iterator_next_method(count()) |
107 | 128 | |
109 | 130 | """Returns an iterator over all objects for the combined |
110 | 131 | application and context cache. |
111 | 132 | """ |
112 | tid = current_thread() | |
133 | use_gevent = is_gevent_enabled() | |
134 | tid = greenlet_get_ident() if use_gevent else thread_get_ident() | |
113 | 135 | objects = self._cache.get(tid) |
114 | 136 | if objects is None: |
115 | 137 | if len(self._cache) > _MAX_CONTEXT_OBJECT_CACHE: |
116 | 138 | self._cache.clear() |
117 | 139 | objects = self._global[:] |
118 | objects.extend(getattr(self._context, 'stack', ())) | |
140 | objects.extend(getattr(self._thread_context, 'stack', ())) | |
141 | if use_gevent: | |
142 | objects.extend(getattr(self._greenlet_context, 'stack', ())) | |
119 | 143 | objects.sort(reverse=True) |
120 | 144 | objects = [x[1] for x in objects] |
121 | 145 | self._cache[tid] = objects |
122 | 146 | return iter(objects) |
123 | 147 | |
124 | def push_thread(self, obj): | |
125 | self._context_lock.acquire() | |
126 | try: | |
127 | self._cache.pop(current_thread(), None) | |
148 | def push_greenlet(self, obj): | |
149 | self._greenlet_context_lock.acquire() | |
150 | try: | |
151 | self._cache.pop(greenlet_get_ident(), None) # remote chance to conflict with thread ids | |
128 | 152 | item = (self._stackop(), obj) |
129 | stack = getattr(self._context, 'stack', None) | |
153 | stack = getattr(self._greenlet_context, 'stack', None) | |
130 | 154 | if stack is None: |
131 | self._context.stack = [item] | |
155 | self._greenlet_context.stack = [item] | |
132 | 156 | else: |
133 | 157 | stack.append(item) |
134 | 158 | finally: |
135 | self._context_lock.release() | |
136 | ||
137 | def pop_thread(self): | |
138 | self._context_lock.acquire() | |
139 | try: | |
140 | self._cache.pop(current_thread(), None) | |
141 | stack = getattr(self._context, 'stack', None) | |
159 | self._greenlet_context_lock.release() | |
160 | ||
161 | def pop_greenlet(self): | |
162 | self._greenlet_context_lock.acquire() | |
163 | try: | |
164 | self._cache.pop(greenlet_get_ident(), None) # remote chance to conflict with thread ids | |
165 | stack = getattr(self._greenlet_context, 'stack', None) | |
142 | 166 | assert stack, 'no objects on stack' |
143 | 167 | return stack.pop()[1] |
144 | 168 | finally: |
145 | self._context_lock.release() | |
169 | self._greenlet_context_lock.release() | |
170 | ||
171 | def push_thread(self, obj): | |
172 | self._thread_context_lock.acquire() | |
173 | try: | |
174 | self._cache.pop(thread_get_ident(), None) | |
175 | item = (self._stackop(), obj) | |
176 | stack = getattr(self._thread_context, 'stack', None) | |
177 | if stack is None: | |
178 | self._thread_context.stack = [item] | |
179 | else: | |
180 | stack.append(item) | |
181 | finally: | |
182 | self._thread_context_lock.release() | |
183 | ||
184 | def pop_thread(self): | |
185 | self._thread_context_lock.acquire() | |
186 | try: | |
187 | self._cache.pop(thread_get_ident(), None) | |
188 | stack = getattr(self._thread_context, 'stack', None) | |
189 | assert stack, 'no objects on stack' | |
190 | return stack.pop()[1] | |
191 | finally: | |
192 | self._thread_context_lock.release() | |
146 | 193 | |
147 | 194 | def push_application(self, obj): |
148 | 195 | self._global.append((self._stackop(), obj)) |
8 | 8 | :license: BSD, see LICENSE for more details. |
9 | 9 | """ |
10 | 10 | |
11 | import thread | |
12 | import threading | |
13 | 11 | import platform |
12 | from logbook.concurrency import (is_gevent_enabled, thread_get_ident, greenlet_get_ident, thread_local, | |
13 | GreenletRLock, greenlet_local) | |
14 | 14 | |
15 | 15 | from cpython.dict cimport PyDict_Clear, PyDict_SetItem |
16 | 16 | from cpython.list cimport PyList_New, PyList_Append, PyList_Sort, \ |
23 | 23 | cdef enum: |
24 | 24 | _MAX_CONTEXT_OBJECT_CACHE = 256 |
25 | 25 | |
26 | cdef current_thread = thread.get_ident | |
27 | 26 | |
28 | 27 | cdef class group_reflected_property: |
29 | cdef char* name | |
30 | cdef char* _name | |
28 | cdef object name | |
29 | cdef object _name | |
31 | 30 | cdef object default |
32 | 31 | cdef object fallback |
33 | 32 | |
34 | def __init__(self, char* name, object default, object fallback=_missing): | |
33 | def __init__(self, name, object default, object fallback=_missing): | |
35 | 34 | self.name = name |
36 | _name = '_' + name | |
37 | self._name = _name | |
35 | self._name = '_' + name | |
38 | 36 | self.default = default |
39 | 37 | self.fallback = fallback |
40 | 38 | |
101 | 99 | operations. |
102 | 100 | """ |
103 | 101 | |
102 | cpdef push_greenlet(self): | |
103 | """Pushes the stacked object to the greenlet stack.""" | |
104 | raise NotImplementedError() | |
105 | ||
106 | cpdef pop_greenlet(self): | |
107 | """Pops the stacked object from the greenlet stack.""" | |
108 | raise NotImplementedError() | |
109 | ||
104 | 110 | cpdef push_thread(self): |
105 | 111 | """Pushes the stacked object to the thread stack.""" |
106 | 112 | raise NotImplementedError() |
118 | 124 | raise NotImplementedError() |
119 | 125 | |
120 | 126 | def __enter__(self): |
121 | self.push_thread() | |
127 | if is_gevent_enabled(): | |
128 | self.push_greenlet() | |
129 | else: | |
130 | self.push_thread() | |
122 | 131 | return self |
123 | 132 | |
124 | 133 | def __exit__(self, exc_type, exc_value, tb): |
125 | self.pop_thread() | |
134 | if is_gevent_enabled(): | |
135 | self.pop_greenlet() | |
136 | else: | |
137 | self.pop_thread() | |
138 | ||
139 | cpdef greenletbound(self): | |
140 | """Can be used in combination with the `with` statement to | |
141 | execute code while the object is bound to the greenlet. | |
142 | """ | |
143 | return _StackBound(self, self.push_greenlet, self.pop_greenlet) | |
126 | 144 | |
127 | 145 | cpdef threadbound(self): |
128 | 146 | """Can be used in combination with the `with` statement to |
139 | 157 | |
140 | 158 | cdef class ContextStackManager: |
141 | 159 | cdef list _global |
142 | cdef PyThread_type_lock _context_lock | |
143 | cdef object _context | |
160 | cdef PyThread_type_lock _thread_context_lock | |
161 | cdef object _thread_context | |
162 | cdef object _greenlet_context_lock | |
163 | cdef object _greenlet_context | |
144 | 164 | cdef dict _cache |
145 | 165 | cdef int _stackcnt |
146 | 166 | |
147 | 167 | def __init__(self): |
148 | 168 | self._global = [] |
149 | self._context_lock = PyThread_allocate_lock() | |
150 | self._context = threading.local() | |
169 | self._thread_context_lock = PyThread_allocate_lock() | |
170 | self._thread_context = thread_local() | |
171 | self._greenlet_context_lock = GreenletRLock() | |
172 | self._greenlet_context = greenlet_local() | |
151 | 173 | self._cache = {} |
152 | 174 | self._stackcnt = 0 |
153 | 175 | |
156 | 178 | return self._stackcnt |
157 | 179 | |
158 | 180 | cpdef iter_context_objects(self): |
159 | tid = current_thread() | |
181 | use_gevent = is_gevent_enabled() | |
182 | tid = greenlet_get_ident() if use_gevent else thread_get_ident() | |
160 | 183 | objects = self._cache.get(tid) |
161 | 184 | if objects is None: |
162 | 185 | if PyList_GET_SIZE(self._cache) > _MAX_CONTEXT_OBJECT_CACHE: |
163 | 186 | PyDict_Clear(self._cache) |
164 | 187 | objects = self._global[:] |
165 | objects.extend(getattr3(self._context, 'stack', ())) | |
188 | objects.extend(getattr3(self._thread_context, 'stack', ())) | |
189 | if use_gevent: | |
190 | objects.extend(getattr3(self._greenlet_context, 'stack', ())) | |
166 | 191 | PyList_Sort(objects) |
167 | 192 | objects = [(<_StackItem>x).val for x in objects] |
168 | 193 | PyDict_SetItem(self._cache, tid, objects) |
169 | 194 | return iter(objects) |
170 | 195 | |
171 | cpdef push_thread(self, obj): | |
172 | PyThread_acquire_lock(self._context_lock, WAIT_LOCK) | |
173 | try: | |
174 | self._cache.pop(current_thread(), None) | |
196 | cpdef push_greenlet(self, obj): | |
197 | self._greenlet_context_lock.acquire() | |
198 | try: | |
199 | self._cache.pop(greenlet_get_ident(), None) | |
175 | 200 | item = _StackItem(self._stackop(), obj) |
176 | stack = getattr3(self._context, 'stack', None) | |
201 | stack = getattr3(self._greenlet_context, 'stack', None) | |
177 | 202 | if stack is None: |
178 | self._context.stack = [item] | |
203 | self._greenlet_context.stack = [item] | |
179 | 204 | else: |
180 | 205 | PyList_Append(stack, item) |
181 | 206 | finally: |
182 | PyThread_release_lock(self._context_lock) | |
183 | ||
184 | cpdef pop_thread(self): | |
185 | PyThread_acquire_lock(self._context_lock, WAIT_LOCK) | |
186 | try: | |
187 | self._cache.pop(current_thread(), None) | |
188 | stack = getattr3(self._context, 'stack', None) | |
207 | self._greenlet_context_lock.release() | |
208 | ||
209 | cpdef pop_greenlet(self): | |
210 | self._greenlet_context_lock.acquire() | |
211 | try: | |
212 | self._cache.pop(greenlet_get_ident(), None) | |
213 | stack = getattr3(self._greenlet_context, 'stack', None) | |
189 | 214 | assert stack, 'no objects on stack' |
190 | 215 | return (<_StackItem>stack.pop()).val |
191 | 216 | finally: |
192 | PyThread_release_lock(self._context_lock) | |
217 | self._greenlet_context_lock.release() | |
218 | ||
219 | cpdef push_thread(self, obj): | |
220 | PyThread_acquire_lock(self._thread_context_lock, WAIT_LOCK) | |
221 | try: | |
222 | self._cache.pop(thread_get_ident(), None) | |
223 | item = _StackItem(self._stackop(), obj) | |
224 | stack = getattr3(self._thread_context, 'stack', None) | |
225 | if stack is None: | |
226 | self._thread_context.stack = [item] | |
227 | else: | |
228 | PyList_Append(stack, item) | |
229 | finally: | |
230 | PyThread_release_lock(self._thread_context_lock) | |
231 | ||
232 | cpdef pop_thread(self): | |
233 | PyThread_acquire_lock(self._thread_context_lock, WAIT_LOCK) | |
234 | try: | |
235 | self._cache.pop(thread_get_ident(), None) | |
236 | stack = getattr3(self._thread_context, 'stack', None) | |
237 | assert stack, 'no objects on stack' | |
238 | return (<_StackItem>stack.pop()).val | |
239 | finally: | |
240 | PyThread_release_lock(self._thread_context_lock) | |
193 | 241 | |
194 | 242 | cpdef push_application(self, obj): |
195 | 243 | self._global.append(_StackItem(self._stackop(), obj)) |
9 | 9 | """ |
10 | 10 | import os |
11 | 11 | import sys |
12 | try: | |
13 | import thread | |
14 | except ImportError: | |
15 | # for python 3.1,3.2 | |
16 | import _thread as thread | |
17 | import threading | |
18 | 12 | import traceback |
19 | 13 | from itertools import chain |
20 | 14 | from weakref import ref as weakref |
21 | 15 | from datetime import datetime |
16 | from logbook import helpers | |
17 | from logbook.concurrency import thread_get_name, thread_get_ident, greenlet_get_ident | |
22 | 18 | |
23 | 19 | from logbook.helpers import to_safe_json, parse_iso8601, cached_property, \ |
24 | 20 | PY2, u, string_types, iteritems, integer_types |
189 | 185 | #: subclasses of it. |
190 | 186 | stack_manager = None |
191 | 187 | |
188 | def push_greenlet(self): | |
189 | """Pushes the context object to the greenlet stack.""" | |
190 | self.stack_manager.push_greenlet(self) | |
191 | ||
192 | def pop_greenlet(self): | |
193 | """Pops the context object from the stack.""" | |
194 | popped = self.stack_manager.pop_greenlet() | |
195 | assert popped is self, 'popped unexpected object' | |
196 | ||
192 | 197 | def push_thread(self): |
193 | 198 | """Pushes the context object to the thread stack.""" |
194 | 199 | self.stack_manager.push_thread(self) |
231 | 236 | def pop_thread(self): |
232 | 237 | for obj in reversed(self.objects): |
233 | 238 | obj.pop_thread() |
239 | ||
240 | def push_greenlet(self): | |
241 | for obj in self.objects: | |
242 | obj.push_greenlet() | |
243 | ||
244 | def pop_greenlet(self): | |
245 | for obj in reversed(self.objects): | |
246 | obj.pop_greenlet() | |
234 | 247 | |
235 | 248 | |
236 | 249 | class Processor(ContextObject): |
329 | 342 | """ |
330 | 343 | _pullable_information = frozenset(( |
331 | 344 | 'func_name', 'module', 'filename', 'lineno', 'process_name', 'thread', |
332 | 'thread_name', 'formatted_exception', 'message', 'exception_name', | |
345 | 'thread_name', 'greenlet', 'formatted_exception', 'message', 'exception_name', | |
333 | 346 | 'exception_message' |
334 | 347 | )) |
335 | 348 | _noned_on_close = frozenset(('exc_info', 'frame', 'calling_frame')) |
355 | 368 | information_pulled = False |
356 | 369 | |
357 | 370 | def __init__(self, channel, level, msg, args=None, kwargs=None, |
358 | exc_info=None, extra=None, frame=None, dispatcher=None): | |
371 | exc_info=None, extra=None, frame=None, dispatcher=None, frame_correction=0): | |
359 | 372 | #: the name of the logger that created it or any other textual |
360 | 373 | #: channel description. This is a descriptive name and can be |
361 | 374 | #: used for filtering. |
383 | 396 | #: Might not be available for all calls and is removed when the log |
384 | 397 | #: record is closed. |
385 | 398 | self.frame = frame |
399 | #: A positive integer telling the number of frames to go back from | |
400 | #: the frame which triggered the log entry. This is mainly useful | |
401 | #: for decorators that want to show that the log was emitted from | |
402 | #: form the function they decorate | |
403 | self.frame_correction = frame_correction | |
386 | 404 | #: the PID of the current process |
387 | 405 | self.process = None |
388 | 406 | if dispatcher is not None: |
476 | 494 | self.extra = ExtraDict(self.extra) |
477 | 495 | return self |
478 | 496 | |
497 | def _format_message(self, msg, *args, **kwargs): | |
498 | """Called if the record's message needs to be formatted. | |
499 | Subclasses can implement their own formatting. | |
500 | """ | |
501 | return msg.format(*args, **kwargs) | |
502 | ||
479 | 503 | @cached_property |
480 | 504 | def message(self): |
481 | 505 | """The formatted message.""" |
483 | 507 | return self.msg |
484 | 508 | try: |
485 | 509 | try: |
486 | return self.msg.format(*self.args, **self.kwargs) | |
510 | return self._format_message(self.msg, *self.args, **self.kwargs) | |
487 | 511 | except UnicodeDecodeError: |
488 | 512 | # Assume an unicode message but mixed-up args |
489 | 513 | msg = self.msg.encode('utf-8', 'replace') |
490 | return msg.format(*self.args, **self.kwargs) | |
514 | return self._format_message(msg, *self.args, **self.kwargs) | |
491 | 515 | except (UnicodeEncodeError, AttributeError): |
492 | 516 | # we catch AttributeError since if msg is bytes, it won't have the 'format' method |
493 | 517 | if sys.exc_info()[0] is AttributeError and (PY2 or not isinstance(self.msg, bytes)): |
498 | 522 | # but this codepath is unlikely (if the message is a constant |
499 | 523 | # string in the caller's source file) |
500 | 524 | msg = self.msg.decode('utf-8', 'replace') |
501 | return msg.format(*self.args, **self.kwargs) | |
525 | return self._format_message(msg, *self.args, **self.kwargs) | |
502 | 526 | |
503 | 527 | except Exception: |
504 | 528 | # this obviously will not give a proper error message if the |
529 | 553 | globs = globals() |
530 | 554 | while frm is not None and frm.f_globals is globs: |
531 | 555 | frm = frm.f_back |
556 | ||
557 | for _ in helpers.xrange(self.frame_correction): | |
558 | frm = frm.f_back | |
559 | ||
532 | 560 | return frm |
533 | 561 | |
534 | 562 | @cached_property |
573 | 601 | return cf.f_lineno |
574 | 602 | |
575 | 603 | @cached_property |
604 | def greenlet(self): | |
605 | """The ident of the greenlet. This is evaluated late and means that | |
606 | if the log record is passed to another greenlet, :meth:`pull_information` | |
607 | was called in the old greenlet. | |
608 | """ | |
609 | return greenlet_get_ident() | |
610 | ||
611 | @cached_property | |
576 | 612 | def thread(self): |
577 | 613 | """The ident of the thread. This is evaluated late and means that |
578 | 614 | if the log record is passed to another thread, :meth:`pull_information` |
579 | 615 | was called in the old thread. |
580 | 616 | """ |
581 | return thread.get_ident() | |
617 | return thread_get_ident() | |
582 | 618 | |
583 | 619 | @cached_property |
584 | 620 | def thread_name(self): |
586 | 622 | if the log record is passed to another thread, :meth:`pull_information` |
587 | 623 | was called in the old thread. |
588 | 624 | """ |
589 | return threading.currentThread().getName() | |
625 | return thread_get_name() | |
590 | 626 | |
591 | 627 | @cached_property |
592 | 628 | def process_name(self): |
607 | 643 | """The formatted exception which caused this record to be created |
608 | 644 | in case there was any. |
609 | 645 | """ |
610 | if self.exc_info is not None: | |
646 | if self.exc_info is not None and self.exc_info != (None, None, None): | |
611 | 647 | rv = ''.join(traceback.format_exception(*self.exc_info)) |
612 | 648 | if PY2: |
613 | 649 | rv = rv.decode('utf-8', 'replace') |
631 | 667 | if self.exc_info is not None: |
632 | 668 | val = self.exc_info[1] |
633 | 669 | try: |
634 | return u(str(val)) | |
670 | if PY2: | |
671 | return unicode(val) | |
672 | else: | |
673 | return str(val) | |
635 | 674 | except UnicodeError: |
636 | 675 | return str(val).decode('utf-8', 'replace') |
637 | 676 | |
747 | 786 | def _log(self, level, args, kwargs): |
748 | 787 | exc_info = kwargs.pop('exc_info', None) |
749 | 788 | extra = kwargs.pop('extra', None) |
789 | frame_correction = kwargs.pop('frame_correction', 0) | |
750 | 790 | self.make_record_and_handle(level, args[0], args[1:], kwargs, |
751 | exc_info, extra) | |
791 | exc_info, extra, frame_correction) | |
752 | 792 | |
753 | 793 | |
754 | 794 | class RecordDispatcher(object): |
785 | 825 | self.call_handlers(record) |
786 | 826 | |
787 | 827 | def make_record_and_handle(self, level, msg, args, kwargs, exc_info, |
788 | extra): | |
828 | extra, frame_correction): | |
789 | 829 | """Creates a record from some given arguments and heads it |
790 | 830 | over to the handling system. |
791 | 831 | """ |
799 | 839 | channel = self |
800 | 840 | |
801 | 841 | record = LogRecord(self.name, level, msg, args, kwargs, exc_info, |
802 | extra, None, channel) | |
842 | extra, None, channel, frame_correction) | |
803 | 843 | |
804 | 844 | # after handling the log record is closed which will remove some |
805 | 845 | # referenes that would require a GC run on cpython. This includes |
842 | 882 | if not handler.should_handle(record): |
843 | 883 | continue |
844 | 884 | |
885 | # first case of blackhole (without filter). | |
886 | # this should discard all further processing and we don't have to heavy_init to know that... | |
887 | if handler.filter is None and handler.blackhole: | |
888 | break | |
889 | ||
890 | # we are about to handle the record. If it was not yet | |
891 | # processed by context-specific record processors we | |
892 | # have to do that now and remeber that we processed | |
893 | # the record already. | |
894 | if not record_initialized: | |
895 | record.heavy_init() | |
896 | self.process_record(record) | |
897 | record_initialized = True | |
898 | ||
899 | ||
845 | 900 | # a filter can still veto the handling of the record. This |
846 | 901 | # however is already operating on an initialized and processed |
847 | 902 | # record. The impact is that filters are slower than the |
851 | 906 | and not handler.filter(record, handler): |
852 | 907 | continue |
853 | 908 | |
854 | # if this is a blackhole handler, don't even try to | |
855 | # do further processing, stop right away. Technically | |
856 | # speaking this is not 100% correct because if the handler | |
857 | # is bubbling we shouldn't apply this logic, but then we | |
858 | # won't enter this branch anyways. The result is that a | |
859 | # bubbling blackhole handler will never have this shortcut | |
860 | # applied and do the heavy init at one point. This is fine | |
861 | # however because a bubbling blackhole handler is not very | |
862 | # useful in general. | |
909 | # We might have a filter, so now that we know we *should* handle | |
910 | # this record, we should consider the case of us being a black hole... | |
863 | 911 | if handler.blackhole: |
864 | 912 | break |
865 | 913 | |
866 | # we are about to handle the record. If it was not yet | |
867 | # processed by context-specific record processors we | |
868 | # have to do that now and remeber that we processed | |
869 | # the record already. | |
870 | if not record_initialized: | |
871 | record.heavy_init() | |
872 | self.process_record(record) | |
873 | record_initialized = True | |
874 | 914 | |
875 | 915 | # handle the record. If the record was handled and |
876 | 916 | # the record is not bubbling we can abort now. |
56 | 56 | |
57 | 57 | __enter__ = start |
58 | 58 | __exit__ = end |
59 | ||
60 | ||
61 | class LoggingCompatRecord(logbook.LogRecord): | |
62 | ||
63 | def _format_message(self, msg, *args, **kwargs): | |
64 | assert not kwargs | |
65 | return msg % tuple(args) | |
59 | 66 | |
60 | 67 | |
61 | 68 | class RedirectLoggingHandler(logging.Handler): |
92 | 99 | 'pathname', 'filename', 'module', 'exc_info', |
93 | 100 | 'exc_text', 'lineno', 'funcName', 'created', |
94 | 101 | 'msecs', 'relativeCreated', 'thread', 'threadName', |
95 | 'processName', 'process'): | |
102 | 'greenlet', 'processName', 'process'): | |
96 | 103 | rv.pop(key, None) |
97 | 104 | return rv |
98 | 105 | |
115 | 122 | |
116 | 123 | def convert_record(self, old_record): |
117 | 124 | """Converts an old logging record into a logbook log record.""" |
118 | record = logbook.LogRecord(old_record.name, | |
125 | record = LoggingCompatRecord(old_record.name, | |
119 | 126 | self.convert_level(old_record.levelno), |
120 | old_record.getMessage(), | |
121 | None, None, old_record.exc_info, | |
127 | old_record.msg, old_record.args, | |
128 | None, old_record.exc_info, | |
122 | 129 | self.find_extra(old_record), |
123 | 130 | self.find_caller(old_record)) |
124 | 131 | record.time = self.convert_time(old_record.created) |
0 | has_gevent = True | |
1 | use_gevent = False | |
2 | try: | |
3 | import gevent | |
4 | ||
5 | def enable_gevent(): | |
6 | global use_gevent | |
7 | use_gevent = True | |
8 | ||
9 | def _disable_gevent(): # for testing | |
10 | global use_gevent | |
11 | use_gevent = False | |
12 | ||
13 | def is_gevent_enabled(): | |
14 | global use_gevent | |
15 | return use_gevent | |
16 | except ImportError: | |
17 | has_gevent = False | |
18 | ||
19 | def enable_gevent(): | |
20 | pass | |
21 | ||
22 | def _disable_gevent(): | |
23 | pass | |
24 | ||
25 | def is_gevent_enabled(): | |
26 | return False | |
27 | ||
28 | ||
29 | if has_gevent: | |
30 | from gevent._threading import (Lock as ThreadLock, | |
31 | RLock as ThreadRLock, | |
32 | get_ident as thread_get_ident, | |
33 | local as thread_local) | |
34 | from gevent.thread import get_ident as greenlet_get_ident | |
35 | from gevent.local import local as greenlet_local | |
36 | from gevent.lock import BoundedSemaphore | |
37 | from gevent.threading import __threading__ | |
38 | ||
39 | def thread_get_name(): | |
40 | return __threading__.currentThread().getName() | |
41 | ||
42 | class GreenletRLock(object): | |
43 | def __init__(self): | |
44 | self._thread_local = thread_local() | |
45 | self._owner = None | |
46 | self._wait_queue = [] | |
47 | self._count = 0 | |
48 | ||
49 | def __repr__(self): | |
50 | owner = self._owner | |
51 | return "<%s owner=%r count=%d>" % (self.__class__.__name__, owner, self._count) | |
52 | ||
53 | def acquire(self, blocking=1): | |
54 | tid = thread_get_ident() | |
55 | gid = greenlet_get_ident() | |
56 | tid_gid = (tid, gid) | |
57 | if tid_gid == self._owner: # We trust the GIL here so we can do this comparison w/o locking. | |
58 | self._count = self._count + 1 | |
59 | return True | |
60 | ||
61 | greenlet_lock = self._get_greenlet_lock() | |
62 | ||
63 | self._wait_queue.append(gid) | |
64 | # this is a safety in case an exception is raised somewhere and we must make sure we're not in the queue | |
65 | # otherwise it'll get stuck forever. | |
66 | remove_from_queue_on_return = True | |
67 | try: | |
68 | while True: | |
69 | if not greenlet_lock.acquire(blocking): | |
70 | return False # non-blocking and failed to acquire lock | |
71 | ||
72 | if self._wait_queue[0] == gid: | |
73 | # Hurray, we can have the lock. | |
74 | self._owner = tid_gid | |
75 | self._count = 1 | |
76 | remove_from_queue_on_return = False # don't remove us from the queue | |
77 | return True | |
78 | else: | |
79 | # we already hold the greenlet lock so obviously the owner is not in our thread. | |
80 | greenlet_lock.release() | |
81 | if blocking: | |
82 | gevent.sleep(0.0005) # 500 us -> initial delay of 1 ms | |
83 | else: | |
84 | return False | |
85 | finally: | |
86 | if remove_from_queue_on_return: | |
87 | self._wait_queue.remove(gid) | |
88 | ||
89 | def release(self): | |
90 | tid_gid = (thread_get_ident(), greenlet_get_ident()) | |
91 | if tid_gid != self._owner: | |
92 | raise RuntimeError("cannot release un-acquired lock") | |
93 | ||
94 | self._count = self._count - 1 | |
95 | if not self._count: | |
96 | self._owner = None | |
97 | gid = self._wait_queue.pop(0) | |
98 | assert gid == tid_gid[1] | |
99 | self._thread_local.greenlet_lock.release() | |
100 | ||
101 | __enter__ = acquire | |
102 | ||
103 | def __exit__(self, t, v, tb): | |
104 | self.release() | |
105 | ||
106 | def _get_greenlet_lock(self): | |
107 | if not hasattr(self._thread_local, 'greenlet_lock'): | |
108 | greenlet_lock = self._thread_local.greenlet_lock = BoundedSemaphore(1) | |
109 | else: | |
110 | greenlet_lock = self._thread_local.greenlet_lock | |
111 | return greenlet_lock | |
112 | ||
113 | def _is_owned(self): | |
114 | return self._owner == (thread_get_ident(), greenlet_get_ident()) | |
115 | else: | |
116 | from threading import Lock as ThreadLock, RLock as ThreadRLock, currentThread | |
117 | try: | |
118 | from thread import get_ident as thread_get_ident, _local as thread_local | |
119 | except ImportError: | |
120 | from _thread import get_ident as thread_get_ident, _local as thread_local | |
121 | ||
122 | def thread_get_name(): | |
123 | return currentThread().getName() | |
124 | ||
125 | greenlet_get_ident = thread_get_ident | |
126 | ||
127 | greenlet_local = thread_local | |
128 | ||
129 | class GreenletRLock(object): | |
130 | def acquire(self): | |
131 | pass | |
132 | ||
133 | def release(self): | |
134 | pass | |
135 | ||
136 | def __enter__(self): | |
137 | pass | |
138 | ||
139 | def __exit__(self, t, v, tb): | |
140 | pass | |
141 | ||
142 | def new_fine_grained_lock(): | |
143 | global use_gevent | |
144 | if use_gevent: | |
145 | return GreenletRLock() | |
146 | else: | |
147 | return ThreadRLock() |
17 | 17 | from hashlib import sha1 |
18 | 18 | except ImportError: |
19 | 19 | from sha import new as sha1 |
20 | import threading | |
21 | 20 | import traceback |
22 | 21 | from datetime import datetime, timedelta |
23 | from threading import Lock | |
24 | 22 | from collections import deque |
23 | ||
24 | from six import add_metaclass | |
25 | 25 | |
26 | 26 | from logbook.base import CRITICAL, ERROR, WARNING, NOTICE, INFO, DEBUG, \ |
27 | 27 | NOTSET, level_name_property, _missing, lookup_level, \ |
28 | 28 | Flags, ContextObject, ContextStackManager |
29 | 29 | from logbook.helpers import rename, b, _is_text_stream, is_unicode, PY2, \ |
30 | 30 | zip, xrange, string_types, integer_types, reraise, u |
31 | ||
31 | from logbook.concurrency import new_fine_grained_lock | |
32 | 32 | |
33 | 33 | DEFAULT_FORMAT_STRING = ( |
34 | u('[{record.time:%Y-%m-%d %H:%M}] ') + | |
34 | u('[{record.time:%Y-%m-%d %H:%M:%S.%f}] ') + | |
35 | 35 | u('{record.level_name}: {record.channel}: {record.message}') |
36 | 36 | ) |
37 | 37 | SYSLOG_FORMAT_STRING = u('{record.channel}: {record.message}') |
106 | 106 | return type.__new__(cls, name, bases, d) |
107 | 107 | |
108 | 108 | |
109 | @add_metaclass(_HandlerType) | |
109 | 110 | class Handler(ContextObject): |
110 | 111 | """Handler instances dispatch logging events to specific destinations. |
111 | 112 | |
114 | 115 | records as desired. By default, no formatter is specified; in this case, |
115 | 116 | the 'raw' message as determined by record.message is logged. |
116 | 117 | |
117 | To bind a handler you can use the :meth:`push_application` and | |
118 | :meth:`push_thread` methods. This will push the handler on a stack of | |
119 | handlers. To undo this, use the :meth:`pop_application` and | |
120 | :meth:`pop_thread` methods:: | |
118 | To bind a handler you can use the :meth:`push_application`, | |
119 | :meth:`push_thread` or :meth:`push_greenlet` methods. This will push the handler on a stack of | |
120 | handlers. To undo this, use the :meth:`pop_application`, | |
121 | :meth:`pop_thread` methods and :meth:`pop_greenlet`:: | |
121 | 122 | |
122 | 123 | handler = MyHandler() |
123 | 124 | handler.push_application() |
126 | 127 | |
127 | 128 | By default messages sent to that handler will not go to a handler on |
128 | 129 | an outer level on the stack, if handled. This can be changed by |
129 | setting bubbling to `True`. This setup for example would not have | |
130 | any effect:: | |
131 | ||
132 | handler = NullHandler(bubble=True) | |
133 | handler.push_application() | |
134 | ||
135 | Whereas this setup disables all logging for the application:: | |
136 | ||
137 | handler = NullHandler() | |
138 | handler.push_application() | |
130 | setting bubbling to `True`. | |
139 | 131 | |
140 | 132 | There are also context managers to setup the handler for the duration |
141 | 133 | of a `with`-block:: |
146 | 138 | with handler.threadbound(): |
147 | 139 | ... |
148 | 140 | |
141 | with handler.greenletbound(): | |
142 | ... | |
143 | ||
149 | 144 | Because `threadbound` is a common operation, it is aliased to a with |
150 | on the handler itself:: | |
145 | on the handler itself if not using gevent:: | |
151 | 146 | |
152 | 147 | with handler: |
153 | 148 | ... |
154 | """ | |
155 | __metaclass__ = _HandlerType | |
156 | ||
149 | ||
150 | If gevent is enabled, the handler is aliased to `greenletbound`. | |
151 | """ | |
157 | 152 | stack_manager = ContextStackManager() |
158 | 153 | |
159 | 154 | #: a flag for this handler that can be set to `True` for handlers that |
299 | 294 | |
300 | 295 | |
301 | 296 | class NullHandler(Handler): |
302 | """A handler that does nothing, meant to be inserted in a handler chain | |
303 | with ``bubble=False`` to stop further processing. | |
297 | """A handler that does nothing. | |
298 | ||
299 | Useful to silence logs above a certain location in the handler stack:: | |
300 | ||
301 | handler = NullHandler() | |
302 | handler.push_application() | |
303 | ||
304 | NullHandlers swallow all logs sent to them, and do not bubble them onwards. | |
305 | ||
304 | 306 | """ |
305 | 307 | blackhole = True |
306 | 308 | |
450 | 452 | |
451 | 453 | def __init__(self, record_limit, record_delta): |
452 | 454 | self.record_limit = record_limit |
453 | self._limit_lock = Lock() | |
455 | self._limit_lock = new_fine_grained_lock() | |
454 | 456 | self._record_limits = {} |
455 | 457 | if record_delta is None: |
456 | 458 | record_delta = timedelta(seconds=60) |
520 | 522 | Handler.__init__(self, level, filter, bubble) |
521 | 523 | StringFormatterHandlerMixin.__init__(self, format_string) |
522 | 524 | self.encoding = encoding |
523 | self.lock = threading.Lock() | |
525 | self.lock = new_fine_grained_lock() | |
524 | 526 | if stream is not _missing: |
525 | 527 | self.stream = stream |
526 | 528 | |
530 | 532 | def __exit__(self, exc_type, exc_value, tb): |
531 | 533 | self.close() |
532 | 534 | return Handler.__exit__(self, exc_type, exc_value, tb) |
535 | ||
536 | def ensure_stream_is_open(self): | |
537 | """this method should be overriden in sub-classes to ensure that the | |
538 | inner stream is open | |
539 | """ | |
540 | pass | |
533 | 541 | |
534 | 542 | def close(self): |
535 | 543 | """The default stream handler implementation is not to close |
542 | 550 | if self.stream is not None and hasattr(self.stream, 'flush'): |
543 | 551 | self.stream.flush() |
544 | 552 | |
545 | def format_and_encode(self, record): | |
546 | """Formats the record and encodes it to the stream encoding.""" | |
553 | def encode(self, msg): | |
554 | """Encodes the message to the stream encoding.""" | |
547 | 555 | stream = self.stream |
548 | rv = self.format(record) + '\n' | |
556 | rv = msg + '\n' | |
549 | 557 | if (PY2 and is_unicode(rv)) or \ |
550 | 558 | not (PY2 or is_unicode(rv) or _is_text_stream(stream)): |
551 | 559 | enc = self.encoding |
559 | 567 | self.stream.write(item) |
560 | 568 | |
561 | 569 | def emit(self, record): |
570 | msg = self.format(record) | |
562 | 571 | self.lock.acquire() |
563 | 572 | try: |
564 | self.write(self.format_and_encode(record)) | |
573 | self.ensure_stream_is_open() | |
574 | self.write(self.encode(msg)) | |
565 | 575 | self.flush() |
566 | 576 | finally: |
567 | 577 | self.lock.release() |
595 | 605 | self.stream = open(self._filename, mode) |
596 | 606 | |
597 | 607 | def write(self, item): |
598 | if self.stream is None: | |
599 | self._open() | |
608 | self.ensure_stream_is_open() | |
600 | 609 | if not PY2 and isinstance(item, bytes): |
601 | 610 | self.stream.buffer.write(item) |
602 | 611 | else: |
603 | 612 | self.stream.write(item) |
604 | 613 | |
605 | 614 | def close(self): |
606 | if self.stream is not None: | |
607 | self.flush() | |
608 | self.stream.close() | |
609 | self.stream = None | |
610 | ||
611 | def format_and_encode(self, record): | |
615 | self.lock.acquire() | |
616 | try: | |
617 | if self.stream is not None: | |
618 | self.flush() | |
619 | self.stream.close() | |
620 | self.stream = None | |
621 | finally: | |
622 | self.lock.release() | |
623 | ||
624 | def encode(self, record): | |
612 | 625 | # encodes based on the stream settings, so the stream has to be |
613 | 626 | # open at the time this function is called. |
627 | self.ensure_stream_is_open() | |
628 | return StreamHandler.encode(self, record) | |
629 | ||
630 | def ensure_stream_is_open(self): | |
614 | 631 | if self.stream is None: |
615 | 632 | self._open() |
616 | return StreamHandler.format_and_encode(self, record) | |
617 | ||
618 | def emit(self, record): | |
619 | if self.stream is None: | |
620 | self._open() | |
621 | StreamHandler.emit(self, record) | |
622 | 633 | |
623 | 634 | |
624 | 635 | class MonitoringFileHandler(FileHandler): |
654 | 665 | self._last_stat = st[stat.ST_DEV], st[stat.ST_INO] |
655 | 666 | |
656 | 667 | def emit(self, record): |
657 | last_stat = self._last_stat | |
658 | self._query_fd() | |
659 | if last_stat != self._last_stat: | |
660 | self.close() | |
661 | FileHandler.emit(self, record) | |
662 | self._query_fd() | |
668 | msg = self.format(record) | |
669 | self.lock.acquire() | |
670 | try: | |
671 | last_stat = self._last_stat | |
672 | self._query_fd() | |
673 | if last_stat != self._last_stat and self.stream is not None: | |
674 | self.flush() | |
675 | self.stream.close() | |
676 | self.stream = None | |
677 | self.ensure_stream_is_open() | |
678 | self.write(self.encode(msg)) | |
679 | self.flush() | |
680 | self._query_fd() | |
681 | finally: | |
682 | self.lock.release() | |
663 | 683 | |
664 | 684 | |
665 | 685 | class StderrHandler(StreamHandler): |
679 | 699 | @property |
680 | 700 | def stream(self): |
681 | 701 | return sys.stderr |
682 | ||
683 | ||
684 | class RotatingFileHandlerBase(FileHandler): | |
685 | """Baseclass for rotating file handlers. | |
686 | ||
687 | .. versionchanged:: 0.3 | |
688 | This class was deprecated because the interface is not flexible | |
689 | enough to implement proper file rotations. The former builtin | |
690 | subclasses no longer use this baseclass. | |
691 | """ | |
692 | ||
693 | def __init__(self, *args, **kwargs): | |
694 | from warnings import warn | |
695 | warn(DeprecationWarning('This class is deprecated')) | |
696 | FileHandler.__init__(self, *args, **kwargs) | |
697 | ||
698 | def emit(self, record): | |
699 | self.lock.acquire() | |
700 | try: | |
701 | msg = self.format_and_encode(record) | |
702 | if self.should_rollover(record, msg): | |
703 | self.perform_rollover() | |
704 | self.write(msg) | |
705 | self.flush() | |
706 | finally: | |
707 | self.lock.release() | |
708 | ||
709 | def should_rollover(self, record, formatted_record): | |
710 | """Called with the log record and the return value of the | |
711 | :meth:`format_and_encode` method. The method has then to | |
712 | return `True` if a rollover should happen or `False` | |
713 | otherwise. | |
714 | ||
715 | .. versionchanged:: 0.3 | |
716 | Previously this method was called with the number of bytes | |
717 | returned by :meth:`format_and_encode` | |
718 | """ | |
719 | return False | |
720 | ||
721 | def perform_rollover(self): | |
722 | """Called if :meth:`should_rollover` returns `True` and has | |
723 | to perform the actual rollover. | |
724 | """ | |
725 | 702 | |
726 | 703 | |
727 | 704 | class RotatingFileHandler(FileHandler): |
767 | 744 | self._open('w') |
768 | 745 | |
769 | 746 | def emit(self, record): |
747 | msg = self.format(record) | |
770 | 748 | self.lock.acquire() |
771 | 749 | try: |
772 | msg = self.format_and_encode(record) | |
750 | msg = self.encode(msg) | |
773 | 751 | if self.should_rollover(record, len(msg)): |
774 | 752 | self.perform_rollover() |
775 | 753 | self.write(msg) |
831 | 809 | filename.endswith(self._fn_parts[1]): |
832 | 810 | files.append((os.path.getmtime(filename), filename)) |
833 | 811 | files.sort() |
834 | return files[:-self.backup_count + 1] | |
812 | return files[:-self.backup_count + 1] if self.backup_count > 1\ | |
813 | else files[:] | |
835 | 814 | |
836 | 815 | def perform_rollover(self): |
837 | 816 | self.stream.close() |
841 | 820 | self._open('w') |
842 | 821 | |
843 | 822 | def emit(self, record): |
823 | msg = self.format(record) | |
844 | 824 | self.lock.acquire() |
845 | 825 | try: |
846 | 826 | if self.should_rollover(record): |
847 | 827 | self.perform_rollover() |
848 | self.write(self.format_and_encode(record)) | |
828 | self.write(self.encode(msg)) | |
849 | 829 | self.flush() |
850 | 830 | finally: |
851 | 831 | self.lock.release() |
1198 | 1178 | |
1199 | 1179 | def emit_batch(self, records, reason): |
1200 | 1180 | if reason not in ('escalation', 'group'): |
1201 | return MailHandler.emit_batch(self, records, reason) | |
1181 | raise RuntimeError("reason must be either 'escalation' or 'group'") | |
1202 | 1182 | records = list(records) |
1203 | 1183 | if not records: |
1204 | 1184 | return |
1495 | 1475 | handler when a severity theshold was reached with the buffer emitting. |
1496 | 1476 | This now enables this logger to be properly used with the |
1497 | 1477 | :class:`~logbook.MailHandler`. You will now only get one mail for |
1498 | each bfufered record. However once the threshold was reached you would | |
1478 | each buffered record. However once the threshold was reached you would | |
1499 | 1479 | still get a mail for each record which is why the `reset` flag was added. |
1500 | 1480 | |
1501 | 1481 | When set to `True`, the handler will instantly reset to the untriggered |
1519 | 1499 | pull_information=True, reset=False, filter=None, |
1520 | 1500 | bubble=False): |
1521 | 1501 | Handler.__init__(self, NOTSET, filter, bubble) |
1522 | self.lock = Lock() | |
1502 | self.lock = new_fine_grained_lock() | |
1523 | 1503 | self._level = action_level |
1524 | 1504 | if isinstance(handler, Handler): |
1525 | 1505 | self._handler = handler |
1625 | 1605 | Handler.pop_thread(self) |
1626 | 1606 | self.rollover() |
1627 | 1607 | |
1608 | def pop_greenlet(self): | |
1609 | Handler.pop_greenlet(self) | |
1610 | self.rollover() | |
1611 | ||
1628 | 1612 | def emit(self, record): |
1629 | 1613 | if self.pull_information: |
1630 | 1614 | record.pull_information() |
11 | 11 | import os |
12 | 12 | from collections import defaultdict |
13 | 13 | from cgi import parse_qsl |
14 | from functools import partial | |
14 | 15 | |
15 | 16 | from logbook.base import RecordDispatcher, dispatch_record, NOTSET, ERROR, NOTICE |
16 | 17 | from logbook.handlers import Handler, StringFormatter, \ |
103 | 104 | def __init__(self, name=None, tags=None): |
104 | 105 | RecordDispatcher.__init__(self, name) |
105 | 106 | # create a method for each tag named |
106 | list(setattr(self, tag, lambda msg, *args, **kwargs: | |
107 | self.log(tag, msg, *args, **kwargs)) for tag in (tags or ())) | |
107 | for tag in (tags or ()): | |
108 | setattr(self, tag, partial(self.log, tag)) | |
108 | 109 | |
109 | 110 | def log(self, tags, msg, *args, **kwargs): |
110 | 111 | if isinstance(tags, string_types): |
112 | 113 | exc_info = kwargs.pop('exc_info', None) |
113 | 114 | extra = kwargs.pop('extra', {}) |
114 | 115 | extra['tags'] = list(tags) |
116 | frame_correction = kwargs.pop('frame_correction', 0) | |
115 | 117 | return self.make_record_and_handle(NOTSET, msg, args, kwargs, |
116 | exc_info, extra) | |
118 | exc_info, extra, frame_correction) | |
117 | 119 | |
118 | 120 | |
119 | 121 | class TaggingHandler(Handler): |
303 | 305 | return 'yellow' |
304 | 306 | return 'lightgray' |
305 | 307 | |
306 | def format_and_encode(self, record): | |
307 | rv = super(ColorizingStreamHandlerMixin, self) \ | |
308 | .format_and_encode(record) | |
308 | def format(self, record): | |
309 | rv = super(ColorizingStreamHandlerMixin, self).format(record) | |
309 | 310 | if self.should_colorize(record): |
310 | 311 | color = self.get_color(record) |
311 | 312 | if color: |
378 | 379 | Handler.__init__(self, bubble=False, *args, **kwargs) |
379 | 380 | self._format_string = format_string |
380 | 381 | self.clear() |
381 | ||
382 | ||
382 | 383 | def clear(self): |
383 | 384 | self._message_to_count = defaultdict(int) |
384 | 385 | self._unique_ordered_records = [] |
389 | 390 | |
390 | 391 | def pop_thread(self): |
391 | 392 | Handler.pop_thread(self) |
393 | self.flush() | |
394 | ||
395 | def pop_greenlet(self): | |
396 | Handler.pop_greenlet(self) | |
392 | 397 | self.flush() |
393 | 398 | |
394 | 399 | def handle(self, record): |
258 | 258 | self._notifo.send_notification(self.username, self.secret, None, |
259 | 259 | record.message, self.application_name, |
260 | 260 | _level_name, None) |
261 | ||
262 | ||
class PushoverHandler(NotificationBaseHandler):
    """Sends notifications to pushover.net. Can be forwarded to your Desktop,
    iPhone, or other compatible device. If `priority` is not one of -2, -1, 0,
    or 1, it is set to 0 automatically.

    Titles are capped at 100 characters and messages at 512 characters;
    longer values are shortened and suffixed with ``...``.
    """

    #: length limits enforced before sending (Pushover API limits)
    MAX_TITLE_LEN = 100
    MAX_MESSAGE_LEN = 512

    def __init__(self, application_name=None, apikey=None, userkey=None,
                 device=None, priority=0, sound=None, record_limit=None,
                 record_delta=None, level=NOTSET, filter=None, bubble=False):

        super(PushoverHandler, self).__init__(None, record_limit, record_delta,
                                              level, filter, bubble)

        self.application_name = application_name
        self.apikey = apikey
        self.userkey = userkey
        self.device = device
        self.priority = priority
        self.sound = sound

        # Pre-compute the (possibly truncated) title once.
        if self.application_name is None:
            self.title = None
        else:
            self.title = self._truncate(self.application_name,
                                        self.MAX_TITLE_LEN)

        # Invalid priorities silently fall back to "normal".
        if self.priority not in [-2, -1, 0, 1]:
            self.priority = 0

    @staticmethod
    def _truncate(text, limit):
        """Return *text* capped at *limit* characters, appending ``...`` when
        truncation happened.

        Bug fix: the previous code used ``text[:-3]`` which merely dropped the
        last three characters and could still exceed the limit.
        """
        if len(text) <= limit:
            return text
        return "%s..." % (text[:limit - 3],)

    def emit(self, record):

        message = self._truncate(record.message, self.MAX_MESSAGE_LEN)

        body_dict = {
            'token': self.apikey,
            'user': self.userkey,
            'message': message,
            'priority': self.priority
        }

        if self.title is not None:
            body_dict['title'] = self.title
        if self.device is not None:
            body_dict['device'] = self.device
        if self.sound is not None:
            body_dict['sound'] = self.sound

        body = urlencode(body_dict)
        con = http_client.HTTPSConnection('api.pushover.net')
        con.request('POST', '/1/messages.json', body=body)
        # NOTE(review): fire-and-forget -- the response is never read, so
        # API errors are silently dropped; confirm this is intentional.
        con.close()
16 | 16 | from logbook.helpers import PY2, u |
17 | 17 | |
18 | 18 | if PY2: |
19 | from Queue import Empty, Queue as ThreadQueue | |
19 | from Queue import Empty, Full, Queue as ThreadQueue | |
20 | 20 | else: |
21 | from queue import Empty, Queue as ThreadQueue | |
21 | from queue import Empty, Full, Queue as ThreadQueue | |
22 | 22 | |
23 | 23 | |
24 | 24 | class RedisHandler(Handler): |
43 | 43 | """ |
44 | 44 | def __init__(self, host='127.0.0.1', port=6379, key='redis', extra_fields={}, |
45 | 45 | flush_threshold=128, flush_time=1, level=NOTSET, filter=None, |
46 | password=False, bubble=True, context=None): | |
46 | password=False, bubble=True, context=None, push_method='rpush'): | |
47 | 47 | Handler.__init__(self, level, filter, bubble) |
48 | 48 | try: |
49 | 49 | import redis |
62 | 62 | self.flush_threshold = flush_threshold |
63 | 63 | self.queue = [] |
64 | 64 | self.lock = Lock() |
65 | self.push_method = push_method | |
65 | 66 | |
66 | 67 | #Set up a thread that flushes the queue every specified seconds |
67 | 68 | self._stop_event = threading.Event() |
84 | 85 | """Flushes the messaging queue into Redis. |
85 | 86 | |
86 | 87 | All values are pushed at once for the same key. |
88 | ||
89 | The method rpush/lpush is defined by push_method argument | |
87 | 90 | """ |
88 | 91 | if self.queue: |
89 | self.redis.rpush(self.key, *self.queue) | |
92 | getattr(self.redis, self.push_method)(self.key, *self.queue) | |
90 | 93 | self.queue = [] |
91 | 94 | |
92 | 95 | |
107 | 110 | are also appended to the message. |
108 | 111 | """ |
109 | 112 | with self.lock: |
110 | r = {"message": record.msg, "host": platform.node(), "level": record.level_name} | |
113 | r = {"message": record.msg, | |
114 | "host": platform.node(), | |
115 | "level": record.level_name, | |
116 | "time": record.time.isoformat()} | |
111 | 117 | r.update(self.extra_fields) |
112 | 118 | r.update(record.kwargs) |
113 | 119 | self.queue.append(json.dumps(r)) |
207 | 213 | def emit(self, record): |
208 | 214 | self.socket.send(json.dumps(self.export_record(record)).encode("utf-8")) |
209 | 215 | |
210 | def close(self): | |
211 | self.socket.close() | |
216 | def close(self, linger=-1): | |
217 | self.socket.close(linger) | |
218 | ||
219 | def __del__(self): | |
220 | # When the Handler is deleted we must close our socket in a non-blocking | |
221 | # fashion (using linger). | |
222 | # Otherwise it can block indefinitely, for example if the Subscriber is | |
223 | # not reachable. | |
224 | # If messages are pending on the socket, we wait 100ms for them to be sent | |
225 | # then we discard them. | |
226 | self.close(linger=100) | |
212 | 227 | |
213 | 228 | |
214 | 229 | class ThreadController(object): |
622 | 637 | """ |
623 | 638 | _direct_attrs = frozenset(['handler', 'queue', 'controller']) |
624 | 639 | |
625 | def __init__(self, handler): | |
640 | def __init__(self, handler, maxsize=0): | |
626 | 641 | WrapperHandler.__init__(self, handler) |
627 | self.queue = ThreadQueue(-1) | |
642 | self.queue = ThreadQueue(maxsize) | |
628 | 643 | self.controller = TWHThreadController(self) |
629 | 644 | self.controller.start() |
630 | 645 | |
633 | 648 | self.handler.close() |
634 | 649 | |
635 | 650 | def emit(self, record): |
636 | self.queue.put_nowait(record) | |
651 | try: | |
652 | self.queue.put_nowait(record) | |
653 | except Full: | |
654 | # silently drop | |
655 | pass | |
637 | 656 | |
638 | 657 | |
639 | 658 | class GroupMember(ThreadController): |
12 | 12 | |
13 | 13 | deps = [ |
14 | 14 | "execnet>=1.0.9", |
15 | "nose", | |
15 | "pytest", | |
16 | 16 | "pyzmq", |
17 | 17 | "sqlalchemy", |
18 | 18 | ] |
19 | 19 | |
20 | if python_version < (2, 7): | |
21 | deps.append("unittest2") | |
22 | 20 | if (3, 2) <= python_version < (3, 3): |
23 | 21 | deps.append("markupsafe==0.15") |
24 | 22 | deps.append("Jinja2==2.6") |
105 | 105 | features['speedups'] = speedups |
106 | 106 | setup( |
107 | 107 | name='Logbook', |
108 | version='0.7.0', | |
108 | version='0.10.0', | |
109 | 109 | license='BSD', |
110 | 110 | url='http://logbook.pocoo.org/', |
111 | 111 | author='Armin Ronacher, Georg Brandl', |
118 | 118 | cmdclass=cmdclass, |
119 | 119 | features=features, |
120 | 120 | install_requires=[ |
121 | 'six>=1.4.0', | |
121 | 122 | ], |
122 | 123 | **extra |
123 | 124 | ) |
import logbook
import pytest

# Install a stderr handler application-wide; since 0.10 logbook no longer
# installs a default handler, so without this test output would be swallowed.
logbook.StderrHandler().push_application()
4 | ||
@pytest.fixture
def logger():
    # Fresh named logger per test; records route through whatever handler
    # the test has activated.
    return logbook.Logger('testlogger')
8 | ||
9 | ||
@pytest.fixture
def active_handler(request, test_handler, activation_strategy):
    # Activate the test handler via the (parametrized) strategy, and make
    # sure it is deactivated again when the test finishes.

    s = activation_strategy(test_handler)
    s.activate()

    @request.addfinalizer
    def deactivate():
        s.deactivate()

    return test_handler
21 | ||
22 | ||
@pytest.fixture
def test_handler():
    # Capturing handler used to assert on emitted records.
    return logbook.TestHandler()
26 | ||
27 | ||
class ActivationStrategy(object):
    """Base class for the different ways a handler can be (de)activated.

    Concrete strategies override :meth:`activate` and :meth:`deactivate`;
    the context-manager protocol is layered on top of those two hooks, and
    entering yields the wrapped handler.
    """

    def __init__(self, handler):
        super(ActivationStrategy, self).__init__()
        self.handler = handler

    def __enter__(self):
        self.activate()
        return self.handler

    def __exit__(self, *_):
        self.deactivate()

    def activate(self):  # abstract hook
        raise NotImplementedError()  # pragma: no cover

    def deactivate(self):  # abstract hook
        raise NotImplementedError()  # pragma: no cover
46 | ||
47 | ||
class ContextEnteringStrategy(ActivationStrategy):
    # Activates the handler by driving its own context-manager protocol
    # directly (handler.__enter__/__exit__).

    def activate(self):
        self.handler.__enter__()

    def deactivate(self):
        self.handler.__exit__(None, None, None)
55 | ||
56 | ||
class PushingStrategy(ActivationStrategy):
    # Activates via logbook's push/pop stack API.  Imports are deliberately
    # local: the gevent flag is toggled at runtime by the autouse ``gevent``
    # fixture below, so it must be read at call time, not at import time.

    def activate(self):
        from logbook.concurrency import is_gevent_enabled
        if is_gevent_enabled():
            self.handler.push_greenlet()
        else:
            self.handler.push_thread()

    def deactivate(self):
        from logbook.concurrency import is_gevent_enabled
        if is_gevent_enabled():
            self.handler.pop_greenlet()
        else:
            self.handler.pop_thread()
72 | ||
73 | ||
@pytest.fixture(params=[ContextEnteringStrategy, PushingStrategy])
def activation_strategy(request):
    # Parametrizes every dependent test over both activation mechanisms.
    return request.param
77 | ||
78 | ||
@pytest.fixture
def logfile(tmpdir):
    # Path (as str) to a per-test log file inside pytest's tmp directory.
    return str(tmpdir.join('logfile.log'))
82 | ||
83 | ||
@pytest.fixture
def default_handler(request):
    # Application-wide stderr handler, popped again when the test ends.
    returned = logbook.StderrHandler()
    returned.push_application()
    request.addfinalizer(returned.pop_application)
    return returned
90 | ||
try:
    import gevent

    # NOTE(review): the fixture below shadows the module imported above; the
    # import only serves as an availability probe, so this is harmless.
    @pytest.fixture(scope="module", autouse=True, params=[False, True])
    def gevent(request):
        # Runs every test module twice: once with gevent disabled and once
        # enabled -- except modules matching 'queues'/'processors', which are
        # excluded from gevent mode (presumably incompatible; not shown here).
        module_name = getattr(request.module, '__name__', '')
        if not any(s in module_name for s in ('queues', 'processors')) and request.param:
            from logbook.concurrency import enable_gevent, _disable_gevent
            enable_gevent()

            @request.addfinalizer
            def fin():
                _disable_gevent()
except ImportError:
    pass
0 | import sys | |
1 | import logbook | |
2 | ||
3 | ||
class MyObject(object):
    """Helper whose string conversion logs through the supplied callable.

    Converting an instance to ``str`` therefore re-enters the logging
    machinery while a record is already being formatted, which is exactly
    the situation the deadlock test needs to provoke.
    """

    def __init__(self, logger_func):
        self._logger_func = logger_func

    def __str__(self):
        # Re-entrant logging happens here, in the middle of formatting.
        self._logger_func("this debug message produced in __str__")
        return "<complex object>"
11 | ||
12 | ||
class FakeLock(object):
    """Non-blocking stand-in for a lock.

    Instead of deadlocking on a re-entrant ``acquire``, it records that the
    would-be deadlock happened so the test can assert on it afterwards.
    """

    def __init__(self):
        self._acquired = False
        self._deadlock_occurred = False

    def acquire(self):
        # A second acquire before release would block a real lock forever;
        # remember that it happened and carry on.
        if self._acquired:
            self._deadlock_occurred = True
        self._acquired = True

    def release(self):
        self._acquired = False
25 | ||
26 | ||
def test_deadlock_in_emit():
    # Regression test: emitting a record whose message formatting itself
    # logs (via MyObject.__str__) must not re-acquire the handler's lock
    # while it is already held.  FakeLock records rather than deadlocks.
    logbook_logger = logbook.Logger("logbook")
    obj = MyObject(logbook_logger.info)
    stream_handler = logbook.StreamHandler(stream=sys.stderr, level=logbook.DEBUG)
    stream_handler.lock = FakeLock()
    with stream_handler.applicationbound():
        logbook_logger.info("format this: {}", obj)
    assert not stream_handler.lock._deadlock_occurred
0 | import os | |
1 | import pytest | |
2 | from datetime import datetime | |
3 | ||
4 | import logbook | |
5 | from logbook.helpers import u, xrange | |
6 | ||
7 | from .utils import capturing_stderr_context, LETTERS | |
8 | ||
9 | ||
def test_file_handler(logfile, activation_strategy, logger):
    """A FileHandler writes formatted records to the configured file."""
    file_handler = logbook.FileHandler(
        logfile,
        format_string='{record.level_name}:{record.channel}:'
        '{record.message}',)
    with activation_strategy(file_handler):
        logger.warn('warning message')
    file_handler.close()
    with open(logfile) as written:
        assert written.readline() == 'WARNING:testlogger:warning message\n'
19 | ||
20 | ||
def test_file_handler_unicode(logfile, activation_strategy, logger):
    # Logging a non-ASCII message must not produce any fallback output on
    # stderr (which would indicate an encoding failure inside the handler).
    with capturing_stderr_context() as captured:
        with activation_strategy(logbook.FileHandler(logfile)):
            logger.info(u('\u0431'))
    assert (not captured.getvalue())
26 | ||
27 | ||
def test_file_handler_delay(logfile, activation_strategy, logger):
    """With delay=True the log file is only created on the first record."""
    delayed_handler = logbook.FileHandler(
        logfile,
        format_string='{record.level_name}:{record.channel}:'
        '{record.message}', delay=True)
    # Nothing has been logged yet -> the file must not exist.
    assert not os.path.isfile(logfile)
    with activation_strategy(delayed_handler):
        logger.warn('warning message')
    delayed_handler.close()

    with open(logfile) as written:
        assert written.readline() == 'WARNING:testlogger:warning message\n'
39 | ||
40 | ||
def test_monitoring_file_handler(logfile, activation_strategy, logger):
    # MonitoringFileHandler should notice when the file it writes to is
    # moved away and transparently reopen a fresh file at the same path.
    if os.name == 'nt':
        pytest.skip('unsupported on windows due to different IO (also unneeded)')
    handler = logbook.MonitoringFileHandler(logfile,
                                            format_string='{record.level_name}:{record.channel}:'
                                            '{record.message}', delay=True)
    with activation_strategy(handler):
        logger.warn('warning message')
        # Simulate external log rotation (e.g. logrotate) mid-stream.
        os.rename(logfile, logfile + '.old')
        logger.warn('another warning message')
    handler.close()
    # Only the post-rename record may appear in the newly created file.
    with open(logfile) as f:
        assert f.read().strip() == 'WARNING:testlogger:another warning message'
54 | ||
55 | ||
def test_custom_formatter(activation_strategy, logfile, logger):
    """A callable assigned to ``handler.formatter`` replaces the format string."""
    def level_and_message(record, handler):
        return record.level_name + ':' + record.message

    file_handler = logbook.FileHandler(logfile)
    with activation_strategy(file_handler):
        file_handler.formatter = level_and_message
        logger.warn('Custom formatters are awesome')

    with open(logfile) as written:
        assert written.readline() == 'WARNING:Custom formatters are awesome\n'
67 | ||
68 | ||
def test_rotating_file_handler(logfile, activation_strategy, logger):
    # Each record is 256 chars + newline; with max_size=2048 the handler
    # rolls over every few records and keeps at most backup_count=3 backups.
    basename = os.path.basename(logfile)
    handler = logbook.RotatingFileHandler(logfile, max_size=2048,
                                          backup_count=3,
                                          )
    handler.format_string = '{record.message}'
    with activation_strategy(handler):
        # 32 records: 'a'..'z' then 'A'..'F', each repeated to 256 chars.
        for c, x in zip(LETTERS, xrange(32)):
            logger.warn(c * 256)
    files = [x for x in os.listdir(os.path.dirname(logfile))
             if x.startswith(basename)]
    files.sort()

    # Live file plus exactly three rotated backups survive.
    assert files == [basename, basename + '.1', basename + '.2', basename + '.3']
    # The live (newest) file holds the final records, upper-case 'C'..'F'.
    with open(logfile) as f:
        assert f.readline().rstrip() == ('C' * 256)
        assert f.readline().rstrip() == ('D' * 256)
        assert f.readline().rstrip() == ('E' * 256)
        assert f.readline().rstrip() == ('F' * 256)
88 | ||
89 | ||
@pytest.mark.parametrize("backup_count", [1, 3])
def test_timed_rotating_file_handler(tmpdir, activation_strategy, backup_count):
    basename = str(tmpdir.join('trot.log'))
    handler = logbook.TimedRotatingFileHandler(basename, backup_count=backup_count)
    handler.format_string = '[{record.time:%H:%M}] {record.message}'

    def fake_record(message, year, month, day, hour=0,
                    minute=0, second=0):
        # Build a record with a forged timestamp so date-based rollover can
        # be driven deterministically without real waiting.
        lr = logbook.LogRecord('Test Logger', logbook.WARNING,
                               message)
        lr.time = datetime(year, month, day, hour, minute, second)
        return lr

    with activation_strategy(handler):
        # Four distinct days -> three date-based rollovers.
        for x in xrange(10):
            handler.handle(fake_record('First One', 2010, 1, 5, x + 1))
        for x in xrange(20):
            handler.handle(fake_record('Second One', 2010, 1, 6, x + 1))
        for x in xrange(10):
            handler.handle(fake_record('Third One', 2010, 1, 7, x + 1))
        for x in xrange(20):
            handler.handle(fake_record('Last One', 2010, 1, 8, x + 1))

    files = sorted(x for x in os.listdir(str(tmpdir)) if x.startswith('trot'))

    # Only the newest `backup_count` per-day files may survive.
    assert files == ['trot-2010-01-0{0}.log'.format(i) for i in xrange(5, 9)][-backup_count:]
    with open(str(tmpdir.join('trot-2010-01-08.log'))) as f:
        assert f.readline().rstrip() == '[01:00] Last One'
        assert f.readline().rstrip() == '[02:00] Last One'
    if backup_count > 1:
        with open(str(tmpdir.join('trot-2010-01-07.log'))) as f:
            assert f.readline().rstrip() == '[01:00] Third One'
            assert f.readline().rstrip() == '[02:00] Third One'
0 | import logbook | |
1 | import pytest | |
2 | ||
3 | from .utils import capturing_stderr_context | |
4 | ||
5 | ||
def test_fingerscrossed(activation_strategy, logger, default_handler):
    # FingersCrossedHandler buffers everything silently until a record at
    # or above the action level (WARNING) arrives, then replays the buffer
    # to the wrapped handler.
    handler = logbook.FingersCrossedHandler(default_handler,
                                            logbook.WARNING)

    # if no warning occurs, the infos are not logged
    with activation_strategy(handler):
        with capturing_stderr_context() as captured:
            logger.info('some info')
        assert captured.getvalue() == ''
        assert (not handler.triggered)

    # but if it does, all log messages are output
    with activation_strategy(handler):
        with capturing_stderr_context() as captured:
            logger.info('some info')
            logger.warning('something happened')
            logger.info('something else happened')
            logs = captured.getvalue()
            assert 'some info' in logs
            assert 'something happened' in logs
            assert 'something else happened' in logs
        assert handler.triggered
28 | ||
29 | ||
def test_fingerscrossed_factory(activation_strategy, logger):
    # Instead of a fixed target handler, FingersCrossedHandler also accepts
    # a factory that is invoked (once per trigger) to build the handler the
    # buffered records are replayed into.
    handlers = []

    def handler_factory(record, fch):
        handler = logbook.TestHandler()
        handlers.append(handler)
        return handler

    def make_fch():
        return logbook.FingersCrossedHandler(handler_factory,
                                             logbook.WARNING)

    fch = make_fch()
    with activation_strategy(fch):
        logger.info('some info')
        # Still below the trigger level -> factory not called yet.
        assert len(handlers) == 0
        logger.warning('a warning')
        # Triggered: exactly one handler was created.
        assert len(handlers) == 1
        logger.error('an error')
        # Subsequent records reuse the same handler.
        assert len(handlers) == 1
        assert handlers[0].has_infos
        assert handlers[0].has_warnings
        assert handlers[0].has_errors
        assert (not handlers[0].has_notices)
        assert (not handlers[0].has_criticals)
        assert (not handlers[0].has_debugs)

    # A fresh FingersCrossedHandler triggers independently.
    fch = make_fch()
    with activation_strategy(fch):
        logger.info('some info')
        logger.warning('a warning')
        assert len(handlers) == 2
62 | ||
63 | ||
def test_fingerscrossed_buffer_size(activation_strategy):
    """With buffer_size=3, only the three most recent records are replayed
    once the handler triggers; older ones are silently dropped."""
    logger = logbook.Logger('Test')
    test_handler = logbook.TestHandler()
    handler = logbook.FingersCrossedHandler(test_handler, buffer_size=3)

    with activation_strategy(handler):
        logger.info('Never gonna give you up')  # pushed out of the buffer
        logger.warn('Aha!')
        logger.warn('Moar!')
        logger.error('Pure hate!')

    expected = ['[WARNING] Test: Aha!',
                '[WARNING] Test: Moar!',
                '[ERROR] Test: Pure hate!']
    assert test_handler.formatted_records == expected
76 |
0 | import logbook | |
1 | ||
2 | import pytest | |
3 | ||
4 | from .utils import capturing_stderr_context | |
5 | ||
6 | ||
def test_error_flag(logger):
    # Flags nest: the innermost errors= setting wins for formatting errors
    # ('Foo {42}' with a positional arg is an invalid format call).
    with capturing_stderr_context() as captured:
        # silent inside print -> nothing reaches stderr
        with logbook.Flags(errors='print'):
            with logbook.Flags(errors='silent'):
                logger.warn('Foo {42}', 'aha')
        assert captured.getvalue() == ''

        # print inside silent -> the formatting error is printed
        with logbook.Flags(errors='silent'):
            with logbook.Flags(errors='print'):
                logger.warn('Foo {42}', 'aha')
        assert captured.getvalue() != ''

        # 'raise' escalates the formatting problem to the caller.
        with pytest.raises(Exception) as caught:
            with logbook.Flags(errors='raise'):
                logger.warn('Foo {42}', 'aha')
        assert 'Could not format message with provided arguments' in str(caught.value)
23 | ||
24 | ||
def test_disable_introspection(logger):
    """With introspection disabled, records carry no frame information."""
    with logbook.Flags(introspection=False):
        with logbook.TestHandler() as h:
            logger.warn('Testing')
            record = h.records[0]
            for attribute in ('frame', 'calling_frame', 'module'):
                assert getattr(record, attribute) is None
0 | import logbook | |
1 | ||
2 | ||
def test_groups(logger):
    """A LoggerGroup applies its level and processor to member loggers."""
    def inject_extra(record):
        record.extra['foo'] = 'bar'

    logger_group = logbook.LoggerGroup(processor=inject_extra)
    logger_group.level = logbook.ERROR
    logger_group.add_logger(logger)
    with logbook.TestHandler() as handler:
        logger.warn('A warning')   # below the group level -> dropped
        logger.error('An error')   # at the group level -> recorded
    assert not handler.has_warning('A warning')
    assert handler.has_error('An error')
    # The group's processor ran on the surviving record.
    assert handler.records[0].extra['foo'] == 'bar'
0 | import re | |
1 | import sys | |
2 | ||
3 | import logbook | |
4 | ||
5 | import pytest | |
6 | ||
7 | from .utils import capturing_stderr_context | |
8 | ||
9 | __file_without_pyc__ = __file__ | |
10 | if __file_without_pyc__.endswith('.pyc'): | |
11 | __file_without_pyc__ = __file_without_pyc__[:-1] | |
12 | ||
13 | ||
def test_handler_exception(activation_strategy, logger):
    """An exception raised in emit() is reported on stderr instead of
    silently swallowing (or emitting) the record."""
    class ErroringHandler(logbook.TestHandler):

        def emit(self, record):
            raise RuntimeError('something bad happened')

    with capturing_stderr_context() as captured:
        with activation_strategy(ErroringHandler()):
            logger.warn('I warn you.')
    output = captured.getvalue()
    assert 'something bad happened' in output
    assert 'I warn you' not in output
25 | ||
26 | ||
def test_formatting_exception():
    """A broken format spec must raise a TypeError whose message names the
    msg, args, kwargs and the source location of the offending call."""
    def make_record():
        return logbook.LogRecord('Test Logger', logbook.WARNING,
                                 'Hello {foo:invalid}',
                                 kwargs={'foo': 42},
                                 frame=sys._getframe())
    record = make_record()
    with pytest.raises(TypeError) as caught:
        record.message

    errormsg = str(caught.value)
    # The wording of the underlying error differs between CPython/PyPy
    # versions, hence the alternatives.  The alternation must be grouped:
    # previously the top-level '|' meant a bare "conversion specification"
    # or "format spec" anywhere in the text satisfied the search even
    # without the required "Could not format message" prefix.
    assert re.search('Could not format message with provided arguments: '
                     'Invalid (?:format specifier|conversion specification|format spec)',
                     errormsg, re.M | re.S)
    assert "msg='Hello {foo:invalid}'" in errormsg
    assert 'args=()' in errormsg
    assert "kwargs={'foo': 42}" in errormsg
    assert re.search(r'Happened in file .*%s, line \d+' % re.escape(__file_without_pyc__),
                     errormsg, re.M | re.S)
0 | import logbook | |
1 | ||
2 | from .utils import capturing_stderr_context, make_fake_mail_handler | |
3 | ||
4 | ||
def test_custom_logger(activation_strategy, logger):
    # Logger subclasses can enrich every record via process_record().
    client_ip = '127.0.0.1'

    class CustomLogger(logbook.Logger):

        def process_record(self, record):
            record.extra['ip'] = client_ip

    custom_log = CustomLogger('awesome logger')
    fmt = '[{record.level_name}] {record.channel}: ' \
        '{record.message} [{record.extra[ip]}]'
    handler = logbook.TestHandler(format_string=fmt)
    assert handler.format_string == fmt

    with activation_strategy(handler):
        custom_log.warn('Too many sounds')
        logger.warn('"Music" playing')

    # The plain logger has no 'ip' extra, so its bracket renders empty.
    assert handler.formatted_records == [
        '[WARNING] awesome logger: Too many sounds [127.0.0.1]',
        '[WARNING] testlogger: "Music" playing []']
26 | ||
27 | ||
def test_custom_handling(activation_strategy, logger):
    # A handler can veto records in handle(); vetoed records bubble on to
    # the next handler in the chain (here: the default stderr handler).
    class MyTestHandler(logbook.TestHandler):
        def handle(self, record):
            if record.extra.get('flag') != 'testing':
                return False
            return logbook.TestHandler.handle(self, record)

    # Check metaclass (== cls.__class__)
    assert logbook.Handler.__class__ == logbook.handlers._HandlerType

    class MyLogger(logbook.Logger):
        def process_record(self, record):
            logbook.Logger.process_record(self, record)
            record.extra['flag'] = 'testing'

    log = MyLogger()
    handler = MyTestHandler()
    with capturing_stderr_context() as captured:
        with activation_strategy(handler):
            log.warn('From my logger')
            logger.warn('From another logger')
        # Only the flagged record was accepted by MyTestHandler ...
        assert handler.has_warning('From my logger')
        # ... while the unflagged one fell through to stderr.
        assert 'From another logger' in captured.getvalue()
51 | ||
52 | ||
def test_nested_setups(activation_strategy):
    # NestedSetup activates/deactivates a whole stack of handlers as one
    # unit: a NullHandler (blocks bubbling to stderr), a TestHandler at
    # WARNING, and a bubbling mail handler at its default (ERROR) level.
    with capturing_stderr_context() as captured:
        logger = logbook.Logger('App')
        test_handler = logbook.TestHandler(level='WARNING')
        mail_handler = make_fake_mail_handler(bubble=True)

        handlers = logbook.NestedSetup([
            logbook.NullHandler(),
            test_handler,
            mail_handler
        ])

        with activation_strategy(handlers):
            logger.warn('This is a warning')
            logger.error('This is also a mail')
            try:
                1 / 0
            except Exception:
                logger.exception()
        # Outside the setup the default stderr handler is active again.
        logger.warn('And here we go straight back to stderr')

        assert test_handler.has_warning('This is a warning')
        assert test_handler.has_error('This is also a mail')
        # Both ERROR-level records (explicit error + the exception) mailed.
        assert len(mail_handler.mails) == 2
        assert 'This is also a mail' in mail_handler.mails[0][2]
        assert '1 / 0' in mail_handler.mails[1][2]
        assert 'And here we go straight back to stderr' in captured.getvalue()

        # The same NestedSetup also supports re-activation and explicit
        # application-wide push/pop.
        with activation_strategy(handlers):
            logger.warn('threadbound warning')

        handlers.push_application()
        try:
            logger.warn('applicationbound warning')
        finally:
            handlers.pop_application()
89 | ||
90 | ||
def test_filtering(activation_strategy):
    """``handler.filter`` decides per record; rejected records bubble
    outward to the next handler."""
    logger1 = logbook.Logger('Logger1')
    logger2 = logbook.Logger('Logger2')
    handler = logbook.TestHandler()
    outer_handler = logbook.TestHandler()

    def accept_logger1(record, handler):
        return record.dispatcher is logger1

    handler.filter = accept_logger1

    with activation_strategy(outer_handler):
        with activation_strategy(handler):
            logger1.warn('foo')
            logger2.warn('bar')

    # 'foo' was accepted by the inner handler and never bubbled; 'bar' was
    # rejected there and landed in the outer handler instead.
    assert handler.has_warning('foo', channel='Logger1')
    assert not handler.has_warning('bar', channel='Logger2')
    assert not outer_handler.has_warning('foo', channel='Logger1')
    assert outer_handler.has_warning('bar', channel='Logger2')
110 | ||
111 | ||
def test_different_context_pushing(activation_strategy):
    """Stacked handlers each capture records at their own level, and a
    record stops bubbling at the first handler that accepts it."""
    debug_handler = logbook.TestHandler(level=logbook.DEBUG)
    info_handler = logbook.TestHandler(level=logbook.INFO)
    warning_handler = logbook.TestHandler(level=logbook.WARNING)
    logger = logbook.Logger('Testing')

    with activation_strategy(debug_handler):
        with activation_strategy(info_handler):
            with activation_strategy(warning_handler):
                logger.warn('Wuuu')
                logger.info('still awesome')
                logger.debug('puzzled')

    assert debug_handler.has_debug('puzzled')
    assert info_handler.has_info('still awesome')
    assert warning_handler.has_warning('Wuuu')
    # Each record landed in exactly one handler.
    for captured in (debug_handler, info_handler, warning_handler):
        assert len(captured.records) == 1
130 | ||
131 | ||
def test_default_handlers(logger):
    """Without any explicit handler a warning ends up on stderr."""
    with capturing_stderr_context() as stream:
        logger.warn('Aha!')
    assert 'WARNING: testlogger: Aha!' in stream.getvalue()
0 | # -*- coding: utf-8 -*- | |
1 | ||
2 | from logbook.helpers import u | |
3 | from datetime import datetime | |
4 | ||
5 | import pytest | |
6 | ||
7 | ||
def test_jsonhelper():
    # to_safe_json coerces arbitrary structures to JSON-compatible values:
    # datetimes become ISO strings, unknown objects become None, and
    # non-string dict keys are stringified via __str__.
    from logbook.helpers import to_safe_json

    class Bogus(object):
        def __str__(self):
            return 'bogus'

    rv = to_safe_json([
        None,
        'foo',
        u('jäger'),
        1,
        datetime(2000, 1, 1),
        {'jäger1': 1, u('jäger2'): 2, Bogus(): 3, 'invalid': object()},
        object()  # invalid
    ])

    assert rv == [None, u('foo'), u('jäger'), 1, '2000-01-01T00:00:00Z',
                  {u('jäger1'): 1, u('jäger2'): 2, u('bogus'): 3,
                   u('invalid'): None}, None]
28 | ||
29 | ||
def test_datehelpers():
    """format_iso8601 emits the current timestamp; parse_iso8601 handles
    fractional seconds, 'Z' and numeric UTC offsets."""
    from logbook.helpers import format_iso8601, parse_iso8601

    current = datetime.now()
    stamp = format_iso8601()
    assert stamp[:4] == str(current.year)

    with pytest.raises(ValueError):
        parse_iso8601('foo')

    parsed = parse_iso8601('2000-01-01T00:00:00.12Z')
    assert parsed.microsecond == 120000
    # Offsets are normalized to UTC.
    for text, expected_hour in (('2000-01-01T12:00:00+01:00', 11),
                                ('2000-01-01T12:00:00-01:00', 13)):
        assert parse_iso8601(text).hour == expected_hour
0 | import sys | |
1 | ||
2 | import logbook | |
3 | ||
4 | from .utils import capturing_stderr_context | |
5 | ||
6 | ||
def test_exc_info_when_no_exceptions_exist(logger):
    """exc_info=True outside any except block must not fabricate a traceback."""
    with capturing_stderr_context() as captured:
        with logbook.StreamHandler(sys.stderr):
            logger.debug('message', exc_info=True)
    assert 'Traceback' not in captured.getvalue()
12 | ||
def test_extradict(active_handler, logger):
    # record.extra behaves like a defaultdict: missing keys read as '' and
    # a read does not persist the key (the repr below only shows the key
    # that was explicitly assigned).
    logger.warn('Test warning')
    record = active_handler.records[0]
    record.extra['existing'] = 'foo'
    assert record.extra['nonexisting'] == ''
    assert record.extra['existing'] == 'foo'
    assert repr(record.extra) == "ExtraDict({'existing': 'foo'})"
20 | ||
21 | ||
def test_calling_frame(active_handler, logger):
    # The record must point at the caller's frame; the log call and the
    # sys._getframe() lookup must stay in the same function for this to hold.
    logger.warn('test')
    assert active_handler.records[0].calling_frame == sys._getframe()
25 | ||
def test_frame_correction(active_handler, logger):
    # frame_correction=+1 walks one frame further up the stack, so a
    # wrapper (inner) attributes the record to its own caller.
    def inner():
        logger.warn('test', frame_correction=+1)

    inner()
    assert active_handler.records[0].calling_frame == sys._getframe()
32 | ||
def test_dispatcher(active_handler, logger):
    """Each record remembers the logger that dispatched it."""
    logger.warn('Logbook is too awesome for stdlib')
    record = active_handler.records[0]
    assert record.dispatcher == logger
0 | # -*- coding: utf-8 -*- | |
1 | from .utils import ( | |
2 | LogbookTestCase, | |
3 | activate_via_push_pop, | |
4 | activate_via_with_statement, | |
5 | capturing_stderr_context, | |
6 | get_total_delta_seconds, | |
7 | make_fake_mail_handler, | |
8 | missing, | |
9 | require_module, | |
10 | require_py3, | |
11 | ) | |
12 | from contextlib import closing, contextmanager | |
13 | from datetime import datetime, timedelta | |
14 | from random import randrange | |
15 | 0 | import logbook |
16 | from logbook.helpers import StringIO, xrange, iteritems, zip, u | |
17 | import os | |
18 | import pickle | |
19 | import re | |
20 | import shutil | |
21 | import socket | |
22 | import sys | |
23 | import tempfile | |
24 | import time | |
25 | import json | |
26 | try: | |
27 | from thread import get_ident | |
28 | except ImportError: | |
29 | from _thread import get_ident | |
30 | import base64 | |
31 | 1 | |
32 | __file_without_pyc__ = __file__ | |
33 | if __file_without_pyc__.endswith(".pyc"): | |
34 | __file_without_pyc__ = __file_without_pyc__[:-1] | |
35 | ||
36 | LETTERS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" | |
37 | ||
class _BasicAPITestCase(LogbookTestCase):
    """Core API behaviour of loggers, records and handlers.

    Abstract: the concrete subclasses below set
    ``thread_activation_strategy`` to either the with-statement or the
    push/pop activation style, so every test here runs under both.

    Fixes applied in review: the format-error regex now groups its
    alternation (previously a bare "format spec" anywhere satisfied it),
    the file path interpolated into a regex is re.escape()d (matching the
    pytest port of this test), and deprecated ``assertEquals`` was
    replaced by ``assertEqual``.
    """

    def test_basic_logging(self):
        with self.thread_activation_strategy(logbook.TestHandler()) as handler:
            self.log.warn('This is a warning. Nice hah?')

        self.assert_(handler.has_warning('This is a warning. Nice hah?'))
        self.assertEqual(handler.formatted_records, [
            '[WARNING] testlogger: This is a warning. Nice hah?'
        ])

    def test_extradict(self):
        # record.extra behaves like a defaultdict: missing keys read as ''
        # and reads do not persist the key.
        with self.thread_activation_strategy(logbook.TestHandler()) as handler:
            self.log.warn('Test warning')
        record = handler.records[0]
        record.extra['existing'] = 'foo'
        self.assertEqual(record.extra['nonexisting'], '')
        self.assertEqual(record.extra['existing'], 'foo')
        self.assertEqual(repr(record.extra),
                         'ExtraDict({\'existing\': \'foo\'})')

    def test_custom_logger(self):
        # Logger subclasses can enrich every record via process_record().
        client_ip = '127.0.0.1'

        class CustomLogger(logbook.Logger):
            def process_record(self, record):
                record.extra['ip'] = client_ip

        custom_log = CustomLogger('awesome logger')
        fmt = '[{record.level_name}] {record.channel}: ' \
            '{record.message} [{record.extra[ip]}]'
        handler = logbook.TestHandler(format_string=fmt)
        self.assertEqual(handler.format_string, fmt)

        with self.thread_activation_strategy(handler):
            custom_log.warn('Too many sounds')
            self.log.warn('"Music" playing')

        # The plain logger has no 'ip' extra, so its bracket renders empty.
        self.assertEqual(handler.formatted_records, [
            '[WARNING] awesome logger: Too many sounds [127.0.0.1]',
            '[WARNING] testlogger: "Music" playing []'
        ])

    def test_handler_exception(self):
        # An exception raised in emit() is reported on stderr instead of
        # silently swallowing (or emitting) the record.
        class ErroringHandler(logbook.TestHandler):
            def emit(self, record):
                raise RuntimeError('something bad happened')

        with capturing_stderr_context() as stderr:
            with self.thread_activation_strategy(ErroringHandler()) as handler:
                self.log.warn('I warn you.')
        self.assert_('something bad happened' in stderr.getvalue())
        self.assert_('I warn you' not in stderr.getvalue())

    def test_formatting_exception(self):
        # A broken format spec must raise a TypeError naming msg, args,
        # kwargs and the source location of the offending call.
        def make_record():
            return logbook.LogRecord('Test Logger', logbook.WARNING,
                                     'Hello {foo:invalid}',
                                     kwargs={'foo': 42},
                                     frame=sys._getframe())
        record = make_record()
        with self.assertRaises(TypeError) as caught:
            record.message

        errormsg = str(caught.exception)
        # Grouped alternation: the previous top-level '|' let a bare
        # "conversion specification"/"format spec" anywhere in the text
        # satisfy the match without the required prefix.
        self.assertRegexpMatches(errormsg,
            "Could not format message with provided arguments: Invalid (?:format specifier|conversion specification|format spec)")
        self.assertIn("msg='Hello {foo:invalid}'", errormsg)
        self.assertIn('args=()', errormsg)
        self.assertIn("kwargs={'foo': 42}", errormsg)
        # re.escape: the path contains regex metacharacters on Windows.
        self.assertRegexpMatches(
            errormsg,
            r'Happened in file .*%s, line \d+' % re.escape(__file_without_pyc__))

    def test_exception_catching(self):
        logger = logbook.Logger('Test')
        with self.thread_activation_strategy(logbook.TestHandler()) as handler:
            self.assertFalse(handler.has_error())
            try:
                1 / 0
            except Exception:
                logger.exception()
            try:
                1 / 0
            except Exception:
                logger.exception('Awesome')
            self.assert_(handler.has_error('Uncaught exception occurred'))
            self.assert_(handler.has_error('Awesome'))
        self.assertIsNotNone(handler.records[0].exc_info)
        self.assertIn('1 / 0', handler.records[0].formatted_exception)

    def test_exc_info_tuple(self):
        self._test_exc_info(as_tuple=True)

    def test_exc_info_true(self):
        self._test_exc_info(as_tuple=False)

    def _test_exc_info(self, as_tuple):
        # exc_info may be passed either as the sys.exc_info() triple or as
        # True (meaning "grab the current exception").
        logger = logbook.Logger("Test")
        with self.thread_activation_strategy(logbook.TestHandler()) as handler:
            try:
                1 / 0
            except Exception:
                exc_info = sys.exc_info()
                logger.info("Exception caught", exc_info=exc_info if as_tuple else True)
        self.assertIsNotNone(handler.records[0].exc_info)
        self.assertEqual(handler.records[0].exc_info, exc_info)

    def test_exporting(self):
        # to_dict()/from_dict() round-trips all public record attributes.
        with self.thread_activation_strategy(logbook.TestHandler()) as handler:
            try:
                1 / 0
            except Exception:
                self.log.exception()
            record = handler.records[0]

        exported = record.to_dict()
        record.close()
        imported = logbook.LogRecord.from_dict(exported)
        for key, value in iteritems(record.__dict__):
            if key[0] == '_':
                continue
            self.assertEqual(value, getattr(imported, key))

    def test_pickle(self):
        # Records survive pickling at every supported protocol once their
        # lazy attributes have been pulled.
        with self.thread_activation_strategy(logbook.TestHandler()) as handler:
            try:
                1 / 0
            except Exception:
                self.log.exception()
            record = handler.records[0]
        record.pull_information()
        record.close()

        for p in xrange(pickle.HIGHEST_PROTOCOL):
            exported = pickle.dumps(record, p)
            imported = pickle.loads(exported)
            for key, value in iteritems(record.__dict__):
                if key[0] == '_':
                    continue
                imported_value = getattr(imported, key)
                if isinstance(value, ZeroDivisionError):
                    # in Python 3.2, ZeroDivisionError(x) != ZeroDivisionError(x)
                    self.assert_(type(value) is type(imported_value))
                    self.assertEqual(value.args, imported_value.args)
                else:
                    self.assertEqual(value, imported_value)

    def test_timedate_format(self):
        """
        tests the logbook.set_datetime_format() function
        """
        FORMAT_STRING = '{record.time:%H:%M:%S} {record.message}'
        handler = logbook.TestHandler(format_string=FORMAT_STRING)
        handler.push_thread()
        logbook.set_datetime_format('utc')
        try:
            self.log.warn('This is a warning.')
            time_utc = handler.records[0].time
            logbook.set_datetime_format('local')
            self.log.warn('This is a warning.')
            time_local = handler.records[1].time
        finally:
            handler.pop_thread()
            # put back the default time factory
            logbook.set_datetime_format('utc')

        # get the expected difference between local and utc time
        t1 = datetime.now()
        t2 = datetime.utcnow()

        tz_minutes_diff = get_total_delta_seconds(t1 - t2)/60.0

        if abs(tz_minutes_diff) < 1:
            self.skipTest("Cannot test utc/localtime differences if they vary by less than one minute...")

        # get the difference between LogRecord local and utc times
        logbook_minutes_diff = get_total_delta_seconds(time_local - time_utc)/60.0
        self.assertGreater(abs(logbook_minutes_diff), 1, "Localtime does not differ from UTC by more than 1 minute (Local: %s, UTC: %s)" % (time_local, time_utc))

        ratio = logbook_minutes_diff / tz_minutes_diff

        # The record-level difference must track the system tz offset.
        self.assertGreater(ratio, 0.99)
        self.assertLess(ratio, 1.01)
221 | ||
class BasicAPITestCase_Regular(_BasicAPITestCase):
    # Runs the shared API tests activating handlers via ``with`` blocks.
    # NOTE(review): the names look swapped relative to
    # BasicAPITestCase_Contextmgr (which uses push/pop), but renaming
    # would break external references to these test classes.
    def setUp(self):
        super(BasicAPITestCase_Regular, self).setUp()
        self.thread_activation_strategy = activate_via_with_statement
226 | ||
class BasicAPITestCase_Contextmgr(_BasicAPITestCase):
    # Runs the shared API tests activating handlers via explicit
    # push_thread()/pop_thread() calls.
    def setUp(self):
        super(BasicAPITestCase_Contextmgr, self).setUp()
        self.thread_activation_strategy = activate_via_push_pop
231 | ||
232 | class _HandlerTestCase(LogbookTestCase): | |
233 | def setUp(self): | |
234 | super(_HandlerTestCase, self).setUp() | |
235 | self.dirname = tempfile.mkdtemp() | |
236 | self.filename = os.path.join(self.dirname, 'log.tmp') | |
237 | ||
238 | def tearDown(self): | |
239 | shutil.rmtree(self.dirname) | |
240 | super(_HandlerTestCase, self).tearDown() | |
241 | ||
242 | def test_file_handler(self): | |
243 | handler = logbook.FileHandler(self.filename, | |
244 | format_string='{record.level_name}:{record.channel}:' | |
245 | '{record.message}',) | |
246 | with self.thread_activation_strategy(handler): | |
247 | self.log.warn('warning message') | |
248 | handler.close() | |
249 | with open(self.filename) as f: | |
250 | self.assertEqual(f.readline(), | |
251 | 'WARNING:testlogger:warning message\n') | |
252 | ||
253 | def test_file_handler_unicode(self): | |
254 | with capturing_stderr_context() as captured: | |
255 | with self.thread_activation_strategy(logbook.FileHandler(self.filename)) as h: | |
256 | self.log.info(u('\u0431')) | |
257 | self.assertFalse(captured.getvalue()) | |
258 | ||
259 | def test_file_handler_delay(self): | |
260 | handler = logbook.FileHandler(self.filename, | |
261 | format_string='{record.level_name}:{record.channel}:' | |
262 | '{record.message}', delay=True) | |
263 | self.assertFalse(os.path.isfile(self.filename)) | |
264 | with self.thread_activation_strategy(handler): | |
265 | self.log.warn('warning message') | |
266 | handler.close() | |
267 | ||
268 | with open(self.filename) as f: | |
269 | self.assertEqual(f.readline(), | |
270 | 'WARNING:testlogger:warning message\n') | |
271 | ||
272 | def test_monitoring_file_handler(self): | |
273 | if os.name == "nt": | |
274 | self.skipTest("unsupported on windows due to different IO (also unneeded)") | |
275 | handler = logbook.MonitoringFileHandler(self.filename, | |
276 | format_string='{record.level_name}:{record.channel}:' | |
277 | '{record.message}', delay=True) | |
278 | with self.thread_activation_strategy(handler): | |
279 | self.log.warn('warning message') | |
280 | os.rename(self.filename, self.filename + '.old') | |
281 | self.log.warn('another warning message') | |
282 | handler.close() | |
283 | with open(self.filename) as f: | |
284 | self.assertEqual(f.read().strip(), | |
285 | 'WARNING:testlogger:another warning message') | |
286 | ||
287 | def test_custom_formatter(self): | |
288 | def custom_format(record, handler): | |
289 | return record.level_name + ':' + record.message | |
290 | handler = logbook.FileHandler(self.filename) | |
291 | with self.thread_activation_strategy(handler): | |
292 | handler.formatter = custom_format | |
293 | self.log.warn('Custom formatters are awesome') | |
294 | ||
295 | with open(self.filename) as f: | |
296 | self.assertEqual(f.readline(), | |
297 | 'WARNING:Custom formatters are awesome\n') | |
298 | ||
    def test_rotating_file_handler(self):
        # Push 32 oversized (256-char) records through a handler capped at
        # 2 KB per file with three backups: only the current file plus
        # three numbered backups may survive, and the current file must
        # start with the newest surviving records.
        basename = os.path.join(self.dirname, 'rot.log')
        handler = logbook.RotatingFileHandler(basename, max_size=2048,
                                              backup_count=3,
                                              )
        handler.format_string = '{record.message}'
        with self.thread_activation_strategy(handler):
            for c, x in zip(LETTERS, xrange(32)):
                self.log.warn(c * 256)
        files = [x for x in os.listdir(self.dirname)
                 if x.startswith('rot.log')]
        files.sort()

        self.assertEqual(files, ['rot.log', 'rot.log.1', 'rot.log.2',
                                 'rot.log.3'])
        with open(basename) as f:
            self.assertEqual(f.readline().rstrip(), 'C' * 256)
            self.assertEqual(f.readline().rstrip(), 'D' * 256)
            self.assertEqual(f.readline().rstrip(), 'E' * 256)
            self.assertEqual(f.readline().rstrip(), 'F' * 256)
319 | ||
    def test_timed_rotating_file_handler(self):
        # Records carrying four distinct (fake) dates must be split into
        # per-day files; with backup_count=3 only the three newest days
        # survive, each file holding its own day's records in order.
        basename = os.path.join(self.dirname, 'trot.log')
        handler = logbook.TimedRotatingFileHandler(basename, backup_count=3)
        handler.format_string = '[{record.time:%H:%M}] {record.message}'

        def fake_record(message, year, month, day, hour=0,
                        minute=0, second=0):
            # Build a LogRecord with a controlled timestamp so rotation is
            # driven by record time rather than wall-clock time.
            lr = logbook.LogRecord('Test Logger', logbook.WARNING,
                                   message)
            lr.time = datetime(year, month, day, hour, minute, second)
            return lr

        with self.thread_activation_strategy(handler):
            for x in xrange(10):
                handler.handle(fake_record('First One', 2010, 1, 5, x + 1))
            for x in xrange(20):
                handler.handle(fake_record('Second One', 2010, 1, 6, x + 1))
            for x in xrange(10):
                handler.handle(fake_record('Third One', 2010, 1, 7, x + 1))
            for x in xrange(20):
                handler.handle(fake_record('Last One', 2010, 1, 8, x + 1))

        files = sorted(
            x for x in os.listdir(self.dirname) if x.startswith('trot')
        )
        self.assertEqual(files, ['trot-2010-01-06.log', 'trot-2010-01-07.log',
                                 'trot-2010-01-08.log'])
        with open(os.path.join(self.dirname, 'trot-2010-01-08.log')) as f:
            self.assertEqual(f.readline().rstrip(), '[01:00] Last One')
            self.assertEqual(f.readline().rstrip(), '[02:00] Last One')
        with open(os.path.join(self.dirname, 'trot-2010-01-07.log')) as f:
            self.assertEqual(f.readline().rstrip(), '[01:00] Third One')
            self.assertEqual(f.readline().rstrip(), '[02:00] Third One')
353 | ||
354 | def test_mail_handler(self): | |
355 | subject = u('\xf8nicode') | |
356 | handler = make_fake_mail_handler(subject=subject) | |
357 | with capturing_stderr_context() as fallback: | |
358 | with self.thread_activation_strategy(handler): | |
359 | self.log.warn('This is not mailed') | |
360 | try: | |
361 | 1 / 0 | |
362 | except Exception: | |
363 | self.log.exception(u('Viva la Espa\xf1a')) | |
364 | ||
365 | if not handler.mails: | |
366 | # if sending the mail failed, the reason should be on stderr | |
367 | self.fail(fallback.getvalue()) | |
368 | ||
369 | self.assertEqual(len(handler.mails), 1) | |
370 | sender, receivers, mail = handler.mails[0] | |
371 | mail = mail.replace("\r", "") | |
372 | self.assertEqual(sender, handler.from_addr) | |
373 | self.assert_('=?utf-8?q?=C3=B8nicode?=' in mail) | |
374 | header, data = mail.split("\n\n", 1) | |
375 | if "Content-Transfer-Encoding: base64" in header: | |
376 | data = base64.b64decode(data).decode("utf-8") | |
377 | self.assertRegexpMatches(data, 'Message type:\s+ERROR') | |
378 | self.assertRegexpMatches(data, 'Location:.*%s' % __file_without_pyc__) | |
379 | self.assertRegexpMatches(data, 'Module:\s+%s' % __name__) | |
380 | self.assertRegexpMatches(data, 'Function:\s+test_mail_handler') | |
381 | body = u('Viva la Espa\xf1a') | |
382 | if sys.version_info < (3, 0): | |
383 | body = body.encode('utf-8') | |
384 | self.assertIn(body, data) | |
385 | self.assertIn('\nTraceback (most', data) | |
386 | self.assertIn('1 / 0', data) | |
387 | self.assertIn('This is not mailed', fallback.getvalue()) | |
388 | ||
389 | def test_mail_handler_record_limits(self): | |
390 | suppression_test = re.compile('This message occurred additional \d+ ' | |
391 | 'time\(s\) and was suppressed').search | |
392 | handler = make_fake_mail_handler(record_limit=1, | |
393 | record_delta=timedelta(seconds=0.5)) | |
394 | with self.thread_activation_strategy(handler): | |
395 | later = datetime.utcnow() + timedelta(seconds=1.1) | |
396 | while datetime.utcnow() < later: | |
397 | self.log.error('Over and over...') | |
398 | ||
399 | # first mail that is always delivered + 0.5 seconds * 2 | |
400 | # and 0.1 seconds of room for rounding errors makes 3 mails | |
401 | self.assertEqual(len(handler.mails), 3) | |
402 | ||
403 | # first mail is always delivered | |
404 | self.assert_(not suppression_test(handler.mails[0][2])) | |
405 | ||
406 | # the next two have a supression count | |
407 | self.assert_(suppression_test(handler.mails[1][2])) | |
408 | self.assert_(suppression_test(handler.mails[2][2])) | |
409 | ||
410 | def test_mail_handler_batching(self): | |
411 | mail_handler = make_fake_mail_handler() | |
412 | handler = logbook.FingersCrossedHandler(mail_handler, reset=True) | |
413 | with self.thread_activation_strategy(handler): | |
414 | self.log.warn('Testing') | |
415 | self.log.debug('Even more') | |
416 | self.log.error('And this triggers it') | |
417 | self.log.info('Aha') | |
418 | self.log.error('And this triggers it again!') | |
419 | ||
420 | self.assertEqual(len(mail_handler.mails), 2) | |
421 | mail = mail_handler.mails[0][2] | |
422 | ||
423 | pieces = mail.split('Log records that led up to this one:') | |
424 | self.assertEqual(len(pieces), 2) | |
425 | body, rest = pieces | |
426 | rest = rest.replace("\r", "") | |
427 | ||
428 | self.assertRegexpMatches(body, 'Message type:\s+ERROR') | |
429 | self.assertRegexpMatches(body, 'Module:\s+%s' % __name__) | |
430 | self.assertRegexpMatches(body, 'Function:\s+test_mail_handler_batching') | |
431 | ||
432 | related = rest.strip().split('\n\n') | |
433 | self.assertEqual(len(related), 2) | |
434 | self.assertRegexpMatches(related[0], 'Message type:\s+WARNING') | |
435 | self.assertRegexpMatches(related[1], 'Message type:\s+DEBUG') | |
436 | ||
437 | self.assertIn('And this triggers it again', mail_handler.mails[1][2]) | |
438 | ||
439 | def test_group_handler_mail_combo(self): | |
440 | mail_handler = make_fake_mail_handler(level=logbook.DEBUG) | |
441 | handler = logbook.GroupHandler(mail_handler) | |
442 | with self.thread_activation_strategy(handler): | |
443 | self.log.error('The other way round') | |
444 | self.log.warn('Testing') | |
445 | self.log.debug('Even more') | |
446 | self.assertEqual(mail_handler.mails, []) | |
447 | ||
448 | self.assertEqual(len(mail_handler.mails), 1) | |
449 | mail = mail_handler.mails[0][2] | |
450 | ||
451 | pieces = mail.split('Other log records in the same group:') | |
452 | self.assertEqual(len(pieces), 2) | |
453 | body, rest = pieces | |
454 | rest = rest.replace("\r", "") | |
455 | ||
456 | self.assertRegexpMatches(body, 'Message type:\s+ERROR') | |
457 | self.assertRegexpMatches(body, 'Module:\s+'+__name__) | |
458 | self.assertRegexpMatches(body, 'Function:\s+test_group_handler_mail_combo') | |
459 | ||
460 | related = rest.strip().split('\n\n') | |
461 | self.assertEqual(len(related), 2) | |
462 | self.assertRegexpMatches(related[0], 'Message type:\s+WARNING') | |
463 | self.assertRegexpMatches(related[1], 'Message type:\s+DEBUG') | |
464 | ||
    def test_syslog_handler(self):
        # Exercise SyslogHandler over UDP on an INET socket and (where the
        # platform supports it) a UNIX domain socket, both with and
        # without an application name prefix.
        to_test = [
            (socket.AF_INET, ('127.0.0.1', 0)),
        ]
        if hasattr(socket, 'AF_UNIX'):
            to_test.append((socket.AF_UNIX, self.filename))
        for sock_family, address in to_test:
            # closing() because socket objects are not context managers on
            # older Pythons.
            with closing(socket.socket(sock_family, socket.SOCK_DGRAM)) as inc:
                inc.bind(address)
                inc.settimeout(1)
                for app_name in [None, 'Testing']:
                    handler = logbook.SyslogHandler(app_name, inc.getsockname())
                    with self.thread_activation_strategy(handler):
                        self.log.warn('Syslog is weird')
                    try:
                        rv = inc.recvfrom(1024)[0]
                    except socket.error:
                        self.fail('got timeout on socket')
                    # <12> is the syslog priority: user facility (1*8) +
                    # warning severity (4).
                    self.assertEqual(rv, (
                        u('<12>%stestlogger: Syslog is weird\x00') %
                        (app_name and app_name + u(':') or u(''))).encode('utf-8'))
486 | ||
    def test_handler_processors(self):
        # A Processor can inject per-request data into ``record.extra``,
        # which the handler's format string then consumes.
        handler = make_fake_mail_handler(format_string='''\
Subject: Application Error for {record.extra[path]} [{record.extra[method]}]

Message type: {record.level_name}
Location: {record.filename}:{record.lineno}
Module: {record.module}
Function: {record.func_name}
Time: {record.time:%Y-%m-%d %H:%M:%S}
Remote IP: {record.extra[ip]}
Request: {record.extra[path]} [{record.extra[method]}]

Message:

{record.message}
''')

        class Request(object):
            # minimal stand-in for a web framework request object
            remote_addr = '127.0.0.1'
            method = 'GET'
            path = '/index.html'

        def handle_request(request):
            def inject_extra(record):
                record.extra['ip'] = request.remote_addr
                record.extra['method'] = request.method
                record.extra['path'] = request.path

            processor = logbook.Processor(inject_extra)
            with self.thread_activation_strategy(processor):
                handler.push_thread()
                try:
                    try:
                        1 / 0
                    except Exception:
                        self.log.exception('Exception happened during request')
                finally:
                    handler.pop_thread()

        handle_request(Request())
        self.assertEqual(len(handler.mails), 1)
        mail = handler.mails[0][2]
        self.assertIn('Subject: Application Error '
                      'for /index.html [GET]', mail)
        self.assertIn('1 / 0', mail)
532 | ||
533 | def test_regex_matching(self): | |
534 | test_handler = logbook.TestHandler() | |
535 | with self.thread_activation_strategy(test_handler): | |
536 | self.log.warn('Hello World!') | |
537 | self.assert_(test_handler.has_warning(re.compile('^Hello'))) | |
538 | self.assert_(not test_handler.has_warning(re.compile('world$'))) | |
539 | self.assert_(not test_handler.has_warning('^Hello World')) | |
540 | ||
541 | def test_custom_handling_test(self): | |
542 | class MyTestHandler(logbook.TestHandler): | |
543 | def handle(self, record): | |
544 | if record.extra.get('flag') != 'testing': | |
545 | return False | |
546 | return logbook.TestHandler.handle(self, record) | |
547 | ||
548 | class MyLogger(logbook.Logger): | |
549 | def process_record(self, record): | |
550 | logbook.Logger.process_record(self, record) | |
551 | record.extra['flag'] = 'testing' | |
552 | log = MyLogger() | |
553 | handler = MyTestHandler() | |
554 | with capturing_stderr_context() as captured: | |
555 | with self.thread_activation_strategy(handler): | |
556 | log.warn('From my logger') | |
557 | self.log.warn('From another logger') | |
558 | self.assert_(handler.has_warning('From my logger')) | |
559 | self.assertIn('From another logger', captured.getvalue()) | |
560 | ||
561 | def test_custom_handling_tester(self): | |
562 | flag = True | |
563 | ||
564 | class MyTestHandler(logbook.TestHandler): | |
565 | def should_handle(self, record): | |
566 | return flag | |
567 | null_handler = logbook.NullHandler() | |
568 | with self.thread_activation_strategy(null_handler): | |
569 | test_handler = MyTestHandler() | |
570 | with self.thread_activation_strategy(test_handler): | |
571 | self.log.warn('1') | |
572 | flag = False | |
573 | self.log.warn('2') | |
574 | self.assert_(test_handler.has_warning('1')) | |
575 | self.assert_(not test_handler.has_warning('2')) | |
576 | ||
577 | def test_null_handler(self): | |
578 | with capturing_stderr_context() as captured: | |
579 | with self.thread_activation_strategy(logbook.NullHandler()) as null_handler: | |
580 | with self.thread_activation_strategy(logbook.TestHandler(level='ERROR')) as handler: | |
581 | self.log.error('An error') | |
582 | self.log.warn('A warning') | |
583 | self.assertEqual(captured.getvalue(), '') | |
584 | self.assertFalse(handler.has_warning('A warning')) | |
585 | self.assert_(handler.has_error('An error')) | |
586 | ||
587 | def test_test_handler_cache(self): | |
588 | with self.thread_activation_strategy(logbook.TestHandler()) as handler: | |
589 | self.log.warn('First line') | |
590 | self.assertEqual(len(handler.formatted_records),1) | |
591 | cache = handler.formatted_records # store cache, to make sure it is identifiable | |
592 | self.assertEqual(len(handler.formatted_records),1) | |
593 | self.assert_(cache is handler.formatted_records) # Make sure cache is not invalidated without changes to record | |
594 | self.log.warn('Second line invalidates cache') | |
595 | self.assertEqual(len(handler.formatted_records),2) | |
596 | self.assertFalse(cache is handler.formatted_records) # Make sure cache is invalidated when records change | |
597 | ||
    def test_blackhole_setting(self):
        # A non-bubbling NullHandler on top of the stack lets logbook skip
        # record construction entirely; heavy_init must therefore never
        # run while it is active.
        null_handler = logbook.NullHandler()
        heavy_init = logbook.LogRecord.heavy_init
        with self.thread_activation_strategy(null_handler):
            def new_heavy_init(self):
                raise RuntimeError('should not be triggered')
            logbook.LogRecord.heavy_init = new_heavy_init
            try:
                with self.thread_activation_strategy(null_handler):
                    logbook.warn('Awesome')
            finally:
                # always restore the patched classmethod
                logbook.LogRecord.heavy_init = heavy_init

        # with bubble=True the NullHandler no longer blackholes records,
        # so the default stderr handler receives them again
        null_handler.bubble = True
        with capturing_stderr_context() as captured:
            logbook.warning('Not a blockhole')
        self.assertNotEqual(captured.getvalue(), '')
615 | ||
616 | def test_calling_frame(self): | |
617 | handler = logbook.TestHandler() | |
618 | with self.thread_activation_strategy(handler): | |
619 | logbook.warn('test') | |
620 | self.assertEqual(handler.records[0].calling_frame, sys._getframe()) | |
621 | ||
    def test_nested_setups(self):
        # A NestedSetup pushes/pops several handlers as one unit.  Stack
        # here: NullHandler (blocks stderr fallthrough), TestHandler
        # (records warnings and up), bubbling MailHandler (mails errors).
        with capturing_stderr_context() as captured:
            logger = logbook.Logger('App')
            test_handler = logbook.TestHandler(level='WARNING')
            mail_handler = make_fake_mail_handler(bubble=True)

            handlers = logbook.NestedSetup([
                logbook.NullHandler(),
                test_handler,
                mail_handler
            ])

            with self.thread_activation_strategy(handlers):
                logger.warn('This is a warning')
                logger.error('This is also a mail')
                try:
                    1 / 0
                except Exception:
                    logger.exception()
            # setup popped: output falls back to the default stderr handler
            logger.warn('And here we go straight back to stderr')

            self.assert_(test_handler.has_warning('This is a warning'))
            self.assert_(test_handler.has_error('This is also a mail'))
            self.assertEqual(len(mail_handler.mails), 2)
            self.assertIn('This is also a mail', mail_handler.mails[0][2])
            self.assertIn('1 / 0',mail_handler.mails[1][2])
            self.assertIn('And here we go straight back to stderr',
                          captured.getvalue())

        # the same NestedSetup can be re-activated thread-bound...
        with self.thread_activation_strategy(handlers):
            logger.warn('threadbound warning')

        # ...or application-bound
        handlers.push_application()
        try:
            logger.warn('applicationbound warning')
        finally:
            handlers.pop_application()
659 | ||
660 | def test_dispatcher(self): | |
661 | logger = logbook.Logger('App') | |
662 | with self.thread_activation_strategy(logbook.TestHandler()) as test_handler: | |
663 | logger.warn('Logbook is too awesome for stdlib') | |
664 | self.assertEqual(test_handler.records[0].dispatcher, logger) | |
665 | ||
666 | def test_filtering(self): | |
667 | logger1 = logbook.Logger('Logger1') | |
668 | logger2 = logbook.Logger('Logger2') | |
669 | handler = logbook.TestHandler() | |
670 | outer_handler = logbook.TestHandler() | |
671 | ||
672 | def only_1(record, handler): | |
673 | return record.dispatcher is logger1 | |
674 | handler.filter = only_1 | |
675 | ||
676 | with self.thread_activation_strategy(outer_handler): | |
677 | with self.thread_activation_strategy(handler): | |
678 | logger1.warn('foo') | |
679 | logger2.warn('bar') | |
680 | ||
681 | self.assert_(handler.has_warning('foo', channel='Logger1')) | |
682 | self.assertFalse(handler.has_warning('bar', channel='Logger2')) | |
683 | self.assertFalse(outer_handler.has_warning('foo', channel='Logger1')) | |
684 | self.assert_(outer_handler.has_warning('bar', channel='Logger2')) | |
685 | ||
686 | def test_null_handler_filtering(self): | |
687 | logger1 = logbook.Logger("1") | |
688 | logger2 = logbook.Logger("2") | |
689 | outer = logbook.TestHandler() | |
690 | inner = logbook.NullHandler() | |
691 | ||
692 | inner.filter = lambda record, handler: record.dispatcher is logger1 | |
693 | ||
694 | with self.thread_activation_strategy(outer): | |
695 | with self.thread_activation_strategy(inner): | |
696 | logger1.warn("1") | |
697 | logger2.warn("2") | |
698 | ||
699 | self.assertTrue(outer.has_warning("2", channel="2")) | |
700 | self.assertFalse(outer.has_warning("1", channel="1")) | |
701 | ||
702 | def test_different_context_pushing(self): | |
703 | h1 = logbook.TestHandler(level=logbook.DEBUG) | |
704 | h2 = logbook.TestHandler(level=logbook.INFO) | |
705 | h3 = logbook.TestHandler(level=logbook.WARNING) | |
706 | logger = logbook.Logger('Testing') | |
707 | ||
708 | with self.thread_activation_strategy(h1): | |
709 | with self.thread_activation_strategy(h2): | |
710 | with self.thread_activation_strategy(h3): | |
711 | logger.warn('Wuuu') | |
712 | logger.info('still awesome') | |
713 | logger.debug('puzzled') | |
714 | ||
715 | self.assert_(h1.has_debug('puzzled')) | |
716 | self.assert_(h2.has_info('still awesome')) | |
717 | self.assert_(h3.has_warning('Wuuu')) | |
718 | for handler in h1, h2, h3: | |
719 | self.assertEquals(len(handler.records), 1) | |
720 | ||
721 | def test_global_functions(self): | |
722 | with self.thread_activation_strategy(logbook.TestHandler()) as handler: | |
723 | logbook.debug('a debug message') | |
724 | logbook.info('an info message') | |
725 | logbook.warn('warning part 1') | |
726 | logbook.warning('warning part 2') | |
727 | logbook.notice('notice') | |
728 | logbook.error('an error') | |
729 | logbook.critical('pretty critical') | |
730 | logbook.log(logbook.CRITICAL, 'critical too') | |
731 | ||
732 | self.assert_(handler.has_debug('a debug message')) | |
733 | self.assert_(handler.has_info('an info message')) | |
734 | self.assert_(handler.has_warning('warning part 1')) | |
735 | self.assert_(handler.has_warning('warning part 2')) | |
736 | self.assert_(handler.has_notice('notice')) | |
737 | self.assert_(handler.has_error('an error')) | |
738 | self.assert_(handler.has_critical('pretty critical')) | |
739 | self.assert_(handler.has_critical('critical too')) | |
740 | self.assertEqual(handler.records[0].channel, 'Generic') | |
741 | self.assertIsNone(handler.records[0].dispatcher) | |
742 | ||
    def test_fingerscrossed(self):
        # FingersCrossedHandler buffers records and only flushes them to
        # the wrapped handler once a record at or above the action level
        # (WARNING here) arrives.
        handler = logbook.FingersCrossedHandler(logbook.default_handler,
                                                logbook.WARNING)

        # if no warning occurs, the infos are not logged
        with self.thread_activation_strategy(handler):
            with capturing_stderr_context() as captured:
                self.log.info('some info')
            self.assertEqual(captured.getvalue(), '')
            self.assert_(not handler.triggered)

        # but if it does, all log messages are output
        with self.thread_activation_strategy(handler):
            with capturing_stderr_context() as captured:
                self.log.info('some info')
                self.log.warning('something happened')
                self.log.info('something else happened')
            logs = captured.getvalue()
            self.assert_('some info' in logs)
            self.assert_('something happened' in logs)
            self.assert_('something else happened' in logs)
            self.assert_(handler.triggered)
765 | ||
    def test_fingerscrossed_factory(self):
        # Instead of a handler instance, FingersCrossedHandler also accepts
        # a factory which is invoked once, at trigger time, to build the
        # real handler.
        handlers = []

        def handler_factory(record, fch):
            handler = logbook.TestHandler()
            handlers.append(handler)
            return handler

        def make_fch():
            return logbook.FingersCrossedHandler(handler_factory,
                                                 logbook.WARNING)

        fch = make_fch()
        with self.thread_activation_strategy(fch):
            self.log.info('some info')
            # below the action level: factory not called yet
            self.assertEqual(len(handlers), 0)
            self.log.warning('a warning')
            # triggered: exactly one handler created
            self.assertEqual(len(handlers), 1)
            self.log.error('an error')
            # further records reuse the same handler
            self.assertEqual(len(handlers), 1)
            self.assert_(handlers[0].has_infos)
            self.assert_(handlers[0].has_warnings)
            self.assert_(handlers[0].has_errors)
            self.assert_(not handlers[0].has_notices)
            self.assert_(not handlers[0].has_criticals)
            self.assert_(not handlers[0].has_debugs)

        # a fresh FingersCrossedHandler triggers the factory again
        fch = make_fch()
        with self.thread_activation_strategy(fch):
            self.log.info('some info')
            self.log.warning('a warning')
            self.assertEqual(len(handlers), 2)
798 | ||
799 | def test_fingerscrossed_buffer_size(self): | |
800 | logger = logbook.Logger('Test') | |
801 | test_handler = logbook.TestHandler() | |
802 | handler = logbook.FingersCrossedHandler(test_handler, buffer_size=3) | |
803 | ||
804 | with self.thread_activation_strategy(handler): | |
805 | logger.info('Never gonna give you up') | |
806 | logger.warn('Aha!') | |
807 | logger.warn('Moar!') | |
808 | logger.error('Pure hate!') | |
809 | ||
810 | self.assertEqual(test_handler.formatted_records, [ | |
811 | '[WARNING] Test: Aha!', | |
812 | '[WARNING] Test: Moar!', | |
813 | '[ERROR] Test: Pure hate!' | |
814 | ]) | |
2 | import pytest | |
815 | 3 | |
816 | 4 | |
class HandlerTestCase_Regular(_HandlerTestCase):
    # Runs the shared handler tests with handlers activated via explicit
    # push/pop calls.
    def setUp(self):
        super(HandlerTestCase_Regular, self).setUp()
        self.thread_activation_strategy = activate_via_push_pop
5 | def test_global_functions(activation_strategy): | |
6 | with activation_strategy(logbook.TestHandler()) as handler: | |
7 | logbook.debug('a debug message') | |
8 | logbook.info('an info message') | |
9 | logbook.warn('warning part 1') | |
10 | logbook.warning('warning part 2') | |
11 | logbook.notice('notice') | |
12 | logbook.error('an error') | |
13 | logbook.critical('pretty critical') | |
14 | logbook.log(logbook.CRITICAL, 'critical too') | |
821 | 15 | |
class HandlerTestCase_Contextmgr(_HandlerTestCase):
    # Runs the shared handler tests with handlers activated via the
    # ``with`` statement.
    def setUp(self):
        super(HandlerTestCase_Contextmgr, self).setUp()
        self.thread_activation_strategy = activate_via_with_statement
826 | ||
class AttributeTestCase(LogbookTestCase):

    def test_level_properties(self):
        # ``level`` and ``level_name`` are two views of the same setting
        # and must stay in sync whichever one is assigned.
        self.assertEqual(self.log.level, logbook.NOTSET)
        self.assertEqual(self.log.level_name, 'NOTSET')
        self.log.level_name = 'WARNING'
        self.assertEqual(self.log.level, logbook.WARNING)
        self.log.level = logbook.ERROR
        self.assertEqual(self.log.level_name, 'ERROR')

    def test_reflected_properties(self):
        # A logger without an explicit level reflects its group's level;
        # an explicitly set logger level wins over the group, and removing
        # the logger from the group detaches it again.
        group = logbook.LoggerGroup()
        group.add_logger(self.log)
        self.assertEqual(self.log.group, group)
        group.level = logbook.ERROR
        self.assertEqual(self.log.level, logbook.ERROR)
        self.assertEqual(self.log.level_name, 'ERROR')
        group.level = logbook.WARNING
        self.assertEqual(self.log.level, logbook.WARNING)
        self.assertEqual(self.log.level_name, 'WARNING')
        self.log.level = logbook.CRITICAL
        group.level = logbook.DEBUG
        self.assertEqual(self.log.level, logbook.CRITICAL)
        self.assertEqual(self.log.level_name, 'CRITICAL')
        group.remove_logger(self.log)
        self.assertEqual(self.log.group, None)
853 | ||
class LevelLookupTest(LogbookTestCase):
    """Unknown numeric or symbolic log levels must raise LookupError."""

    def test_level_lookup_failures(self):
        for lookup, bogus_level in [(logbook.get_level_name, 37),
                                    (logbook.lookup_level, 'FOO')]:
            with self.assertRaises(LookupError):
                lookup(bogus_level)
860 | ||
class FlagsTestCase(LogbookTestCase):
    def test_error_flag(self):
        # logbook.Flags(errors=...) controls what happens when a message
        # fails to format ('{42}' with a positional arg is invalid); the
        # innermost flag wins.
        with capturing_stderr_context() as captured:
            with logbook.Flags(errors='print'):
                with logbook.Flags(errors='silent'):
                    self.log.warn('Foo {42}', 'aha')
            self.assertEqual(captured.getvalue(), '')

            with logbook.Flags(errors='silent'):
                with logbook.Flags(errors='print'):
                    self.log.warn('Foo {42}', 'aha')
            self.assertNotEqual(captured.getvalue(), '')

        with self.assertRaises(Exception) as caught:
            with logbook.Flags(errors='raise'):
                self.log.warn('Foo {42}', 'aha')
        self.assertIn('Could not format message with provided '
                      'arguments', str(caught.exception))

    def test_disable_introspection(self):
        # with introspection off, records skip frame extraction entirely
        with logbook.Flags(introspection=False):
            with logbook.TestHandler() as h:
                self.log.warn('Testing')
                self.assertIsNone(h.records[0].frame)
                self.assertIsNone(h.records[0].calling_frame)
                self.assertIsNone(h.records[0].module)
887 | ||
class LoggerGroupTestCase(LogbookTestCase):
    """Group-level settings (level, processor) apply to member loggers."""

    def test_groups(self):
        def inject_extra(record):
            record.extra['foo'] = 'bar'

        group = logbook.LoggerGroup(processor=inject_extra)
        group.level = logbook.ERROR
        group.add_logger(self.log)
        with logbook.TestHandler() as handler:
            self.log.warn('A warning')
            self.log.error('An error')
        # the ERROR record passes the group level and carries the
        # processor-injected extra; the WARNING record is filtered out
        self.assertTrue(handler.has_error('An error'))
        self.assertFalse(handler.has_warning('A warning'))
        self.assertEqual(handler.records[0].extra['foo'], 'bar')
901 | ||
class DefaultConfigurationTestCase(LogbookTestCase):
    """Out of the box, warnings end up on stderr."""

    def test_default_handlers(self):
        with capturing_stderr_context() as stream:
            self.log.warn('Aha!')
            stderr_output = stream.getvalue()
        self.assertIn('WARNING: testlogger: Aha!', stderr_output)
909 | ||
class LoggingCompatTestCase(LogbookTestCase):
    """Round-trips between the stdlib ``logging`` module and logbook."""

    def test_basic_compat_with_level_setting(self):
        self._test_basic_compat(True)

    def test_basic_compat_without_level_setting(self):
        self._test_basic_compat(False)

    def _test_basic_compat(self, set_root_logger_level):
        """Redirect stdlib logging into logbook.

        With ``set_root_logger_level`` true the root logger is opened up so
        even DEBUG records cross over; otherwise the stdlib default
        (WARNING) applies and only the warning arrives first.
        """
        import logging
        from logbook.compat import redirected_logging

        # mimic the default logging setting
        self.addCleanup(logging.root.setLevel, logging.root.level)
        logging.root.setLevel(logging.WARNING)

        name = 'test_logbook-%d' % randrange(1 << 32)
        logger = logging.getLogger(name)

        with logbook.TestHandler(bubble=True) as handler:
            with capturing_stderr_context() as captured:
                with redirected_logging(set_root_logger_level):
                    logger.debug('This is from the old system')
                    logger.info('This is from the old system')
                    logger.warn('This is from the old system')
                    logger.error('This is from the old system')
                    logger.critical('This is from the old system')
            self.assertIn(('WARNING: %s: This is from the old system' % name),
                          captured.getvalue())
        if set_root_logger_level:
            # assertEquals is a deprecated alias; use assertEqual
            self.assertEqual(handler.records[0].level, logbook.DEBUG)
        else:
            self.assertEqual(handler.records[0].level, logbook.WARNING)

    def test_redirect_logbook(self):
        """LoggingHandler forwards logbook records into stdlib logging."""
        import logging
        from logbook.compat import LoggingHandler
        out = StringIO()
        logger = logging.getLogger()
        old_handlers = logger.handlers[:]
        handler = logging.StreamHandler(out)
        handler.setFormatter(logging.Formatter(
            '%(name)s:%(levelname)s:%(message)s'))
        logger.handlers[:] = [handler]
        try:
            # use the imported name directly (was re-qualified via
            # logbook.compat, leaving the import unused)
            with LoggingHandler() as logging_handler:
                self.log.warn("This goes to logging")
                pieces = out.getvalue().strip().split(':')
                self.assertEqual(pieces, [
                    'testlogger',
                    'WARNING',
                    'This goes to logging'
                ])
        finally:
            # always restore the root logger's original handlers
            logger.handlers[:] = old_handlers
964 | ||
class WarningsCompatTestCase(LogbookTestCase):
    """The stdlib ``warnings`` module can be redirected into logbook."""

    def test_warning_redirections(self):
        from warnings import warn
        from logbook.compat import redirected_warnings

        with logbook.TestHandler() as handler:
            bridge = redirected_warnings()
            bridge.start()
            try:
                warn(RuntimeWarning('Testing'))
            finally:
                bridge.end()

            self.assertEqual(len(handler.records), 1)
            self.assertEqual('[WARNING] RuntimeWarning: Testing',
                             handler.formatted_records[0])
            # the record must point at this file, not the compat shim
            self.assertIn(__file_without_pyc__, handler.records[0].filename)
982 | ||
983 | class MoreTestCase(LogbookTestCase): | |
984 | ||
    @contextmanager
    def _get_temporary_file_context(self):
        # Yield a temporary file *path* (no file is created here) and best-
        # effort remove it on exit if the test created one.
        # NOTE(review): tempfile.mktemp is race-prone; fine for tests, but
        # confirm no caller relies on the path staying nonexistent before
        # considering mkstemp.
        fn = tempfile.mktemp()
        try:
            yield fn
        finally:
            try:
                os.remove(fn)
            except OSError:
                pass
995 | ||
    @require_module('jinja2')
    def test_jinja_formatter(self):
        # JinjaFormatter renders each record through a Jinja2 template.
        from logbook.more import JinjaFormatter
        fmter = JinjaFormatter('{{ record.channel }}/{{ record.level_name }}')
        handler = logbook.TestHandler()
        handler.formatter = fmter
        with handler:
            self.log.info('info')
        self.assertIn('testlogger/INFO', handler.formatted_records)
1005 | ||
    @missing('jinja2')
    def test_missing_jinja2(self):
        # ``missing`` (test helper decorator) presumably simulates jinja2
        # being uninstalled — confirm against the test utils.
        from logbook.more import JinjaFormatter
        # check the RuntimeError is raised
        with self.assertRaises(RuntimeError):
            JinjaFormatter('dummy')
1012 | ||
1013 | def test_colorizing_support(self): | |
1014 | from logbook.more import ColorizedStderrHandler | |
1015 | ||
1016 | class TestColorizingHandler(ColorizedStderrHandler): | |
1017 | def should_colorize(self, record): | |
1018 | return True | |
1019 | stream = StringIO() | |
1020 | with TestColorizingHandler(format_string='{record.message}') as handler: | |
1021 | self.log.error('An error') | |
1022 | self.log.warn('A warning') | |
1023 | self.log.debug('A debug message') | |
1024 | lines = handler.stream.getvalue().rstrip('\n').splitlines() | |
1025 |