Import python-werkzeug_0.11.10+dfsg1.orig.tar.gz
Ondřej Nový
7 years ago
0 | 0 | Werkzeug Changelog |
1 | 1 | ================== |
2 | ||
3 | Version 0.11.10 | |
4 | --------------- | |
5 | ||
6 | Released on May 24th 2016. | |
7 | ||
8 | - Fixed a bug that occurs when running on Python 2.6 and using a broken locale. | |
9 | See pull request #912. | |
10 | - Fixed a crash when running the debugger on Google App Engine. See issue #925. | |
11 | - Fixed an issue with multipart parsing that could cause memory exhaustion. | |
2 | 12 | |
3 | 13 | Version 0.11.9 |
4 | 14 | -------------- |
38 | 38 | You probably want to set up a `virtualenv |
39 | 39 | <http://virtualenv.readthedocs.org/en/latest/index.html>`_. |
40 | 40 | |
41 | Werkzeug must be installed for all tests to pass:: | |
42 | ||
43 | pip install -e . | |
44 | ||
41 | 45 | The minimal requirement for running the testsuite is ``py.test``. You can |
42 | 46 | install it with:: |
43 | 47 |
153 | 153 | class StreamMPP(formparser.MultiPartParser): |
154 | 154 | |
155 | 155 | def parse(self, file, boundary, content_length): |
156 | i = iter(self.parse_lines(file, boundary, content_length)) | |
156 | i = iter(self.parse_lines(file, boundary, content_length, | |
157 | cap_at_buffer=False)) | |
157 | 158 | one = next(i) |
158 | 159 | two = next(i) |
159 | 160 | return self.cls(()), {'one': one, 'two': two} |
380 | 380 | buffer_size=4)) |
381 | 381 | assert rv == [b'abcdef', b'ghijkl', b'mnopqrstuvwxyz', b'ABCDEFGHIJK'] |
382 | 382 | |
383 | data = b'abcdefXghijklXmnopqrstuvwxyzXABCDEFGHIJK' | |
384 | test_stream = BytesIO(data) | |
385 | rv = list(wsgi.make_chunk_iter(test_stream, 'X', limit=len(data), | |
386 | buffer_size=4, cap_at_buffer=True)) | |
387 | assert rv == [b'abcd', b'ef', b'ghij', b'kl', b'mnop', b'qrst', b'uvwx', | |
388 | b'yz', b'ABCD', b'EFGH', b'IJK'] | |
389 | ||
383 | 390 | |
def test_lines_longer_buffer_size():
    """A logical line is yielded intact even when it is longer than the
    read buffer (no capping requested, so lines are never split)."""
    data = '1234567890\n1234567890\n'
    stream = NativeStringIO(data)
    observed = list(wsgi.make_line_iter(stream, limit=len(data),
                                        buffer_size=4))
    assert observed == ['1234567890\n', '1234567890\n']
397 | ||
398 | ||
def test_lines_longer_buffer_size_cap():
    """With ``cap_at_buffer=True`` long lines are split instead of being
    yielded whole, across a range of buffer sizes.

    Bug fix: the loop variable ``bufsize`` was previously ignored and
    ``buffer_size`` was hard-coded to ``4``, so the loop ran 14 identical
    iterations and only a single buffer size was actually exercised.
    """
    data = '1234567890\n1234567890\n'
    for bufsize in range(1, 15):
        lines = list(wsgi.make_line_iter(NativeStringIO(data),
                                         limit=len(data),
                                         buffer_size=bufsize,
                                         cap_at_buffer=True))
        # No data may be lost or reordered by the splitting.
        assert ''.join(lines) == data
        # Per the make_line_iter docs, capping may overshoot the buffer
        # size by up to a factor of two (carried remainder + next item),
        # so 2 * bufsize is the correct upper bound — not bufsize itself.
        assert all(len(line) < 2 * bufsize for line in lines)
        if bufsize == 4:
            # Preserve the original exact expectation for the one size
            # the old test actually checked.
            assert lines == ['1234', '5678', '90\n',
                             '1234', '5678', '90\n']
19 | 19 | from werkzeug._compat import iteritems |
20 | 20 | |
21 | 21 | # the version. Usually set automatically by a script. |
22 | __version__ = '0.11.9' | |
22 | __version__ = '0.11.10' | |
23 | 23 | |
24 | 24 | |
25 | 25 | # This import magic raises concerns quite often which is why the implementation |
64 | 64 | |
65 | 65 | # On OS X we can use the computer's serial number assuming that |
66 | 66 | # ioreg exists and can spit out that information. |
67 | from subprocess import Popen, PIPE | |
68 | 67 | try: |
68 | # Also catch import errors: subprocess may not be available, e.g. | |
69 | # Google App Engine | |
70 | # See https://github.com/pallets/werkzeug/issues/925 | |
71 | from subprocess import Popen, PIPE | |
69 | 72 | dump = Popen(['ioreg', '-c', 'IOPlatformExpertDevice', '-d', '2'], |
70 | 73 | stdout=PIPE).communicate()[0] |
71 | 74 | match = re.search(b'"serial-number" = <([^>]+)', dump) |
72 | 75 | if match is not None: |
73 | 76 | return match.group(1) |
74 | except OSError: | |
77 | except (OSError, ImportError): | |
75 | 78 | pass |
76 | 79 | |
77 | 80 | # On Windows we can use winreg to get the machine guid |
58 | 58 | if not _warned_about_filesystem_encoding: |
59 | 59 | warnings.warn( |
60 | 60 | 'Detected a misconfigured UNIX filesystem: Will use UTF-8 as ' |
61 | 'filesystem encoding instead of {!r}'.format(rv), | |
61 | 'filesystem encoding instead of {0!r}'.format(rv), | |
62 | 62 | BrokenFilesystemWarning) |
63 | 63 | _warned_about_filesystem_encoding = True |
64 | 64 | return 'utf-8' |
371 | 371 | # the assert is skipped. |
372 | 372 | self.fail('Boundary longer than buffer size') |
373 | 373 | |
374 | def parse_lines(self, file, boundary, content_length): | |
374 | def parse_lines(self, file, boundary, content_length, cap_at_buffer=True): | |
375 | 375 | """Generate parts of |
376 | 376 | ``('begin_form', (headers, name))`` |
377 | 377 | ``('begin_file', (headers, name, filename))`` |
386 | 386 | last_part = next_part + b'--' |
387 | 387 | |
388 | 388 | iterator = chain(make_line_iter(file, limit=content_length, |
389 | buffer_size=self.buffer_size), | |
389 | buffer_size=self.buffer_size, | |
390 | cap_at_buffer=cap_at_buffer), | |
390 | 391 | _empty_string_iter) |
391 | 392 | |
392 | 393 | terminator = self._find_terminator(iterator) |
783 | 783 | yield item |
784 | 784 | |
785 | 785 | |
786 | def make_line_iter(stream, limit=None, buffer_size=10 * 1024): | |
786 | def make_line_iter(stream, limit=None, buffer_size=10 * 1024, | |
787 | cap_at_buffer=False): | |
787 | 788 | """Safely iterates line-based over an input stream. If the input stream |
788 | 789 | is not a :class:`LimitedStream` the `limit` parameter is mandatory. |
789 | 790 | |
807 | 808 | content length. Not necessary if the `stream` |
808 | 809 | is a :class:`LimitedStream`. |
809 | 810 | :param buffer_size: The optional buffer size. |
811 | :param cap_at_buffer: if this is set chunks are split if they are longer | |
812 | than the buffer size. Internally this is implemented | |
813 | that the buffer size might be exhausted by a factor | |
814 | of two however. | |
815 | .. versionadded:: 0.11.10 | |
816 | added support for the `cap_at_buffer` parameter. | |
810 | 817 | """ |
811 | 818 | _iter = _make_chunk_iter(stream, limit, buffer_size) |
812 | 819 | |
830 | 837 | if not new_data: |
831 | 838 | break |
832 | 839 | new_buf = [] |
840 | buf_size = 0 | |
833 | 841 | for item in chain(buffer, new_data.splitlines(True)): |
834 | 842 | new_buf.append(item) |
843 | buf_size += len(item) | |
835 | 844 | if item and item[-1:] in crlf: |
836 | 845 | yield _join(new_buf) |
837 | 846 | new_buf = [] |
847 | elif cap_at_buffer and buf_size >= buffer_size: | |
848 | rv = _join(new_buf) | |
849 | while len(rv) >= buffer_size: | |
850 | yield rv[:buffer_size] | |
851 | rv = rv[buffer_size:] | |
852 | new_buf = [rv] | |
838 | 853 | buffer = new_buf |
839 | 854 | if buffer: |
840 | 855 | yield _join(buffer) |
853 | 868 | yield previous |
854 | 869 | |
855 | 870 | |
856 | def make_chunk_iter(stream, separator, limit=None, buffer_size=10 * 1024): | |
871 | def make_chunk_iter(stream, separator, limit=None, buffer_size=10 * 1024, | |
872 | cap_at_buffer=False): | |
857 | 873 | """Works like :func:`make_line_iter` but accepts a separator |
858 | 874 | which divides chunks. If you want newline based processing |
859 | 875 | you should use :func:`make_line_iter` instead as it |
863 | 879 | |
864 | 880 | .. versionadded:: 0.9 |
865 | 881 | added support for iterators as input stream. |
882 | ||
883 | .. versionadded:: 0.11.10 | |
884 | added support for the `cap_at_buffer` parameter. | |
866 | 885 | |
867 | 886 | :param stream: the stream or iterate to iterate over. |
868 | 887 | :param separator: the separator that divides chunks. |
870 | 889 | content length. Not necessary if the `stream` |
871 | 890 | is otherwise already limited). |
872 | 891 | :param buffer_size: The optional buffer size. |
892 | :param cap_at_buffer: if this is set chunks are split if they are longer | |
893 | than the buffer size. Internally this is implemented | |
894 | that the buffer size might be exhausted by a factor | |
895 | of two however. | |
873 | 896 | """ |
874 | 897 | _iter = _make_chunk_iter(stream, limit, buffer_size) |
875 | 898 | |
894 | 917 | break |
895 | 918 | chunks = _split(new_data) |
896 | 919 | new_buf = [] |
920 | buf_size = 0 | |
897 | 921 | for item in chain(buffer, chunks): |
898 | 922 | if item == separator: |
899 | 923 | yield _join(new_buf) |
900 | 924 | new_buf = [] |
925 | buf_size = 0 | |
901 | 926 | else: |
927 | buf_size += len(item) | |
902 | 928 | new_buf.append(item) |
929 | ||
930 | if cap_at_buffer and buf_size >= buffer_size: | |
931 | rv = _join(new_buf) | |
932 | while len(rv) >= buffer_size: | |
933 | yield rv[:buffer_size] | |
934 | rv = rv[buffer_size:] | |
935 | new_buf = [rv] | |
936 | buf_size = len(rv) | |
937 | ||
903 | 938 | buffer = new_buf |
904 | 939 | if buffer: |
905 | 940 | yield _join(buffer) |