python-werkzeug / commit 7e27478
Import python-werkzeug_0.11.10+dfsg1.orig.tar.gz (Ondřej Nový, 7 years ago)
9 changed files with 78 additions and 9 deletions.
CHANGES

 Werkzeug Changelog
 ==================
+
+Version 0.11.10
+---------------
+
+Released on May 24th 2016.
+
+- Fixed a bug that occurs when running on Python 2.6 and using a broken locale.
+  See pull request #912.
+- Fixed a crash when running the debugger on Google App Engine. See issue #925.
+- Fixed an issue with multipart parsing that could cause memory exhaustion.
 
 Version 0.11.9
 --------------
CONTRIBUTING.rst

 You probably want to set up a `virtualenv
 <http://virtualenv.readthedocs.org/en/latest/index.html>`_.
 
+Werkzeug must be installed for all tests to pass::
+
+    pip install -e .
+
 The minimal requirement for running the testsuite is ``py.test``. You can
 install it with::
tests/test_formparser.py

 class StreamMPP(formparser.MultiPartParser):
 
     def parse(self, file, boundary, content_length):
-        i = iter(self.parse_lines(file, boundary, content_length))
+        i = iter(self.parse_lines(file, boundary, content_length,
+                                  cap_at_buffer=False))
         one = next(i)
         two = next(i)
         return self.cls(()), {'one': one, 'two': two}
tests/test_wsgi.py

                                    buffer_size=4))
     assert rv == [b'abcdef', b'ghijkl', b'mnopqrstuvwxyz', b'ABCDEFGHIJK']
 
+    data = b'abcdefXghijklXmnopqrstuvwxyzXABCDEFGHIJK'
+    test_stream = BytesIO(data)
+    rv = list(wsgi.make_chunk_iter(test_stream, 'X', limit=len(data),
+                                   buffer_size=4, cap_at_buffer=True))
+    assert rv == [b'abcd', b'ef', b'ghij', b'kl', b'mnop', b'qrst', b'uvwx',
+                  b'yz', b'ABCD', b'EFGH', b'IJK']
+
 
 def test_lines_longer_buffer_size():
     data = '1234567890\n1234567890\n'
     for bufsize in range(1, 15):
         lines = list(wsgi.make_line_iter(NativeStringIO(data), limit=len(data),
                                          buffer_size=4))
         assert lines == ['1234567890\n', '1234567890\n']
+
+
+def test_lines_longer_buffer_size_cap():
+    data = '1234567890\n1234567890\n'
+    for bufsize in range(1, 15):
+        lines = list(wsgi.make_line_iter(NativeStringIO(data), limit=len(data),
+                                         buffer_size=4, cap_at_buffer=True))
+        assert lines == ['1234', '5678', '90\n', '1234', '5678', '90\n']
werkzeug/__init__.py

 from werkzeug._compat import iteritems
 
 # the version. Usually set automatically by a script.
-__version__ = '0.11.9'
+__version__ = '0.11.10'
 
 
 # This import magic raises concerns quite often which is why the implementation
werkzeug/debug/__init__.py

 
     # On OS X we can use the computer's serial number assuming that
     # ioreg exists and can spit out that information.
-    from subprocess import Popen, PIPE
     try:
+        # Also catch import errors: subprocess may not be available, e.g.
+        # Google App Engine
+        # See https://github.com/pallets/werkzeug/issues/925
+        from subprocess import Popen, PIPE
         dump = Popen(['ioreg', '-c', 'IOPlatformExpertDevice', '-d', '2'],
                      stdout=PIPE).communicate()[0]
         match = re.search(b'"serial-number" = <([^>]+)', dump)
         if match is not None:
             return match.group(1)
-    except OSError:
+    except (OSError, ImportError):
         pass
 
     # On Windows we can use winreg to get the machine guid
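
An aside on the change above: the import moved inside the ``try`` block because on sandboxed platforms such as Google App Engine the ``from subprocess import ...`` statement is itself what fails, and ``ImportError`` is a sibling of ``OSError`` rather than a subclass, so the old ``except OSError:`` clause never caught it. A minimal sketch of the idiom, using a hypothetical module name as a stand-in for whatever the platform refuses to load::

    # ImportError does not inherit from OSError, so `except OSError:`
    # alone lets a failed import crash the caller.
    assert not issubclass(ImportError, OSError)

    try:
        # Hypothetical stand-in for a module a sandbox blocks.
        import module_blocked_by_sandbox  # noqa: F401
    except (OSError, ImportError):
        # Fall through to the next strategy, as get_machine_id() falls
        # through to its next machine-id source.
        pass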
werkzeug/filesystem.py

     if not _warned_about_filesystem_encoding:
         warnings.warn(
             'Detected a misconfigured UNIX filesystem: Will use UTF-8 as '
-            'filesystem encoding instead of {!r}'.format(rv),
+            'filesystem encoding instead of {0!r}'.format(rv),
             BrokenFilesystemWarning)
         _warned_about_filesystem_encoding = True
     return 'utf-8'
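
The one-character change above (``{!r}`` to ``{0!r}``) is the Python 2.6 fix referenced in the changelog (pull request #912): auto-numbered format fields were only introduced in Python 2.7 and 3.1, so on 2.6 the implicit form raises ``ValueError`` the moment a broken locale forces this warning to be built. A quick illustration::

    # On Python 2.6:
    #     '{!r}'.format('ascii')
    # raises: ValueError: zero length field name in format
    #
    # The explicitly indexed form works on 2.6 and everything newer:
    print('will use utf-8 instead of {0!r}'.format('ascii'))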
werkzeug/formparser.py

             # the assert is skipped.
             self.fail('Boundary longer than buffer size')
 
-    def parse_lines(self, file, boundary, content_length):
+    def parse_lines(self, file, boundary, content_length, cap_at_buffer=True):
         """Generate parts of
         ``('begin_form', (headers, name))``
         ``('begin_file', (headers, name, filename))``
 
 ...
 
         last_part = next_part + b'--'
 
         iterator = chain(make_line_iter(file, limit=content_length,
-                                        buffer_size=self.buffer_size),
+                                        buffer_size=self.buffer_size,
+                                        cap_at_buffer=cap_at_buffer),
                          _empty_string_iter)
 
         terminator = self._find_terminator(iterator)
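
The flipped default here (``cap_at_buffer=True``) is the substance of the changelog's multipart memory-exhaustion fix: a part body containing no newline would otherwise make ``make_line_iter`` buffer the entire thing before yielding it. A small sketch of the difference, with sizes chosen only for illustration::

    from io import BytesIO
    from werkzeug.wsgi import make_line_iter

    blob = b'x' * 50000  # one 'line' without a single newline

    # Uncapped, the whole blob is accumulated and returned as one chunk,
    # so the sender controls how much memory the parser holds at once.
    chunks = list(make_line_iter(BytesIO(blob), limit=len(blob),
                                 buffer_size=1024))
    assert len(chunks) == 1 and len(chunks[0]) == 50000

    # Capped, the same input arrives in pieces no larger than the buffer.
    chunks = list(make_line_iter(BytesIO(blob), limit=len(blob),
                                 buffer_size=1024, cap_at_buffer=True))
    assert max(len(c) for c in chunks) <= 1024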
werkzeug/wsgi.py

         yield item
 
 
-def make_line_iter(stream, limit=None, buffer_size=10 * 1024):
+def make_line_iter(stream, limit=None, buffer_size=10 * 1024,
+                   cap_at_buffer=False):
     """Safely iterates line by line over an input stream. If the input stream
     is not a :class:`LimitedStream` the `limit` parameter is mandatory.
 
 ...
 
                   content length. Not necessary if the `stream`
                   is a :class:`LimitedStream`.
     :param buffer_size: The optional buffer size.
+    :param cap_at_buffer: if this is set, chunks are split if they are longer
+                          than the buffer size. Internally the buffer may
+                          still be exceeded by up to a factor of two, however.
+
+    .. versionadded:: 0.11.10
+       added support for the `cap_at_buffer` parameter.
     """
     _iter = _make_chunk_iter(stream, limit, buffer_size)
 
 ...
 
             if not new_data:
                 break
             new_buf = []
+            buf_size = 0
             for item in chain(buffer, new_data.splitlines(True)):
                 new_buf.append(item)
+                buf_size += len(item)
                 if item and item[-1:] in crlf:
                     yield _join(new_buf)
                     new_buf = []
+                elif cap_at_buffer and buf_size >= buffer_size:
+                    rv = _join(new_buf)
+                    while len(rv) >= buffer_size:
+                        yield rv[:buffer_size]
+                        rv = rv[buffer_size:]
+                    new_buf = [rv]
             buffer = new_buf
         if buffer:
             yield _join(buffer)
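
For direct callers of ``make_line_iter`` the new flag trades "one logical line per item" for "no item much larger than ``buffer_size``"; only the final piece of a split line carries the newline. A quick sketch (the exact split points depend on how the underlying stream happens to be read, so the capped output is marked as an example)::

    from io import BytesIO
    from werkzeug.wsgi import make_line_iter

    data = b'short\n' + b'y' * 25 + b'\n'

    # Default behaviour: every logical line comes back whole.
    print(list(make_line_iter(BytesIO(data), limit=len(data), buffer_size=10)))
    # [b'short\n', b'yyyyyyyyyyyyyyyyyyyyyyyyy\n']

    # Capped: the 26-byte line is delivered in buffer-sized pieces.
    print(list(make_line_iter(BytesIO(data), limit=len(data), buffer_size=10,
                              cap_at_buffer=True)))
    # e.g. [b'short\n', b'yyyyyyyyyy', b'yyyyyyyyyy', b'yyyyy\n']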
 ...
 
         yield previous
 
 
-def make_chunk_iter(stream, separator, limit=None, buffer_size=10 * 1024):
+def make_chunk_iter(stream, separator, limit=None, buffer_size=10 * 1024,
+                    cap_at_buffer=False):
     """Works like :func:`make_line_iter` but accepts a separator
     which divides chunks. If you want newline based processing
     you should use :func:`make_line_iter` instead as it
 
 ...
 
     .. versionadded:: 0.9
        added support for iterators as input stream.
+
+    .. versionadded:: 0.11.10
+       added support for the `cap_at_buffer` parameter.
 
     :param stream: the stream or iterable to iterate over.
     :param separator: the separator that divides chunks.
 
 ...
 
                   content length. Not necessary if the `stream`
                   is otherwise already limited).
     :param buffer_size: The optional buffer size.
+    :param cap_at_buffer: if this is set, chunks are split if they are longer
+                          than the buffer size. Internally the buffer may
+                          still be exceeded by up to a factor of two, however.
     """
     _iter = _make_chunk_iter(stream, limit, buffer_size)
 
 ...
 
             break
         chunks = _split(new_data)
         new_buf = []
+        buf_size = 0
         for item in chain(buffer, chunks):
             if item == separator:
                 yield _join(new_buf)
                 new_buf = []
+                buf_size = 0
             else:
+                buf_size += len(item)
                 new_buf.append(item)
+
+                if cap_at_buffer and buf_size >= buffer_size:
+                    rv = _join(new_buf)
+                    while len(rv) >= buffer_size:
+                        yield rv[:buffer_size]
+                        rv = rv[buffer_size:]
+                    new_buf = [rv]
+                    buf_size = len(rv)
+
         buffer = new_buf
     if buffer:
         yield _join(buffer)
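
To close the loop: ``make_chunk_iter`` also accepts a plain iterable (the 0.9 addition its docstring mentions), in which case ``limit`` may be omitted; separator occurrences are dropped and, with ``cap_at_buffer=True``, the text between them is re-split into pieces of at most ``buffer_size``. The output below is marked "e.g." because, as with lines, the split points follow the incoming chunk boundaries::

    from werkzeug.wsgi import make_chunk_iter

    parts = iter([b'aaaa;bb', b'bb;cc'])
    print(list(make_chunk_iter(parts, b';', buffer_size=3,
                               cap_at_buffer=True)))
    # e.g. [b'aaa', b'a', b'bbb', b'b', b'cc']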