New upstream version 0.1.21
Scott Kitterman
4 years ago
0 | No-notice MIT License | |
1 | ||
2 | Permission is hereby granted, free of charge, to any person obtaining a copy | |
3 | of this software and associated documentation files (the "Software"), to deal | |
4 | in the Software without restriction, including without limitation the rights | |
5 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |
6 | copies of the Software, and to permit persons to whom the Software is | |
7 | furnished to do so. | |
8 | ||
9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
10 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
11 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | |
12 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
13 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |
14 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | |
15 | THE SOFTWARE. | |
0 | No-notice MIT License | |
1 | ||
2 | Permission is hereby granted, free of charge, to any person obtaining a copy | |
3 | of this software and associated documentation files (the "Software"), to deal | |
4 | in the Software without restriction, including without limitation the rights | |
5 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |
6 | copies of the Software, and to permit persons to whom the Software is | |
7 | furnished to do so. | |
8 | ||
9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
10 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
11 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | |
12 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
13 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |
14 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | |
15 | THE SOFTWARE. |
0 | Metadata-Version: 2.1 | |
1 | Name: pytoml | |
2 | Version: 0.1.21 | |
3 | Summary: A parser for TOML-0.4.0 | |
4 | Home-page: https://github.com/avakar/pytoml | |
5 | Author: Martin Vejnár | |
6 | Author-email: vejnar.martin@gmail.com | |
7 | License: MIT | |
8 | Description: [![PyPI](https://img.shields.io/pypi/v/pytoml.svg)](https://pypi.python.org/pypi/pytoml) | |
9 | [![Build Status](https://travis-ci.org/avakar/pytoml.svg?branch=master)](https://travis-ci.org/avakar/pytoml) | |
10 | ||
11 | # Deprecated | |
12 | ||
13 | The pytoml project is no longer being actively maintained. Consider using the | |
14 | [toml](https://github.com/uiri/toml) package instead. | |
15 | ||
16 | # pytoml | |
17 | ||
18 | This project aims at being a specs-conforming and strict parser and writer for [TOML][1] files. | |
19 | The library currently supports [version 0.4.0][2] of the specs and runs with Python 2.7+ and 3.5+. | |
20 | ||
21 | Install: | |
22 | ||
23 | pip install pytoml | |
24 | ||
25 | The interface is the same as for the standard `json` package. | |
26 | ||
27 | >>> import pytoml as toml | |
28 | >>> toml.loads('a = 1') | |
29 | {'a': 1} | |
30 | >>> with open('file.toml', 'rb') as fin: | |
31 | ... obj = toml.load(fin) | |
32 | >>> obj | |
33 | {'a': 1} | |
34 | ||
35 | The `loads` function accepts either a bytes object | |
36 | (that gets decoded as UTF-8 with no BOM allowed), | |
37 | or a unicode object. | |
38 | ||
39 | Use `dump` or `dumps` to serialize a dict into TOML. | |
40 | ||
41 | >>> print(toml.dumps(obj)) | |
42 | a = 1 | |
43 | ||
44 | ## tests | |
45 | ||
46 | To run the tests update the `toml-test` submodule: | |
47 | ||
48 | git submodule update --init --recursive | |
49 | ||
50 | Then run the tests: | |
51 | ||
52 | python test/test.py | |
53 | ||
54 | [1]: https://github.com/toml-lang/toml | |
55 | [2]: https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md | |
56 | ||
57 | Platform: UNKNOWN | |
58 | Classifier: Programming Language :: Python :: 2 | |
59 | Classifier: Programming Language :: Python :: 2.7 | |
60 | Classifier: Programming Language :: Python :: 3 | |
61 | Classifier: Programming Language :: Python :: 3.5 | |
62 | Classifier: Programming Language :: Python :: 3.6 | |
63 | Classifier: Programming Language :: Python :: 3.7 | |
64 | Classifier: License :: OSI Approved :: MIT License | |
65 | Classifier: Topic :: Software Development :: Libraries | |
66 | Description-Content-Type: text/markdown |
0 | # pytoml | |
1 | ||
2 | This project aims at being a specs-conforming and strict parser and writer for [TOML][1] files. | |
3 | The library currently supports [version 0.4.0][2] of the specs and runs with Python 2.7 and 3.4+. | |
4 | ||
5 | Install: | |
6 | ||
7 | easy_install pytoml | |
8 | ||
9 | The interface is the same as for the standard `json` package. | |
10 | ||
11 | >>> import pytoml as toml | |
12 | >>> toml.loads('a = 1') | |
13 | {'a': 1} | |
14 | >>> with open('file.toml', 'rb') as fin: | |
15 | ... obj = toml.load(fin) | |
16 | >>> obj | |
17 | {'a': 1} | |
18 | ||
19 | The `loads` function accepts either a bytes object | |
20 | (that gets decoded as UTF-8 with no BOM allowed), | |
21 | or a unicode object. | |
22 | ||
23 | Use `dump` or `dumps` to serialize a dict into TOML. | |
24 | ||
25 | >>> print(toml.dumps(obj)) | |
26 | a = 1 | |
27 | ||
28 | [1]: https://github.com/toml-lang/toml | |
29 | [2]: https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md | |
0 | [![PyPI](https://img.shields.io/pypi/v/pytoml.svg)](https://pypi.python.org/pypi/pytoml) | |
1 | [![Build Status](https://travis-ci.org/avakar/pytoml.svg?branch=master)](https://travis-ci.org/avakar/pytoml) | |
2 | ||
3 | # Deprecated | |
4 | ||
5 | The pytoml project is no longer being actively maintained. Consider using the | |
6 | [toml](https://github.com/uiri/toml) package instead. | |
7 | ||
8 | # pytoml | |
9 | ||
10 | This project aims at being a specs-conforming and strict parser and writer for [TOML][1] files. | |
11 | The library currently supports [version 0.4.0][2] of the specs and runs with Python 2.7+ and 3.5+. | |
12 | ||
13 | Install: | |
14 | ||
15 | pip install pytoml | |
16 | ||
17 | The interface is the same as for the standard `json` package. | |
18 | ||
19 | >>> import pytoml as toml | |
20 | >>> toml.loads('a = 1') | |
21 | {'a': 1} | |
22 | >>> with open('file.toml', 'rb') as fin: | |
23 | ... obj = toml.load(fin) | |
24 | >>> obj | |
25 | {'a': 1} | |
26 | ||
27 | The `loads` function accepts either a bytes object | |
28 | (that gets decoded as UTF-8 with no BOM allowed), | |
29 | or a unicode object. | |
30 | ||
31 | Use `dump` or `dumps` to serialize a dict into TOML. | |
32 | ||
33 | >>> print(toml.dumps(obj)) | |
34 | a = 1 | |
35 | ||
36 | ## tests | |
37 | ||
38 | To run the tests update the `toml-test` submodule: | |
39 | ||
40 | git submodule update --init --recursive | |
41 | ||
42 | Then run the tests: | |
43 | ||
44 | python test/test.py | |
45 | ||
46 | [1]: https://github.com/toml-lang/toml | |
47 | [2]: https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md |
0 | from .core import TomlError | |
1 | from .parser import load, loads | |
2 | from .writer import dump, dumps | |
0 | from .core import TomlError | |
1 | from .parser import load, loads | |
2 | from .test import translate_to_test | |
3 | from .writer import dump, dumps⏎ |
class TomlError(RuntimeError):
    """Raised when a TOML document cannot be parsed.

    Attributes:
        message: short description of the problem.
        line, col: 1-based position of the offending input.
        filename: name of the source being parsed.
    """

    def __init__(self, message, line, col, filename):
        # Pass everything through so RuntimeError.args carries the full context.
        super(TomlError, self).__init__(message, line, col, filename)
        self.message = message
        self.line = line
        self.col = col
        self.filename = filename

    def __str__(self):
        where = '{}({}, {})'.format(self.filename, self.line, self.col)
        return '{}: {}'.format(where, self.message)

    def __repr__(self):
        details = (self.message, self.line, self.col, self.filename)
        return 'TomlError({!r}, {!r}, {!r}, {!r})'.format(*details)
class TomlError(RuntimeError):
    """Parse error carrying a message plus the 1-based line/column and
    filename of the offending input."""

    def __init__(self, message, line, col, filename):
        # Give RuntimeError the full tuple so e.args is informative too.
        RuntimeError.__init__(self, message, line, col, filename)
        self.message, self.line, self.col, self.filename = \
            message, line, col, filename

    def __str__(self):
        return '{0}({1}, {2}): {3}'.format(
            self.filename, self.line, self.col, self.message)

    def __repr__(self):
        return 'TomlError({0!r}, {1!r}, {2!r}, {3!r})'.format(
            self.message, self.line, self.col, self.filename)
0 | import string, re, sys, datetime | |
1 | from .core import TomlError | |
2 | ||
3 | class _CharSource: | |
4 | def __init__(self, s, filename): | |
5 | self._s = s | |
6 | self._index = 0 | |
7 | self._mark = 0 | |
8 | self._line = 1 | |
9 | self._col = 1 | |
10 | self._filename = filename | |
11 | self._update_cur() | |
12 | ||
13 | def __bool__(self): | |
14 | return self.cur is not None | |
15 | ||
16 | def __len__(self): | |
17 | return len(self._s[self._index:]) | |
18 | ||
19 | def __getitem__(self, item): | |
20 | return self._s[self._index:][item] | |
21 | ||
22 | def next(self, l=1): | |
23 | for ch in self[:l]: | |
24 | if ch == '\n': | |
25 | self._line += 1 | |
26 | self._col = 1 | |
27 | else: | |
28 | self._col += 1 | |
29 | self._index += l | |
30 | self._update_cur() | |
31 | ||
32 | def mark(self): | |
33 | self._mark = self._index | |
34 | self._mark_pos = self._line, self._col | |
35 | ||
36 | def rollback(self): | |
37 | self._index = self._mark | |
38 | self._line, self._col = self._mark_pos | |
39 | self._update_cur() | |
40 | ||
41 | def commit(self, type=None, text=None): | |
42 | tok = self._s[self._mark:self._index] | |
43 | pos = (self._mark_pos, (self._line, self._col)) | |
44 | if type is None: | |
45 | type = tok | |
46 | if text is None: | |
47 | text = tok | |
48 | return type, text, pos | |
49 | ||
50 | def error(self, message): | |
51 | raise TomlError(message, self._line, self._col, self._filename) | |
52 | ||
53 | def _update_cur(self): | |
54 | self.tail = self._s[self._index:] | |
55 | if self._index < len(self._s): | |
56 | self.cur = self._s[self._index] | |
57 | else: | |
58 | self.cur = None | |
59 | ||
# Code-point constructor: unichr on Python 2 (chr there is bytes-only),
# plain chr on Python 3.
if sys.version_info[0] == 2:
    _chr = unichr
else:
    _chr = chr

# RFC-3339-style datetime token: date 'T' time, optional fractional
# seconds, then either a literal 'Z' or a numeric +HH:MM offset.
_datetime_re = re.compile(r'(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})(\.\d+)?(?:Z|([+-]\d{2}):(\d{2}))')
66 | ||
def _lex(s, filename):
    """Tokenize TOML source *s*, yielding (type, text, pos) triples.

    pos is ((start_line, start_col), (end_line, end_col)).  Every document
    is terminated with a synthetic '\\n' token followed by an 'eof' token.
    Errors raise TomlError via the source's error() helper.
    """
    # Normalize CRLF so the lexer only ever sees '\n' line endings.
    src = _CharSource(s.replace('\r\n', '\n'), filename)

    def is_id(ch):
        # Bare-key / identifier characters: alphanumerics plus '-' and '_'.
        return ch is not None and (ch.isalnum() or ch in '-_')

    def is_ws(ch):
        # NOTE(review): currently unused within _lex.
        return ch is not None and ch in ' \t'

    def fetch_esc():
        # Decode one backslash escape (the backslash itself was consumed
        # by the caller); returns the replacement text.
        escapes = {'b':'\b', 't':'\t', 'n':'\n', 'f':'\f', 'r':'\r', '"':'"', '\\':'\\'}
        if src.cur == 'u':
            # \uXXXX: exactly four hex digits.
            if len(src) < 5 or any(ch not in string.hexdigits for ch in src[1:5]):
                src.error('invalid_escape_sequence')
            res = _chr(int(src[1:5], 16))
            src.next(5)
        elif src.cur == 'U':
            # \UXXXXXXXX: exactly eight hex digits.
            if len(src) < 9 or any(ch not in string.hexdigits for ch in src[1:9]):
                src.error('invalid_escape_sequence')
            res = _chr(int(src[1:9], 16))
            src.next(9)
        elif src.cur == '\n':
            # Line-ending backslash: swallow the newline and any
            # following whitespace; contributes nothing to the string.
            while src and src.cur in ' \n\t':
                src.next()
            res = ''
        elif src.cur in escapes:
            res = escapes[src.cur]
            src.next(1)
        else:
            src.error('invalid_escape_sequence')
        return res

    def consume_datetime():
        # Try to consume an entire datetime token; False if no match.
        m = _datetime_re.match(src.tail)
        if not m:
            return False
        src.next(len(m.group(0)))
        return True

    def consume_int():
        # Optional sign, then at least one digit ('_' separators allowed).
        if not src:
            src.error('malformed')
        if src.cur in '+-':
            src.next()
        if not src or src.cur not in '0123456789':
            src.error('malformed')
        while src and src.cur in '0123456789_':
            src.next()

    def consume_float():
        # Consume a numeric literal; returns 'int' or 'float' depending
        # on whether a fractional part or exponent was actually seen.
        consume_int()
        type = 'int'
        if src and src.cur == '.':
            type = 'float'
            src.next()
            if not src or src.cur not in '0123456789_':
                src.error('malformed_float')
            while src and src.cur in '0123456789_':
                src.next()
        if src and src.cur in 'eE':
            type = 'float'
            src.next()
            consume_int()
        return type

    while src:
        src.mark()
        if src.cur in ' \t':
            # Plain whitespace run: no token emitted.
            src.next()
            while src and src.cur in ' \t':
                src.next()
        elif src.cur == '#':
            # Comment: skip to end of line, no token emitted.
            src.next()
            while src and src.cur != '\n':
                src.next()
        elif src.cur in '0123456789':
            # Datetimes and numbers both start with a digit: try the
            # datetime first, then roll back and re-lex as a number.
            if consume_datetime():
                yield src.commit('datetime')
            else:
                src.rollback()
                type = consume_float()
                yield src.commit(type)
        elif src.cur in '+-':
            type = consume_float()
            yield src.commit(type)
        elif is_id(src.cur):
            while is_id(src.cur):
                src.next()
            yield src.commit('id')
        elif src.cur in '[]{}=.,\n':
            # Single-character punctuation; token type equals its text.
            src.next()
            yield src.commit()
        elif src.tail.startswith("'''"):
            # Multiline literal string: no escape processing; a newline
            # immediately after the opening quotes is dropped.
            src.next(3)
            if src.cur == '\n':
                src.next()
            end_quote = src.tail.find("'''")
            if end_quote == -1:
                src.error('unclosed_multiline_string')
            text = src[:end_quote]
            src.next(end_quote+3)
            yield src.commit('str', text)
        elif src.cur == "'":
            # Single-line literal string.
            src.next()
            end_quote = src.tail.find("'")
            if end_quote == -1:
                src.error('unclosed_string')
            text = src[:end_quote]
            src.next(end_quote+1)
            yield src.commit('str', text)
        elif src.tail.startswith('"""'):
            # Multiline basic string: copy text up to the next escape or
            # the closing quotes, decoding escapes via fetch_esc().
            src.next(3)
            if src.cur == '\n':
                src.next()
            res = []
            while True:
                src.mark()
                end_quote = src.tail.find('"""')
                if end_quote == -1:
                    src.error('unclosed_multiline_string')
                esc_pos = src.tail.find('\\')
                if esc_pos == -1 or esc_pos > end_quote:
                    res.append(src[:end_quote])
                    src.next(end_quote+3)
                    break
                res.append(src[:esc_pos])
                src.next(esc_pos+1)
                res.append(fetch_esc())

            yield src.commit('str', ''.join(res))
        elif src.cur == '"':
            # Single-line basic string with escape processing.
            src.next()
            res = []
            while True:
                src.mark()
                end_quote = src.tail.find('"')
                if end_quote == -1:
                    src.error('unclosed_string')
                esc_pos = src.tail.find('\\')
                if esc_pos == -1 or esc_pos > end_quote:
                    res.append(src[:end_quote])
                    src.next(end_quote+1)
                    break
                res.append(src[:esc_pos])
                src.next(esc_pos+1)
                res.append(fetch_esc())

            yield src.commit('str', ''.join(res))
        else:
            src.error('unexpected_char')

    # Terminate every document with a newline token and an explicit EOF so
    # the parser never has to special-case a missing trailing newline.
    src.mark()
    yield src.commit('\n', '')
    yield src.commit('eof', '')
220 | ||
class _TokSource:
    """Single-token-lookahead cursor over the _lex token stream.

    `tok`, `text` and `pos` describe the current token; `prev_pos` is the
    position of the previously consumed one (used for adjacency checks).
    """

    def __init__(self, s, filename):
        self._filename = filename
        self._lex = iter(_lex(s, filename))
        self.pos = None
        self.next()

    def next(self):
        """Advance to the next token from the lexer."""
        self.prev_pos = self.pos
        self.tok, self.text, self.pos = next(self._lex)

    def consume(self, kind):
        """Consume the current token if it has type *kind*; return success."""
        if self.tok != kind:
            return False
        self.next()
        return True

    def consume_adjacent(self, kind):
        """Like consume(), but only if this token starts exactly where the
        previous token ended (no whitespace in between, e.g. for '[[')."""
        if self.prev_pos is None or self.prev_pos[1] != self.pos[0]:
            return False
        return self.consume(kind)

    def consume_nls(self):
        """Skip any run of newline tokens."""
        while self.consume('\n'):
            pass

    def expect(self, kind, error_text):
        """Consume a *kind* token or fail with *error_text*."""
        if not self.consume(kind):
            self.error(error_text)

    def error(self, message):
        """Raise a TomlError at the current token's start position."""
        line, col = self.pos[0]
        raise TomlError(message, line, col, self._filename)
253 | ||
def _translate_datetime(s):
    """Convert a lexed datetime token into a timezone-aware datetime.

    *s* must already match _datetime_re (the lexer guarantees this).  A
    trailing 'Z' (empty offset groups) is treated as +00:00.
    """
    match = _datetime_re.match(s)
    # (A stray duplicate re.compile() of _datetime_re used to sit here as a
    # no-op statement; it has been removed.)

    y = int(match.group(1))
    m = int(match.group(2))
    d = int(match.group(3))
    H = int(match.group(4))
    M = int(match.group(5))
    S = int(match.group(6))

    # Group 7 includes the leading dot (e.g. '.25'), so float() yields the
    # fractional-second value directly.
    if match.group(7) is not None:
        micro = float(match.group(7))
    else:
        micro = 0

    # Numeric offset; negative hours flip the minute component too.
    if match.group(8) is not None:
        tzh = int(match.group(8))
        tzm = int(match.group(9))
        if tzh < 0:
            tzm = -tzm
        offs = tzh * 60 + tzm
    else:
        offs = 0

    dt = datetime.datetime(y, m, d, H, M, S, int(micro * 1000000),
        _TimeZone(datetime.timedelta(0, offs*60)))

    return dt
283 | ||
284 | def _translate_literal(type, text): | |
285 | if type == 'bool': | |
286 | return text == 'true' | |
287 | elif type == 'int': | |
288 | return int(text.replace('_', '')) | |
289 | elif type == 'float': | |
290 | return float(text.replace('_', '')) | |
291 | elif type == 'str': | |
292 | return text | |
293 | elif type == 'datetime': | |
294 | return _translate_datetime(text) | |
295 | ||
296 | def _translate_array(a): | |
297 | return a | |
298 | ||
def load(fin, translate_literal=_translate_literal, translate_array=_translate_array):
    """Parse TOML from the file object *fin*.

    The whole file is read eagerly; fin.name is used in error messages.
    """
    text = fin.read()
    return loads(text,
        translate_literal=translate_literal,
        translate_array=translate_array,
        filename=fin.name)
303 | ||
def loads(s, translate_literal=_translate_literal, translate_array=_translate_array, filename='<string>'):
    """Parse a TOML document from *s* (str, or UTF-8-encoded bytes).

    translate_literal/translate_array hook the conversion of scalar and
    array values; *filename* only appears in error messages.  Returns a
    dict of the document's contents.
    """
    if isinstance(s, bytes):
        s = s.decode('utf-8')

    toks = _TokSource(s, filename)

    def read_value():
        # Parse one value; returns (type_name, value) so the caller can
        # enforce array homogeneity.
        while True:
            if toks.tok == 'id':
                # The only bare identifiers allowed in value position.
                if toks.text in ('true', 'false'):
                    value = translate_literal('bool', toks.text)
                    toks.next()
                    return 'bool', value
                else:
                    toks.error('unexpected_identifier')
            elif toks.tok in ('int', 'str', 'float', 'datetime'):
                type = toks.tok
                value = translate_literal(toks.tok, toks.text)
                toks.next()
                return type, value
            elif toks.consume('['):
                # Array: newlines are allowed around elements and commas.
                res = []
                toks.consume_nls()
                if not toks.consume(']'):
                    toks.consume_nls()
                    type, val = read_value()
                    res.append(val)
                    toks.consume_nls()
                    while toks.consume(','):
                        toks.consume_nls()
                        if toks.consume(']'):
                            # Trailing comma before the closing bracket.
                            break
                        cur_type, val = read_value()
                        if type != cur_type:
                            toks.error('heterogenous_array')
                        res.append(val)
                        toks.consume_nls()
                    else:
                        # Loop ended without a comma: closing ']' required.
                        toks.expect(']', 'expected_right_brace')
                return 'array', translate_array(res)
            elif toks.consume('{'):
                # Inline table.
                res = {}
                while toks.tok in ('id', 'str'):
                    k = toks.text
                    toks.next()
                    if k in res:
                        toks.error('duplicate_key')
                    toks.expect('=', 'expected_equals')
                    type, v = read_value()
                    res[k] = v
                    if not toks.consume(','):
                        break
                toks.expect('}', 'expected_closing_brace')
                return 'table', res
            else:
                toks.error('unexpected_token')

    # `root` holds top-level key/value pairs; `tables` mirrors the table
    # headers as nested (scope, children) pairs, merged at the end.
    root = {}
    tables = {}
    scope = root

    while toks.tok != 'eof':
        if toks.tok in ('id', 'str'):
            # key = value line in the current table scope.
            k = toks.text
            toks.next()
            toks.expect('=', 'expected_equals')
            type, v = read_value()
            if k in scope:
                toks.error('duplicate_keys')
            scope[k] = v
            toks.expect('\n', 'expected_eol')
        elif toks.consume('\n'):
            pass
        elif toks.consume('['):
            # [table] or [[table array]]; '[[' only counts if adjacent.
            is_table_array = toks.consume_adjacent('[')

            path = []
            if toks.tok not in ('id', 'str'):
                toks.error('expected_table_name')
            path.append(toks.text)
            toks.next()
            while toks.consume('.'):
                if toks.tok not in ('id', 'str'):
                    toks.error('expected_table_name')
                path.append(toks.text)
                toks.next()
            if not toks.consume(']') or (is_table_array and not toks.consume_adjacent(']')):
                toks.error('malformed_table_name')
            toks.expect('\n', 'expected_eol')

            # Walk/create intermediate tables; a list entry means an array
            # of tables, where the last element is the active one.
            cur = tables
            for name in path[:-1]:
                if isinstance(cur.get(name), list):
                    d, cur = cur[name][-1]
                else:
                    d, cur = cur.setdefault(name, (None, {}))

            scope = {}
            name = path[-1]
            if name not in cur:
                if is_table_array:
                    cur[name] = [(scope, {})]
                else:
                    cur[name] = (scope, {})
            elif isinstance(cur[name], list):
                if not is_table_array:
                    toks.error('table_type_mismatch')
                cur[name].append((scope, {}))
            else:
                if is_table_array:
                    toks.error('table_type_mismatch')
                old_scope, next_table = cur[name]
                if old_scope is not None:
                    # Same [table] header seen twice.
                    toks.error('duplicate_tables')
                cur[name] = (scope, next_table)
        else:
            toks.error('unexpected')

    def merge_tables(scope, tables):
        # Fold header-created child tables back into their parent scopes.
        if scope is None:
            scope = {}
        for k in tables:
            if k in scope:
                toks.error('key_table_conflict')
            v = tables[k]
            if isinstance(v, list):
                scope[k] = [merge_tables(sc, tbl) for sc, tbl in v]
            else:
                scope[k] = merge_tables(v[0], v[1])
        return scope

    return merge_tables(root, tables)
436 | ||
437 | class _TimeZone(datetime.tzinfo): | |
438 | def __init__(self, offset): | |
439 | self._offset = offset | |
440 | ||
441 | def utcoffset(self, dt): | |
442 | return self._offset | |
443 | ||
444 | def dst(self, dt): | |
445 | return None | |
446 | ||
447 | def tzname(self, dt): | |
448 | m = self._offset.total_seconds() // 60 | |
449 | if m < 0: | |
450 | res = '-' | |
451 | m = -m | |
452 | else: | |
453 | res = '+' | |
454 | h = m // 60 | |
455 | m = m - h * 60 | |
456 | return '{}{:.02}{:.02}'.format(res, h, m) | |
0 | import re, sys | |
1 | from .core import TomlError | |
2 | from .utils import rfc3339_re, parse_rfc3339_re | |
3 | ||
# Code-point constructor: unichr on Python 2 (chr there is bytes-only),
# plain chr on Python 3.
if sys.version_info[0] == 2:
    _chr = unichr
else:
    _chr = chr
8 | ||
def load(fin, translate=lambda t, x, v: v, object_pairs_hook=dict):
    """Parse TOML from a file-like object (read eagerly).

    Error messages use fin.name when available, otherwise repr(fin).
    """
    name = getattr(fin, 'name', repr(fin))
    return loads(fin.read(), translate=translate,
        object_pairs_hook=object_pairs_hook, filename=name)
11 | ||
def loads(s, filename='<string>', translate=lambda t, x, v: v, object_pairs_hook=dict):
    """Parse TOML from *s* (str, or UTF-8-encoded bytes) into mappings.

    translate(kind, text, value) post-processes every parsed value;
    object_pairs_hook constructs all mapping objects (e.g. OrderedDict);
    *filename* only appears in error messages.
    """
    if isinstance(s, bytes):
        s = s.decode('utf-8')

    # Normalize line endings before any positions are computed.
    s = s.replace('\r\n', '\n')

    # `root` holds top-level key/value pairs; `tables` mirrors the table
    # headers as nested (scope, children) pairs, merged at the end.
    root = object_pairs_hook()
    tables = object_pairs_hook()
    scope = root

    src = _Source(s, filename=filename)
    ast = _p_toml(src, object_pairs_hook=object_pairs_hook)

    def error(msg):
        # NOTE: closes over `pos` from the statement loop below, i.e. the
        # position of the statement currently being processed.
        raise TomlError(msg, pos[0], pos[1], filename)

    def process_value(v, object_pairs_hook):
        # Recursively apply `translate`, enforcing array homogeneity.
        kind, text, value, pos = v
        if kind == 'array':
            if value and any(k != value[0][0] for k, t, v, p in value[1:]):
                error('array-type-mismatch')
            value = [process_value(item, object_pairs_hook=object_pairs_hook) for item in value]
        elif kind == 'table':
            value = object_pairs_hook([(k, process_value(value[k], object_pairs_hook=object_pairs_hook)) for k in value])
        return translate(kind, text, value)

    for kind, value, pos in ast:
        if kind == 'kv':
            # key = value statement in the current table scope.
            k, v = value
            if k in scope:
                error('duplicate_keys. Key "{0}" was used more than once.'.format(k))
            scope[k] = process_value(v, object_pairs_hook=object_pairs_hook)
        else:
            # [table] or [[table array]] header: walk (creating as needed)
            # down the dotted path; a list entry means an array of tables
            # and its last element is the active one.
            is_table_array = (kind == 'table_array')
            cur = tables
            for name in value[:-1]:
                if isinstance(cur.get(name), list):
                    d, cur = cur[name][-1]
                else:
                    d, cur = cur.setdefault(name, (None, object_pairs_hook()))

            scope = object_pairs_hook()
            name = value[-1]
            if name not in cur:
                if is_table_array:
                    cur[name] = [(scope, object_pairs_hook())]
                else:
                    cur[name] = (scope, object_pairs_hook())
            elif isinstance(cur[name], list):
                if not is_table_array:
                    error('table_type_mismatch')
                cur[name].append((scope, object_pairs_hook()))
            else:
                if is_table_array:
                    error('table_type_mismatch')
                old_scope, next_table = cur[name]
                if old_scope is not None:
                    # Same [table] header seen twice.
                    error('duplicate_tables')
                cur[name] = (scope, next_table)

    def merge_tables(scope, tables):
        # Fold header-created child tables back into their parent scopes.
        if scope is None:
            scope = object_pairs_hook()
        for k in tables:
            if k in scope:
                error('key_table_conflict')
            v = tables[k]
            if isinstance(v, list):
                scope[k] = [merge_tables(sc, tbl) for sc, tbl in v]
            else:
                scope[k] = merge_tables(v[0], v[1])
        return scope

    return merge_tables(root, tables)
86 | ||
87 | class _Source: | |
88 | def __init__(self, s, filename=None): | |
89 | self.s = s | |
90 | self._pos = (1, 1) | |
91 | self._last = None | |
92 | self._filename = filename | |
93 | self.backtrack_stack = [] | |
94 | ||
95 | def last(self): | |
96 | return self._last | |
97 | ||
98 | def pos(self): | |
99 | return self._pos | |
100 | ||
101 | def fail(self): | |
102 | return self._expect(None) | |
103 | ||
104 | def consume_dot(self): | |
105 | if self.s: | |
106 | self._last = self.s[0] | |
107 | self.s = self[1:] | |
108 | self._advance(self._last) | |
109 | return self._last | |
110 | return None | |
111 | ||
112 | def expect_dot(self): | |
113 | return self._expect(self.consume_dot()) | |
114 | ||
115 | def consume_eof(self): | |
116 | if not self.s: | |
117 | self._last = '' | |
118 | return True | |
119 | return False | |
120 | ||
121 | def expect_eof(self): | |
122 | return self._expect(self.consume_eof()) | |
123 | ||
124 | def consume(self, s): | |
125 | if self.s.startswith(s): | |
126 | self.s = self.s[len(s):] | |
127 | self._last = s | |
128 | self._advance(s) | |
129 | return True | |
130 | return False | |
131 | ||
132 | def expect(self, s): | |
133 | return self._expect(self.consume(s)) | |
134 | ||
135 | def consume_re(self, re): | |
136 | m = re.match(self.s) | |
137 | if m: | |
138 | self.s = self.s[len(m.group(0)):] | |
139 | self._last = m | |
140 | self._advance(m.group(0)) | |
141 | return m | |
142 | return None | |
143 | ||
144 | def expect_re(self, re): | |
145 | return self._expect(self.consume_re(re)) | |
146 | ||
147 | def __enter__(self): | |
148 | self.backtrack_stack.append((self.s, self._pos)) | |
149 | ||
150 | def __exit__(self, type, value, traceback): | |
151 | if type is None: | |
152 | self.backtrack_stack.pop() | |
153 | else: | |
154 | self.s, self._pos = self.backtrack_stack.pop() | |
155 | return type == TomlError | |
156 | ||
157 | def commit(self): | |
158 | self.backtrack_stack[-1] = (self.s, self._pos) | |
159 | ||
160 | def _expect(self, r): | |
161 | if not r: | |
162 | raise TomlError('msg', self._pos[0], self._pos[1], self._filename) | |
163 | return r | |
164 | ||
165 | def _advance(self, s): | |
166 | suffix_pos = s.rfind('\n') | |
167 | if suffix_pos == -1: | |
168 | self._pos = (self._pos[0], self._pos[1] + len(s)) | |
169 | else: | |
170 | self._pos = (self._pos[0] + s.count('\n'), len(s) - suffix_pos) | |
171 | ||
# "Extended whitespace": any run of spaces/tabs, comments and newlines.
_ews_re = re.compile(r'(?:[ \t]|#[^\n]*\n|#[^\n]*\Z|\n)*')
def _p_ews(s):
    """Skip (possibly empty) extended whitespace at the current position."""
    s.expect_re(_ews_re)
175 | ||
# Intra-line whitespace: spaces and tabs only (no newlines or comments).
_ws_re = re.compile(r'[ \t]*')
def _p_ws(s):
    """Skip (possibly empty) spaces/tabs at the current position."""
    s.expect_re(_ws_re)
179 | ||
# Single-character backslash escapes allowed in basic strings.
_escapes = { 'b': '\b', 'n': '\n', 'r': '\r', 't': '\t', '"': '"',
    '\\': '\\', 'f': '\f' }

_basicstr_re = re.compile(r'[^"\\\000-\037]*')    # run of unescaped chars
_short_uni_re = re.compile(r'u([0-9a-fA-F]{4})')  # \uXXXX escape
_long_uni_re = re.compile(r'U([0-9a-fA-F]{8})')   # \UXXXXXXXX escape
_escapes_re = re.compile(r'[btnfr\"\\]')          # one-letter escapes
_newline_esc_re = re.compile('\n[ \t\n]*')        # line-continuation
def _p_basicstr_content(s, content=_basicstr_re):
    """Parse the body of a basic (double-quoted) string, decoding escapes.

    *content* is the regex matching runs of ordinary characters; callers
    pass the multiline variant for triple-quoted strings.
    """
    res = []
    while True:
        res.append(s.expect_re(content).group(0))
        if not s.consume('\\'):
            break
        if s.consume_re(_newline_esc_re):
            # Backslash at end of line: drop the newline and indentation.
            pass
        elif s.consume_re(_short_uni_re) or s.consume_re(_long_uni_re):
            v = int(s.last().group(1), 16)
            if 0xd800 <= v < 0xe000:
                # Lone surrogates are not valid Unicode scalar values.
                s.fail()
            res.append(_chr(v))
        else:
            s.expect_re(_escapes_re)
            res.append(_escapes[s.last().group(0)])
    return ''.join(res)
205 | ||
# Bare-key characters: ASCII alphanumerics, '-' and '_'.
_key_re = re.compile(r'[0-9a-zA-Z-_]+')
def _p_key(s):
    """Parse a key: basic string, literal string, or bare key.

    The first `with s:` attempt is rolled back (its TomlError is suppressed
    by _Source.__exit__) when the key is not a double-quoted string, and
    parsing falls through to the other forms.
    """
    with s:
        s.expect('"')
        r = _p_basicstr_content(s, _basicstr_re)
        s.expect('"')
        return r
    if s.consume('\''):
        if s.consume('\'\''):
            # Triple-quoted literal key; a newline after ''' is dropped.
            s.consume('\n')
            r = s.expect_re(_litstr_ml_re).group(0)
            s.expect('\'\'\'')
        else:
            r = s.expect_re(_litstr_re).group(0)
            s.expect('\'')
        return r
    return s.expect_re(_key_re).group(0)
223 | ||
# Numeric literal: optional sign, '_' digit separators, optional fraction
# and exponent; whether it is an int or a float is decided by the caller.
_float_re = re.compile(r'[+-]?(?:0|[1-9](?:_?\d)*)(?:\.\d(?:_?\d)*)?(?:[eE][+-]?(?:\d(?:_?\d)*))?')

# String bodies: multiline basic, single-line literal, multiline literal.
_basicstr_ml_re = re.compile(r'(?:""?(?!")|[^"\\\000-\011\013-\037])*')
_litstr_re = re.compile(r"[^'\000\010\012-\037]*")
_litstr_ml_re = re.compile(r"(?:(?:|'|'')(?:[^'\000-\010\013-\037]))*")
def _p_value(s, object_pairs_hook):
    """Parse a single TOML value; return (kind, text, value, pos).

    kind is one of 'bool', 'str', 'datetime', 'float', 'int', 'array',
    'table'; *text* is the raw source text where meaningful (None for
    containers); *pos* is the (line, col) where the value started.
    """
    pos = s.pos()

    if s.consume('true'):
        return 'bool', s.last(), True, pos
    if s.consume('false'):
        return 'bool', s.last(), False, pos

    if s.consume('"'):
        # Basic string; two more quotes right away means multiline.
        if s.consume('""'):
            s.consume('\n')    # newline straight after """ is dropped
            r = _p_basicstr_content(s, _basicstr_ml_re)
            s.expect('"""')
        else:
            r = _p_basicstr_content(s, _basicstr_re)
            s.expect('"')
        return 'str', r, r, pos

    if s.consume('\''):
        # Literal string: no escape processing at all.
        if s.consume('\'\''):
            s.consume('\n')
            r = s.expect_re(_litstr_ml_re).group(0)
            s.expect('\'\'\'')
        else:
            r = s.expect_re(_litstr_re).group(0)
            s.expect('\'')
        return 'str', r, r, pos

    # Datetime must be tried before numbers: both start with digits.
    if s.consume_re(rfc3339_re):
        m = s.last()
        return 'datetime', m.group(0), parse_rfc3339_re(m), pos

    if s.consume_re(_float_re):
        m = s.last().group(0)
        r = m.replace('_','')
        # A fraction or exponent in the source text makes it a float.
        if '.' in m or 'e' in m or 'E' in m:
            return 'float', m, float(r), pos
        else:
            return 'int', m, int(r, 10), pos

    if s.consume('['):
        # Array: parse "value ,"-pairs until expect(',') fails; the
        # with-block backtracks past the failed element/comma attempt.
        items = []
        with s:
            while True:
                _p_ews(s)
                items.append(_p_value(s, object_pairs_hook=object_pairs_hook))
                s.commit()
                _p_ews(s)
                s.expect(',')
                s.commit()
        _p_ews(s)
        s.expect(']')
        return 'array', None, items, pos

    if s.consume('{'):
        # Inline table: comma-separated key = value pairs on one line.
        _p_ws(s)
        items = object_pairs_hook()
        if not s.consume('}'):
            k = _p_key(s)
            _p_ws(s)
            s.expect('=')
            _p_ws(s)
            items[k] = _p_value(s, object_pairs_hook=object_pairs_hook)
            _p_ws(s)
            while s.consume(','):
                _p_ws(s)
                k = _p_key(s)
                _p_ws(s)
                s.expect('=')
                _p_ws(s)
                items[k] = _p_value(s, object_pairs_hook=object_pairs_hook)
                _p_ws(s)
            s.expect('}')
        return 'table', None, items, pos

    s.fail()
305 | ||
def _p_stmt(s, object_pairs_hook):
    """Parse one top-level TOML statement.

    Returns ``('table', keys, pos)`` or ``('table_array', keys, pos)``
    for a section header, or ``('kv', (key, value), pos)`` for a plain
    key/value assignment.
    """
    pos = s.pos()

    if not s.consume('['):
        # Plain `key = value` assignment.
        key = _p_key(s)
        _p_ws(s)
        s.expect('=')
        _p_ws(s)
        return 'kv', (key, _p_value(s, object_pairs_hook=object_pairs_hook)), pos

    # Section header; a second '[' makes it a table-array header.
    is_array = s.consume('[')
    _p_ws(s)
    keys = [_p_key(s)]
    _p_ws(s)
    while s.consume('.'):
        _p_ws(s)
        keys.append(_p_key(s))
        _p_ws(s)
    s.expect(']')
    if is_array:
        s.expect(']')
    return ('table_array' if is_array else 'table'), keys, pos
328 | ||
# One or more statement separators: optional trailing whitespace/comment,
# then a newline, repeated; the next line's leading indent is consumed too.
_stmtsep_re = re.compile(r'(?:[ \t]*(?:#[^\n]*)?\n)+[ \t]*')
def _p_toml(s, object_pairs_hook):
    """Parse a whole TOML document into a flat list of statements.

    Each element is a tuple produced by ``_p_stmt``; the caller assembles
    them into the final document structure.
    """
    stmts = []
    _p_ews(s)
    # The transaction commits after every fully parsed statement so a
    # trailing partial statement backtracks to the last good position.
    with s:
        stmts.append(_p_stmt(s, object_pairs_hook=object_pairs_hook))
        while True:
            s.commit()
            s.expect_re(_stmtsep_re)
            stmts.append(_p_stmt(s, object_pairs_hook=object_pairs_hook))
    _p_ews(s)
    s.expect_eof()
    return stmts
0 | import datetime | |
1 | from .utils import format_rfc3339 | |
2 | ||
# Python 2/3 compatibility: on Python 2 text may be str or unicode and
# integers may be int or long; on Python 3 the extra names do not exist.
try:
    _string_types = (str, unicode)
    _int_types = (int, long)
except NameError:
    _string_types = str
    _int_types = int
9 | ||
def translate_to_test(v):
    """Convert a parsed TOML structure into toml-test's tagged-JSON form.

    Tables stay plain dicts and lists of tables stay plain lists; every
    scalar (and every non-table array) becomes a ``{'type', 'value'}``
    dict with string values, matching the toml-test decoder format.
    """
    if isinstance(v, dict):
        return dict((key, translate_to_test(val)) for key, val in v.items())
    if isinstance(v, list):
        converted = [translate_to_test(item) for item in v]
        # A non-empty list of tables is a table array and stays a plain
        # JSON list; any other list gets the 'array' envelope.
        if v and isinstance(v[0], dict):
            return converted
        return {'type': 'array', 'value': converted}
    if isinstance(v, datetime.datetime):
        return {'type': 'datetime', 'value': format_rfc3339(v)}
    # bool must be tested before the int types, since bool subclasses int.
    if isinstance(v, bool):
        return {'type': 'bool', 'value': 'true' if v else 'false'}
    if isinstance(v, _int_types):
        return {'type': 'integer', 'value': str(v)}
    if isinstance(v, float):
        return {'type': 'float', 'value': '{:.17}'.format(v)}
    if isinstance(v, _string_types):
        return {'type': 'string', 'value': v}
    raise RuntimeError('unexpected value: {!r}'.format(v))
0 | import datetime | |
1 | import re | |
2 | ||
# Full RFC 3339 timestamp: date, 'T', time, optional fraction, then 'Z'
# or a signed hour:minute offset.
rfc3339_re = re.compile(r'(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})(\.\d+)?(?:Z|([+-]\d{2}):(\d{2}))')

def parse_rfc3339(v):
    """Parse *v* as a complete RFC 3339 timestamp string.

    Returns an aware ``datetime`` or ``None`` when *v* is not entirely a
    valid timestamp.
    """
    match = rfc3339_re.match(v)
    if match is None or match.group(0) != v:
        return None
    return parse_rfc3339_re(match)
10 | ||
def parse_rfc3339_re(m):
    """Convert an ``rfc3339_re`` match object into an aware datetime.

    The sign of the UTC offset applies to the offset as a whole:
    "-05:30" means minus five hours *and* thirty minutes.
    """
    y, mo, d, hh, mm, ss = [int(g) for g in m.groups()[:6]]

    # Optional fractional seconds, e.g. '.12' -> 120000 microseconds.
    micro = float(m.group(7)) if m.group(7) else 0.0

    if m.group(8):
        # group(8) carries the sign (e.g. '-05'); it must be applied to
        # the minutes field as well -- the previous `h*60 + m` form
        # computed "-05:30" as -270 minutes instead of -330.
        sign = -1 if m.group(8).startswith('-') else 1
        offset_min = sign * (abs(int(m.group(8), 10)) * 60 + int(m.group(9), 10))
        tz = _TimeZone(datetime.timedelta(0, offset_min * 60))
    else:
        tz = _TimeZone(datetime.timedelta(0, 0))

    return datetime.datetime(y, mo, d, hh, mm, ss, int(micro * 1000000), tz)
26 | ||
27 | ||
def format_rfc3339(v):
    """Render the datetime *v* as an RFC 3339 timestamp string.

    A zero or missing UTC offset is written as 'Z'; otherwise the
    '+HH:MM'/'-HH:MM' suffix is used.  Fractional seconds appear only
    when *v* has a non-zero microsecond component.
    """
    delta = v.utcoffset()
    minutes = int(delta.total_seconds()) // 60 if delta is not None else 0

    if minutes == 0:
        tz_part = 'Z'
    else:
        sign = '+' if minutes > 0 else '-'
        minutes = abs(minutes)
        tz_part = '{0}{1:02}:{2:02}'.format(sign, minutes // 60, minutes % 60)

    fmt = '%Y-%m-%dT%H:%M:%S.%f' if v.microsecond else '%Y-%m-%dT%H:%M:%S'
    return v.strftime(fmt) + tz_part
46 | ||
47 | class _TimeZone(datetime.tzinfo): | |
48 | def __init__(self, offset): | |
49 | self._offset = offset | |
50 | ||
51 | def utcoffset(self, dt): | |
52 | return self._offset | |
53 | ||
54 | def dst(self, dt): | |
55 | return None | |
56 | ||
57 | def tzname(self, dt): | |
58 | m = self._offset.total_seconds() // 60 | |
59 | if m < 0: | |
60 | res = '-' | |
61 | m = -m | |
62 | else: | |
63 | res = '+' | |
64 | h = m // 60 | |
65 | m = m - h * 60 | |
66 | return '{}{:.02}{:.02}'.format(res, h, m) |
0 | from __future__ import unicode_literals | |
1 | import io, datetime, sys | |
2 | ||
# On Python 3, alias the Python-2-only type names so the isinstance
# checks below work on both major versions.
if sys.version_info[0] == 3:
    long = int
    unicode = str
6 | ||
def dumps(obj):
    """Serialize *obj* (a dict) to a TOML-formatted string."""
    out = io.StringIO()
    dump(out, obj)
    return out.getvalue()
11 | ||
# Characters that must be escaped in TOML basic strings, mapped to the
# letter that follows the backslash in their escape sequence.
_escapes = { '\n': 'n', '\r': 'r', '\\': '\\', '\t': 't', '\b': 'b', '\f': 'f', '"': '"' }
13 | ||
14 | def _escape_string(s): | |
15 | res = [] | |
16 | start = 0 | |
17 | def flush(): | |
18 | if start != i: | |
19 | res.append(s[start:i]) | |
20 | return i + 1 | |
21 | ||
22 | i = 0 | |
23 | while i < len(s): | |
24 | c = s[i] | |
25 | if c in '"\\\n\r\t\b\f': | |
26 | start = flush() | |
27 | res.append('\\' + _escapes[c]) | |
28 | elif ord(c) < 0x20: | |
29 | start = flush() | |
30 | res.append('\\u%04x' % ord(c)) | |
31 | i += 1 | |
32 | ||
33 | flush() | |
34 | return '"' + ''.join(res) + '"' | |
35 | ||
36 | def _escape_id(s): | |
37 | if any(not c.isalnum() and c not in '-_' for c in s): | |
38 | return _escape_string(s) | |
39 | return s | |
40 | ||
41 | def _format_list(v): | |
42 | return '[{}]'.format(', '.join(_format_value(obj) for obj in v)) | |
43 | ||
44 | def _format_value(v): | |
45 | if isinstance(v, bool): | |
46 | return 'true' if v else 'false' | |
47 | if isinstance(v, int) or isinstance(v, long): | |
48 | return unicode(v) | |
49 | if isinstance(v, float): | |
50 | return '{:.17f}'.format(v) | |
51 | elif isinstance(v, unicode) or isinstance(v, bytes): | |
52 | return _escape_string(v) | |
53 | elif isinstance(v, datetime.datetime): | |
54 | offs = v.utcoffset() | |
55 | offs = offs.total_seconds() // 60 if offs is not None else 0 | |
56 | ||
57 | if offs == 0: | |
58 | suffix = 'Z' | |
59 | else: | |
60 | if offs > 0: | |
61 | suffix = '+' | |
62 | else: | |
63 | suffix = '-' | |
64 | offs = -offs | |
65 | suffix = '{}{:.02}{:.02}'.format(suffix, offs // 60, offs % 60) | |
66 | ||
67 | if v.microsecond: | |
68 | return v.strftime('%Y-%m-%dT%H:%M:%S.%f') + suffix | |
69 | else: | |
70 | return v.strftime('%Y-%m-%dT%H:%M:%S') + suffix | |
71 | elif isinstance(v, list): | |
72 | return _format_list(v) | |
73 | else: | |
74 | raise RuntimeError('XXX') | |
75 | ||
def dump(fout, obj):
    """Write *obj* (a dict) to the file object *fout* as TOML.

    Nested dicts are emitted as ``[section]`` tables and non-empty lists
    of dicts as ``[[section]]`` table arrays; everything else is written
    inline as ``key = value``.
    """
    # Work stack of (dotted-name tuple, table dict, is-table-array).
    tables = [((), obj, False)]

    while tables:
        name, table, is_array = tables.pop()
        if name:
            section_name = '.'.join(_escape_id(c) for c in name)
            if is_array:
                fout.write('[[{}]]\n'.format(section_name))
            else:
                fout.write('[{}]\n'.format(section_name))

        for k in table:
            v = table[k]
            if isinstance(v, dict):
                # Sub-table: defer onto the stack.
                tables.append((name + (k,), v, False))
            elif isinstance(v, list) and v and all(isinstance(o, dict) for o in v):
                # Reversed so the LIFO stack yields array elements in order.
                tables.extend((name + (k,), d, True) for d in reversed(v))
            else:
                fout.write('{} = {}\n'.format(_escape_id(k), _format_value(v)))

        # Blank separator line between sections (but not after the last).
        if tables:
            fout.write('\n')
0 | from __future__ import unicode_literals | |
1 | import io, datetime, math, string, sys | |
2 | ||
3 | from .utils import format_rfc3339 | |
4 | ||
# pathlib is stdlib on Python 3.4+; on older interpreters an empty tuple
# makes the isinstance check below always false.
try:
    from pathlib import PurePath as _path_types
except ImportError:
    _path_types = ()


# On Python 3, alias the Python-2-only type names so the isinstance
# checks below work on both major versions.
if sys.version_info[0] == 3:
    long = int
    unicode = str
14 | ||
15 | ||
def dumps(obj, sort_keys=False):
    """Serialize *obj* (a dict) to a TOML-formatted string.

    When *sort_keys* is true, table keys are emitted in sorted order.
    """
    buffer = io.StringIO()
    dump(obj, buffer, sort_keys=sort_keys)
    return buffer.getvalue()
20 | ||
21 | ||
# Characters that must be escaped in TOML basic strings, mapped to the
# letter that follows the backslash in their escape sequence.
_escapes = {'\n': 'n', '\r': 'r', '\\': '\\', '\t': 't', '\b': 'b', '\f': 'f', '"': '"'}
23 | ||
24 | ||
25 | def _escape_string(s): | |
26 | res = [] | |
27 | start = 0 | |
28 | ||
29 | def flush(): | |
30 | if start != i: | |
31 | res.append(s[start:i]) | |
32 | return i + 1 | |
33 | ||
34 | i = 0 | |
35 | while i < len(s): | |
36 | c = s[i] | |
37 | if c in '"\\\n\r\t\b\f': | |
38 | start = flush() | |
39 | res.append('\\' + _escapes[c]) | |
40 | elif ord(c) < 0x20: | |
41 | start = flush() | |
42 | res.append('\\u%04x' % ord(c)) | |
43 | i += 1 | |
44 | ||
45 | flush() | |
46 | return '"' + ''.join(res) + '"' | |
47 | ||
48 | ||
49 | _key_chars = string.digits + string.ascii_letters + '-_' | |
50 | def _escape_id(s): | |
51 | if any(c not in _key_chars for c in s): | |
52 | return _escape_string(s) | |
53 | return s | |
54 | ||
55 | ||
56 | def _format_value(v): | |
57 | if isinstance(v, bool): | |
58 | return 'true' if v else 'false' | |
59 | if isinstance(v, int) or isinstance(v, long): | |
60 | return unicode(v) | |
61 | if isinstance(v, float): | |
62 | if math.isnan(v) or math.isinf(v): | |
63 | raise ValueError("{0} is not a valid TOML value".format(v)) | |
64 | else: | |
65 | return repr(v) | |
66 | elif isinstance(v, unicode) or isinstance(v, bytes): | |
67 | return _escape_string(v) | |
68 | elif isinstance(v, datetime.datetime): | |
69 | return format_rfc3339(v) | |
70 | elif isinstance(v, list): | |
71 | return '[{0}]'.format(', '.join(_format_value(obj) for obj in v)) | |
72 | elif isinstance(v, dict): | |
73 | return '{{{0}}}'.format(', '.join('{} = {}'.format(_escape_id(k), _format_value(obj)) for k, obj in v.items())) | |
74 | elif isinstance(v, _path_types): | |
75 | return _escape_string(str(v)) | |
76 | else: | |
77 | raise RuntimeError(v) | |
78 | ||
79 | ||
def dump(obj, fout, sort_keys=False):
    """Write *obj* (a dict) to the file object *fout* as TOML.

    Nested dicts become ``[section]`` tables and non-empty lists of
    dicts become ``[[section]]`` table arrays; everything else is
    written inline.  Keys keep dict order unless *sort_keys* is true.
    """
    # Work stack of (dotted-name tuple, table dict, is-table-array).
    tables = [((), obj, False)]

    while tables:
        name, table, is_array = tables.pop()
        if name:
            section_name = '.'.join(_escape_id(c) for c in name)
            if is_array:
                fout.write('[[{0}]]\n'.format(section_name))
            else:
                fout.write('[{0}]\n'.format(section_name))

        table_keys = sorted(table.keys()) if sort_keys else table.keys()
        new_tables = []
        has_kv = False
        for k in table_keys:
            v = table[k]
            if isinstance(v, dict):
                new_tables.append((name + (k,), v, False))
            elif isinstance(v, list) and v and all(isinstance(o, dict) for o in v):
                new_tables.extend((name + (k,), d, True) for d in v)
            elif v is None:
                # based on mojombo's comment: https://github.com/toml-lang/toml/issues/146#issuecomment-25019344
                fout.write(
                    '#{} = null # To use: uncomment and replace null with value\n'.format(_escape_id(k)))
                has_kv = True
            else:
                fout.write('{0} = {1}\n'.format(_escape_id(k), _format_value(v)))
                has_kv = True

        # Reversed so the LIFO stack pops sub-tables in declaration order.
        tables.extend(reversed(new_tables))

        # Blank separator line, skipped for an empty implicit root table.
        if (name or has_kv) and tables:
            fout.write('\n')
0 | Metadata-Version: 2.1 | |
1 | Name: pytoml | |
2 | Version: 0.1.21 | |
3 | Summary: A parser for TOML-0.4.0 | |
4 | Home-page: https://github.com/avakar/pytoml | |
5 | Author: Martin Vejnár | |
6 | Author-email: vejnar.martin@gmail.com | |
7 | License: MIT | |
8 | Description: [![PyPI](https://img.shields.io/pypi/v/pytoml.svg)](https://pypi.python.org/pypi/pytoml) | |
9 | [![Build Status](https://travis-ci.org/avakar/pytoml.svg?branch=master)](https://travis-ci.org/avakar/pytoml) | |
10 | ||
11 | # Deprecated | |
12 | ||
13 | The pytoml project is no longer being actively maintained. Consider using the | |
14 | [toml](https://github.com/uiri/toml) package instead. | |
15 | ||
16 | # pytoml | |
17 | ||
18 | This project aims at being a specs-conforming and strict parser and writer for [TOML][1] files. | |
19 | The library currently supports [version 0.4.0][2] of the specs and runs with Python 2.7+ and 3.5+. | |
20 | ||
21 | Install: | |
22 | ||
23 | pip install pytoml | |
24 | ||
25 | The interface is the same as for the standard `json` package. | |
26 | ||
27 | >>> import pytoml as toml | |
28 | >>> toml.loads('a = 1') | |
29 | {'a': 1} | |
30 | >>> with open('file.toml', 'rb') as fin: | |
31 | ... obj = toml.load(fin) | |
32 | >>> obj | |
33 | {'a': 1} | |
34 | ||
35 | The `loads` function accepts either a bytes object | |
36 | (that gets decoded as UTF-8 with no BOM allowed), | |
37 | or a unicode object. | |
38 | ||
39 | Use `dump` or `dumps` to serialize a dict into TOML. | |
40 | ||
41 | >>> print(toml.dumps(obj)) | |
42 | a = 1 | |
43 | ||
44 | ## tests | |
45 | ||
46 | To run the tests update the `toml-test` submodule: | |
47 | ||
48 | git submodule update --init --recursive | |
49 | ||
50 | Then run the tests: | |
51 | ||
52 | python test/test.py | |
53 | ||
54 | [1]: https://github.com/toml-lang/toml | |
55 | [2]: https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md | |
56 | ||
57 | Platform: UNKNOWN | |
58 | Classifier: Programming Language :: Python :: 2 | |
59 | Classifier: Programming Language :: Python :: 2.7 | |
60 | Classifier: Programming Language :: Python :: 3 | |
61 | Classifier: Programming Language :: Python :: 3.5 | |
62 | Classifier: Programming Language :: Python :: 3.6 | |
63 | Classifier: Programming Language :: Python :: 3.7 | |
64 | Classifier: License :: OSI Approved :: MIT License | |
65 | Classifier: Topic :: Software Development :: Libraries | |
66 | Description-Content-Type: text/markdown |
0 | LICENSE | |
1 | MANIFEST.in | |
2 | README.md | |
3 | setup.cfg | |
4 | setup.py | |
5 | pytoml/__init__.py | |
6 | pytoml/core.py | |
7 | pytoml/parser.py | |
8 | pytoml/test.py | |
9 | pytoml/utils.py | |
10 | pytoml/writer.py | |
11 | pytoml.egg-info/PKG-INFO | |
12 | pytoml.egg-info/SOURCES.txt | |
13 | pytoml.egg-info/dependency_links.txt | |
14 | pytoml.egg-info/top_level.txt | |
15 | test/test.py | |
16 | test/test_parser.py | |
17 | test/test_writer.py⏎ |
0 | pytoml |
0 | [bdist_wheel] | |
1 | universal = 1 | |
2 | ||
3 | [metadata] | |
4 | license_file = LICENSE | |
5 | ||
6 | [egg_info] | |
7 | tag_build = | |
8 | tag_date = 0 | |
9 |
0 | #!/usr/bin/env python | |
1 | # coding: utf-8 | |
2 | ||
3 | from setuptools import setup | |
4 | ||
# Minimal setuptools metadata for an early pytoml release.
setup(
    name='pytoml',
    version='0.1.2',

    description='A parser for TOML-0.4.0',
    author='Martin Vejnár',
    author_email='avakar@ratatanek.cz',
    url='https://github.com/avakar/pytoml',
    license='MIT',

    packages=['pytoml'],
)
0 | #!/usr/bin/env python | |
1 | # coding: utf-8 | |
2 | ||
3 | from setuptools import setup | |
4 | ||
# Read the long description as UTF-8 explicitly: plain open() in text
# mode decodes with the platform locale encoding, which can fail on
# non-ASCII README content.  Reading bytes and decoding works on both
# Python 2 and 3 without extra imports.
with open('README.md', 'rb') as fin:
    long_description = fin.read().decode('utf-8')
7 | ||
# Distribution metadata; the classifiers mirror the interpreter versions
# the package is tested against.
setup(
    name='pytoml',
    version='0.1.21',

    description='A parser for TOML-0.4.0',
    long_description=long_description,
    long_description_content_type='text/markdown',

    author='Martin Vejnár',
    author_email='vejnar.martin@gmail.com',
    url='https://github.com/avakar/pytoml',
    license='MIT',
    packages=['pytoml'],
    classifiers=[
        # Supported python versions
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',

        # License
        'License :: OSI Approved :: MIT License',

        # Topics
        'Topic :: Software Development :: Libraries',
    ]
)
0 | arrays-and-ints = [1, ["Arrays are not integers."]] |
0 | strings-and-ints = ["hi", 42] |
0 | with-milli = 1987-07-5T17:45:00.12Z |
0 | invalid-escape = "This string has a bad \a escape character." |
0 | # This test is a bit tricky. It should fail because the first use of | |
1 | # `[[albums.songs]]` without first declaring `albums` implies that `albums` | |
2 | # must be a table. The alternative would be quite weird. Namely, it wouldn't | |
3 | # comply with the TOML spec: "Each double-bracketed sub-table will belong to | |
4 | # the most *recently* defined table element *above* it." | |
5 | # | |
6 | # This is in contrast to the *valid* test, table-array-implicit where | |
7 | # `[[albums.songs]]` works by itself, so long as `[[albums]]` isn't declared | |
8 | # later. (Although, `[albums]` could be.) | |
9 | [[albums.songs]] | |
10 | name = "Glory Days" | |
11 | ||
12 | [[albums]] | |
13 | name = "Born in the USA" |
0 | import os, json, sys, io | |
1 | import pytoml as toml | |
2 | ||
3 | def _testbench_literal(type, text): | |
4 | _type_table = {'str': 'string', 'int': 'integer'} | |
5 | return {'type': _type_table.get(type, type), 'value': text} | |
6 | ||
7 | def _testbench_array(values): | |
8 | return {'type': 'array', 'value': values} | |
9 | ||
def _main():
    """Run every .toml file under the current directory through a
    round-trip check and a toml-test bench comparison.

    Returns 1 when any case fails, 0 otherwise; results are printed.
    """
    succeeded = []
    failed = []

    for top, dirnames, fnames in os.walk('.'):
        for fname in fnames:
            if not fname.endswith('.toml'):
                continue

            # First pass: a parse/dump/parse round trip must be stable.
            try:
                with open(os.path.join(top, fname), 'rb') as fin:
                    parsed = toml.load(fin)
            except toml.TomlError:
                # Parse failures are expected for the "invalid" fixtures;
                # they must then have no matching .json bench.
                parsed = None
            else:
                dumped = toml.dumps(parsed)
                parsed2 = toml.loads(dumped)
                if parsed != parsed2:
                    failed.append(fname)
                    continue

                # Second pass: re-parse into toml-test's tagged form.
                with open(os.path.join(top, fname), 'rb') as fin:
                    parsed = toml.load(fin, _testbench_literal, _testbench_array)

            # Load the expected toml-test bench, if one exists.
            try:
                with io.open(os.path.join(top, fname[:-5] + '.json'), 'rt', encoding='utf-8') as fin:
                    bench = json.load(fin)
            except IOError:
                bench = None

            if parsed != bench:
                failed.append(fname)
            else:
                succeeded.append(fname)

    for f in failed:
        print('failed: {}'.format(f))
    print('succeeded: {}'.format(len(succeeded)))
    return 1 if failed else 0
49 | ||
# Allow running this module directly as a test script.
if __name__ == '__main__':
    sys.exit(_main())
52 | ||
0 | import os, json, sys, io, traceback, argparse | |
1 | import pytoml as toml | |
2 | from pytoml.utils import parse_rfc3339 | |
3 | ||
def is_bench_equal(a, b):
    """Structurally compare two toml-test JSON benches.

    Tagged leaves (dicts with a 'type' key) are compared by their
    semantic value -- floats numerically, datetimes after parsing --
    rather than by raw string equality.  Raises RuntimeError when *a*
    is not valid bench data.
    """
    if isinstance(a, list):
        if not isinstance(b, list) or len(a) != len(b):
            return False
        return all(is_bench_equal(x, y) for x, y in zip(a, b))

    if not isinstance(a, dict):
        raise RuntimeError('Invalid data in the bench JSON')

    if 'type' not in a:
        # Plain table: same keys, recursively equal values.
        if not isinstance(b, dict) or len(a) != len(b):
            return False
        return all(k in b and is_bench_equal(a[k], b[k]) for k in a)

    if b.get('type') != a['type']:
        return False
    kind = a['type']
    if kind == 'float':
        return float(a['value']) == float(b['value'])
    if kind == 'datetime':
        return parse_rfc3339(a['value']) == parse_rfc3339(b['value'])
    if kind == 'array':
        return is_bench_equal(a['value'], b['value'])
    return a['value'] == b['value']
28 | ||
def _main():
    """Command-line driver: run the toml-test suite against pytoml.

    Returns 0 on success, 1 when any case fails (or none ran), and 2
    for an invalid --dir argument.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument('-d', '--dir', action='append')
    ap.add_argument('testcase', nargs='*')
    args = ap.parse_args()

    if not args.dir:
        # Default to the bundled toml-test submodule.
        args.dir = [os.path.join(os.path.split(__file__)[0], 'toml-test/tests')]

    succeeded = []
    failed = []

    for path in args.dir:
        if not os.path.isdir(path):
            print('error: not a dir: {0}'.format(path))
            return 2
        for top, dirnames, fnames in os.walk(path):
            for fname in fnames:
                if not fname.endswith('.toml'):
                    continue

                # Positional arguments act as substring filters.
                if args.testcase and not any(arg in fname for arg in args.testcase):
                    continue

                parse_error = None
                try:
                    with open(os.path.join(top, fname), 'rb') as fin:
                        parsed = toml.load(fin)
                except toml.TomlError:
                    parsed = None
                    parse_error = sys.exc_info()
                else:
                    # Round trip: dumping (sorted and unsorted) and
                    # re-loading must reproduce the parsed document.
                    dumped = toml.dumps(parsed, sort_keys=False)
                    dumped_sorted = toml.dumps(parsed, sort_keys=True)
                    parsed2 = toml.loads(dumped)
                    parsed2_sorted = toml.loads(dumped_sorted)
                    if parsed != parsed2 or parsed != parsed2_sorted:
                        failed.append((fname, parsed, [parsed2, parsed2_sorted], None))
                        continue

                    # Convert into toml-test's tagged form for comparison.
                    with open(os.path.join(top, fname), 'rb') as fin:
                        parsed = toml.load(fin)
                    parsed = toml.translate_to_test(parsed)

                try:
                    with io.open(os.path.join(top, fname[:-5] + '.json'), 'rt', encoding='utf-8') as fin:
                        bench = json.load(fin)
                except IOError:
                    # Invalid fixtures have no bench; parsed must be None too.
                    bench = None

                if (parsed is None) != (bench is None) or (parsed is not None and not is_bench_equal(parsed, bench)):
                    failed.append((fname, parsed, bench, parse_error))
                else:
                    succeeded.append(fname)

    for f, parsed, bench, e in failed:
        try:
            print('failed: {}\n{}\n{}'.format(f, json.dumps(parsed, indent=4), json.dumps(bench, indent=4)))
        except TypeError:
            # Fall back to repr when a value is not JSON-serializable.
            print('failed: {}\n{}\n{}'.format(f, parsed, bench))

        if e:
            traceback.print_exception(*e)
    print('succeeded: {0}'.format(len(succeeded)))
    return 1 if failed or not succeeded else 0
94 | ||
# Exit with the failure status only when it is non-zero.
if __name__ == '__main__':
    r = _main()
    if r:
        sys.exit(r)
0 | from __future__ import unicode_literals | |
1 | ||
2 | import collections | |
3 | import sys | |
# StringIO lives in the io module on modern interpreters; fall back to
# the old location for Python < 2.7.
if sys.version_info < (2, 7):
    from StringIO import StringIO
else:
    from io import StringIO
8 | ||
9 | import pytest | |
10 | ||
11 | import pytoml as toml | |
12 | ||
13 | ||
def test_name_of_fileobj_is_used_in_errors():
    """A file object's ``name`` attribute is reported as the error filename."""
    source = StringIO("[")
    source.name = "<source>"
    error = pytest.raises(toml.TomlError, lambda: toml.load(source))
    assert error.value.filename == "<source>"
19 | ||
20 | ||
def test_when_fileobj_has_no_name_attr_then_repr_of_fileobj_is_used_in_errors():
    """Without a ``name`` attribute, ``repr(fileobj)`` is used instead."""
    source = StringIO("[")
    error = pytest.raises(toml.TomlError, lambda: toml.load(source))
    assert error.value.filename == repr(source)
25 | ||
26 | ||
def test_object_pairs_hook():
    """The object_pairs_hook factory is used for every table level."""
    source = StringIO(u"""\
[x.a]
[x.b]
[x.c]
""")

    d = toml.load(source, object_pairs_hook=collections.defaultdict)
    assert isinstance(d, collections.defaultdict)
    assert isinstance(d['x'], collections.defaultdict)
0 | from __future__ import unicode_literals | |
1 | ||
2 | import pytest | |
3 | ||
4 | import pytoml as toml | |
5 | ||
6 | ||
@pytest.mark.parametrize("value", [
    float("NaN"),
    float("Inf"),
    -float("Inf"),
])
def test_attempting_to_write_non_number_floats_raises_error(value):
    """NaN and infinities have no TOML representation and must raise."""
    error = pytest.raises(ValueError, lambda: toml.dumps({"value": value}))
    assert str(error.value) == "{0} is not a valid TOML value".format(value)
15 | ||
16 | ||
def test_pathlib_path_objects_are_written_as_strings():
    """pathlib.Path values serialize as plain TOML strings."""
    pathlib = pytest.importorskip("pathlib")
    path_value = toml.dumps({"value": pathlib.Path("test-path")})
    assert path_value == 'value = "test-path"\n'
21 | ||
22 | ||
def test_pathlib_purepath_objects_are_written_as_strings():
    """pathlib.PurePath values serialize as plain TOML strings."""
    pathlib = pytest.importorskip("pathlib")
    path_value = toml.dumps({"value": pathlib.PurePath("test-path")})
    assert path_value == 'value = "test-path"\n'
27 | ||
28 | ||
def test_pathlib_purepath_objects_contents_are_escaped():
    """Special characters inside a path are escaped like any string."""
    pathlib = pytest.importorskip("pathlib")
    path_value = toml.dumps({"value": pathlib.PurePath('C:\\Escape\"this string"')})
    assert path_value == 'value = "C:\\\\Escape\\"this string\\""\n'
0 | { | |
1 | "thevoid": { "type": "array", "value": [ | |
2 | {"type": "array", "value": [ | |
3 | {"type": "array", "value": [ | |
4 | {"type": "array", "value": [ | |
5 | {"type": "array", "value": []} | |
6 | ]} | |
7 | ]} | |
8 | ]} | |
9 | ]} | |
10 | } |
0 | { | |
1 | "ints": { | |
2 | "type": "array", | |
3 | "value": [ | |
4 | {"type": "integer", "value": "1"}, | |
5 | {"type": "integer", "value": "2"}, | |
6 | {"type": "integer", "value": "3"} | |
7 | ] | |
8 | } | |
9 | } |
0 | { | |
1 | "mixed": { | |
2 | "type": "array", | |
3 | "value": [ | |
4 | {"type": "array", "value": [ | |
5 | {"type": "integer", "value": "1"}, | |
6 | {"type": "integer", "value": "2"} | |
7 | ]}, | |
8 | {"type": "array", "value": [ | |
9 | {"type": "string", "value": "a"}, | |
10 | {"type": "string", "value": "b"} | |
11 | ]}, | |
12 | {"type": "array", "value": [ | |
13 | {"type": "float", "value": "1.1"}, | |
14 | {"type": "float", "value": "2.1"} | |
15 | ]} | |
16 | ] | |
17 | } | |
18 | } |
0 | { | |
1 | "nest": { | |
2 | "type": "array", | |
3 | "value": [ | |
4 | {"type": "array", "value": [ | |
5 | {"type": "string", "value": "a"} | |
6 | ]}, | |
7 | {"type": "array", "value": [ | |
8 | {"type": "string", "value": "b"} | |
9 | ]} | |
10 | ] | |
11 | } | |
12 | } |
0 | { | |
1 | "ints": { | |
2 | "type": "array", | |
3 | "value": [ | |
4 | {"type": "integer", "value": "1"}, | |
5 | {"type": "integer", "value": "2"}, | |
6 | {"type": "integer", "value": "3"} | |
7 | ] | |
8 | }, | |
9 | "floats": { | |
10 | "type": "array", | |
11 | "value": [ | |
12 | {"type": "float", "value": "1.1"}, | |
13 | {"type": "float", "value": "2.1"}, | |
14 | {"type": "float", "value": "3.1"} | |
15 | ] | |
16 | }, | |
17 | "strings": { | |
18 | "type": "array", | |
19 | "value": [ | |
20 | {"type": "string", "value": "a"}, | |
21 | {"type": "string", "value": "b"}, | |
22 | {"type": "string", "value": "c"} | |
23 | ] | |
24 | }, | |
25 | "dates": { | |
26 | "type": "array", | |
27 | "value": [ | |
28 | {"type": "datetime", "value": "1987-07-05T17:45:00Z"}, | |
29 | {"type": "datetime", "value": "1979-05-27T07:32:00Z"}, | |
30 | {"type": "datetime", "value": "2006-06-01T11:00:00Z"} | |
31 | ] | |
32 | } | |
33 | } |
0 | ints = [1, 2, 3] | |
1 | floats = [1.1, 2.1, 3.1] | |
2 | strings = ["a", "b", "c"] | |
3 | dates = [ | |
4 | 1987-07-05T17:45:00Z, | |
5 | 1979-05-27T07:32:00Z, | |
6 | 2006-06-01T11:00:00Z, | |
7 | ] |
0 | { | |
1 | "group": { | |
2 | "answer": {"type": "integer", "value": "42"}, | |
3 | "more": { | |
4 | "type": "array", | |
5 | "value": [ | |
6 | {"type": "integer", "value": "42"}, | |
7 | {"type": "integer", "value": "42"} | |
8 | ] | |
9 | } | |
10 | } | |
11 | } |
0 | # Top comment. | |
1 | # Top comment. | |
2 | # Top comment. | |
3 | ||
4 | # [no-extraneous-groups-please] | |
5 | ||
6 | [group] # Comment | |
7 | answer = 42 # Comment | |
8 | # no-extraneous-keys-please = 999 | |
9 | # Inbetween comment. | |
10 | more = [ # Comment | |
11 | # What about multiple # comments? | |
12 | # Can you handle it? | |
13 | # | |
14 | # Evil. | |
15 | # Evil. | |
16 | 42, 42, # Comments within arrays are fun. | |
17 | # What about multiple # comments? | |
18 | # Can you handle it? | |
19 | # | |
20 | # Evil. | |
21 | # Evil. | |
22 | # ] Did I fool you? | |
23 | ] # Hopefully not. |
0 | { | |
1 | "best-day-ever": {"type": "datetime", "value": "1987-07-05T17:45:00Z"}, | |
2 | "numtheory": { | |
3 | "boring": {"type": "bool", "value": "false"}, | |
4 | "perfection": { | |
5 | "type": "array", | |
6 | "value": [ | |
7 | {"type": "integer", "value": "6"}, | |
8 | {"type": "integer", "value": "28"}, | |
9 | {"type": "integer", "value": "496"} | |
10 | ] | |
11 | } | |
12 | } | |
13 | } |
0 | { | |
1 | "a": { | |
2 | "better": {"type": "integer", "value": "43"}, | |
3 | "b": { | |
4 | "c": { | |
5 | "answer": {"type": "integer", "value": "42"} | |
6 | } | |
7 | } | |
8 | } | |
9 | } |
0 | { | |
1 | "a": { | |
2 | "better": {"type": "integer", "value": "43"}, | |
3 | "b": { | |
4 | "c": { | |
5 | "answer": {"type": "integer", "value": "42"} | |
6 | } | |
7 | } | |
8 | } | |
9 | } |
0 | { | |
1 | "simple": { "a": {"type": "integer", "value": "1"} }, | |
2 | "str-key": { "a": {"type": "integer", "value": "1"} }, | |
3 | "table-array": { | |
4 | "type": "array", | |
5 | "value": [ | |
6 | { "a": {"type": "integer", "value": "1"} }, | |
7 | { "b": {"type": "integer", "value": "2"} } | |
8 | ] | |
9 | } | |
10 | } |
0 | { | |
1 | "answer": {"type": "integer", "value": "42"}, | |
2 | "neganswer": {"type": "integer", "value": "-42"} | |
3 | } |
0 | { | |
1 | "longpi": {"type": "float", "value": "3.141592653589793"}, | |
2 | "neglongpi": {"type": "float", "value": "-3.141592653589793"} | |
3 | } |
0 | { | |
1 | "answer": {"type": "integer", "value": "9223372036854775807"}, | |
2 | "neganswer": {"type": "integer", "value": "-9223372036854775808"} | |
3 | } |
0 | { | |
1 | "multiline_empty_one": { | |
2 | "type": "string", | |
3 | "value": "" | |
4 | }, | |
5 | "multiline_empty_two": { | |
6 | "type": "string", | |
7 | "value": "" | |
8 | }, | |
9 | "multiline_empty_three": { | |
10 | "type": "string", | |
11 | "value": "" | |
12 | }, | |
13 | "multiline_empty_four": { | |
14 | "type": "string", | |
15 | "value": "" | |
16 | }, | |
17 | "equivalent_one": { | |
18 | "type": "string", | |
19 | "value": "The quick brown fox jumps over the lazy dog." | |
20 | }, | |
21 | "equivalent_two": { | |
22 | "type": "string", | |
23 | "value": "The quick brown fox jumps over the lazy dog." | |
24 | }, | |
25 | "equivalent_three": { | |
26 | "type": "string", | |
27 | "value": "The quick brown fox jumps over the lazy dog." | |
28 | } | |
29 | } |
0 | multiline_empty_one = """""" | |
1 | multiline_empty_two = """ | |
2 | """ | |
3 | multiline_empty_three = """\ | |
4 | """ | |
5 | multiline_empty_four = """\ | |
6 | \ | |
7 | \ | |
8 | """ | |
9 | ||
10 | equivalent_one = "The quick brown fox jumps over the lazy dog." | |
11 | equivalent_two = """ | |
12 | The quick brown \ | |
13 | ||
14 | ||
15 | fox jumps over \ | |
16 | the lazy dog.""" | |
17 | ||
18 | equivalent_three = """\ | |
19 | The quick brown \ | |
20 | fox jumps over \ | |
21 | the lazy dog.\ | |
22 | """ |
0 | { | |
1 | "oneline": { | |
2 | "type": "string", | |
3 | "value": "This string has a ' quote character." | |
4 | }, | |
5 | "firstnl": { | |
6 | "type": "string", | |
7 | "value": "This string has a ' quote character." | |
8 | }, | |
9 | "multiline": { | |
10 | "type": "string", | |
11 | "value": "This string\nhas ' a quote character\nand more than\none newline\nin it." | |
12 | } | |
13 | } |
0 | oneline = '''This string has a ' quote character.''' | |
1 | firstnl = ''' | |
2 | This string has a ' quote character.''' | |
3 | multiline = ''' | |
4 | This string | |
5 | has ' a quote character | |
6 | and more than | |
7 | one newline | |
8 | in it.''' |
0 | { | |
1 | "backspace": { | |
2 | "type": "string", | |
3 | "value": "This string has a \\b backspace character." | |
4 | }, | |
5 | "tab": { | |
6 | "type": "string", | |
7 | "value": "This string has a \\t tab character." | |
8 | }, | |
9 | "newline": { | |
10 | "type": "string", | |
11 | "value": "This string has a \\n new line character." | |
12 | }, | |
13 | "formfeed": { | |
14 | "type": "string", | |
15 | "value": "This string has a \\f form feed character." | |
16 | }, | |
17 | "carriage": { | |
18 | "type": "string", | |
19 | "value": "This string has a \\r carriage return character." | |
20 | }, | |
21 | "slash": { | |
22 | "type": "string", | |
23 | "value": "This string has a \\/ slash character." | |
24 | }, | |
25 | "backslash": { | |
26 | "type": "string", | |
27 | "value": "This string has a \\\\ backslash character." | |
28 | } | |
29 | } |
0 | backspace = 'This string has a \b backspace character.' | |
1 | tab = 'This string has a \t tab character.' | |
2 | newline = 'This string has a \n new line character.' | |
3 | formfeed = 'This string has a \f form feed character.' | |
4 | carriage = 'This string has a \r carriage return character.' | |
5 | slash = 'This string has a \/ slash character.' | |
6 | backslash = 'This string has a \\ backslash character.' |
0 | { | |
1 | "backspace": { | |
2 | "type": "string", | |
3 | "value": "This string has a \u0008 backspace character." | |
4 | }, | |
5 | "tab": { | |
6 | "type": "string", | |
7 | "value": "This string has a \u0009 tab character." | |
8 | }, | |
9 | "newline": { | |
10 | "type": "string", | |
11 | "value": "This string has a \u000A new line character." | |
12 | }, | |
13 | "formfeed": { | |
14 | "type": "string", | |
15 | "value": "This string has a \u000C form feed character." | |
16 | }, | |
17 | "carriage": { | |
18 | "type": "string", | |
19 | "value": "This string has a \u000D carriage return character." | |
20 | }, | |
21 | "quote": { | |
22 | "type": "string", | |
23 | "value": "This string has a \u0022 quote character." | |
24 | }, | |
25 | "backslash": { | |
26 | "type": "string", | |
27 | "value": "This string has a \u005C backslash character." | |
28 | }, | |
29 | "notunicode1": { | |
30 | "type": "string", | |
31 | "value": "This string does not have a unicode \\u escape." | |
32 | }, | |
33 | "notunicode2": { | |
34 | "type": "string", | |
35 | "value": "This string does not have a unicode \u005Cu escape." | |
36 | }, | |
37 | "notunicode3": { | |
38 | "type": "string", | |
39 | "value": "This string does not have a unicode \\u0075 escape." | |
40 | }, | |
41 | "notunicode4": { | |
42 | "type": "string", | |
43 | "value": "This string does not have a unicode \\\u0075 escape." | |
44 | } | |
45 | } |
0 | backspace = "This string has a \b backspace character." | |
1 | tab = "This string has a \t tab character." | |
2 | newline = "This string has a \n new line character." | |
3 | formfeed = "This string has a \f form feed character." | |
4 | carriage = "This string has a \r carriage return character." | |
5 | quote = "This string has a \" quote character." | |
6 | backslash = "This string has a \\ backslash character." | |
7 | notunicode1 = "This string does not have a unicode \\u escape." | |
8 | notunicode2 = "This string does not have a unicode \u005Cu escape." | |
9 | notunicode3 = "This string does not have a unicode \\u0075 escape." | |
10 | notunicode4 = "This string does not have a unicode \\\u0075 escape." |
0 | { | |
1 | "pound": {"type": "string", "value": "We see no # comments here."}, | |
2 | "poundcomment": { | |
3 | "type": "string", | |
4 | "value": "But there are # some comments here." | |
5 | } | |
6 | } |
0 | pound = "We see no # comments here." | |
1 | poundcomment = "But there are # some comments here." # Did I # mess you up? |
0 | { | |
1 | "people": [ | |
2 | { | |
3 | "first_name": {"type": "string", "value": "Bruce"}, | |
4 | "last_name": {"type": "string", "value": "Springsteen"} | |
5 | }, | |
6 | { | |
7 | "first_name": {"type": "string", "value": "Eric"}, | |
8 | "last_name": {"type": "string", "value": "Clapton"} | |
9 | }, | |
10 | { | |
11 | "first_name": {"type": "string", "value": "Bob"}, | |
12 | "last_name": {"type": "string", "value": "Seger"} | |
13 | } | |
14 | ] | |
15 | } |
0 | [[people]] | |
1 | first_name = "Bruce" | |
2 | last_name = "Springsteen" | |
3 | ||
4 | [[people]] | |
5 | first_name = "Eric" | |
6 | last_name = "Clapton" | |
7 | ||
8 | [[people]] | |
9 | first_name = "Bob" | |
10 | last_name = "Seger" |
0 | { | |
1 | "albums": [ | |
2 | { | |
3 | "name": {"type": "string", "value": "Born to Run"}, | |
4 | "songs": [ | |
5 | {"name": {"type": "string", "value": "Jungleland"}}, | |
6 | {"name": {"type": "string", "value": "Meeting Across the River"}} | |
7 | ] | |
8 | }, | |
9 | { | |
10 | "name": {"type": "string", "value": "Born in the USA"}, | |
11 | "songs": [ | |
12 | {"name": {"type": "string", "value": "Glory Days"}}, | |
13 | {"name": {"type": "string", "value": "Dancing in the Dark"}} | |
14 | ] | |
15 | } | |
16 | ] | |
17 | } |
0 | [[albums]] | |
1 | name = "Born to Run" | |
2 | ||
3 | [[albums.songs]] | |
4 | name = "Jungleland" | |
5 | ||
6 | [[albums.songs]] | |
7 | name = "Meeting Across the River" | |
8 | ||
9 | [[albums]] | |
10 | name = "Born in the USA" | |
11 | ||
12 | [[albums.songs]] | |
13 | name = "Glory Days" | |
14 | ||
15 | [[albums.songs]] | |
16 | name = "Dancing in the Dark" |
0 | { | |
1 | "people": [ | |
2 | { | |
3 | "first_name": {"type": "string", "value": "Bruce"}, | |
4 | "last_name": {"type": "string", "value": "Springsteen"} | |
5 | } | |
6 | ] | |
7 | } |
0 | { | |
1 | "answer4": {"type": "string", "value": "\u03B4"}, | |
2 | "answer8": {"type": "string", "value": "\u03B4"} | |
3 | } |