New upstream version 2.2.1
Anthony Fok
6 years ago
3 | 3 | - 1.4 |
4 | 4 | - 1.5 |
5 | 5 | - 1.6 |
6 | - 1.7 | |
7 | - 1.8 | |
8 | - 1.9 | |
6 | 9 | - tip |
7 | 10 | |
8 | 11 | go_import_path: gopkg.in/yaml.v2 |
0 | Copyright 2011-2016 Canonical Ltd. | |
1 | ||
2 | Licensed under the Apache License, Version 2.0 (the "License"); | |
3 | you may not use this file except in compliance with the License. | |
4 | You may obtain a copy of the License at | |
5 | ||
6 | http://www.apache.org/licenses/LICENSE-2.0 | |
7 | ||
8 | Unless required by applicable law or agreed to in writing, software | |
9 | distributed under the License is distributed on an "AS IS" BASIS, | |
10 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
11 | See the License for the specific language governing permissions and | |
12 | limitations under the License. | |
0 | Apache License | |
1 | Version 2.0, January 2004 | |
2 | http://www.apache.org/licenses/ | |
3 | ||
4 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | |
5 | ||
6 | 1. Definitions. | |
7 | ||
8 | "License" shall mean the terms and conditions for use, reproduction, | |
9 | and distribution as defined by Sections 1 through 9 of this document. | |
10 | ||
11 | "Licensor" shall mean the copyright owner or entity authorized by | |
12 | the copyright owner that is granting the License. | |
13 | ||
14 | "Legal Entity" shall mean the union of the acting entity and all | |
15 | other entities that control, are controlled by, or are under common | |
16 | control with that entity. For the purposes of this definition, | |
17 | "control" means (i) the power, direct or indirect, to cause the | |
18 | direction or management of such entity, whether by contract or | |
19 | otherwise, or (ii) ownership of fifty percent (50%) or more of the | |
20 | outstanding shares, or (iii) beneficial ownership of such entity. | |
21 | ||
22 | "You" (or "Your") shall mean an individual or Legal Entity | |
23 | exercising permissions granted by this License. | |
24 | ||
25 | "Source" form shall mean the preferred form for making modifications, | |
26 | including but not limited to software source code, documentation | |
27 | source, and configuration files. | |
28 | ||
29 | "Object" form shall mean any form resulting from mechanical | |
30 | transformation or translation of a Source form, including but | |
31 | not limited to compiled object code, generated documentation, | |
32 | and conversions to other media types. | |
33 | ||
34 | "Work" shall mean the work of authorship, whether in Source or | |
35 | Object form, made available under the License, as indicated by a | |
36 | copyright notice that is included in or attached to the work | |
37 | (an example is provided in the Appendix below). | |
38 | ||
39 | "Derivative Works" shall mean any work, whether in Source or Object | |
40 | form, that is based on (or derived from) the Work and for which the | |
41 | editorial revisions, annotations, elaborations, or other modifications | |
42 | represent, as a whole, an original work of authorship. For the purposes | |
43 | of this License, Derivative Works shall not include works that remain | |
44 | separable from, or merely link (or bind by name) to the interfaces of, | |
45 | the Work and Derivative Works thereof. | |
46 | ||
47 | "Contribution" shall mean any work of authorship, including | |
48 | the original version of the Work and any modifications or additions | |
49 | to that Work or Derivative Works thereof, that is intentionally | |
50 | submitted to Licensor for inclusion in the Work by the copyright owner | |
51 | or by an individual or Legal Entity authorized to submit on behalf of | |
52 | the copyright owner. For the purposes of this definition, "submitted" | |
53 | means any form of electronic, verbal, or written communication sent | |
54 | to the Licensor or its representatives, including but not limited to | |
55 | communication on electronic mailing lists, source code control systems, | |
56 | and issue tracking systems that are managed by, or on behalf of, the | |
57 | Licensor for the purpose of discussing and improving the Work, but | |
58 | excluding communication that is conspicuously marked or otherwise | |
59 | designated in writing by the copyright owner as "Not a Contribution." | |
60 | ||
61 | "Contributor" shall mean Licensor and any individual or Legal Entity | |
62 | on behalf of whom a Contribution has been received by Licensor and | |
63 | subsequently incorporated within the Work. | |
64 | ||
65 | 2. Grant of Copyright License. Subject to the terms and conditions of | |
66 | this License, each Contributor hereby grants to You a perpetual, | |
67 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |
68 | copyright license to reproduce, prepare Derivative Works of, | |
69 | publicly display, publicly perform, sublicense, and distribute the | |
70 | Work and such Derivative Works in Source or Object form. | |
71 | ||
72 | 3. Grant of Patent License. Subject to the terms and conditions of | |
73 | this License, each Contributor hereby grants to You a perpetual, | |
74 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |
75 | (except as stated in this section) patent license to make, have made, | |
76 | use, offer to sell, sell, import, and otherwise transfer the Work, | |
77 | where such license applies only to those patent claims licensable | |
78 | by such Contributor that are necessarily infringed by their | |
79 | Contribution(s) alone or by combination of their Contribution(s) | |
80 | with the Work to which such Contribution(s) was submitted. If You | |
81 | institute patent litigation against any entity (including a | |
82 | cross-claim or counterclaim in a lawsuit) alleging that the Work | |
83 | or a Contribution incorporated within the Work constitutes direct | |
84 | or contributory patent infringement, then any patent licenses | |
85 | granted to You under this License for that Work shall terminate | |
86 | as of the date such litigation is filed. | |
87 | ||
88 | 4. Redistribution. You may reproduce and distribute copies of the | |
89 | Work or Derivative Works thereof in any medium, with or without | |
90 | modifications, and in Source or Object form, provided that You | |
91 | meet the following conditions: | |
92 | ||
93 | (a) You must give any other recipients of the Work or | |
94 | Derivative Works a copy of this License; and | |
95 | ||
96 | (b) You must cause any modified files to carry prominent notices | |
97 | stating that You changed the files; and | |
98 | ||
99 | (c) You must retain, in the Source form of any Derivative Works | |
100 | that You distribute, all copyright, patent, trademark, and | |
101 | attribution notices from the Source form of the Work, | |
102 | excluding those notices that do not pertain to any part of | |
103 | the Derivative Works; and | |
104 | ||
105 | (d) If the Work includes a "NOTICE" text file as part of its | |
106 | distribution, then any Derivative Works that You distribute must | |
107 | include a readable copy of the attribution notices contained | |
108 | within such NOTICE file, excluding those notices that do not | |
109 | pertain to any part of the Derivative Works, in at least one | |
110 | of the following places: within a NOTICE text file distributed | |
111 | as part of the Derivative Works; within the Source form or | |
112 | documentation, if provided along with the Derivative Works; or, | |
113 | within a display generated by the Derivative Works, if and | |
114 | wherever such third-party notices normally appear. The contents | |
115 | of the NOTICE file are for informational purposes only and | |
116 | do not modify the License. You may add Your own attribution | |
117 | notices within Derivative Works that You distribute, alongside | |
118 | or as an addendum to the NOTICE text from the Work, provided | |
119 | that such additional attribution notices cannot be construed | |
120 | as modifying the License. | |
121 | ||
122 | You may add Your own copyright statement to Your modifications and | |
123 | may provide additional or different license terms and conditions | |
124 | for use, reproduction, or distribution of Your modifications, or | |
125 | for any such Derivative Works as a whole, provided Your use, | |
126 | reproduction, and distribution of the Work otherwise complies with | |
127 | the conditions stated in this License. | |
128 | ||
129 | 5. Submission of Contributions. Unless You explicitly state otherwise, | |
130 | any Contribution intentionally submitted for inclusion in the Work | |
131 | by You to the Licensor shall be under the terms and conditions of | |
132 | this License, without any additional terms or conditions. | |
133 | Notwithstanding the above, nothing herein shall supersede or modify | |
134 | the terms of any separate license agreement you may have executed | |
135 | with Licensor regarding such Contributions. | |
136 | ||
137 | 6. Trademarks. This License does not grant permission to use the trade | |
138 | names, trademarks, service marks, or product names of the Licensor, | |
139 | except as required for reasonable and customary use in describing the | |
140 | origin of the Work and reproducing the content of the NOTICE file. | |
141 | ||
142 | 7. Disclaimer of Warranty. Unless required by applicable law or | |
143 | agreed to in writing, Licensor provides the Work (and each | |
144 | Contributor provides its Contributions) on an "AS IS" BASIS, | |
145 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | |
146 | implied, including, without limitation, any warranties or conditions | |
147 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | |
148 | PARTICULAR PURPOSE. You are solely responsible for determining the | |
149 | appropriateness of using or redistributing the Work and assume any | |
150 | risks associated with Your exercise of permissions under this License. | |
151 | ||
152 | 8. Limitation of Liability. In no event and under no legal theory, | |
153 | whether in tort (including negligence), contract, or otherwise, | |
154 | unless required by applicable law (such as deliberate and grossly | |
155 | negligent acts) or agreed to in writing, shall any Contributor be | |
156 | liable to You for damages, including any direct, indirect, special, | |
157 | incidental, or consequential damages of any character arising as a | |
158 | result of this License or out of the use or inability to use the | |
159 | Work (including but not limited to damages for loss of goodwill, | |
160 | work stoppage, computer failure or malfunction, or any and all | |
161 | other commercial damages or losses), even if such Contributor | |
162 | has been advised of the possibility of such damages. | |
163 | ||
164 | 9. Accepting Warranty or Additional Liability. While redistributing | |
165 | the Work or Derivative Works thereof, You may choose to offer, | |
166 | and charge a fee for, acceptance of support, warranty, indemnity, | |
167 | or other liability obligations and/or rights consistent with this | |
168 | License. However, in accepting such obligations, You may act only | |
169 | on Your own behalf and on Your sole responsibility, not on behalf | |
170 | of any other Contributor, and only if You agree to indemnify, | |
171 | defend, and hold each Contributor harmless for any liability | |
172 | incurred by, or claims asserted against, such Contributor by reason | |
173 | of your accepting any such warranty or additional liability. | |
174 | ||
175 | END OF TERMS AND CONDITIONS | |
176 | ||
177 | APPENDIX: How to apply the Apache License to your work. | |
178 | ||
179 | To apply the Apache License to your work, attach the following | |
180 | boilerplate notice, with the fields enclosed by brackets "{}" | |
181 | replaced with your own identifying information. (Don't include | |
182 | the brackets!) The text should be enclosed in the appropriate | |
183 | comment syntax for the file format. We also recommend that a | |
184 | file or class name and description of purpose be included on the | |
185 | same "printed page" as the copyright notice for easier | |
186 | identification within third-party archives. | |
187 | ||
188 | Copyright {yyyy} {name of copyright owner} | |
189 | ||
190 | Licensed under the Apache License, Version 2.0 (the "License"); | |
191 | you may not use this file except in compliance with the License. | |
192 | You may obtain a copy of the License at | |
193 | ||
194 | http://www.apache.org/licenses/LICENSE-2.0 | |
195 | ||
196 | Unless required by applicable law or agreed to in writing, software | |
197 | distributed under the License is distributed on an "AS IS" BASIS, | |
198 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
199 | See the License for the specific language governing permissions and | |
200 | limitations under the License. |
0 | Copyright 2011-2016 Canonical Ltd. | |
1 | ||
2 | Licensed under the Apache License, Version 2.0 (the "License"); | |
3 | you may not use this file except in compliance with the License. | |
4 | You may obtain a copy of the License at | |
5 | ||
6 | http://www.apache.org/licenses/LICENSE-2.0 | |
7 | ||
8 | Unless required by applicable law or agreed to in writing, software | |
9 | distributed under the License is distributed on an "AS IS" BASIS, | |
10 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
11 | See the License for the specific language governing permissions and | |
12 | limitations under the License. |
64 | 64 | d: [3, 4] |
65 | 65 | ` |
66 | 66 | |
67 | // Note: struct fields must be public in order for unmarshal to | |
68 | // correctly populate the data. | |
67 | 69 | type T struct { |
68 | 70 | A string |
69 | 71 | B struct { |
1 | 1 | |
2 | 2 | import ( |
3 | 3 | "io" |
4 | "os" | |
5 | 4 | ) |
6 | 5 | |
7 | 6 | func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { |
47 | 46 | return n, nil |
48 | 47 | } |
49 | 48 | |
50 | // File read handler. | |
51 | func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { | |
52 | return parser.input_file.Read(buffer) | |
49 | // Reader read handler. | |
50 | func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { | |
51 | return parser.input_reader.Read(buffer) | |
53 | 52 | } |
54 | 53 | |
55 | 54 | // Set a string input. |
63 | 62 | } |
64 | 63 | |
65 | 64 | // Set a file input. |
66 | func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) { | |
65 | func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { | |
67 | 66 | if parser.read_handler != nil { |
68 | 67 | panic("must set the input source only once") |
69 | 68 | } |
70 | parser.read_handler = yaml_file_read_handler | |
71 | parser.input_file = file | |
69 | parser.read_handler = yaml_reader_read_handler | |
70 | parser.input_reader = r | |
72 | 71 | } |
73 | 72 | |
74 | 73 | // Set the source encoding. |
80 | 79 | } |
81 | 80 | |
82 | 81 | // Create a new emitter object. |
83 | func yaml_emitter_initialize(emitter *yaml_emitter_t) bool { | |
82 | func yaml_emitter_initialize(emitter *yaml_emitter_t) { | |
84 | 83 | *emitter = yaml_emitter_t{ |
85 | 84 | buffer: make([]byte, output_buffer_size), |
86 | 85 | raw_buffer: make([]byte, 0, output_raw_buffer_size), |
87 | 86 | states: make([]yaml_emitter_state_t, 0, initial_stack_size), |
88 | 87 | events: make([]yaml_event_t, 0, initial_queue_size), |
89 | 88 | } |
90 | return true | |
91 | 89 | } |
92 | 90 | |
93 | 91 | // Destroy an emitter object. |
101 | 99 | return nil |
102 | 100 | } |
103 | 101 | |
104 | // File write handler. | |
105 | func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error { | |
106 | _, err := emitter.output_file.Write(buffer) | |
102 | // yaml_writer_write_handler uses emitter.output_writer to write the | |
103 | // emitted text. | |
104 | func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { | |
105 | _, err := emitter.output_writer.Write(buffer) | |
107 | 106 | return err |
108 | 107 | } |
109 | 108 | |
117 | 116 | } |
118 | 117 | |
119 | 118 | // Set a file output. |
120 | func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) { | |
119 | func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { | |
121 | 120 | if emitter.write_handler != nil { |
122 | 121 | panic("must set the output target only once") |
123 | 122 | } |
124 | emitter.write_handler = yaml_file_write_handler | |
125 | emitter.output_file = file | |
123 | emitter.write_handler = yaml_writer_write_handler | |
124 | emitter.output_writer = w | |
126 | 125 | } |
127 | 126 | |
128 | 127 | // Set the output encoding. |
251 | 250 | // |
252 | 251 | |
253 | 252 | // Create STREAM-START. |
254 | func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool { | |
253 | func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { | |
255 | 254 | *event = yaml_event_t{ |
256 | 255 | typ: yaml_STREAM_START_EVENT, |
257 | 256 | encoding: encoding, |
258 | 257 | } |
259 | return true | |
260 | 258 | } |
261 | 259 | |
262 | 260 | // Create STREAM-END. |
263 | func yaml_stream_end_event_initialize(event *yaml_event_t) bool { | |
261 | func yaml_stream_end_event_initialize(event *yaml_event_t) { | |
264 | 262 | *event = yaml_event_t{ |
265 | 263 | typ: yaml_STREAM_END_EVENT, |
266 | 264 | } |
267 | return true | |
268 | 265 | } |
269 | 266 | |
270 | 267 | // Create DOCUMENT-START. |
271 | func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t, | |
272 | tag_directives []yaml_tag_directive_t, implicit bool) bool { | |
268 | func yaml_document_start_event_initialize( | |
269 | event *yaml_event_t, | |
270 | version_directive *yaml_version_directive_t, | |
271 | tag_directives []yaml_tag_directive_t, | |
272 | implicit bool, | |
273 | ) { | |
273 | 274 | *event = yaml_event_t{ |
274 | 275 | typ: yaml_DOCUMENT_START_EVENT, |
275 | 276 | version_directive: version_directive, |
276 | 277 | tag_directives: tag_directives, |
277 | 278 | implicit: implicit, |
278 | 279 | } |
279 | return true | |
280 | 280 | } |
281 | 281 | |
282 | 282 | // Create DOCUMENT-END. |
283 | func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool { | |
283 | func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { | |
284 | 284 | *event = yaml_event_t{ |
285 | 285 | typ: yaml_DOCUMENT_END_EVENT, |
286 | 286 | implicit: implicit, |
287 | 287 | } |
288 | return true | |
289 | 288 | } |
290 | 289 | |
291 | 290 | ///* |
347 | 346 | } |
348 | 347 | |
349 | 348 | // Create MAPPING-START. |
350 | func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool { | |
349 | func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { | |
351 | 350 | *event = yaml_event_t{ |
352 | 351 | typ: yaml_MAPPING_START_EVENT, |
353 | 352 | anchor: anchor, |
355 | 354 | implicit: implicit, |
356 | 355 | style: yaml_style_t(style), |
357 | 356 | } |
358 | return true | |
359 | 357 | } |
360 | 358 | |
361 | 359 | // Create MAPPING-END. |
362 | func yaml_mapping_end_event_initialize(event *yaml_event_t) bool { | |
360 | func yaml_mapping_end_event_initialize(event *yaml_event_t) { | |
363 | 361 | *event = yaml_event_t{ |
364 | 362 | typ: yaml_MAPPING_END_EVENT, |
365 | 363 | } |
366 | return true | |
367 | 364 | } |
368 | 365 | |
369 | 366 | // Destroy an event object. |
470 | 467 | // } context |
471 | 468 | // tag_directive *yaml_tag_directive_t |
472 | 469 | // |
473 | // context.error = YAML_NO_ERROR // Eliminate a compliler warning. | |
470 | // context.error = YAML_NO_ERROR // Eliminate a compiler warning. | |
474 | 471 | // |
475 | 472 | // assert(document) // Non-NULL document object is expected. |
476 | 473 | // |
3 | 3 | "encoding" |
4 | 4 | "encoding/base64" |
5 | 5 | "fmt" |
6 | "io" | |
6 | 7 | "math" |
7 | 8 | "reflect" |
8 | 9 | "strconv" |
21 | 22 | kind int |
22 | 23 | line, column int |
23 | 24 | tag string |
24 | value string | |
25 | implicit bool | |
26 | children []*node | |
27 | anchors map[string]*node | |
25 | // For an alias node, alias holds the resolved alias. | |
26 | alias *node | |
27 | value string | |
28 | implicit bool | |
29 | children []*node | |
30 | anchors map[string]*node | |
28 | 31 | } |
29 | 32 | |
30 | 33 | // ---------------------------------------------------------------------------- |
31 | 34 | // Parser, produces a node tree out of a libyaml event stream. |
32 | 35 | |
33 | 36 | type parser struct { |
34 | parser yaml_parser_t | |
35 | event yaml_event_t | |
36 | doc *node | |
37 | parser yaml_parser_t | |
38 | event yaml_event_t | |
39 | doc *node | |
40 | doneInit bool | |
37 | 41 | } |
38 | 42 | |
39 | 43 | func newParser(b []byte) *parser { |
41 | 45 | if !yaml_parser_initialize(&p.parser) { |
42 | 46 | panic("failed to initialize YAML emitter") |
43 | 47 | } |
44 | ||
45 | 48 | if len(b) == 0 { |
46 | 49 | b = []byte{'\n'} |
47 | 50 | } |
48 | ||
49 | 51 | yaml_parser_set_input_string(&p.parser, b) |
50 | ||
51 | p.skip() | |
52 | if p.event.typ != yaml_STREAM_START_EVENT { | |
53 | panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ))) | |
54 | } | |
55 | p.skip() | |
56 | 52 | return &p |
53 | } | |
54 | ||
55 | func newParserFromReader(r io.Reader) *parser { | |
56 | p := parser{} | |
57 | if !yaml_parser_initialize(&p.parser) { | |
58 | panic("failed to initialize YAML emitter") | |
59 | } | |
60 | yaml_parser_set_input_reader(&p.parser, r) | |
61 | return &p | |
62 | } | |
63 | ||
64 | func (p *parser) init() { | |
65 | if p.doneInit { | |
66 | return | |
67 | } | |
68 | p.expect(yaml_STREAM_START_EVENT) | |
69 | p.doneInit = true | |
57 | 70 | } |
58 | 71 | |
59 | 72 | func (p *parser) destroy() { |
63 | 76 | yaml_parser_delete(&p.parser) |
64 | 77 | } |
65 | 78 | |
66 | func (p *parser) skip() { | |
79 | // expect consumes an event from the event stream and | |
80 | // checks that it's of the expected type. | |
81 | func (p *parser) expect(e yaml_event_type_t) { | |
82 | if p.event.typ == yaml_NO_EVENT { | |
83 | if !yaml_parser_parse(&p.parser, &p.event) { | |
84 | p.fail() | |
85 | } | |
86 | } | |
87 | if p.event.typ == yaml_STREAM_END_EVENT { | |
88 | failf("attempted to go past the end of stream; corrupted value?") | |
89 | } | |
90 | if p.event.typ != e { | |
91 | p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) | |
92 | p.fail() | |
93 | } | |
94 | yaml_event_delete(&p.event) | |
95 | p.event.typ = yaml_NO_EVENT | |
96 | } | |
97 | ||
98 | // peek peeks at the next event in the event stream, | |
99 | // puts the results into p.event and returns the event type. | |
100 | func (p *parser) peek() yaml_event_type_t { | |
67 | 101 | if p.event.typ != yaml_NO_EVENT { |
68 | if p.event.typ == yaml_STREAM_END_EVENT { | |
69 | failf("attempted to go past the end of stream; corrupted value?") | |
70 | } | |
71 | yaml_event_delete(&p.event) | |
102 | return p.event.typ | |
72 | 103 | } |
73 | 104 | if !yaml_parser_parse(&p.parser, &p.event) { |
74 | 105 | p.fail() |
75 | 106 | } |
107 | return p.event.typ | |
76 | 108 | } |
77 | 109 | |
78 | 110 | func (p *parser) fail() { |
80 | 112 | var line int |
81 | 113 | if p.parser.problem_mark.line != 0 { |
82 | 114 | line = p.parser.problem_mark.line |
115 | // Scanner errors don't iterate line before returning error | |
116 | if p.parser.error == yaml_SCANNER_ERROR { | |
117 | line++ | |
118 | } | |
83 | 119 | } else if p.parser.context_mark.line != 0 { |
84 | 120 | line = p.parser.context_mark.line |
85 | 121 | } |
102 | 138 | } |
103 | 139 | |
104 | 140 | func (p *parser) parse() *node { |
105 | switch p.event.typ { | |
141 | p.init() | |
142 | switch p.peek() { | |
106 | 143 | case yaml_SCALAR_EVENT: |
107 | 144 | return p.scalar() |
108 | 145 | case yaml_ALIAS_EVENT: |
117 | 154 | // Happens when attempting to decode an empty buffer. |
118 | 155 | return nil |
119 | 156 | default: |
120 | panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ))) | |
157 | panic("attempted to parse unknown event: " + p.event.typ.String()) | |
121 | 158 | } |
122 | 159 | } |
123 | 160 | |
133 | 170 | n := p.node(documentNode) |
134 | 171 | n.anchors = make(map[string]*node) |
135 | 172 | p.doc = n |
136 | p.skip() | |
173 | p.expect(yaml_DOCUMENT_START_EVENT) | |
137 | 174 | n.children = append(n.children, p.parse()) |
138 | if p.event.typ != yaml_DOCUMENT_END_EVENT { | |
139 | panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ))) | |
140 | } | |
141 | p.skip() | |
175 | p.expect(yaml_DOCUMENT_END_EVENT) | |
142 | 176 | return n |
143 | 177 | } |
144 | 178 | |
145 | 179 | func (p *parser) alias() *node { |
146 | 180 | n := p.node(aliasNode) |
147 | 181 | n.value = string(p.event.anchor) |
148 | p.skip() | |
182 | n.alias = p.doc.anchors[n.value] | |
183 | if n.alias == nil { | |
184 | failf("unknown anchor '%s' referenced", n.value) | |
185 | } | |
186 | p.expect(yaml_ALIAS_EVENT) | |
149 | 187 | return n |
150 | 188 | } |
151 | 189 | |
155 | 193 | n.tag = string(p.event.tag) |
156 | 194 | n.implicit = p.event.implicit |
157 | 195 | p.anchor(n, p.event.anchor) |
158 | p.skip() | |
196 | p.expect(yaml_SCALAR_EVENT) | |
159 | 197 | return n |
160 | 198 | } |
161 | 199 | |
162 | 200 | func (p *parser) sequence() *node { |
163 | 201 | n := p.node(sequenceNode) |
164 | 202 | p.anchor(n, p.event.anchor) |
165 | p.skip() | |
166 | for p.event.typ != yaml_SEQUENCE_END_EVENT { | |
203 | p.expect(yaml_SEQUENCE_START_EVENT) | |
204 | for p.peek() != yaml_SEQUENCE_END_EVENT { | |
167 | 205 | n.children = append(n.children, p.parse()) |
168 | 206 | } |
169 | p.skip() | |
207 | p.expect(yaml_SEQUENCE_END_EVENT) | |
170 | 208 | return n |
171 | 209 | } |
172 | 210 | |
173 | 211 | func (p *parser) mapping() *node { |
174 | 212 | n := p.node(mappingNode) |
175 | 213 | p.anchor(n, p.event.anchor) |
176 | p.skip() | |
177 | for p.event.typ != yaml_MAPPING_END_EVENT { | |
214 | p.expect(yaml_MAPPING_START_EVENT) | |
215 | for p.peek() != yaml_MAPPING_END_EVENT { | |
178 | 216 | n.children = append(n.children, p.parse(), p.parse()) |
179 | 217 | } |
180 | p.skip() | |
218 | p.expect(yaml_MAPPING_END_EVENT) | |
181 | 219 | return n |
182 | 220 | } |
183 | 221 | |
186 | 224 | |
187 | 225 | type decoder struct { |
188 | 226 | doc *node |
189 | aliases map[string]bool | |
227 | aliases map[*node]bool | |
190 | 228 | mapType reflect.Type |
191 | 229 | terrors []string |
230 | strict bool | |
192 | 231 | } |
193 | 232 | |
194 | 233 | var ( |
196 | 235 | durationType = reflect.TypeOf(time.Duration(0)) |
197 | 236 | defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) |
198 | 237 | ifaceType = defaultMapType.Elem() |
238 | timeType = reflect.TypeOf(time.Time{}) | |
239 | ptrTimeType = reflect.TypeOf(&time.Time{}) | |
199 | 240 | ) |
200 | 241 | |
201 | func newDecoder() *decoder { | |
202 | d := &decoder{mapType: defaultMapType} | |
203 | d.aliases = make(map[string]bool) | |
242 | func newDecoder(strict bool) *decoder { | |
243 | d := &decoder{mapType: defaultMapType, strict: strict} | |
244 | d.aliases = make(map[*node]bool) | |
204 | 245 | return d |
205 | 246 | } |
206 | 247 | |
249 | 290 | // |
250 | 291 | // If n holds a null value, prepare returns before doing anything. |
251 | 292 | func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { |
252 | if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "" && n.implicit) { | |
293 | if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) { | |
253 | 294 | return out, false, false |
254 | 295 | } |
255 | 296 | again := true |
306 | 347 | } |
307 | 348 | |
308 | 349 | func (d *decoder) alias(n *node, out reflect.Value) (good bool) { |
309 | an, ok := d.doc.anchors[n.value] | |
310 | if !ok { | |
311 | failf("unknown anchor '%s' referenced", n.value) | |
312 | } | |
313 | if d.aliases[n.value] { | |
350 | if d.aliases[n] { | |
351 | // TODO this could actually be allowed in some circumstances. | |
314 | 352 | failf("anchor '%s' value contains itself", n.value) |
315 | 353 | } |
316 | d.aliases[n.value] = true | |
317 | good = d.unmarshal(an, out) | |
318 | delete(d.aliases, n.value) | |
354 | d.aliases[n] = true | |
355 | good = d.unmarshal(n.alias, out) | |
356 | delete(d.aliases, n) | |
319 | 357 | return good |
320 | 358 | } |
321 | 359 | |
327 | 365 | } |
328 | 366 | } |
329 | 367 | |
330 | func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { | |
368 | func (d *decoder) scalar(n *node, out reflect.Value) bool { | |
331 | 369 | var tag string |
332 | 370 | var resolved interface{} |
333 | 371 | if n.tag == "" && !n.implicit { |
351 | 389 | } |
352 | 390 | return true |
353 | 391 | } |
354 | if s, ok := resolved.(string); ok && out.CanAddr() { | |
355 | if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok { | |
356 | err := u.UnmarshalText([]byte(s)) | |
392 | if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { | |
393 | // We've resolved to exactly the type we want, so use that. | |
394 | out.Set(resolvedv) | |
395 | return true | |
396 | } | |
397 | // Perhaps we can use the value as a TextUnmarshaler to | |
398 | // set its value. | |
399 | if out.CanAddr() { | |
400 | u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) | |
401 | if ok { | |
402 | var text []byte | |
403 | if tag == yaml_BINARY_TAG { | |
404 | text = []byte(resolved.(string)) | |
405 | } else { | |
406 | // We let any value be unmarshaled into TextUnmarshaler. | |
407 | // That might be more lax than we'd like, but the | |
408 | // TextUnmarshaler itself should bowl out any dubious values. | |
409 | text = []byte(n.value) | |
410 | } | |
411 | err := u.UnmarshalText(text) | |
357 | 412 | if err != nil { |
358 | 413 | fail(err) |
359 | 414 | } |
364 | 419 | case reflect.String: |
365 | 420 | if tag == yaml_BINARY_TAG { |
366 | 421 | out.SetString(resolved.(string)) |
367 | good = true | |
368 | } else if resolved != nil { | |
422 | return true | |
423 | } | |
424 | if resolved != nil { | |
369 | 425 | out.SetString(n.value) |
370 | good = true | |
426 | return true | |
371 | 427 | } |
372 | 428 | case reflect.Interface: |
373 | 429 | if resolved == nil { |
374 | 430 | out.Set(reflect.Zero(out.Type())) |
431 | } else if tag == yaml_TIMESTAMP_TAG { | |
432 | // It looks like a timestamp but for backward compatibility | |
433 | // reasons we set it as a string, so that code that unmarshals | |
434 | // timestamp-like values into interface{} will continue to | |
435 | // see a string and not a time.Time. | |
436 | // TODO(v3) Drop this. | |
437 | out.Set(reflect.ValueOf(n.value)) | |
375 | 438 | } else { |
376 | 439 | out.Set(reflect.ValueOf(resolved)) |
377 | 440 | } |
378 | good = true | |
441 | return true | |
379 | 442 | case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: |
380 | 443 | switch resolved := resolved.(type) { |
381 | 444 | case int: |
382 | 445 | if !out.OverflowInt(int64(resolved)) { |
383 | 446 | out.SetInt(int64(resolved)) |
384 | good = true | |
447 | return true | |
385 | 448 | } |
386 | 449 | case int64: |
387 | 450 | if !out.OverflowInt(resolved) { |
388 | 451 | out.SetInt(resolved) |
389 | good = true | |
452 | return true | |
390 | 453 | } |
391 | 454 | case uint64: |
392 | 455 | if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { |
393 | 456 | out.SetInt(int64(resolved)) |
394 | good = true | |
457 | return true | |
395 | 458 | } |
396 | 459 | case float64: |
397 | 460 | if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { |
398 | 461 | out.SetInt(int64(resolved)) |
399 | good = true | |
462 | return true | |
400 | 463 | } |
401 | 464 | case string: |
402 | 465 | if out.Type() == durationType { |
403 | 466 | d, err := time.ParseDuration(resolved) |
404 | 467 | if err == nil { |
405 | 468 | out.SetInt(int64(d)) |
406 | good = true | |
469 | return true | |
407 | 470 | } |
408 | 471 | } |
409 | 472 | } |
412 | 475 | case int: |
413 | 476 | if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { |
414 | 477 | out.SetUint(uint64(resolved)) |
415 | good = true | |
478 | return true | |
416 | 479 | } |
417 | 480 | case int64: |
418 | 481 | if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { |
419 | 482 | out.SetUint(uint64(resolved)) |
420 | good = true | |
483 | return true | |
421 | 484 | } |
422 | 485 | case uint64: |
423 | 486 | if !out.OverflowUint(uint64(resolved)) { |
424 | 487 | out.SetUint(uint64(resolved)) |
425 | good = true | |
488 | return true | |
426 | 489 | } |
427 | 490 | case float64: |
428 | 491 | if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { |
429 | 492 | out.SetUint(uint64(resolved)) |
430 | good = true | |
493 | return true | |
431 | 494 | } |
432 | 495 | } |
433 | 496 | case reflect.Bool: |
434 | 497 | switch resolved := resolved.(type) { |
435 | 498 | case bool: |
436 | 499 | out.SetBool(resolved) |
437 | good = true | |
500 | return true | |
438 | 501 | } |
439 | 502 | case reflect.Float32, reflect.Float64: |
440 | 503 | switch resolved := resolved.(type) { |
441 | 504 | case int: |
442 | 505 | out.SetFloat(float64(resolved)) |
443 | good = true | |
506 | return true | |
444 | 507 | case int64: |
445 | 508 | out.SetFloat(float64(resolved)) |
446 | good = true | |
509 | return true | |
447 | 510 | case uint64: |
448 | 511 | out.SetFloat(float64(resolved)) |
449 | good = true | |
512 | return true | |
450 | 513 | case float64: |
451 | 514 | out.SetFloat(resolved) |
452 | good = true | |
515 | return true | |
516 | } | |
517 | case reflect.Struct: | |
518 | if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { | |
519 | out.Set(resolvedv) | |
520 | return true | |
453 | 521 | } |
454 | 522 | case reflect.Ptr: |
455 | 523 | if out.Type().Elem() == reflect.TypeOf(resolved) { |
457 | 525 | elem := reflect.New(out.Type().Elem()) |
458 | 526 | elem.Elem().Set(reflect.ValueOf(resolved)) |
459 | 527 | out.Set(elem) |
460 | good = true | |
461 | } | |
462 | } | |
463 | if !good { | |
464 | d.terror(n, tag, out) | |
465 | } | |
466 | return good | |
528 | return true | |
529 | } | |
530 | } | |
531 | d.terror(n, tag, out) | |
532 | return false | |
467 | 533 | } |
468 | 534 | |
469 | 535 | func settableValueOf(i interface{}) reflect.Value { |
480 | 546 | switch out.Kind() { |
481 | 547 | case reflect.Slice: |
482 | 548 | out.Set(reflect.MakeSlice(out.Type(), l, l)) |
549 | case reflect.Array: | |
550 | if l != out.Len() { | |
551 | failf("invalid array: want %d elements but got %d", out.Len(), l) | |
552 | } | |
483 | 553 | case reflect.Interface: |
484 | 554 | // No type hints. Will have to use a generic sequence. |
485 | 555 | iface = out |
498 | 568 | j++ |
499 | 569 | } |
500 | 570 | } |
501 | out.Set(out.Slice(0, j)) | |
571 | if out.Kind() != reflect.Array { | |
572 | out.Set(out.Slice(0, j)) | |
573 | } | |
502 | 574 | if iface.IsValid() { |
503 | 575 | iface.Set(out) |
504 | 576 | } |
559 | 631 | } |
560 | 632 | e := reflect.New(et).Elem() |
561 | 633 | if d.unmarshal(n.children[i+1], e) { |
562 | out.SetMapIndex(k, e) | |
634 | d.setMapIndex(n.children[i+1], out, k, e) | |
563 | 635 | } |
564 | 636 | } |
565 | 637 | } |
566 | 638 | d.mapType = mapType |
567 | 639 | return true |
640 | } | |
641 | ||
642 | func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) { | |
643 | if d.strict && out.MapIndex(k) != zeroValue { | |
644 | d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface())) | |
645 | return | |
646 | } | |
647 | out.SetMapIndex(k, v) | |
568 | 648 | } |
569 | 649 | |
570 | 650 | func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { |
614 | 694 | elemType = inlineMap.Type().Elem() |
615 | 695 | } |
616 | 696 | |
697 | var doneFields []bool | |
698 | if d.strict { | |
699 | doneFields = make([]bool, len(sinfo.FieldsList)) | |
700 | } | |
617 | 701 | for i := 0; i < l; i += 2 { |
618 | 702 | ni := n.children[i] |
619 | 703 | if isMerge(ni) { |
624 | 708 | continue |
625 | 709 | } |
626 | 710 | if info, ok := sinfo.FieldsMap[name.String()]; ok { |
711 | if d.strict { | |
712 | if doneFields[info.Id] { | |
713 | d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type())) | |
714 | continue | |
715 | } | |
716 | doneFields[info.Id] = true | |
717 | } | |
627 | 718 | var field reflect.Value |
628 | 719 | if info.Inline == nil { |
629 | 720 | field = out.Field(info.Num) |
637 | 728 | } |
638 | 729 | value := reflect.New(elemType).Elem() |
639 | 730 | d.unmarshal(n.children[i+1], value) |
640 | inlineMap.SetMapIndex(name, value) | |
731 | d.setMapIndex(n.children[i+1], inlineMap, name, value) | |
732 | } else if d.strict { | |
733 | d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type())) | |
641 | 734 | } |
642 | 735 | } |
643 | 736 | return true |
1 | 1 | |
2 | 2 | import ( |
3 | 3 | "errors" |
4 | . "gopkg.in/check.v1" | |
5 | "gopkg.in/yaml.v2" | |
4 | "io" | |
6 | 5 | "math" |
7 | "net" | |
8 | 6 | "reflect" |
9 | 7 | "strings" |
10 | 8 | "time" |
9 | ||
10 | . "gopkg.in/check.v1" | |
11 | "gopkg.in/yaml.v2" | |
11 | 12 | ) |
12 | 13 | |
13 | 14 | var unmarshalIntTest = 123 |
18 | 19 | }{ |
19 | 20 | { |
20 | 21 | "", |
21 | &struct{}{}, | |
22 | }, { | |
22 | (*struct{})(nil), | |
23 | }, | |
24 | { | |
23 | 25 | "{}", &struct{}{}, |
24 | 26 | }, { |
25 | 27 | "v: hi", |
127 | 129 | "bin: -0b101010", |
128 | 130 | map[string]interface{}{"bin": -42}, |
129 | 131 | }, { |
132 | "bin: -0b1000000000000000000000000000000000000000000000000000000000000000", | |
133 | map[string]interface{}{"bin": -9223372036854775808}, | |
134 | }, { | |
130 | 135 | "decimal: +685_230", |
131 | 136 | map[string]int{"decimal": 685230}, |
132 | 137 | }, |
238 | 243 | }, { |
239 | 244 | "a: [1, 2]", |
240 | 245 | &struct{ A []int }{[]int{1, 2}}, |
246 | }, { | |
247 | "a: [1, 2]", | |
248 | &struct{ A [2]int }{[2]int{1, 2}}, | |
241 | 249 | }, { |
242 | 250 | "a: 1", |
243 | 251 | &struct{ B int }{0}, |
397 | 405 | "v: !!float '1.1'", |
398 | 406 | map[string]interface{}{"v": 1.1}, |
399 | 407 | }, { |
408 | "v: !!float 0", | |
409 | map[string]interface{}{"v": float64(0)}, | |
410 | }, { | |
411 | "v: !!float -1", | |
412 | map[string]interface{}{"v": float64(-1)}, | |
413 | }, { | |
400 | 414 | "v: !!null ''", |
401 | 415 | map[string]interface{}{"v": nil}, |
402 | 416 | }, { |
403 | 417 | "%TAG !y! tag:yaml.org,2002:\n---\nv: !y!int '1'", |
404 | 418 | map[string]interface{}{"v": 1}, |
419 | }, | |
420 | ||
421 | // Non-specific tag (Issue #75) | |
422 | { | |
423 | "v: ! test", | |
424 | map[string]interface{}{"v": "test"}, | |
405 | 425 | }, |
406 | 426 | |
407 | 427 | // Anchors and aliases. |
418 | 438 | }, { |
419 | 439 | "a: &a [1, 2]\nb: *a", |
420 | 440 | &struct{ B []int }{[]int{1, 2}}, |
421 | }, { | |
422 | "b: *a\na: &a {c: 1}", | |
423 | &struct { | |
424 | A, B struct { | |
425 | C int | |
426 | } | |
427 | }{struct{ C int }{1}, struct{ C int }{1}}, | |
428 | 441 | }, |
429 | 442 | |
430 | 443 | // Bug #1133337 |
433 | 446 | map[string]*string{"foo": new(string)}, |
434 | 447 | }, { |
435 | 448 | "foo: null", |
449 | map[string]*string{"foo": nil}, | |
450 | }, { | |
451 | "foo: null", | |
436 | 452 | map[string]string{"foo": ""}, |
437 | 453 | }, { |
438 | 454 | "foo: null", |
455 | map[string]interface{}{"foo": nil}, | |
456 | }, | |
457 | ||
458 | // Support for ~ | |
459 | { | |
460 | "foo: ~", | |
461 | map[string]*string{"foo": nil}, | |
462 | }, { | |
463 | "foo: ~", | |
464 | map[string]string{"foo": ""}, | |
465 | }, { | |
466 | "foo: ~", | |
439 | 467 | map[string]interface{}{"foo": nil}, |
440 | 468 | }, |
441 | 469 | |
495 | 523 | map[string]interface{}{"a": "50cent_of_dollar"}, |
496 | 524 | }, |
497 | 525 | |
526 | // issue #295 (allow scalars with colons in flow mappings and sequences) | |
527 | { | |
528 | "a: {b: https://github.com/go-yaml/yaml}", | |
529 | map[string]interface{}{"a": map[interface{}]interface{}{ | |
530 | "b": "https://github.com/go-yaml/yaml", | |
531 | }}, | |
532 | }, | |
533 | { | |
534 | "a: [https://github.com/go-yaml/yaml]", | |
535 | map[string]interface{}{"a": []interface{}{"https://github.com/go-yaml/yaml"}}, | |
536 | }, | |
537 | ||
498 | 538 | // Duration |
499 | 539 | { |
500 | 540 | "a: 3s", |
546 | 586 | // Support encoding.TextUnmarshaler. |
547 | 587 | { |
548 | 588 | "a: 1.2.3.4\n", |
549 | map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)}, | |
589 | map[string]textUnmarshaler{"a": textUnmarshaler{S: "1.2.3.4"}}, | |
550 | 590 | }, |
551 | 591 | { |
552 | 592 | "a: 2015-02-24T18:19:39Z\n", |
553 | map[string]time.Time{"a": time.Unix(1424801979, 0).In(time.UTC)}, | |
593 | map[string]textUnmarshaler{"a": textUnmarshaler{"2015-02-24T18:19:39Z"}}, | |
594 | }, | |
595 | ||
596 | // Timestamps | |
597 | { | |
598 | // Date only. | |
599 | "a: 2015-01-01\n", | |
600 | map[string]time.Time{"a": time.Date(2015, 1, 1, 0, 0, 0, 0, time.UTC)}, | |
601 | }, | |
602 | { | |
603 | // RFC3339 | |
604 | "a: 2015-02-24T18:19:39.12Z\n", | |
605 | map[string]time.Time{"a": time.Date(2015, 2, 24, 18, 19, 39, .12e9, time.UTC)}, | |
606 | }, | |
607 | { | |
608 | // RFC3339 with short dates. | |
609 | "a: 2015-2-3T3:4:5Z", | |
610 | map[string]time.Time{"a": time.Date(2015, 2, 3, 3, 4, 5, 0, time.UTC)}, | |
611 | }, | |
612 | { | |
613 | // ISO8601 lower case t | |
614 | "a: 2015-02-24t18:19:39Z\n", | |
615 | map[string]time.Time{"a": time.Date(2015, 2, 24, 18, 19, 39, 0, time.UTC)}, | |
616 | }, | |
617 | { | |
618 | // space separate, no time zone | |
619 | "a: 2015-02-24 18:19:39\n", | |
620 | map[string]time.Time{"a": time.Date(2015, 2, 24, 18, 19, 39, 0, time.UTC)}, | |
621 | }, | |
622 | // Some cases not currently handled. Uncomment these when | |
623 | // the code is fixed. | |
624 | // { | |
625 | // // space separated with time zone | |
626 | // "a: 2001-12-14 21:59:43.10 -5", | |
627 | // map[string]interface{}{"a": time.Date(2001, 12, 14, 21, 59, 43, .1e9, time.UTC)}, | |
628 | // }, | |
629 | // { | |
630 | // // arbitrary whitespace between fields | |
631 | // "a: 2001-12-14 \t\t \t21:59:43.10 \t Z", | |
632 | // map[string]interface{}{"a": time.Date(2001, 12, 14, 21, 59, 43, .1e9, time.UTC)}, | |
633 | // }, | |
634 | { | |
635 | // explicit string tag | |
636 | "a: !!str 2015-01-01", | |
637 | map[string]interface{}{"a": "2015-01-01"}, | |
638 | }, | |
639 | { | |
640 | // explicit timestamp tag on quoted string | |
641 | "a: !!timestamp \"2015-01-01\"", | |
642 | map[string]time.Time{"a": time.Date(2015, 1, 1, 0, 0, 0, 0, time.UTC)}, | |
643 | }, | |
644 | { | |
645 | // explicit timestamp tag on unquoted string | |
646 | "a: !!timestamp 2015-01-01", | |
647 | map[string]time.Time{"a": time.Date(2015, 1, 1, 0, 0, 0, 0, time.UTC)}, | |
648 | }, | |
649 | { | |
650 | // quoted string that's a valid timestamp | |
651 | "a: \"2015-01-01\"", | |
652 | map[string]interface{}{"a": "2015-01-01"}, | |
653 | }, | |
654 | { | |
655 | // explicit timestamp tag into interface. | |
656 | "a: !!timestamp \"2015-01-01\"", | |
657 | map[string]interface{}{"a": "2015-01-01"}, | |
658 | }, | |
659 | { | |
660 | // implicit timestamp tag into interface. | |
661 | "a: 2015-01-01", | |
662 | map[string]interface{}{"a": "2015-01-01"}, | |
554 | 663 | }, |
555 | 664 | |
556 | 665 | // Encode empty lists as zero-length slices. |
588 | 697 | }, { |
589 | 698 | "a: 123456E1\n", |
590 | 699 | M{"a": "123456E1"}, |
700 | }, | |
701 | // yaml-test-suite 3GZX: Spec Example 7.1. Alias Nodes | |
702 | { | |
703 | "First occurrence: &anchor Foo\nSecond occurrence: *anchor\nOverride anchor: &anchor Bar\nReuse anchor: *anchor\n", | |
704 | map[interface{}]interface{}{ | |
705 | "Reuse anchor": "Bar", | |
706 | "First occurrence": "Foo", | |
707 | "Second occurrence": "Foo", | |
708 | "Override anchor": "Bar", | |
709 | }, | |
710 | }, | |
711 | // Single document with garbage following it. | |
712 | { | |
713 | "---\nhello\n...\n}not yaml", | |
714 | "hello", | |
591 | 715 | }, |
592 | 716 | } |
593 | 717 | |
603 | 727 | } |
604 | 728 | |
// TestUnmarshal runs every entry of unmarshalTests through
// yaml.Unmarshal into a freshly allocated value of the expected type
// and compares the result with the expected value.
func (s *S) TestUnmarshal(c *C) {
	for i, item := range unmarshalTests {
		c.Logf("test %d: %q", i, item.data)
		t := reflect.ValueOf(item.value).Type()
		value := reflect.New(t)
		err := yaml.Unmarshal([]byte(item.data), value.Interface())
		if _, ok := err.(*yaml.TypeError); !ok {
			// Partial unmarshal failures (*yaml.TypeError) are
			// tolerated here; any other error fails the test.
			c.Assert(err, IsNil)
		}
		c.Assert(value.Elem().Interface(), DeepEquals, item.value, Commentf("error: %v", err))
	}
}
741 | ||
// TestUnmarshalFullTimestamp checks that a full RFC3339 timestamp with
// nanoseconds and a numeric zone offset decodes into a time.Time.
// TODO(v3): This test should also work when unmarshaling onto an interface{}.
func (s *S) TestUnmarshalFullTimestamp(c *C) {
	// Full timestamp in same format as encoded. This is confirmed to be
	// properly decoded by Python as a timestamp as well.
	var str = "2015-02-24T18:19:39.123456789-03:00"
	var t time.Time
	err := yaml.Unmarshal([]byte(str), &t)
	c.Assert(err, IsNil)
	// The decoded value keeps the -03:00 offset; converting to UTC
	// shifts the wall-clock time by three hours.
	c.Assert(t, Equals, time.Date(2015, 2, 24, 18, 19, 39, 123456789, t.Location()))
	c.Assert(t.In(time.UTC), Equals, time.Date(2015, 2, 24, 21, 19, 39, 123456789, time.UTC))
}
753 | ||
// TestDecoderSingleDocument re-runs the unmarshalTests table through
// the streaming Decoder to confirm it matches Unmarshal's behaviour
// for single-document input.
func (s *S) TestDecoderSingleDocument(c *C) {
	// Test that Decoder.Decode works as expected on
	// all the unmarshal tests.
	for i, item := range unmarshalTests {
		c.Logf("test %d: %q", i, item.data)
		if item.data == "" {
			// Behaviour differs when there's no YAML.
			continue
		}
		t := reflect.ValueOf(item.value).Type()
		value := reflect.New(t)
		err := yaml.NewDecoder(strings.NewReader(item.data)).Decode(value.Interface())
		if _, ok := err.(*yaml.TypeError); !ok {
			c.Assert(err, IsNil)
		}
		c.Assert(value.Elem().Interface(), DeepEquals, item.value)
	}
}
772 | ||
// decoderTests exercises multi-document streams: each entry lists the
// sequence of values expected from repeated Decoder.Decode calls on
// data, in order, until the stream is exhausted.
var decoderTests = []struct {
	data   string
	values []interface{}
}{{
	"",
	nil,
}, {
	"a: b",
	[]interface{}{
		map[interface{}]interface{}{"a": "b"},
	},
}, {
	"---\na: b\n...\n",
	[]interface{}{
		map[interface{}]interface{}{"a": "b"},
	},
}, {
	"---\n'hello'\n...\n---\ngoodbye\n...\n",
	[]interface{}{
		"hello",
		"goodbye",
	},
}}
796 | ||
// TestDecoder decodes every document from each decoderTests stream and
// compares the collected values with the expected slice.
func (s *S) TestDecoder(c *C) {
	for i, item := range decoderTests {
		c.Logf("test %d: %q", i, item.data)
		var values []interface{}
		dec := yaml.NewDecoder(strings.NewReader(item.data))
		for {
			var value interface{}
			err := dec.Decode(&value)
			if err == io.EOF {
				// End of stream: no more documents.
				break
			}
			c.Assert(err, IsNil)
			values = append(values, value)
		}
		c.Assert(values, DeepEquals, item.values)
	}
}
814 | ||
// errReader is an io.Reader stub whose Read always fails, used to
// exercise the decoder's input-error path.
type errReader struct{}

// Read reports a fixed error without producing any bytes.
func (errReader) Read(p []byte) (n int, err error) {
	return 0, errors.New("some read error")
}
820 | ||
// TestDecoderReadError verifies that a failure from the underlying
// reader is surfaced as a "yaml: input error" from Decode.
func (s *S) TestDecoderReadError(c *C) {
	err := yaml.NewDecoder(errReader{}).Decode(&struct{}{})
	c.Assert(err, ErrorMatches, `yaml: input error: some read error`)
}
630 | 825 | |
631 | 826 | func (s *S) TestUnmarshalNaN(c *C) { |
641 | 836 | {"v: !!float 'error'", "yaml: cannot decode !!str `error` as a !!float"}, |
642 | 837 | {"v: [A,", "yaml: line 1: did not find expected node content"}, |
643 | 838 | {"v:\n- [A,", "yaml: line 2: did not find expected node content"}, |
839 | {"a:\n- b: *,", "yaml: line 2: did not find expected alphabetic or numeric character"}, | |
644 | 840 | {"a: *b\n", "yaml: unknown anchor 'b' referenced"}, |
645 | 841 | {"a: &a\n b: *a\n", "yaml: anchor 'a' value contains itself"}, |
646 | 842 | {"value: -", "yaml: block sequence entries are not allowed in this context"}, |
647 | 843 | {"a: !!binary ==", "yaml: !!binary value contains invalid base64 data"}, |
648 | 844 | {"{[.]}", `yaml: invalid map key: \[\]interface \{\}\{"\."\}`}, |
649 | 845 | {"{{.}}", `yaml: invalid map key: map\[interface\ \{\}\]interface \{\}\{".":interface \{\}\(nil\)\}`}, |
846 | {"b: *a\na: &a {c: 1}", `yaml: unknown anchor 'a' referenced`}, | |
847 | {"%TAG !%79! tag:yaml.org,2002:\n---\nv: !%79!int '1'", "yaml: did not find expected whitespace"}, | |
650 | 848 | } |
651 | 849 | |
// TestUnmarshalErrors checks that each malformed input in
// unmarshalErrorTests makes yaml.Unmarshal fail with the expected
// error message.
func (s *S) TestUnmarshalErrors(c *C) {
	for i, item := range unmarshalErrorTests {
		c.Logf("test %d: %q", i, item.data)
		var value interface{}
		err := yaml.Unmarshal([]byte(item.data), &value)
		c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value))
	}
}
858 | ||
// TestDecoderErrors checks that the streaming Decoder reports the same
// errors as Unmarshal for the shared unmarshalErrorTests table.
func (s *S) TestDecoderErrors(c *C) {
	for _, item := range unmarshalErrorTests {
		var value interface{}
		err := yaml.NewDecoder(strings.NewReader(item.data)).Decode(&value)
		c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value))
	}
}
965 | 1172 | v := struct{ A []int }{[]int{1}} |
966 | 1173 | yaml.Unmarshal([]byte("a: [2]"), &v) |
967 | 1174 | c.Assert(v.A, DeepEquals, []int{2}) |
1175 | } | |
1176 | ||
// unmarshalStrictTests lists inputs that plain Unmarshal accepts
// (producing value) but UnmarshalStrict must reject with error:
// unknown fields, duplicate struct fields (including via inline
// structs and inline maps), and duplicate map keys.
var unmarshalStrictTests = []struct {
	data  string
	value interface{}
	error string
}{{
	data:  "a: 1\nc: 2\n",
	value: struct{ A, B int }{A: 1},
	error: `yaml: unmarshal errors:\n  line 2: field c not found in type struct { A int; B int }`,
}, {
	data:  "a: 1\nb: 2\na: 3\n",
	value: struct{ A, B int }{A: 3, B: 2},
	error: `yaml: unmarshal errors:\n  line 3: field a already set in type struct { A int; B int }`,
}, {
	data: "c: 3\na: 1\nb: 2\nc: 4\n",
	value: struct {
		A       int
		inlineB `yaml:",inline"`
	}{
		A: 1,
		inlineB: inlineB{
			B: 2,
			inlineC: inlineC{
				C: 4,
			},
		},
	},
	error: `yaml: unmarshal errors:\n  line 4: field c already set in type struct { A int; yaml_test.inlineB "yaml:\\",inline\\"" }`,
}, {
	data: "c: 0\na: 1\nb: 2\nc: 1\n",
	value: struct {
		A       int
		inlineB `yaml:",inline"`
	}{
		A: 1,
		inlineB: inlineB{
			B: 2,
			inlineC: inlineC{
				C: 1,
			},
		},
	},
	error: `yaml: unmarshal errors:\n  line 4: field c already set in type struct { A int; yaml_test.inlineB "yaml:\\",inline\\"" }`,
}, {
	data: "c: 1\na: 1\nb: 2\nc: 3\n",
	value: struct {
		A int
		M map[string]interface{} `yaml:",inline"`
	}{
		A: 1,
		M: map[string]interface{}{
			"b": 2,
			"c": 3,
		},
	},
	error: `yaml: unmarshal errors:\n  line 4: key "c" already set in map`,
}, {
	data: "a: 1\n9: 2\nnull: 3\n9: 4",
	value: map[interface{}]interface{}{
		"a": 1,
		nil: 3,
		9:   4,
	},
	error: `yaml: unmarshal errors:\n  line 4: key 9 already set in map`,
}}
1241 | ||
// TestUnmarshalStrict confirms that each unmarshalStrictTests input is
// accepted by plain Unmarshal but rejected by UnmarshalStrict with the
// expected error message.
func (s *S) TestUnmarshalStrict(c *C) {
	for i, item := range unmarshalStrictTests {
		c.Logf("test %d: %q", i, item.data)
		// First test that normal Unmarshal unmarshals to the expected value.
		t := reflect.ValueOf(item.value).Type()
		value := reflect.New(t)
		err := yaml.Unmarshal([]byte(item.data), value.Interface())
		c.Assert(err, Equals, nil)
		c.Assert(value.Elem().Interface(), DeepEquals, item.value)

		// Then test that UnmarshalStrict fails on the same thing.
		t = reflect.ValueOf(item.value).Type()
		value = reflect.New(t)
		err = yaml.UnmarshalStrict([]byte(item.data), value.Interface())
		c.Assert(err, ErrorMatches, item.error)
	}
}
1259 | ||
// textUnmarshaler is a minimal encoding.TextUnmarshaler used by the
// unmarshal tests; it records the raw text it is handed.
type textUnmarshaler struct {
	S string
}

// UnmarshalText stores the given text verbatim and never fails.
func (t *textUnmarshaler) UnmarshalText(b []byte) error {
	t.S = string(b)
	return nil
}
1268 | ||
// TestFuzzCrashers replays inputs found by fuzzing that previously
// caused panics. Unmarshal must handle them without crashing; returned
// errors are deliberately ignored — only the absence of a panic matters.
func (s *S) TestFuzzCrashers(c *C) {
	cases := []string{
		// runtime error: index out of range
		"\"\\0\\\r\n",

		// should not happen
		"  0: [\n] 0",
		"? ? \"\n\" 0",
		"    - {\n000}0",
		"0:\n  0: [0\n] 0",
		"    - \"\n000\"0",
		"    - \"\n000\"\"",
		"0:\n    - {\n000}0",
		"0:\n    - \"\n000\"0",
		"0:\n    - \"\n000\"\"",

		// runtime error: index out of range
		" \ufeff\n",
		"? \ufeff\n",
		"? \ufeff:\n",
		"0: \ufeff\n",
		"? \ufeff: \ufeff\n",
	}
	for _, data := range cases {
		var v interface{}
		_ = yaml.Unmarshal([]byte(data), &v)
	}
}
969 | 1297 | |
970 | 1298 | //var data []byte |
1 | 1 | |
2 | 2 | import ( |
3 | 3 | "bytes" |
4 | "fmt" | |
4 | 5 | ) |
5 | 6 | |
6 | 7 | // Flush the buffer if needed. |
663 | 664 | return yaml_emitter_emit_mapping_start(emitter, event) |
664 | 665 | default: |
665 | 666 | return yaml_emitter_set_emitter_error(emitter, |
666 | "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS") | |
667 | fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) | |
667 | 668 | } |
668 | 669 | } |
669 | 670 | |
841 | 842 | return true |
842 | 843 | } |
843 | 844 | |
844 | // Write an achor. | |
845 | // Write an anchor. | |
845 | 846 | func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { |
846 | 847 | if emitter.anchor_data.anchor == nil { |
847 | 848 | return true |
993 | 994 | break_space = false |
994 | 995 | space_break = false |
995 | 996 | |
996 | preceeded_by_whitespace = false | |
997 | followed_by_whitespace = false | |
998 | previous_space = false | |
999 | previous_break = false | |
997 | preceded_by_whitespace = false | |
998 | followed_by_whitespace = false | |
999 | previous_space = false | |
1000 | previous_break = false | |
1000 | 1001 | ) |
1001 | 1002 | |
1002 | 1003 | emitter.scalar_data.value = value |
1015 | 1016 | flow_indicators = true |
1016 | 1017 | } |
1017 | 1018 | |
1018 | preceeded_by_whitespace = true | |
1019 | preceded_by_whitespace = true | |
1019 | 1020 | for i, w := 0, 0; i < len(value); i += w { |
1020 | 1021 | w = width(value[i]) |
1021 | 1022 | followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) |
1046 | 1047 | block_indicators = true |
1047 | 1048 | } |
1048 | 1049 | case '#': |
1049 | if preceeded_by_whitespace { | |
1050 | if preceded_by_whitespace { | |
1050 | 1051 | flow_indicators = true |
1051 | 1052 | block_indicators = true |
1052 | 1053 | } |
1087 | 1088 | } |
1088 | 1089 | |
1089 | 1090 | // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. |
1090 | preceeded_by_whitespace = is_blankz(value, i) | |
1091 | preceded_by_whitespace = is_blankz(value, i) | |
1091 | 1092 | } |
1092 | 1093 | |
1093 | 1094 | emitter.scalar_data.multiline = line_breaks |
2 | 2 | import ( |
3 | 3 | "encoding" |
4 | 4 | "fmt" |
5 | "io" | |
5 | 6 | "reflect" |
6 | 7 | "regexp" |
7 | 8 | "sort" |
8 | 9 | "strconv" |
9 | 10 | "strings" |
10 | 11 | "time" |
12 | "unicode/utf8" | |
11 | 13 | ) |
12 | 14 | |
13 | 15 | type encoder struct { |
15 | 17 | event yaml_event_t |
16 | 18 | out []byte |
17 | 19 | flow bool |
18 | } | |
19 | ||
20 | func newEncoder() (e *encoder) { | |
21 | e = &encoder{} | |
22 | e.must(yaml_emitter_initialize(&e.emitter)) | |
20 | // doneInit holds whether the initial stream_start_event has been | |
21 | // emitted. | |
22 | doneInit bool | |
23 | } | |
24 | ||
// newEncoder returns an encoder that accumulates its output in e.out.
// Stream-level events are emitted later, on first use, via init.
func newEncoder() *encoder {
	e := &encoder{}
	yaml_emitter_initialize(&e.emitter)
	yaml_emitter_set_output_string(&e.emitter, &e.out)
	yaml_emitter_set_unicode(&e.emitter, true)
	return e
}
31 | 32 | |
// newEncoderWithWriter returns an encoder that writes its output
// directly to w instead of buffering it in memory.
func newEncoderWithWriter(w io.Writer) *encoder {
	e := &encoder{}
	yaml_emitter_initialize(&e.emitter)
	yaml_emitter_set_output_writer(&e.emitter, w)
	yaml_emitter_set_unicode(&e.emitter, true)
	return e
}
40 | ||
41 | func (e *encoder) init() { | |
42 | if e.doneInit { | |
43 | return | |
44 | } | |
45 | yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) | |
46 | e.emit() | |
47 | e.doneInit = true | |
48 | } | |
49 | ||
// finish terminates the YAML stream by emitting the stream_end event.
func (e *encoder) finish() {
	e.emitter.open_ended = false
	yaml_stream_end_event_initialize(&e.event)
	e.emit()
}
39 | 55 | |
43 | 59 | |
// emit hands the pending e.event to the emitter, panicking via must on
// any emitter failure.
func (e *encoder) emit() {
	// This will internally delete the e.event value.
	e.must(yaml_emitter_emit(&e.emitter, &e.event))
}
50 | 64 | |
51 | 65 | func (e *encoder) must(ok bool) { |
58 | 72 | } |
59 | 73 | } |
60 | 74 | |
// marshalDoc wraps a single marshaled value in document_start and
// document_end events, starting the stream first if it has not begun.
func (e *encoder) marshalDoc(tag string, in reflect.Value) {
	e.init()
	yaml_document_start_event_initialize(&e.event, nil, nil, true)
	e.emit()
	e.marshal(tag, in)
	yaml_document_end_event_initialize(&e.event, true)
	e.emit()
}
83 | ||
61 | 84 | func (e *encoder) marshal(tag string, in reflect.Value) { |
62 | if !in.IsValid() { | |
85 | if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { | |
63 | 86 | e.nilv() |
64 | 87 | return |
65 | 88 | } |
66 | 89 | iface := in.Interface() |
67 | if m, ok := iface.(Marshaler); ok { | |
90 | switch m := iface.(type) { | |
91 | case time.Time, *time.Time: | |
92 | // Although time.Time implements TextMarshaler, | |
93 | // we don't want to treat it as a string for YAML | |
94 | // purposes because YAML has special support for | |
95 | // timestamps. | |
96 | case Marshaler: | |
68 | 97 | v, err := m.MarshalYAML() |
69 | 98 | if err != nil { |
70 | 99 | fail(err) |
74 | 103 | return |
75 | 104 | } |
76 | 105 | in = reflect.ValueOf(v) |
77 | } else if m, ok := iface.(encoding.TextMarshaler); ok { | |
106 | case encoding.TextMarshaler: | |
78 | 107 | text, err := m.MarshalText() |
79 | 108 | if err != nil { |
80 | 109 | fail(err) |
81 | 110 | } |
82 | 111 | in = reflect.ValueOf(string(text)) |
112 | case nil: | |
113 | e.nilv() | |
114 | return | |
83 | 115 | } |
84 | 116 | switch in.Kind() { |
85 | 117 | case reflect.Interface: |
86 | if in.IsNil() { | |
87 | e.nilv() | |
88 | } else { | |
89 | e.marshal(tag, in.Elem()) | |
90 | } | |
118 | e.marshal(tag, in.Elem()) | |
91 | 119 | case reflect.Map: |
92 | 120 | e.mapv(tag, in) |
93 | 121 | case reflect.Ptr: |
94 | if in.IsNil() { | |
95 | e.nilv() | |
122 | if in.Type() == ptrTimeType { | |
123 | e.timev(tag, in.Elem()) | |
96 | 124 | } else { |
97 | 125 | e.marshal(tag, in.Elem()) |
98 | 126 | } |
99 | 127 | case reflect.Struct: |
100 | e.structv(tag, in) | |
101 | case reflect.Slice: | |
128 | if in.Type() == timeType { | |
129 | e.timev(tag, in) | |
130 | } else { | |
131 | e.structv(tag, in) | |
132 | } | |
133 | case reflect.Slice, reflect.Array: | |
102 | 134 | if in.Type().Elem() == mapItemType { |
103 | 135 | e.itemsv(tag, in) |
104 | 136 | } else { |
190 | 222 | e.flow = false |
191 | 223 | style = yaml_FLOW_MAPPING_STYLE |
192 | 224 | } |
193 | e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) | |
225 | yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) | |
194 | 226 | e.emit() |
195 | 227 | f() |
196 | e.must(yaml_mapping_end_event_initialize(&e.event)) | |
228 | yaml_mapping_end_event_initialize(&e.event) | |
197 | 229 | e.emit() |
198 | 230 | } |
199 | 231 | |
// stringv emits a string value, picking a tag and scalar style based on
// the string's content: invalid UTF-8 becomes base64-encoded !!binary,
// multi-line strings use literal style, strings that would resolve to a
// non-string tag when unquoted are double-quoted, and everything else
// is emitted plain.
func (e *encoder) stringv(tag string, in reflect.Value) {
	var style yaml_scalar_style_t
	s := in.String()
	canUsePlain := true
	switch {
	case !utf8.ValidString(s):
		if tag == yaml_BINARY_TAG {
			failf("explicitly tagged !!binary data must be base64-encoded")
		}
		if tag != "" {
			failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
		}
		// It can't be encoded directly as YAML so use a binary tag
		// and encode it as base64.
		tag = yaml_BINARY_TAG
		s = encodeBase64(s)
	case tag == "":
		// Check to see if it would resolve to a specific
		// tag when encoded unquoted. If it doesn't,
		// there's no need to quote it.
		rtag, _ := resolve("", s)
		canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s)
	}
	// Note: it's possible for user code to emit invalid YAML
	// if they explicitly specify a tag and a string containing
	// text that's incompatible with that tag.
	switch {
	case strings.Contains(s, "\n"):
		style = yaml_LITERAL_SCALAR_STYLE
	case canUsePlain:
		style = yaml_PLAIN_SCALAR_STYLE
	default:
		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
	}
	e.emitScalar(s, "", tag, style)
}
280 | 325 | e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) |
281 | 326 | } |
282 | 327 | |
328 | func (e *encoder) timev(tag string, in reflect.Value) { | |
329 | t := in.Interface().(time.Time) | |
330 | s := t.Format(time.RFC3339Nano) | |
331 | e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) | |
332 | } | |
333 | ||
283 | 334 | func (e *encoder) floatv(tag string, in reflect.Value) { |
284 | // FIXME: Handle 64 bits here. | |
285 | s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32) | |
335 | // Issue #352: When formatting, use the precision of the underlying value | |
336 | precision := 64 | |
337 | if in.Kind() == reflect.Float32 { | |
338 | precision = 32 | |
339 | } | |
340 | ||
341 | s := strconv.FormatFloat(in.Float(), 'g', -1, precision) | |
286 | 342 | switch s { |
287 | 343 | case "+Inf": |
288 | 344 | s = ".inf" |
0 | 0 | package yaml_test |
1 | 1 | |
2 | 2 | import ( |
3 | "bytes" | |
3 | 4 | "fmt" |
4 | 5 | "math" |
5 | 6 | "strconv" |
6 | 7 | "strings" |
7 | 8 | "time" |
8 | 9 | |
10 | "net" | |
11 | "os" | |
12 | ||
9 | 13 | . "gopkg.in/check.v1" |
10 | 14 | "gopkg.in/yaml.v2" |
11 | "net" | |
12 | "os" | |
13 | 15 | ) |
14 | 16 | |
15 | 17 | var marshalIntTest = 123 |
22 | 24 | nil, |
23 | 25 | "null\n", |
24 | 26 | }, { |
27 | (*marshalerType)(nil), | |
28 | "null\n", | |
29 | }, { | |
25 | 30 | &struct{}{}, |
26 | 31 | "{}\n", |
27 | 32 | }, { |
69 | 74 | }, { |
70 | 75 | map[string]interface{}{"v": float64(0.1)}, |
71 | 76 | "v: 0.1\n", |
77 | }, { | |
78 | map[string]interface{}{"v": float32(0.99)}, | |
79 | "v: 0.99\n", | |
72 | 80 | }, { |
73 | 81 | map[string]interface{}{"v": -0.1}, |
74 | 82 | "v: -0.1\n", |
142 | 150 | &struct{ A []int }{[]int{1, 2}}, |
143 | 151 | "a:\n- 1\n- 2\n", |
144 | 152 | }, { |
153 | &struct{ A [2]int }{[2]int{1, 2}}, | |
154 | "a:\n- 1\n- 2\n", | |
155 | }, { | |
145 | 156 | &struct { |
146 | 157 | B int "a" |
147 | 158 | }{1}, |
195 | 206 | B float64 "b,omitempty" |
196 | 207 | }{1, 0}, |
197 | 208 | "a: 1\n", |
209 | }, | |
210 | { | |
211 | &struct { | |
212 | T1 time.Time "t1,omitempty" | |
213 | T2 time.Time "t2,omitempty" | |
214 | T3 *time.Time "t3,omitempty" | |
215 | T4 *time.Time "t4,omitempty" | |
216 | }{ | |
217 | T2: time.Date(2018, 1, 9, 10, 40, 47, 0, time.UTC), | |
218 | T4: newTime(time.Date(2098, 1, 9, 10, 40, 47, 0, time.UTC)), | |
219 | }, | |
220 | "t2: 2018-01-09T10:40:47Z\nt4: 2098-01-09T10:40:47Z\n", | |
221 | }, | |
222 | // Nil interface that implements Marshaler. | |
223 | { | |
224 | map[string]yaml.Marshaler{ | |
225 | "a": nil, | |
226 | }, | |
227 | "a: null\n", | |
198 | 228 | }, |
199 | 229 | |
200 | 230 | // Flow flag |
301 | 331 | map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)}, |
302 | 332 | "a: 1.2.3.4\n", |
303 | 333 | }, |
304 | { | |
305 | map[string]time.Time{"a": time.Unix(1424801979, 0)}, | |
334 | // time.Time gets a timestamp tag. | |
335 | { | |
336 | map[string]time.Time{"a": time.Date(2015, 2, 24, 18, 19, 39, 0, time.UTC)}, | |
306 | 337 | "a: 2015-02-24T18:19:39Z\n", |
338 | }, | |
339 | { | |
340 | map[string]*time.Time{"a": newTime(time.Date(2015, 2, 24, 18, 19, 39, 0, time.UTC))}, | |
341 | "a: 2015-02-24T18:19:39Z\n", | |
342 | }, | |
343 | { | |
344 | // This is confirmed to be properly decoded in Python (libyaml) without a timestamp tag. | |
345 | map[string]time.Time{"a": time.Date(2015, 2, 24, 18, 19, 39, 123456789, time.FixedZone("FOO", -3*60*60))}, | |
346 | "a: 2015-02-24T18:19:39.123456789-03:00\n", | |
347 | }, | |
348 | // Ensure timestamp-like strings are quoted. | |
349 | { | |
350 | map[string]string{"a": "2015-02-24T18:19:39Z"}, | |
351 | "a: \"2015-02-24T18:19:39Z\"\n", | |
307 | 352 | }, |
308 | 353 | |
309 | 354 | // Ensure strings containing ": " are quoted (reported as PR #43, but not reproducible). |
326 | 371 | func (s *S) TestMarshal(c *C) { |
327 | 372 | defer os.Setenv("TZ", os.Getenv("TZ")) |
328 | 373 | os.Setenv("TZ", "UTC") |
329 | for _, item := range marshalTests { | |
374 | for i, item := range marshalTests { | |
375 | c.Logf("test %d: %q", i, item.data) | |
330 | 376 | data, err := yaml.Marshal(item.value) |
331 | 377 | c.Assert(err, IsNil) |
332 | 378 | c.Assert(string(data), Equals, item.data) |
333 | 379 | } |
380 | } | |
381 | ||
382 | func (s *S) TestEncoderSingleDocument(c *C) { | |
383 | for i, item := range marshalTests { | |
384 | c.Logf("test %d. %q", i, item.data) | |
385 | var buf bytes.Buffer | |
386 | enc := yaml.NewEncoder(&buf) | |
387 | err := enc.Encode(item.value) | |
388 | c.Assert(err, Equals, nil) | |
389 | err = enc.Close() | |
390 | c.Assert(err, Equals, nil) | |
391 | c.Assert(buf.String(), Equals, item.data) | |
392 | } | |
393 | } | |
394 | ||
395 | func (s *S) TestEncoderMultipleDocuments(c *C) { | |
396 | var buf bytes.Buffer | |
397 | enc := yaml.NewEncoder(&buf) | |
398 | err := enc.Encode(map[string]string{"a": "b"}) | |
399 | c.Assert(err, Equals, nil) | |
400 | err = enc.Encode(map[string]string{"c": "d"}) | |
401 | c.Assert(err, Equals, nil) | |
402 | err = enc.Close() | |
403 | c.Assert(err, Equals, nil) | |
404 | c.Assert(buf.String(), Equals, "a: b\n---\nc: d\n") | |
405 | } | |
406 | ||
407 | func (s *S) TestEncoderWriteError(c *C) { | |
408 | enc := yaml.NewEncoder(errorWriter{}) | |
409 | err := enc.Encode(map[string]string{"a": "b"}) | |
410 | c.Assert(err, ErrorMatches, `yaml: write error: some write error`) // Data not flushed yet | |
411 | } | |
412 | ||
413 | type errorWriter struct{} | |
414 | ||
415 | func (errorWriter) Write([]byte) (int, error) { | |
416 | return 0, fmt.Errorf("some write error") | |
334 | 417 | } |
335 | 418 | |
336 | 419 | var marshalErrorTests = []struct { |
454 | 537 | "1", |
455 | 538 | "2", |
456 | 539 | "a!10", |
457 | "a/2", | |
540 | "a/0001", | |
541 | "a/002", | |
542 | "a/3", | |
458 | 543 | "a/10", |
544 | "a/11", | |
545 | "a/0012", | |
546 | "a/100", | |
459 | 547 | "a~10", |
460 | 548 | "ab/1", |
461 | 549 | "b/1", |
470 | 558 | "c2.10", |
471 | 559 | "c10.2", |
472 | 560 | "d1", |
561 | "d7", | |
562 | "d7abc", | |
473 | 563 | "d12", |
474 | 564 | "d12a", |
475 | 565 | } |
498 | 588 | last = index |
499 | 589 | } |
500 | 590 | } |
591 | ||
592 | func newTime(t time.Time) *time.Time { | |
593 | return &t | |
594 | } |
0 | package yaml_test | |
1 | ||
2 | import ( | |
3 | "fmt" | |
4 | "log" | |
5 | ||
6 | "gopkg.in/yaml.v2" | |
7 | ) | |
8 | ||
9 | // An example showing how to unmarshal embedded | |
10 | // structs from YAML. | |
11 | ||
12 | type StructA struct { | |
13 | A string `yaml:"a"` | |
14 | } | |
15 | ||
16 | type StructB struct { | |
17 | // Embedded structs are not treated as embedded in YAML by default. To do that, | |
18 | // add the ",inline" annotation below | |
19 | StructA `yaml:",inline"` | |
20 | B string `yaml:"b"` | |
21 | } | |
22 | ||
23 | var data = ` | |
24 | a: a string from struct A | |
25 | b: a string from struct B | |
26 | ` | |
27 | ||
28 | func ExampleUnmarshal_embedded() { | |
29 | var b StructB | |
30 | ||
31 | err := yaml.Unmarshal([]byte(data), &b) | |
32 | if err != nil { | |
33 | log.Fatalf("cannot unmarshal data: %v", err) | |
34 | } | |
35 | fmt.Println(b.A) | |
36 | fmt.Println(b.B) | |
37 | // Output: | |
38 | // a string from struct A | |
39 | // a string from struct B | |
40 | } |
0 | module "gopkg.in/yaml.v2" | |
1 | ||
2 | require ( | |
3 | "gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405 | |
4 | ) |
92 | 92 | panic("read handler must be set") |
93 | 93 | } |
94 | 94 | |
95 | // [Go] This function was changed to guarantee the requested length size at EOF. | |
96 | // The fact we need to do this is pretty awful, but the description above implies | |
97 | // for that to be the case, and there are tests | |
98 | ||
95 | 99 | // If the EOF flag is set and the raw buffer is empty, do nothing. |
96 | 100 | if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { |
97 | return true | |
101 | // [Go] ACTUALLY! Read the documentation of this function above. | |
102 | // This is just broken. To return true, we need to have the | |
103 | // given length in the buffer. Not doing that means every single | |
104 | // check that calls this function to make sure the buffer has a | |
105 | // given length is Go) panicking; or C) accessing invalid memory. | |
106 | //return true | |
98 | 107 | } |
99 | 108 | |
100 | 109 | // Return if the buffer contains enough characters. |
388 | 397 | break |
389 | 398 | } |
390 | 399 | } |
400 | // [Go] Read the documentation of this function above. To return true, | |
401 | // we need to have the given length in the buffer. Not doing that means | |
402 | // every single check that calls this function to make sure the buffer | |
403 | // has a given length is Go) panicking; or C) accessing invalid memory. | |
404 | // This happens here due to the EOF above breaking early. | |
405 | for buffer_len < length { | |
406 | parser.buffer[buffer_len] = 0 | |
407 | buffer_len++ | |
408 | } | |
391 | 409 | parser.buffer = parser.buffer[:buffer_len] |
392 | 410 | return true |
393 | 411 | } |
5 | 5 | "regexp" |
6 | 6 | "strconv" |
7 | 7 | "strings" |
8 | "unicode/utf8" | |
8 | "time" | |
9 | 9 | ) |
10 | 10 | |
11 | 11 | type resolveMapItem struct { |
74 | 74 | |
75 | 75 | func resolvableTag(tag string) bool { |
76 | 76 | switch tag { |
77 | case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG: | |
77 | case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG: | |
78 | 78 | return true |
79 | 79 | } |
80 | 80 | return false |
91 | 91 | switch tag { |
92 | 92 | case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: |
93 | 93 | return |
94 | case yaml_FLOAT_TAG: | |
95 | if rtag == yaml_INT_TAG { | |
96 | switch v := out.(type) { | |
97 | case int64: | |
98 | rtag = yaml_FLOAT_TAG | |
99 | out = float64(v) | |
100 | return | |
101 | case int: | |
102 | rtag = yaml_FLOAT_TAG | |
103 | out = float64(v) | |
104 | return | |
105 | } | |
106 | } | |
94 | 107 | } |
95 | 108 | failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) |
96 | 109 | }() |
124 | 137 | |
125 | 138 | case 'D', 'S': |
126 | 139 | // Int, float, or timestamp. |
140 | // Only try values as a timestamp if the value is unquoted or there's an explicit | |
141 | // !!timestamp tag. | |
142 | if tag == "" || tag == yaml_TIMESTAMP_TAG { | |
143 | t, ok := parseTimestamp(in) | |
144 | if ok { | |
145 | return yaml_TIMESTAMP_TAG, t | |
146 | } | |
147 | } | |
148 | ||
127 | 149 | plain := strings.Replace(in, "_", "", -1) |
128 | 150 | intv, err := strconv.ParseInt(plain, 0, 64) |
129 | 151 | if err == nil { |
157 | 179 | return yaml_INT_TAG, uintv |
158 | 180 | } |
159 | 181 | } else if strings.HasPrefix(plain, "-0b") { |
160 | intv, err := strconv.ParseInt(plain[3:], 2, 64) | |
161 | if err == nil { | |
162 | if intv == int64(int(intv)) { | |
163 | return yaml_INT_TAG, -int(intv) | |
182 | intv, err := strconv.ParseInt("-" + plain[3:], 2, 64) | |
183 | if err == nil { | |
184 | if true || intv == int64(int(intv)) { | |
185 | return yaml_INT_TAG, int(intv) | |
164 | 186 | } else { |
165 | return yaml_INT_TAG, -intv | |
187 | return yaml_INT_TAG, intv | |
166 | 188 | } |
167 | 189 | } |
168 | 190 | } |
169 | // XXX Handle timestamps here. | |
170 | ||
171 | 191 | default: |
172 | 192 | panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") |
173 | 193 | } |
174 | 194 | } |
175 | if tag == yaml_BINARY_TAG { | |
176 | return yaml_BINARY_TAG, in | |
177 | } | |
178 | if utf8.ValidString(in) { | |
179 | return yaml_STR_TAG, in | |
180 | } | |
181 | return yaml_BINARY_TAG, encodeBase64(in) | |
195 | return yaml_STR_TAG, in | |
182 | 196 | } |
183 | 197 | |
184 | 198 | // encodeBase64 encodes s as base64 that is broken up into multiple lines |
205 | 219 | } |
206 | 220 | return string(out[:k]) |
207 | 221 | } |
222 | ||
223 | // This is a subset of the formats allowed by the regular expression | |
224 | // defined at http://yaml.org/type/timestamp.html. | |
225 | var allowedTimestampFormats = []string{ | |
226 | "2006-1-2T15:4:5.999999999Z07:00", // RFC3339Nano with short date fields. | 
227 | "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". | |
228 | "2006-1-2 15:4:5.999999999", // space separated with no time zone | |
229 | "2006-1-2", // date only | |
230 | // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" | |
231 | // from the set of examples. | |
232 | } | |
233 | ||
234 | // parseTimestamp parses s as a timestamp string and | |
235 | // returns the timestamp and reports whether it succeeded. | |
236 | // Timestamp formats are defined at http://yaml.org/type/timestamp.html | |
237 | func parseTimestamp(s string) (time.Time, bool) { | |
238 | // TODO write code to check all the formats supported by | |
239 | // http://yaml.org/type/timestamp.html instead of using time.Parse. | |
240 | ||
241 | // Quick check: all date formats start with YYYY-. | |
242 | i := 0 | |
243 | for ; i < len(s); i++ { | |
244 | if c := s[i]; c < '0' || c > '9' { | |
245 | break | |
246 | } | |
247 | } | |
248 | if i != 4 || i == len(s) || s[i] != '-' { | |
249 | return time.Time{}, false | |
250 | } | |
251 | for _, format := range allowedTimestampFormats { | |
252 | if t, err := time.Parse(format, s); err == nil { | |
253 | return t, true | |
254 | } | |
255 | } | |
256 | return time.Time{}, false | |
257 | } |
610 | 610 | if directive { |
611 | 611 | context = "while parsing a %TAG directive" |
612 | 612 | } |
613 | return yaml_parser_set_scanner_error(parser, context, context_mark, "did not find URI escaped octet") | |
613 | return yaml_parser_set_scanner_error(parser, context, context_mark, problem) | |
614 | 614 | } |
615 | 615 | |
616 | 616 | func trace(args ...interface{}) func() { |
870 | 870 | |
871 | 871 | required := parser.flow_level == 0 && parser.indent == parser.mark.column |
872 | 872 | |
873 | // A simple key is required only when it is the first token in the current | |
874 | // line. Therefore it is always allowed. But we add a check anyway. | |
875 | if required && !parser.simple_key_allowed { | |
876 | panic("should not happen") | |
877 | } | |
878 | ||
879 | 873 | // |
880 | 874 | // If the current position may start a simple key, save it. |
881 | 875 | // |
1943 | 1937 | } else { |
1944 | 1938 | // It's either the '!' tag or not really a tag handle. If it's a %TAG |
1945 | 1939 | // directive, it's an error. If it's a tag token, it must be a part of URI. |
1946 | if directive && !(s[0] == '!' && s[1] == 0) { | |
1940 | if directive && string(s) != "!" { | |
1947 | 1941 | yaml_parser_set_scanner_tag_error(parser, directive, |
1948 | 1942 | start_mark, "did not find expected '!'") |
1949 | 1943 | return false |
1958 | 1952 | func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { |
1959 | 1953 | //size_t length = head ? strlen((char *)head) : 0 |
1960 | 1954 | var s []byte |
1955 | hasTag := len(head) > 0 | |
1961 | 1956 | |
1962 | 1957 | // Copy the head if needed. |
1963 | 1958 | // |
1999 | 1994 | if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { |
2000 | 1995 | return false |
2001 | 1996 | } |
2002 | } | |
2003 | ||
2004 | // Check if the tag is non-empty. | |
2005 | if len(s) == 0 { | |
1997 | hasTag = true | |
1998 | } | |
1999 | ||
2000 | if !hasTag { | |
2006 | 2001 | yaml_parser_set_scanner_tag_error(parser, directive, |
2007 | 2002 | start_mark, "did not find expected tag URI") |
2008 | 2003 | return false |
2473 | 2468 | } |
2474 | 2469 | } |
2475 | 2470 | |
2471 | if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { | |
2472 | return false | |
2473 | } | |
2474 | ||
2476 | 2475 | // Check if we are at the end of the scalar. |
2477 | 2476 | if single { |
2478 | 2477 | if parser.buffer[parser.buffer_pos] == '\'' { |
2485 | 2484 | } |
2486 | 2485 | |
2487 | 2486 | // Consume blank characters. |
2488 | if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { | |
2489 | return false | |
2490 | } | |
2491 | ||
2492 | 2487 | for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { |
2493 | 2488 | if is_blank(parser.buffer, parser.buffer_pos) { |
2494 | 2489 | // Consume a space or a tab character. |
2590 | 2585 | // Consume non-blank characters. |
2591 | 2586 | for !is_blankz(parser.buffer, parser.buffer_pos) { |
2592 | 2587 | |
2593 | // Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13". | |
2594 | if parser.flow_level > 0 && | |
2595 | parser.buffer[parser.buffer_pos] == ':' && | |
2596 | !is_blankz(parser.buffer, parser.buffer_pos+1) { | |
2597 | yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", | |
2598 | start_mark, "found unexpected ':'") | |
2599 | return false | |
2600 | } | |
2601 | ||
2602 | 2588 | // Check for indicators that may end a plain scalar. |
2603 | 2589 | if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || |
2604 | 2590 | (parser.flow_level > 0 && |
2605 | (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' || | |
2591 | (parser.buffer[parser.buffer_pos] == ',' || | |
2606 | 2592 | parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || |
2607 | 2593 | parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || |
2608 | 2594 | parser.buffer[parser.buffer_pos] == '}')) { |
2654 | 2640 | for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { |
2655 | 2641 | if is_blank(parser.buffer, parser.buffer_pos) { |
2656 | 2642 | |
2657 | // Check for tab character that abuse indentation. | |
2643 | // Check for tab characters that abuse indentation. | |
2658 | 2644 | if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { |
2659 | 2645 | yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", |
2660 | start_mark, "found a tab character that violate indentation") | |
2646 | start_mark, "found a tab character that violates indentation") | |
2661 | 2647 | return false |
2662 | 2648 | } |
2663 | 2649 |
50 | 50 | } |
51 | 51 | var ai, bi int |
52 | 52 | var an, bn int64 |
53 | if ar[i] == '0' || br[i] == '0' { | |
54 | for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- { | |
55 | if ar[j] != '0' { | |
56 | an = 1 | |
57 | bn = 1 | |
58 | break | |
59 | } | |
60 | } | |
61 | } | |
53 | 62 | for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { |
54 | 63 | an = an*10 + int64(ar[ai]-'0') |
55 | 64 | } |
17 | 17 | return true |
18 | 18 | } |
19 | 19 | |
20 | // If the output encoding is UTF-8, we don't need to recode the buffer. | |
21 | if emitter.encoding == yaml_UTF8_ENCODING { | |
22 | if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { | |
23 | return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) | |
24 | } | |
25 | emitter.buffer_pos = 0 | |
26 | return true | |
27 | } | |
28 | ||
29 | // Recode the buffer into the raw buffer. | |
30 | var low, high int | |
31 | if emitter.encoding == yaml_UTF16LE_ENCODING { | |
32 | low, high = 0, 1 | |
33 | } else { | |
34 | high, low = 1, 0 | |
35 | } | |
36 | ||
37 | pos := 0 | |
38 | for pos < emitter.buffer_pos { | |
39 | // See the "reader.c" code for more details on UTF-8 encoding. Note | |
40 | // that we assume that the buffer contains a valid UTF-8 sequence. | |
41 | ||
42 | // Read the next UTF-8 character. | |
43 | octet := emitter.buffer[pos] | |
44 | ||
45 | var w int | |
46 | var value rune | |
47 | switch { | |
48 | case octet&0x80 == 0x00: | |
49 | w, value = 1, rune(octet&0x7F) | |
50 | case octet&0xE0 == 0xC0: | |
51 | w, value = 2, rune(octet&0x1F) | |
52 | case octet&0xF0 == 0xE0: | |
53 | w, value = 3, rune(octet&0x0F) | |
54 | case octet&0xF8 == 0xF0: | |
55 | w, value = 4, rune(octet&0x07) | |
56 | } | |
57 | for k := 1; k < w; k++ { | |
58 | octet = emitter.buffer[pos+k] | |
59 | value = (value << 6) + (rune(octet) & 0x3F) | |
60 | } | |
61 | pos += w | |
62 | ||
63 | // Write the character. | |
64 | if value < 0x10000 { | |
65 | var b [2]byte | |
66 | b[high] = byte(value >> 8) | |
67 | b[low] = byte(value & 0xFF) | |
68 | emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1]) | |
69 | } else { | |
70 | // Write the character using a surrogate pair (check "reader.c"). | |
71 | var b [4]byte | |
72 | value -= 0x10000 | |
73 | b[high] = byte(0xD8 + (value >> 18)) | |
74 | b[low] = byte((value >> 10) & 0xFF) | |
75 | b[high+2] = byte(0xDC + ((value >> 8) & 0xFF)) | |
76 | b[low+2] = byte(value & 0xFF) | |
77 | emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3]) | |
78 | } | |
79 | } | |
80 | ||
81 | // Write the raw buffer. | |
82 | if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil { | |
20 | if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { | |
83 | 21 | return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) |
84 | 22 | } |
85 | 23 | emitter.buffer_pos = 0 |
86 | emitter.raw_buffer = emitter.raw_buffer[:0] | |
87 | 24 | return true |
88 | 25 | } |
8 | 8 | import ( |
9 | 9 | "errors" |
10 | 10 | "fmt" |
11 | "io" | |
11 | 12 | "reflect" |
12 | 13 | "strings" |
13 | 14 | "sync" |
76 | 77 | // supported tag options. |
77 | 78 | // |
78 | 79 | func Unmarshal(in []byte, out interface{}) (err error) { |
80 | return unmarshal(in, out, false) | |
81 | } | |
82 | ||
83 | // UnmarshalStrict is like Unmarshal except that any fields that are found | |
84 | // in the data that do not have corresponding struct members, or mapping | |
85 | // keys that are duplicates, will result in | |
86 | // an error. | |
87 | func UnmarshalStrict(in []byte, out interface{}) (err error) { | |
88 | return unmarshal(in, out, true) | |
89 | } | |
90 | ||
91 | // A Decoder reads and decodes YAML values from an input stream. | 
92 | type Decoder struct { | |
93 | strict bool | |
94 | parser *parser | |
95 | } | |
96 | ||
97 | // NewDecoder returns a new decoder that reads from r. | |
98 | // | |
99 | // The decoder introduces its own buffering and may read | |
100 | // data from r beyond the YAML values requested. | |
101 | func NewDecoder(r io.Reader) *Decoder { | |
102 | return &Decoder{ | |
103 | parser: newParserFromReader(r), | |
104 | } | |
105 | } | |
106 | ||
107 | // SetStrict sets whether strict decoding behaviour is enabled when | |
108 | // decoding items in the data (see UnmarshalStrict). By default, decoding is not strict. | |
109 | func (dec *Decoder) SetStrict(strict bool) { | |
110 | dec.strict = strict | |
111 | } | |
112 | ||
113 | // Decode reads the next YAML-encoded value from its input | |
114 | // and stores it in the value pointed to by v. | |
115 | // | |
116 | // See the documentation for Unmarshal for details about the | |
117 | // conversion of YAML into a Go value. | |
118 | func (dec *Decoder) Decode(v interface{}) (err error) { | |
119 | d := newDecoder(dec.strict) | |
79 | 120 | defer handleErr(&err) |
80 | d := newDecoder() | |
121 | node := dec.parser.parse() | |
122 | if node == nil { | |
123 | return io.EOF | |
124 | } | |
125 | out := reflect.ValueOf(v) | |
126 | if out.Kind() == reflect.Ptr && !out.IsNil() { | |
127 | out = out.Elem() | |
128 | } | |
129 | d.unmarshal(node, out) | |
130 | if len(d.terrors) > 0 { | |
131 | return &TypeError{d.terrors} | |
132 | } | |
133 | return nil | |
134 | } | |
135 | ||
136 | func unmarshal(in []byte, out interface{}, strict bool) (err error) { | |
137 | defer handleErr(&err) | |
138 | d := newDecoder(strict) | |
81 | 139 | p := newParser(in) |
82 | 140 | defer p.destroy() |
83 | 141 | node := p.parse() |
98 | 156 | // of the generated document will reflect the structure of the value itself. |
99 | 157 | // Maps and pointers (to struct, string, int, etc) are accepted as the in value. |
100 | 158 | // |
101 | // Struct fields are only unmarshalled if they are exported (have an upper case | |
102 | // first letter), and are unmarshalled using the field name lowercased as the | |
159 | // Struct fields are only marshalled if they are exported (have an upper case | |
160 | // first letter), and are marshalled using the field name lowercased as the | |
103 | 161 | // default key. Custom keys may be defined via the "yaml" name in the field |
104 | 162 | // tag: the content preceding the first comma is used as the key, and the |
105 | 163 | // following comma-separated options are used to tweak the marshalling process. |
113 | 171 | // |
114 | 172 | // omitempty Only include the field if it's not set to the zero |
115 | 173 | // value for the type or to empty slices or maps. |
116 | // Does not apply to zero valued structs. | |
174 | // Zero valued structs will be omitted if all their public | |
175 | // fields are zero, unless they implement an IsZero | |
176 | // method (see the IsZeroer interface type), in which | |
177 | // case the field will be included if that method returns true. | |
117 | 178 | // |
118 | 179 | // flow Marshal using a flow style (useful for structs, |
119 | 180 | // sequences and maps). |
128 | 189 | // For example: |
129 | 190 | // |
130 | 191 | // type T struct { |
131 | // F int "a,omitempty" | |
192 | // F int `yaml:"a,omitempty"` | |
132 | 193 | // B int |
133 | 194 | // } |
134 | 195 | // yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" |
138 | 199 | defer handleErr(&err) |
139 | 200 | e := newEncoder() |
140 | 201 | defer e.destroy() |
141 | e.marshal("", reflect.ValueOf(in)) | |
202 | e.marshalDoc("", reflect.ValueOf(in)) | |
142 | 203 | e.finish() |
143 | 204 | out = e.out |
144 | 205 | return |
206 | } | |
207 | ||
208 | // An Encoder writes YAML values to an output stream. | |
209 | type Encoder struct { | |
210 | encoder *encoder | |
211 | } | |
212 | ||
213 | // NewEncoder returns a new encoder that writes to w. | |
214 | // The Encoder should be closed after use to flush all data | |
215 | // to w. | |
216 | func NewEncoder(w io.Writer) *Encoder { | |
217 | return &Encoder{ | |
218 | encoder: newEncoderWithWriter(w), | |
219 | } | |
220 | } | |
221 | ||
222 | // Encode writes the YAML encoding of v to the stream. | |
223 | // If multiple items are encoded to the stream, the | |
224 | // second and subsequent document will be preceded | |
225 | // with a "---" document separator, but the first will not. | |
226 | // | |
227 | // See the documentation for Marshal for details about the conversion of Go | |
228 | // values to YAML. | |
229 | func (e *Encoder) Encode(v interface{}) (err error) { | |
230 | defer handleErr(&err) | |
231 | e.encoder.marshalDoc("", reflect.ValueOf(v)) | |
232 | return nil | |
233 | } | |
234 | ||
235 | // Close closes the encoder by writing any remaining data. | |
236 | // It does not write a stream terminating string "...". | |
237 | func (e *Encoder) Close() (err error) { | |
238 | defer handleErr(&err) | |
239 | e.encoder.finish() | |
240 | return nil | |
145 | 241 | } |
146 | 242 | |
147 | 243 | func handleErr(err *error) { |
199 | 295 | Num int |
200 | 296 | OmitEmpty bool |
201 | 297 | Flow bool |
298 | // Id holds the unique field identifier, so we can cheaply | |
299 | // check for field duplicates without maintaining an extra map. | |
300 | Id int | |
202 | 301 | |
203 | 302 | // Inline holds the field index if the field is part of an inlined struct. |
204 | 303 | Inline []int |
278 | 377 | } else { |
279 | 378 | finfo.Inline = append([]int{i}, finfo.Inline...) |
280 | 379 | } |
380 | finfo.Id = len(fieldsList) | |
281 | 381 | fieldsMap[finfo.Key] = finfo |
282 | 382 | fieldsList = append(fieldsList, finfo) |
283 | 383 | } |
299 | 399 | return nil, errors.New(msg) |
300 | 400 | } |
301 | 401 | |
402 | info.Id = len(fieldsList) | |
302 | 403 | fieldsList = append(fieldsList, info) |
303 | 404 | fieldsMap[info.Key] = info |
304 | 405 | } |
305 | 406 | |
306 | sinfo = &structInfo{fieldsMap, fieldsList, inlineMap} | |
407 | sinfo = &structInfo{ | |
408 | FieldsMap: fieldsMap, | |
409 | FieldsList: fieldsList, | |
410 | InlineMap: inlineMap, | |
411 | } | |
307 | 412 | |
308 | 413 | fieldMapMutex.Lock() |
309 | 414 | structMap[st] = sinfo |
311 | 416 | return sinfo, nil |
312 | 417 | } |
313 | 418 | |
419 | // IsZeroer is used to check whether an object is zero to | |
420 | // determine whether it should be omitted when marshaling | |
421 | // with the omitempty flag. One notable implementation | |
422 | // is time.Time. | |
423 | type IsZeroer interface { | |
424 | IsZero() bool | |
425 | } | |
426 | ||
314 | 427 | func isZero(v reflect.Value) bool { |
315 | switch v.Kind() { | |
428 | kind := v.Kind() | |
429 | if z, ok := v.Interface().(IsZeroer); ok { | |
430 | if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { | |
431 | return true | |
432 | } | |
433 | return z.IsZero() | |
434 | } | |
435 | switch kind { | |
316 | 436 | case reflect.String: |
317 | 437 | return len(v.String()) == 0 |
318 | 438 | case reflect.Interface, reflect.Ptr: |
0 | 0 | package yaml |
1 | 1 | |
2 | 2 | import ( |
3 | "fmt" | |
3 | 4 | "io" |
4 | 5 | ) |
5 | 6 | |
237 | 238 | yaml_MAPPING_START_EVENT // A MAPPING-START event. |
238 | 239 | yaml_MAPPING_END_EVENT // A MAPPING-END event. |
239 | 240 | ) |
241 | ||
242 | var eventStrings = []string{ | |
243 | yaml_NO_EVENT: "none", | |
244 | yaml_STREAM_START_EVENT: "stream start", | |
245 | yaml_STREAM_END_EVENT: "stream end", | |
246 | yaml_DOCUMENT_START_EVENT: "document start", | |
247 | yaml_DOCUMENT_END_EVENT: "document end", | |
248 | yaml_ALIAS_EVENT: "alias", | |
249 | yaml_SCALAR_EVENT: "scalar", | |
250 | yaml_SEQUENCE_START_EVENT: "sequence start", | |
251 | yaml_SEQUENCE_END_EVENT: "sequence end", | |
252 | yaml_MAPPING_START_EVENT: "mapping start", | |
253 | yaml_MAPPING_END_EVENT: "mapping end", | |
254 | } | |
255 | ||
256 | func (e yaml_event_type_t) String() string { | |
257 | if e < 0 || int(e) >= len(eventStrings) { | |
258 | return fmt.Sprintf("unknown event %d", e) | |
259 | } | |
260 | return eventStrings[e] | |
261 | } | |
240 | 262 | |
241 | 263 | // The event structure. |
242 | 264 | type yaml_event_t struct { |
507 | 529 | |
508 | 530 | problem string // Error description. |
509 | 531 | |
510 | // The byte about which the problem occured. | |
532 | // The byte about which the problem occurred. | |
511 | 533 | problem_offset int |
512 | 534 | problem_value int |
513 | 535 | problem_mark yaml_mark_t |
520 | 542 | |
521 | 543 | read_handler yaml_read_handler_t // Read handler. |
522 | 544 | |
523 | input_file io.Reader // File input data. | |
524 | input []byte // String input data. | |
525 | input_pos int | |
545 | input_reader io.Reader // File input data. | |
546 | input []byte // String input data. | |
547 | input_pos int | |
526 | 548 | |
527 | 549 | eof bool // EOF flag |
528 | 550 | |
631 | 653 | write_handler yaml_write_handler_t // Write handler. |
632 | 654 | |
633 | 655 | output_buffer *[]byte // String output data. |
634 | output_file io.Writer // File output data. | |
656 | output_writer io.Writer // File output data. | |
635 | 657 | |
636 | 658 | buffer []byte // The working buffer. |
637 | 659 | buffer_pos int // The current position of the buffer. |