Codebase list flatbuffers / 7101224
Reworked reflection.h to be more general. e.g. support generic reading/writing from structs/vectors etc. Change-Id: I2eb6e24db088a72da444d5c8df7e506e53d5bc2d Tested: on Linux. Bug: 22660837 Wouter van Oortmerssen 8 years ago
6 changed file(s) with 743 addition(s) and 449 deletion(s). Raw diff Collapse all Expand all
2424 include/flatbuffers/reflection_generated.h
2525 src/idl_parser.cpp
2626 src/idl_gen_text.cpp
27 src/reflection.cpp
2728 )
2829
2930 set(FlatBuffers_Compiler_SRCS
4243 )
4344
4445 set(FlatBuffers_Tests_SRCS
45 include/flatbuffers/flatbuffers.h
46 include/flatbuffers/hash.h
47 include/flatbuffers/idl.h
48 include/flatbuffers/util.h
49 src/idl_parser.cpp
46 ${FlatBuffers_Library_SRCS}
47 src/idl_gen_fbs.cpp
5048 src/idl_gen_general.cpp
51 src/idl_gen_text.cpp
52 src/idl_gen_fbs.cpp
5349 tests/test.cpp
5450 # file generate by running compiler on tests/monster_test.fbs
5551 ${CMAKE_CURRENT_BINARY_DIR}/tests/monster_test_generated.h
3030 ../../tests/test.cpp \
3131 ../../src/idl_parser.cpp \
3232 ../../src/idl_gen_text.cpp \
33 ../../src/idl_gen_fbs.cpp
33 ../../src/idl_gen_fbs.cpp \
34 ../../src/idl_gen_general.cpp \
35 ../../src/reflection.cpp
3436 LOCAL_LDLIBS := -llog -landroid
3537 LOCAL_STATIC_LIBRARIES := android_native_app_glue flatbuffers
3638 LOCAL_ARM_MODE := arm
350350 // result here.
351351 return -table->KeyCompareWithValue(*key);
352352 }
353 };
354
355 // Represent a vector much like the template above, but in this case we
356 // don't know what the element types are (used with reflection.h).
// The element size/type must be supplied externally (e.g. from a reflection
// schema); Data() returns the raw element storage that starts immediately
// after the uoffset_t length prefix.
357 class VectorOfAny {
358 public:
// Length prefix is stored little-endian in the buffer; EndianScalar
// converts it to host byte order.
359 uoffset_t size() const { return EndianScalar(length_); }
360
361 const uint8_t *Data() const {
362 return reinterpret_cast<const uint8_t *>(&length_ + 1);
363 }
364 uint8_t *Data() {
365 return reinterpret_cast<uint8_t *>(&length_ + 1);
366 }
367 protected:
// Declared but never defined: instances of this class are only ever
// views over existing buffer memory, never constructed directly.
368 VectorOfAny();
369
370 uoffset_t length_;
353371 };
354372
355373 // Convenient helper function to get the length of any vector, regardless
9941012 return reinterpret_cast<T>(&data_[o]);
9951013 }
9961014
1015 const uint8_t *GetAddressOf(uoffset_t o) const { return &data_[o]; }
1016 uint8_t *GetAddressOf(uoffset_t o) { return &data_[o]; }
1017
9971018 private:
9981019 uint8_t data_[1];
9991020 };
10261047 ? reinterpret_cast<P>(p + ReadScalar<uoffset_t>(p))
10271048 : nullptr;
10281049 }
1029
10301050 template<typename P> P GetPointer(voffset_t field) const {
10311051 return const_cast<Table *>(this)->GetPointer<P>(field);
10321052 }
10491069 if (!field_offset) return false;
10501070 WriteScalar(data_ + field_offset, val - (data_ + field_offset));
10511071 return true;
1072 }
1073
1074 uint8_t *GetAddressOf(voffset_t field) {
1075 auto field_offset = GetOptionalFieldOffset(field);
1076 return field_offset ? data_ + field_offset : nullptr;
1077 }
1078 const uint8_t *GetAddressOf(voffset_t field) const {
1079 return const_cast<Table *>(this)->GetAddressOf(field);
10521080 }
10531081
10541082 uint8_t *GetVTable() { return data_ - ReadScalar<soffset_t>(data_); }
1616 #ifndef FLATBUFFERS_REFLECTION_H_
1717 #define FLATBUFFERS_REFLECTION_H_
1818
19 #include "flatbuffers/util.h"
20
2119 // This is somewhat of a circular dependency because flatc (and thus this
2220 // file) is needed to generate this header in the first place.
2321 // Should normally not be a problem since it can be generated by the
2927
3028 namespace flatbuffers {
3129
30 // ------------------------- GETTERS -------------------------
31
32 // Size of a basic type, don't use with structs.
3233 inline size_t GetTypeSize(reflection::BaseType base_type) {
3334 // This needs to correspond to the BaseType enum.
3435 static size_t sizes[] = { 0, 1, 1, 1, 1, 2, 2, 4, 4, 8, 8, 4, 8, 4, 4, 4, 4 };
// NOTE(review): no bounds check — base_type must be a valid BaseType
// enum value or this indexes out of range.
3536 return sizes[base_type];
37 }
38
39 // Same as above, but now correctly returns the size of a struct if
40 // the field (or vector element) is a struct.
// type_index is field.type()->index(), i.e. an index into
// schema.objects(); only consulted when base_type is Obj.
41 inline size_t GetTypeSizeInline(reflection::BaseType base_type,
42 int type_index,
43 const reflection::Schema &schema) {
44 if (base_type == reflection::Obj &&
45 schema.objects()->Get(type_index)->is_struct()) {
// Structs are stored inline, so their size is the full struct size.
46 return schema.objects()->Get(type_index)->bytesize();
47 } else {
48 return GetTypeSize(base_type);
49 }
3650 }
3751
3852 // Get the root, regardless of what type it is.
7488 return table.GetPointer<Vector<T> *>(field.offset());
7589 }
7690
91 // Get a field, if you know it's a vector, generically.
92 // To actually access elements, use the return value together with
93 // field.type()->element() in any of GetAnyVectorElemI below etc.
// Returns nullptr if the field is not present in the table.
94 inline VectorOfAny *GetFieldAnyV(const Table &table,
95 const reflection::Field &field) {
96 return table.GetPointer<VectorOfAny *>(field.offset());
97 }
98
7799 // Get a field, if you know it's a table.
78100 inline Table *GetFieldT(const Table &table,
79101 const reflection::Field &field) {
82104 return table.GetPointer<Table *>(field.offset());
83105 }
84106
85 // Get any field as a 64bit int, regardless of what it is (bool/int/float/str).
107 // Raw helper functions used below: get any value in memory as a 64bit int, a
108 // double or a string.
109 // All scalars get static_cast to an int64_t, strings use strtoull, every other
110 // data type returns 0.
111 int64_t GetAnyValueI(reflection::BaseType type, const uint8_t *data);
112 // All scalars static cast to double, strings use strtod, every other data
113 // type is 0.0.
114 double GetAnyValueF(reflection::BaseType type, const uint8_t *data);
115 // All scalars converted using stringstream, strings as-is, and all other
116 // data types provide some level of debug-pretty-printing.
117 std::string GetAnyValueS(reflection::BaseType type, const uint8_t *data,
118 const reflection::Schema *schema,
119 int type_index);
120
121 // Get any table field as a 64bit int, regardless of what type it is.
86122 inline int64_t GetAnyFieldI(const Table &table,
87123 const reflection::Field &field) {
88 # define FLATBUFFERS_GET(C, T) \
89 static_cast<int64_t>(GetField##C<T>(table, field))
90 switch (field.type()->base_type()) {
91 case reflection::UType:
92 case reflection::Bool:
93 case reflection::UByte: return FLATBUFFERS_GET(I, uint8_t);
94 case reflection::Byte: return FLATBUFFERS_GET(I, int8_t);
95 case reflection::Short: return FLATBUFFERS_GET(I, int16_t);
96 case reflection::UShort: return FLATBUFFERS_GET(I, uint16_t);
97 case reflection::Int: return FLATBUFFERS_GET(I, int32_t);
98 case reflection::UInt: return FLATBUFFERS_GET(I, uint32_t);
99 case reflection::Long: return FLATBUFFERS_GET(I, int64_t);
100 case reflection::ULong: return FLATBUFFERS_GET(I, uint64_t);
101 case reflection::Float: return FLATBUFFERS_GET(F, float);
102 case reflection::Double: return FLATBUFFERS_GET(F, double);
103 case reflection::String: {
104 auto s = GetFieldS(table, field);
105 return s ? StringToInt(s->c_str()) : 0;
106 }
107 default: return 0;
108 }
109 # undef FLATBUFFERS_GET
110 }
111
112 // Get any field as a double, regardless of what it is (bool/int/float/str).
124 auto field_ptr = table.GetAddressOf(field.offset());
125 return field_ptr ? GetAnyValueI(field.type()->base_type(), field_ptr)
126 : field.default_integer();
127 }
128
129 // Get any table field as a double, regardless of what type it is.
113130 inline double GetAnyFieldF(const Table &table,
114131 const reflection::Field &field) {
115 switch (field.type()->base_type()) {
116 case reflection::Float: return GetFieldF<float>(table, field);
117 case reflection::Double: return GetFieldF<double>(table, field);
118 case reflection::String: {
119 auto s = GetFieldS(table, field);
120 return s ? strtod(s->c_str(), nullptr) : 0.0;
121 }
122 default: return static_cast<double>(GetAnyFieldI(table, field));
123 }
124 }
125
126 // Get any field as a string, regardless of what it is (bool/int/float/str).
132 auto field_ptr = table.GetAddressOf(field.offset());
133 return field_ptr ? GetAnyValueF(field.type()->base_type(), field_ptr)
134 : field.default_real();
135 }
136
137
138 // Get any table field as a string, regardless of what type it is.
139 // You may pass nullptr for the schema if you don't care to have fields that
140 // are of table type pretty-printed.
127141 inline std::string GetAnyFieldS(const Table &table,
128142 const reflection::Field &field,
129 const reflection::Schema &schema) {
130 switch (field.type()->base_type()) {
131 case reflection::Float:
132 case reflection::Double: return NumToString(GetAnyFieldF(table, field));
133 case reflection::String: {
134 auto s = GetFieldS(table, field);
135 return s ? s->c_str() : "";
136 }
137 case reflection::Obj: {
138 // Convert the table to a string. This is mostly for debugging purposes,
139 // and does NOT promise to be JSON compliant.
140 // Also prefixes the type.
141 auto &objectdef = *schema.objects()->Get(field.type()->index());
142 auto s = objectdef.name()->str();
143 if (objectdef.is_struct()) {
144 s += "(struct)"; // TODO: implement this as well.
145 } else {
146 auto table_field = GetFieldT(table, field);
147 s += " { ";
148 auto fielddefs = objectdef.fields();
149 for (auto it = fielddefs->begin(); it != fielddefs->end(); ++it) {
150 auto &fielddef = **it;
151 if (!table.CheckField(fielddef.offset())) continue;
152 auto val = GetAnyFieldS(*table_field, fielddef, schema);
153 if (fielddef.type()->base_type() == reflection::String)
154 val = "\"" + val + "\""; // Doesn't deal with escape codes etc.
155 s += fielddef.name()->str();
156 s += ": ";
157 s += val;
158 s += ", ";
159 }
160 s += "}";
161 }
162 return s;
163 }
164 case reflection::Vector:
165 return "[(elements)]"; // TODO: implement this as well.
166 case reflection::Union:
167 return "(union)"; // TODO: implement this as well.
168 default: return NumToString(GetAnyFieldI(table, field));
169 }
170 }
143 const reflection::Schema *schema) {
144 auto field_ptr = table.GetAddressOf(field.offset());
145 return field_ptr ? GetAnyValueS(field.type()->base_type(), field_ptr, schema,
146 field.type()->index())
147 : "";
148 }
149
150 // Get any struct field as a 64bit int, regardless of what type it is.
// Unlike the Table overloads above there is no default-value fallback:
// struct fields are always stored inline, so GetAddressOf never fails.
151 inline int64_t GetAnyFieldI(const Struct &st,
152 const reflection::Field &field) {
153 return GetAnyValueI(field.type()->base_type(),
154 st.GetAddressOf(field.offset()));
155 }
156
157 // Get any struct field as a double, regardless of what type it is.
158 inline double GetAnyFieldF(const Struct &st,
159 const reflection::Field &field) {
160 return GetAnyValueF(field.type()->base_type(),
161 st.GetAddressOf(field.offset()));
162 }
163
164 // Get any struct field as a string, regardless of what type it is.
// Passes a null schema and -1 type index, so no table pretty-printing:
// structs can only contain scalars and other structs.
165 inline std::string GetAnyFieldS(const Struct &st,
166 const reflection::Field &field) {
167 return GetAnyValueS(field.type()->base_type(),
168 st.GetAddressOf(field.offset()), nullptr, -1);
169 }
170
171 // Get any vector element as a 64bit int, regardless of what type it is.
// NOTE(review): no bounds checking on i; elem_type must match the
// vector's actual element type (use GetTypeSize-compatible scalars).
172 inline int64_t GetAnyVectorElemI(const VectorOfAny *vec,
173 reflection::BaseType elem_type, size_t i) {
174 return GetAnyValueI(elem_type, vec->Data() + GetTypeSize(elem_type) * i);
175 }
176
177 // Get any vector element as a double, regardless of what type it is.
178 inline double GetAnyVectorElemF(const VectorOfAny *vec,
179 reflection::BaseType elem_type, size_t i) {
180 return GetAnyValueF(elem_type, vec->Data() + GetTypeSize(elem_type) * i);
181 }
182
183 // Get any vector element as a string, regardless of what type it is.
// Passes a null schema / -1 index, so table elements are not
// pretty-printed here.
184 inline std::string GetAnyVectorElemS(const VectorOfAny *vec,
185 reflection::BaseType elem_type, size_t i) {
186 return GetAnyValueS(elem_type, vec->Data() + GetTypeSize(elem_type) * i,
187 nullptr, -1);
188 }
189
190 // Get a vector element that's a table/string/vector from a generic vector.
191 // Pass Table/String/VectorOfAny as template parameter.
192 // Warning: does no typechecking.
// Elements are stored as relative uoffset_t offsets; this resolves the
// offset at slot i into an absolute pointer.
193 template<typename T> T *GetAnyVectorElemPointer(const VectorOfAny *vec,
194 size_t i) {
195 auto elem_ptr = vec->Data() + sizeof(uoffset_t) * i;
196 return (T *)(elem_ptr + ReadScalar<uoffset_t>(elem_ptr));
197 }
198
199 // Get the inline-address of a vector element. Useful for Structs (pass Struct
200 // as template arg), or being able to address a range of scalars in-line.
201 // Get elem_size from GetTypeSizeInline().
202 // Note: little-endian data on all platforms, use EndianScalar() instead of
203 // raw pointer access with scalars).
204 template<typename T> T *GetAnyVectorElemAddressOf(const VectorOfAny *vec,
205 size_t i,
206 size_t elem_size) {
207 // C-cast to allow const conversion.
208 return (T *)(vec->Data() + elem_size * i);
209 }
210
211 // Similarly, for elements of tables.
// NOTE(review): returns nullptr when the field is not present (Table's
// GetAddressOf returns nullptr for absent optional fields).
212 template<typename T> T *GetAnyFieldAddressOf(const Table &table,
213 const reflection::Field &field) {
214 return (T *)table.GetAddressOf(field.offset());
215 }
216
217 // Similarly, for elements of structs.
218 template<typename T> T *GetAnyFieldAddressOf(const Struct &st,
219 const reflection::Field &field) {
220 return (T *)st.GetAddressOf(field.offset());
221 }
222
223 // ------------------------- SETTERS -------------------------
171224
172225 // Set any scalar field, if you know its exact type.
173226 template<typename T> bool SetField(Table *table, const reflection::Field &field,
176229 return table->SetField(field.offset(), val);
177230 }
178231
179 // Set any field as a 64bit int, regardless of what it is (bool/int/float/str).
180 inline void SetAnyFieldI(Table *table, const reflection::Field &field,
232 // Raw helper functions used below: set any value in memory as a 64bit int, a
233 // double or a string.
234 // These work for all scalar values, but do nothing for other data types.
235 // To set a string, see SetString below.
236 void SetAnyValueI(reflection::BaseType type, uint8_t *data, int64_t val);
237 void SetAnyValueF(reflection::BaseType type, uint8_t *data, double val);
238 void SetAnyValueS(reflection::BaseType type, uint8_t *data, const char *val);
239
240 // Set any table field as a 64bit int, regardless of what type it is.
241 inline bool SetAnyFieldI(Table *table, const reflection::Field &field,
181242 int64_t val) {
182 # define FLATBUFFERS_SET(T) SetField<T>(table, field, static_cast<T>(val))
183 switch (field.type()->base_type()) {
184 case reflection::UType:
185 case reflection::Bool:
186 case reflection::UByte: FLATBUFFERS_SET(uint8_t ); break;
187 case reflection::Byte: FLATBUFFERS_SET(int8_t ); break;
188 case reflection::Short: FLATBUFFERS_SET(int16_t ); break;
189 case reflection::UShort: FLATBUFFERS_SET(uint16_t ); break;
190 case reflection::Int: FLATBUFFERS_SET(int32_t ); break;
191 case reflection::UInt: FLATBUFFERS_SET(uint32_t ); break;
192 case reflection::Long: FLATBUFFERS_SET(int64_t ); break;
193 case reflection::ULong: FLATBUFFERS_SET(uint64_t ); break;
194 case reflection::Float: FLATBUFFERS_SET(float ); break;
195 case reflection::Double: FLATBUFFERS_SET(double ); break;
196 // TODO: support strings
197 default: break;
198 }
199 # undef FLATBUFFERS_SET
200 }
201
202 // Set any field as a double, regardless of what it is (bool/int/float/str).
203 inline void SetAnyFieldF(Table *table, const reflection::Field &field,
243 auto field_ptr = table->GetAddressOf(field.offset());
244 if (!field_ptr) return false;
245 SetAnyValueI(field.type()->base_type(), field_ptr, val);
246 return true;
247 }
248
249 // Set any table field as a double, regardless of what type it is.
250 inline bool SetAnyFieldF(Table *table, const reflection::Field &field,
204251 double val) {
205 switch (field.type()->base_type()) {
206 case reflection::Float: SetField<float> (table, field,
207 static_cast<float>(val)); break;
208 case reflection::Double: SetField<double>(table, field, val); break;
209 // TODO: support strings.
210 default: SetAnyFieldI(table, field, static_cast<int64_t>(val)); break;
211 }
212 }
213
214 // Set any field as a string, regardless of what it is (bool/int/float/str).
215 inline void SetAnyFieldS(Table *table, const reflection::Field &field,
252 auto field_ptr = table->GetAddressOf(field.offset());
253 if (!field_ptr) return false;
254 SetAnyValueF(field.type()->base_type(), field_ptr, val);
255 return true;
256 }
257
258 // Set any table field as a string, regardless of what type it is.
// Returns false if the field is not present in the table (defaults are
// not stored and cannot be mutated in place). Per SetAnyValueS above,
// only scalar destinations are written; to change an actual string
// field's contents, use SetString below.
259 inline bool SetAnyFieldS(Table *table, const reflection::Field &field,
260 const char *val) {
261 auto field_ptr = table->GetAddressOf(field.offset());
262 if (!field_ptr) return false;
263 SetAnyValueS(field.type()->base_type(), field_ptr, val);
264 return true;
265 }
266
267 // Set any struct field as a 64bit int, regardless of what type it is.
// No failure mode: struct fields are always stored inline.
268 inline void SetAnyFieldI(Struct *st, const reflection::Field &field,
269 int64_t val) {
270 SetAnyValueI(field.type()->base_type(), st->GetAddressOf(field.offset()),
271 val);
272 }
273
274 // Set any struct field as a double, regardless of what type it is.
275 inline void SetAnyFieldF(Struct *st, const reflection::Field &field,
276 double val) {
277 SetAnyValueF(field.type()->base_type(), st->GetAddressOf(field.offset()),
278 val);
279 }
280
281 // Set any struct field as a string, regardless of what type it is.
282 inline void SetAnyFieldS(Struct *st, const reflection::Field &field,
216283 const char *val) {
217 switch (field.type()->base_type()) {
218 case reflection::Float:
219 case reflection::Double: SetAnyFieldF(table, field, strtod(val, nullptr));
220 // TODO: support strings.
221 default: SetAnyFieldI(table, field, StringToInt(val)); break;
222 }
223 }
284 SetAnyValueS(field.type()->base_type(), st->GetAddressOf(field.offset()),
285 val);
286 }
287
288 // Set any vector element as a 64bit int, regardless of what type it is.
// NOTE(review): no bounds checking on i; elem_type must match the
// vector's actual (scalar) element type.
289 inline void SetAnyVectorElemI(VectorOfAny *vec, reflection::BaseType elem_type,
290 size_t i, int64_t val) {
291 SetAnyValueI(elem_type, vec->Data() + GetTypeSize(elem_type) * i, val);
292 }
293
294 // Set any vector element as a double, regardless of what type it is.
295 inline void SetAnyVectorElemF(VectorOfAny *vec, reflection::BaseType elem_type,
296 size_t i, double val) {
297 SetAnyValueF(elem_type, vec->Data() + GetTypeSize(elem_type) * i, val);
298 }
299
300 // Set any vector element as a string, regardless of what type it is.
// Per SetAnyValueS, only scalar destinations are written.
301 inline void SetAnyVectorElemS(VectorOfAny *vec, reflection::BaseType elem_type,
302 size_t i, const char *val) {
303 SetAnyValueS(elem_type, vec->Data() + GetTypeSize(elem_type) * i, val);
304 }
305
306
307 // ------------------------- RESIZING SETTERS -------------------------
224308
225309 // "smart" pointer for use with resizing vectors: turns a pointer inside
226310 // a vector into a relative offset, such that it is not affected by resizes.
264348 return *enumval->object();
265349 }
266350
267 // Resize a FlatBuffer in-place by iterating through all offsets in the buffer
268 // and adjusting them by "delta" if they straddle the start offset.
269 // Once that is done, bytes can now be inserted/deleted safely.
270 // "delta" may be negative (shrinking).
271 // Unless "delta" is a multiple of the largest alignment, you'll create a small
272 // amount of garbage space in the buffer (usually 0..7 bytes).
273 // If your FlatBuffer's root table is not the schema's root table, you should
274 // pass in your root_table type as well.
275 class ResizeContext {
276 public:
277 ResizeContext(const reflection::Schema &schema, uoffset_t start, int delta,
278 std::vector<uint8_t> *flatbuf,
279 const reflection::Object *root_table = nullptr)
280 : schema_(schema), startptr_(flatbuf->data() + start),
281 delta_(delta), buf_(*flatbuf),
282 dag_check_(flatbuf->size() / sizeof(uoffset_t), false) {
// Round delta up to a multiple of the largest scalar size, so all
// alignment in the buffer is preserved by the resize.
283 auto mask = static_cast<int>(sizeof(largest_scalar_t) - 1);
284 delta_ = (delta_ + mask) & ~mask;
285 if (!delta_) return; // We can't shrink by less than largest_scalar_t.
286 // Now change all the offsets by delta_.
287 auto root = GetAnyRoot(buf_.data());
288 Straddle<uoffset_t, 1>(buf_.data(), root, buf_.data());
289 ResizeTable(root_table ? *root_table : *schema.root_table(), root);
290 // We can now add or remove bytes at start.
291 if (delta_ > 0) buf_.insert(buf_.begin() + start, delta_, 0);
292 else buf_.erase(buf_.begin() + start, buf_.begin() + start - delta_);
293 }
294
295 // Check if the range between first (lower address) and second straddles
296 // the insertion point. If it does, change the offset at offsetloc (of
297 // type T, with direction D).
298 template<typename T, int D> void Straddle(void *first, void *second,
299 void *offsetloc) {
300 if (first <= startptr_ && second >= startptr_) {
301 WriteScalar<T>(offsetloc, ReadScalar<T>(offsetloc) + delta_ * D);
302 DagCheck(offsetloc) = true;
303 }
304 }
305
306 // This returns a boolean that records if the corresponding offset location
307 // has been modified already. If so, we can't even read the corresponding
308 // offset, since it is pointing to a location that is illegal until the
309 // resize actually happens.
310 // This must be checked for every offset, since we can't know which offsets
311 // will straddle and which won't.
// NOTE(review): one flag per uoffset_t-sized slot; assumes offsetloc is
// uoffset_t-aligned relative to the buffer start — TODO confirm.
312 uint8_t &DagCheck(void *offsetloc) {
313 auto dag_idx = reinterpret_cast<uoffset_t *>(offsetloc) -
314 reinterpret_cast<uoffset_t *>(buf_.data());
315 return dag_check_[dag_idx];
316 }
317
// Recursively walk a table (given its schema definition), fixing up every
// offset that crosses the insertion point.
318 void ResizeTable(const reflection::Object &objectdef, Table *table) {
319 if (DagCheck(table))
320 return; // Table already visited.
321 auto vtable = table->GetVTable();
322 // Check if the vtable offset points beyond the insertion point.
323 Straddle<soffset_t, -1>(table, vtable, table);
324 // This direction shouldn't happen because vtables that sit before tables
325 // are always directly adjacent, but check just in case we ever change the
326 // way flatbuffers are built.
327 Straddle<soffset_t, -1>(vtable, table, table);
328 // Early out: since all fields inside the table must point forwards in
329 // memory, if the insertion point is before the table we can stop here.
330 auto tableloc = reinterpret_cast<uint8_t *>(table);
331 if (startptr_ <= tableloc) return;
332 // Check each field.
333 auto fielddefs = objectdef.fields();
334 for (auto it = fielddefs->begin(); it != fielddefs->end(); ++it) {
335 auto &fielddef = **it;
336 auto base_type = fielddef.type()->base_type();
337 // Ignore scalars.
338 if (base_type <= reflection::Double) continue;
339 // Ignore fields that are not stored.
340 auto offset = table->GetOptionalFieldOffset(fielddef.offset());
341 if (!offset) continue;
342 // Ignore structs.
343 auto subobjectdef = base_type == reflection::Obj ?
344 schema_.objects()->Get(fielddef.type()->index()) : nullptr;
345 if (subobjectdef && subobjectdef->is_struct()) continue;
346 // Get this fields' offset, and read it if safe.
347 auto offsetloc = tableloc + offset;
348 if (DagCheck(offsetloc))
349 continue; // This offset already visited.
350 auto ref = offsetloc + ReadScalar<uoffset_t>(offsetloc);
351 Straddle<uoffset_t, 1>(offsetloc, ref, offsetloc);
352 // Recurse.
353 switch (base_type) {
354 case reflection::Obj: {
355 ResizeTable(*subobjectdef, reinterpret_cast<Table *>(ref));
356 break;
357 }
358 case reflection::Vector: {
359 auto elem_type = fielddef.type()->element();
// Only vectors of offsets (tables/strings) contain pointers
// that may need fixing; scalar/struct vectors are inline data.
360 if (elem_type != reflection::Obj && elem_type != reflection::String)
361 break;
362 auto vec = reinterpret_cast<Vector<uoffset_t> *>(ref);
363 auto elemobjectdef = elem_type == reflection::Obj
364 ? schema_.objects()->Get(fielddef.type()->index())
365 : nullptr;
366 if (elemobjectdef && elemobjectdef->is_struct()) break;
367 for (uoffset_t i = 0; i < vec->size(); i++) {
368 auto loc = vec->Data() + i * sizeof(uoffset_t);
369 if (DagCheck(loc))
370 continue; // This offset already visited.
371 auto dest = loc + vec->Get(i);
372 Straddle<uoffset_t, 1>(loc, dest ,loc);
373 if (elemobjectdef)
374 ResizeTable(*elemobjectdef, reinterpret_cast<Table *>(dest));
375 }
376 break;
377 }
378 case reflection::Union: {
379 ResizeTable(GetUnionType(schema_, objectdef, fielddef, *table),
380 reinterpret_cast<Table *>(ref));
381 break;
382 }
383 case reflection::String:
384 break;
385 default:
386 assert(false);
387 }
388 }
389 }
390
// Not assignable: reference members (schema_, buf_) make assignment
// meaningless. Declared private-style (no definition) to disable it.
391 void operator=(const ResizeContext &rc);
392
393 private:
394 const reflection::Schema &schema_;
395 uint8_t *startptr_;
396 int delta_;
397 std::vector<uint8_t> &buf_;
398 std::vector<uint8_t> dag_check_;
399 };
400
401351 // Changes the contents of a string inside a FlatBuffer. FlatBuffer must
402352 // live inside a std::vector so we can resize the buffer if needed.
403353 // "str" must live inside "flatbuf" and may be invalidated after this call.
404354 // If your FlatBuffer's root table is not the schema's root table, you should
405355 // pass in your root_table type as well.
// "start" is the byte offset of the string's character data (past the
// uoffset_t length prefix).
406 inline void SetString(const reflection::Schema &schema, const std::string &val,
407 const String *str, std::vector<uint8_t> *flatbuf,
408 const reflection::Object *root_table = nullptr) {
409 auto delta = static_cast<int>(val.size()) - static_cast<int>(str->Length());
410 auto start = static_cast<uoffset_t>(reinterpret_cast<const uint8_t *>(str) -
411 flatbuf->data() +
412 sizeof(uoffset_t));
413 if (delta) {
414 // Clear the old string, since we don't want parts of it remaining.
415 memset(flatbuf->data() + start, 0, str->Length());
416 // Different size, we must expand (or contract).
417 ResizeContext(schema, start, delta, flatbuf, root_table);
// NOTE(review): the string's uoffset_t length prefix does not appear
// to be rewritten to val.size() anywhere here — verify length handling
// (readers of str->Length() would still see the old value).
418 }
419 // Copy new data. Safe because we created the right amount of space.
// val.size() + 1 also copies the terminating NUL.
420 memcpy(flatbuf->data() + start, val.c_str(), val.size() + 1);
421 }
356 void SetString(const reflection::Schema &schema, const std::string &val,
357 const String *str, std::vector<uint8_t> *flatbuf,
358 const reflection::Object *root_table = nullptr);
422359
423360 // Resizes a flatbuffers::Vector inside a FlatBuffer. FlatBuffer must
424361 // live inside a std::vector so we can resize the buffer if needed.
425362 // "vec" must live inside "flatbuf" and may be invalidated after this call.
426363 // If your FlatBuffer's root table is not the schema's root table, you should
427364 // pass in your root_table type as well.
365 uint8_t *ResizeAnyVector(const reflection::Schema &schema, uoffset_t newsize,
366 const VectorOfAny *vec, uoffset_t num_elems,
367 uoffset_t elem_size, std::vector<uint8_t> *flatbuf,
368 const reflection::Object *root_table = nullptr);
369
428370 template <typename T>
429371 void ResizeVector(const reflection::Schema &schema, uoffset_t newsize, T val,
430372 const Vector<T> *vec, std::vector<uint8_t> *flatbuf,
431373 const reflection::Object *root_table = nullptr) {
432374 auto delta_elem = static_cast<int>(newsize) - static_cast<int>(vec->size());
433 auto delta_bytes = delta_elem * static_cast<int>(sizeof(T));
434 auto vec_start = reinterpret_cast<const uint8_t *>(vec) - flatbuf->data();
435 auto start = static_cast<uoffset_t>(vec_start + sizeof(uoffset_t) +
436 sizeof(T) * vec->size());
437 if (delta_bytes) {
438 if (delta_elem < 0) {
439 // Clear elements we're throwing away, since some might remain in the
440 // buffer.
441 memset(flatbuf->data() + start + delta_elem * sizeof(T), 0,
442 -delta_elem * sizeof(T));
443 }
444 ResizeContext(schema, start, delta_bytes, flatbuf, root_table);
445 WriteScalar(flatbuf->data() + vec_start, newsize); // Length field.
446 // Set new elements to "val".
447 for (int i = 0; i < delta_elem; i++) {
448 auto loc = flatbuf->data() + start + i * sizeof(T);
449 auto is_scalar = std::is_scalar<T>::value;
450 if (is_scalar) {
451 WriteScalar(loc, val);
452 } else { // struct
453 *reinterpret_cast<T *>(loc) = val;
454 }
375 auto newelems = ResizeAnyVector(schema, newsize,
376 reinterpret_cast<const VectorOfAny *>(vec),
377 vec->size(),
378 static_cast<uoffset_t>(sizeof(T)), flatbuf,
379 root_table);
380 // Set new elements to "val".
381 for (int i = 0; i < delta_elem; i++) {
382 auto loc = newelems + i * sizeof(T);
383 auto is_scalar = std::is_scalar<T>::value;
384 if (is_scalar) {
385 WriteScalar(loc, val);
386 } else { // struct
387 *reinterpret_cast<T *>(loc) = val;
455388 }
456389 }
457390 }
464397 // existing one.
465398 // The return value can now be set using Vector::MutateOffset or SetFieldT
466399 // below.
// Appends "newbuf" (a complete FlatBuffer of newlen bytes) to the end of
// "flatbuf", minus its root offset, and returns a pointer to the appended
// buffer's root table inside "flatbuf".
// NOTE(review): the returned pointer is into flatbuf's storage and is
// invalidated by any later reallocation of that vector.
467 inline const uint8_t *AddFlatBuffer(std::vector<uint8_t> &flatbuf,
468 const uint8_t *newbuf, size_t newlen) {
469 // Align to sizeof(uoffset_t) past sizeof(largest_scalar_t) since we're
470 // going to chop off the root offset.
471 while ((flatbuf.size() & (sizeof(uoffset_t) - 1)) ||
472 !(flatbuf.size() & (sizeof(largest_scalar_t) - 1))) {
473 flatbuf.push_back(0);
474 }
475 auto insertion_point = static_cast<uoffset_t>(flatbuf.size());
476 // Insert the entire FlatBuffer minus the root pointer.
477 flatbuf.insert(flatbuf.end(), newbuf + sizeof(uoffset_t),
478 newbuf + newlen - sizeof(uoffset_t));
// The original root offset was relative to the start of newbuf; adjust
// it for the sizeof(uoffset_t) bytes we chopped off.
479 auto root_offset = ReadScalar<uoffset_t>(newbuf) - sizeof(uoffset_t);
480 return flatbuf.data() + insertion_point + root_offset;
481 }
400 const uint8_t *AddFlatBuffer(std::vector<uint8_t> &flatbuf,
401 const uint8_t *newbuf, size_t newlen);
482402
483403 inline bool SetFieldT(Table *table, const reflection::Field &field,
484404 const uint8_t *val) {
485405 assert(sizeof(uoffset_t) == GetTypeSize(field.type()->base_type()));
486406 return table->SetPointer(field.offset(), val);
487407 }
408
409 // ------------------------- COPYING -------------------------
488410
489411 // Generic copying of tables from a FlatBuffer into a FlatBuffer builder.
490412 // Can be used to do any kind of merging/selecting you may want to do out
494416 // Note: this does not deal with DAGs correctly. If the table passed forms a
495417 // DAG, the copy will be a tree instead (with duplicates).
496418
// Copy an inline (scalar or struct) field byte-for-byte from "table" into
// the builder, aligned to "align", and record it for the current vtable.
497 inline void CopyInline(FlatBufferBuilder &fbb,
498 const reflection::Field &fielddef,
499 const Table &table,
500 size_t align, size_t size) {
501 fbb.Align(align);
502 fbb.PushBytes(table.GetStruct<const uint8_t *>(fielddef.offset()), size);
503 fbb.TrackField(fielddef.offset(), fbb.GetSize());
504 }
505
// Deep-copies "table" (described by "objectdef") into "fbb" in two passes:
// pass 1 copies all out-of-line subobjects (strings, subtables, vectors)
// bottom-up and collects their offsets; pass 2 writes the table itself,
// consuming those offsets in the same field order. Note: DAGs become trees
// (shared subobjects are duplicated), per the header comment above.
506 inline Offset<const Table *> CopyTable(FlatBufferBuilder &fbb,
507 const reflection::Schema &schema,
508 const reflection::Object &objectdef,
509 const Table &table) {
510 // Before we can construct the table, we have to first generate any
511 // subobjects, and collect their offsets.
512 std::vector<uoffset_t> offsets;
513 auto fielddefs = objectdef.fields();
514 for (auto it = fielddefs->begin(); it != fielddefs->end(); ++it) {
515 auto &fielddef = **it;
516 // Skip if field is not present in the source.
517 if (!table.CheckField(fielddef.offset())) continue;
// offset stays 0 for inline data (scalars, structs), which is handled
// in the second pass instead.
518 uoffset_t offset = 0;
519 switch (fielddef.type()->base_type()) {
520 case reflection::String: {
521 offset = fbb.CreateString(GetFieldS(table, fielddef)).o;
522 break;
523 }
524 case reflection::Obj: {
525 auto &subobjectdef = *schema.objects()->Get(fielddef.type()->index());
526 if (!subobjectdef.is_struct()) {
527 offset = CopyTable(fbb, schema, subobjectdef,
528 *GetFieldT(table, fielddef)).o;
529 }
530 break;
531 }
532 case reflection::Union: {
533 auto &subobjectdef = GetUnionType(schema, objectdef, fielddef, table);
534 offset = CopyTable(fbb, schema, subobjectdef,
535 *GetFieldT(table, fielddef)).o;
536 break;
537 }
538 case reflection::Vector: {
539 auto vec = table.GetPointer<const Vector<Offset<Table>> *>(
540 fielddef.offset());
541 auto element_base_type = fielddef.type()->element();
542 auto elemobjectdef = element_base_type == reflection::Obj
543 ? schema.objects()->Get(fielddef.type()->index())
544 : nullptr;
545 switch (element_base_type) {
546 case reflection::String: {
547 std::vector<Offset<const String *>> elements(vec->size());
548 auto vec_s = reinterpret_cast<const Vector<Offset<String>> *>(vec);
549 for (uoffset_t i = 0; i < vec_s->size(); i++) {
550 elements[i] = fbb.CreateString(vec_s->Get(i)).o;
551 }
552 offset = fbb.CreateVector(elements).o;
553 break;
554 }
555 case reflection::Obj: {
556 if (!elemobjectdef->is_struct()) {
557 std::vector<Offset<const Table *>> elements(vec->size());
558 for (uoffset_t i = 0; i < vec->size(); i++) {
559 elements[i] =
560 CopyTable(fbb, schema, *elemobjectdef, *vec->Get(i));
561 }
562 offset = fbb.CreateVector(elements).o;
563 break;
564 }
565 // FALL-THRU:
566 }
567 default: { // Scalars and structs.
// Inline element vectors are copied as one raw byte block.
568 auto element_size = GetTypeSize(element_base_type);
569 if (elemobjectdef && elemobjectdef->is_struct())
570 element_size = elemobjectdef->bytesize();
571 fbb.StartVector(element_size, vec->size());
572 fbb.PushBytes(vec->Data(), element_size * vec->size());
573 offset = fbb.EndVector(vec->size());
574 break;
575 }
576 }
577 break;
578 }
579 default: // Scalars.
580 break;
581 }
582 if (offset) {
583 offsets.push_back(offset);
584 }
585 }
586 // Now we can build the actual table from either offsets or scalar data.
587 auto start = objectdef.is_struct()
588 ? fbb.StartStruct(objectdef.minalign())
589 : fbb.StartTable();
// Both passes iterate fields in identical order and apply the same
// presence filter, so offsets[] lines up with the offset-typed fields
// here (checked by the assert below).
590 size_t offset_idx = 0;
591 for (auto it = fielddefs->begin(); it != fielddefs->end(); ++it) {
592 auto &fielddef = **it;
593 if (!table.CheckField(fielddef.offset())) continue;
594 auto base_type = fielddef.type()->base_type();
595 switch (base_type) {
596 case reflection::Obj: {
597 auto &subobjectdef = *schema.objects()->Get(fielddef.type()->index());
598 if (subobjectdef.is_struct()) {
599 CopyInline(fbb, fielddef, table, subobjectdef.minalign(),
600 subobjectdef.bytesize());
601 break;
602 }
603 // else: FALL-THRU:
604 }
605 case reflection::Union:
606 case reflection::String:
607 case reflection::Vector:
608 fbb.AddOffset(fielddef.offset(), Offset<void>(offsets[offset_idx++]));
609 break;
610 default: { // Scalars.
611 auto size = GetTypeSize(base_type);
612 CopyInline(fbb, fielddef, table, size, size);
613 break;
614 }
615 }
616 }
617 assert(offset_idx == offsets.size());
618 if (objectdef.is_struct()) {
619 fbb.ClearOffsets();
620 return fbb.EndStruct();
621 } else {
622 return fbb.EndTable(start, static_cast<voffset_t>(fielddefs->size()));
623 }
624 }
// Deep-copy "table" (and all sub-objects it references) into fbb, using the
// schema to interpret its fields. Returns the offset of the copy's root.
Offset<const Table *> CopyTable(FlatBufferBuilder &fbb,
                                const reflection::Schema &schema,
                                const reflection::Object &objectdef,
                                const Table &table);
625423
626424 } // namespace flatbuffers
627425
0 /*
1 * Copyright 2015 Google Inc. All rights reserved.
2 *
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include "flatbuffers/reflection.h"
17 #include "flatbuffers/util.h"
18
19 // Helper functionality for reflection.
20
21 namespace flatbuffers {
22
23 int64_t GetAnyValueI(reflection::BaseType type, const uint8_t *data) {
24 # define FLATBUFFERS_GET(T) static_cast<int64_t>(ReadScalar<T>(data))
25 switch (type) {
26 case reflection::UType:
27 case reflection::Bool:
28 case reflection::UByte: return FLATBUFFERS_GET(uint8_t);
29 case reflection::Byte: return FLATBUFFERS_GET(int8_t);
30 case reflection::Short: return FLATBUFFERS_GET(int16_t);
31 case reflection::UShort: return FLATBUFFERS_GET(uint16_t);
32 case reflection::Int: return FLATBUFFERS_GET(int32_t);
33 case reflection::UInt: return FLATBUFFERS_GET(uint32_t);
34 case reflection::Long: return FLATBUFFERS_GET(int64_t);
35 case reflection::ULong: return FLATBUFFERS_GET(uint64_t);
36 case reflection::Float: return FLATBUFFERS_GET(float);
37 case reflection::Double: return FLATBUFFERS_GET(double);
38 case reflection::String: {
39 auto s = reinterpret_cast<const String *>(ReadScalar<uoffset_t>(data) +
40 data);
41 return s ? StringToInt(s->c_str()) : 0;
42 }
43 default: return 0; // Tables & vectors do not make sense.
44 }
45 # undef FLATBUFFERS_GET
46 }
47
48 double GetAnyValueF(reflection::BaseType type, const uint8_t *data) {
49 switch (type) {
50 case reflection::Float: return static_cast<double>(ReadScalar<float>(data));
51 case reflection::Double: return ReadScalar<double>(data);
52 case reflection::String: {
53 auto s = reinterpret_cast<const String *>(ReadScalar<uoffset_t>(data) +
54 data);
55 return s ? strtod(s->c_str(), nullptr) : 0.0;
56 }
57 default: return static_cast<double>(GetAnyValueI(type, data));
58 }
59 }
60
61 std::string GetAnyValueS(reflection::BaseType type, const uint8_t *data,
62 const reflection::Schema *schema, int type_index) {
63 switch (type) {
64 case reflection::Float:
65 case reflection::Double: return NumToString(GetAnyValueF(type, data));
66 case reflection::String: {
67 auto s = reinterpret_cast<const String *>(ReadScalar<uoffset_t>(data) +
68 data);
69 return s ? s->c_str() : "";
70 }
71 case reflection::Obj:
72 if (schema) {
73 // Convert the table to a string. This is mostly for debugging purposes,
74 // and does NOT promise to be JSON compliant.
75 // Also prefixes the type.
76 auto &objectdef = *schema->objects()->Get(type_index);
77 auto s = objectdef.name()->str();
78 if (objectdef.is_struct()) {
79 s += "(struct)"; // TODO: implement this as well.
80 } else {
81 auto table_field = reinterpret_cast<const Table *>(
82 ReadScalar<uoffset_t>(data) + data);
83 s += " { ";
84 auto fielddefs = objectdef.fields();
85 for (auto it = fielddefs->begin(); it != fielddefs->end(); ++it) {
86 auto &fielddef = **it;
87 if (!table_field->CheckField(fielddef.offset())) continue;
88 auto val = GetAnyFieldS(*table_field, fielddef, schema);
89 if (fielddef.type()->base_type() == reflection::String)
90 val = "\"" + val + "\""; // Doesn't deal with escape codes etc.
91 s += fielddef.name()->str();
92 s += ": ";
93 s += val;
94 s += ", ";
95 }
96 s += "}";
97 }
98 return s;
99 } else {
100 return "(table)";
101 }
102 case reflection::Vector:
103 return "[(elements)]"; // TODO: implement this as well.
104 case reflection::Union:
105 return "(union)"; // TODO: implement this as well.
106 default: return NumToString(GetAnyValueI(type, data));
107 }
108 }
109
110 void SetAnyValueI(reflection::BaseType type, uint8_t *data, int64_t val) {
111 # define FLATBUFFERS_SET(T) WriteScalar(data, static_cast<T>(val))
112 switch (type) {
113 case reflection::UType:
114 case reflection::Bool:
115 case reflection::UByte: FLATBUFFERS_SET(uint8_t ); break;
116 case reflection::Byte: FLATBUFFERS_SET(int8_t ); break;
117 case reflection::Short: FLATBUFFERS_SET(int16_t ); break;
118 case reflection::UShort: FLATBUFFERS_SET(uint16_t); break;
119 case reflection::Int: FLATBUFFERS_SET(int32_t ); break;
120 case reflection::UInt: FLATBUFFERS_SET(uint32_t); break;
121 case reflection::Long: FLATBUFFERS_SET(int64_t ); break;
122 case reflection::ULong: FLATBUFFERS_SET(uint64_t); break;
123 case reflection::Float: FLATBUFFERS_SET(float ); break;
124 case reflection::Double: FLATBUFFERS_SET(double ); break;
125 // TODO: support strings
126 default: break;
127 }
128 # undef FLATBUFFERS_SET
129 }
130
131 void SetAnyValueF(reflection::BaseType type, uint8_t *data, double val) {
132 switch (type) {
133 case reflection::Float: WriteScalar(data, static_cast<float>(val)); break;
134 case reflection::Double: WriteScalar(data, val); break;
135 // TODO: support strings.
136 default: SetAnyValueI(type, data, static_cast<int64_t>(val)); break;
137 }
138 }
139
140 void SetAnyValueS(reflection::BaseType type, uint8_t *data, const char *val) {
141 switch (type) {
142 case reflection::Float:
143 case reflection::Double: SetAnyValueF(type, data, strtod(val, nullptr));
144 // TODO: support strings.
145 default: SetAnyValueI(type, data, StringToInt(val)); break;
146 }
147 }
148
// Resize a FlatBuffer in-place by iterating through all offsets in the buffer
// and adjusting them by "delta" if they straddle the start offset.
// Once that is done, bytes can now be inserted/deleted safely.
// "delta" may be negative (shrinking).
// Unless "delta" is a multiple of the largest alignment, you'll create a small
// amount of garbage space in the buffer (usually 0..7 bytes).
// If your FlatBuffer's root table is not the schema's root table, you should
// pass in your root_table type as well.
class ResizeContext {
 public:
  // All the work happens in the constructor: every offset straddling "start"
  // is patched, then the buffer is grown/shrunk at "start".
  ResizeContext(const reflection::Schema &schema, uoffset_t start, int delta,
                std::vector<uint8_t> *flatbuf,
                const reflection::Object *root_table = nullptr)
     : schema_(schema), startptr_(flatbuf->data() + start),
       delta_(delta), buf_(*flatbuf),
       dag_check_(flatbuf->size() / sizeof(uoffset_t), false) {
    // Round delta up to a multiple of sizeof(largest_scalar_t) so existing
    // alignment within the buffer is preserved.
    auto mask = static_cast<int>(sizeof(largest_scalar_t) - 1);
    delta_ = (delta_ + mask) & ~mask;
    if (!delta_) return;  // We can't shrink by less than largest_scalar_t.
    // Now change all the offsets by delta_.
    auto root = GetAnyRoot(buf_.data());
    Straddle<uoffset_t, 1>(buf_.data(), root, buf_.data());
    ResizeTable(root_table ? *root_table : *schema.root_table(), root);
    // We can now add or remove bytes at start.
    if (delta_ > 0) buf_.insert(buf_.begin() + start, delta_, 0);
    else buf_.erase(buf_.begin() + start, buf_.begin() + start - delta_);
  }

  // Check if the range between first (lower address) and second straddles
  // the insertion point. If it does, change the offset at offsetloc (of
  // type T, with direction D).
  template<typename T, int D> void Straddle(void *first, void *second,
                                            void *offsetloc) {
    if (first <= startptr_ && second >= startptr_) {
      WriteScalar<T>(offsetloc, ReadScalar<T>(offsetloc) + delta_ * D);
      DagCheck(offsetloc) = true;
    }
  }

  // This returns a boolean that records if the corresponding offset location
  // has been modified already. If so, we can't even read the corresponding
  // offset, since it is pointing to a location that is illegal until the
  // resize actually happens.
  // This must be checked for every offset, since we can't know which offsets
  // will straddle and which won't.
  uint8_t &DagCheck(void *offsetloc) {
    // One flag per uoffset_t-sized slot in the buffer.
    auto dag_idx = reinterpret_cast<uoffset_t *>(offsetloc) -
                   reinterpret_cast<uoffset_t *>(buf_.data());
    return dag_check_[dag_idx];
  }

  // Recursively visit "table" (described by objectdef) and patch every
  // offset inside it that straddles the insertion point.
  void ResizeTable(const reflection::Object &objectdef, Table *table) {
    if (DagCheck(table))
      return;  // Table already visited.
    auto vtable = table->GetVTable();
    // Check if the vtable offset points beyond the insertion point.
    Straddle<soffset_t, -1>(table, vtable, table);
    // This direction shouldn't happen because vtables that sit before tables
    // are always directly adjacent, but check just in case we ever change the
    // way flatbuffers are built.
    Straddle<soffset_t, -1>(vtable, table, table);
    // Early out: since all fields inside the table must point forwards in
    // memory, if the insertion point is before the table we can stop here.
    auto tableloc = reinterpret_cast<uint8_t *>(table);
    if (startptr_ <= tableloc) return;
    // Check each field.
    auto fielddefs = objectdef.fields();
    for (auto it = fielddefs->begin(); it != fielddefs->end(); ++it) {
      auto &fielddef = **it;
      auto base_type = fielddef.type()->base_type();
      // Ignore scalars.
      if (base_type <= reflection::Double) continue;
      // Ignore fields that are not stored.
      auto offset = table->GetOptionalFieldOffset(fielddef.offset());
      if (!offset) continue;
      // Ignore structs (stored inline, so they contain no offsets).
      auto subobjectdef = base_type == reflection::Obj ?
        schema_.objects()->Get(fielddef.type()->index()) : nullptr;
      if (subobjectdef && subobjectdef->is_struct()) continue;
      // Get this fields' offset, and read it if safe.
      auto offsetloc = tableloc + offset;
      if (DagCheck(offsetloc))
        continue;  // This offset already visited.
      auto ref = offsetloc + ReadScalar<uoffset_t>(offsetloc);
      Straddle<uoffset_t, 1>(offsetloc, ref, offsetloc);
      // Recurse.
      switch (base_type) {
        case reflection::Obj: {
          ResizeTable(*subobjectdef, reinterpret_cast<Table *>(ref));
          break;
        }
        case reflection::Vector: {
          auto elem_type = fielddef.type()->element();
          // Only element types that are themselves offsets need patching.
          if (elem_type != reflection::Obj && elem_type != reflection::String)
            break;
          auto vec = reinterpret_cast<Vector<uoffset_t> *>(ref);
          auto elemobjectdef = elem_type == reflection::Obj
            ? schema_.objects()->Get(fielddef.type()->index())
            : nullptr;
          if (elemobjectdef && elemobjectdef->is_struct()) break;
          for (uoffset_t i = 0; i < vec->size(); i++) {
            auto loc = vec->Data() + i * sizeof(uoffset_t);
            if (DagCheck(loc))
              continue;  // This offset already visited.
            auto dest = loc + vec->Get(i);
            Straddle<uoffset_t, 1>(loc, dest ,loc);
            if (elemobjectdef)
              ResizeTable(*elemobjectdef, reinterpret_cast<Table *>(dest));
          }
          break;
        }
        case reflection::Union: {
          ResizeTable(GetUnionType(schema_, objectdef, fielddef, *table),
                      reinterpret_cast<Table *>(ref));
          break;
        }
        case reflection::String:
          break;
        default:
          assert(false);
      }
    }
  }

  // Declared but intentionally not defined: buf_ is a reference, so this
  // class is non-assignable.
  void operator=(const ResizeContext &rc);

 private:
  const reflection::Schema &schema_;
  uint8_t *startptr_;  // Address in the buffer where bytes will be inserted.
  int delta_;          // Byte delta, rounded up to largest_scalar_t multiple.
  std::vector<uint8_t> &buf_;
  // "already patched" flag per uoffset_t-sized slot; see DagCheck().
  std::vector<uint8_t> dag_check_;
};
282
283 void SetString(const reflection::Schema &schema, const std::string &val,
284 const String *str, std::vector<uint8_t> *flatbuf,
285 const reflection::Object *root_table) {
286 auto delta = static_cast<int>(val.size()) - static_cast<int>(str->Length());
287 auto start = static_cast<uoffset_t>(reinterpret_cast<const uint8_t *>(str) -
288 flatbuf->data() +
289 sizeof(uoffset_t));
290 if (delta) {
291 // Clear the old string, since we don't want parts of it remaining.
292 memset(flatbuf->data() + start, 0, str->Length());
293 // Different size, we must expand (or contract).
294 ResizeContext(schema, start, delta, flatbuf, root_table);
295 }
296 // Copy new data. Safe because we created the right amount of space.
297 memcpy(flatbuf->data() + start, val.c_str(), val.size() + 1);
298 }
299
300 uint8_t *ResizeAnyVector(const reflection::Schema &schema, uoffset_t newsize,
301 const VectorOfAny *vec, uoffset_t num_elems,
302 uoffset_t elem_size, std::vector<uint8_t> *flatbuf,
303 const reflection::Object *root_table) {
304 auto delta_elem = static_cast<int>(newsize) - static_cast<int>(num_elems);
305 auto delta_bytes = delta_elem * static_cast<int>(elem_size);
306 auto vec_start = reinterpret_cast<const uint8_t *>(vec) - flatbuf->data();
307 auto start = static_cast<uoffset_t>(vec_start + sizeof(uoffset_t) +
308 elem_size * num_elems);
309 if (delta_bytes) {
310 if (delta_elem < 0) {
311 // Clear elements we're throwing away, since some might remain in the
312 // buffer.
313 auto size_clear = -delta_elem * elem_size;
314 memset(flatbuf->data() + start - size_clear, 0, size_clear);
315 }
316 ResizeContext(schema, start, delta_bytes, flatbuf, root_table);
317 WriteScalar(flatbuf->data() + vec_start, newsize); // Length field.
318 // Set new elements to 0.. this can be overwritten by the caller.
319 if (delta_elem > 0) {
320 memset(flatbuf->data() + start, 0, delta_elem * elem_size);
321 }
322 }
323 return flatbuf->data() + start;
324 }
325
// Append a whole FlatBuffer ("newbuf"/"newlen") to "flatbuf", minus its root
// offset, and return a pointer to the root table of the appended copy.
const uint8_t *AddFlatBuffer(std::vector<uint8_t> &flatbuf,
                             const uint8_t *newbuf, size_t newlen) {
  // Align to sizeof(uoffset_t) past sizeof(largest_scalar_t) since we're
  // going to chop off the root offset.
  // i.e. pad until size % 8 == 4: after stripping the leading 4-byte root
  // uoffset_t, the inserted contents then keep their original alignment
  // (up to 8 bytes) relative to the destination buffer.
  while ((flatbuf.size() & (sizeof(uoffset_t) - 1)) ||
         !(flatbuf.size() & (sizeof(largest_scalar_t) - 1))) {
    flatbuf.push_back(0);
  }
  auto insertion_point = static_cast<uoffset_t>(flatbuf.size());
  // Insert the entire FlatBuffer minus the root pointer.
  flatbuf.insert(flatbuf.end(), newbuf + sizeof(uoffset_t),
                 newbuf + newlen - sizeof(uoffset_t));
  // Re-derive where the root table sits inside the inserted bytes.
  auto root_offset = ReadScalar<uoffset_t>(newbuf) - sizeof(uoffset_t);
  return flatbuf.data() + insertion_point + root_offset;
}
341
// Copy the inline payload (scalar or struct) of field "fielddef" from
// "table" verbatim into the buffer under construction, with the given
// alignment and size, and record it in the builder's current vtable.
void CopyInline(FlatBufferBuilder &fbb, const reflection::Field &fielddef,
                const Table &table, size_t align, size_t size) {
  fbb.Align(align);
  fbb.PushBytes(table.GetStruct<const uint8_t *>(fielddef.offset()), size);
  fbb.TrackField(fielddef.offset(), fbb.GetSize());
}
348
// Deep-copy "table" (described by objectdef, from schema) into fbb.
// Two passes: first serialize every sub-object (strings, sub-tables, vectors,
// unions) and remember their offsets; then write the table/struct itself from
// the original inline data plus the collected offsets.
// Returns the offset of the root of the copy in fbb.
Offset<const Table *> CopyTable(FlatBufferBuilder &fbb,
                                const reflection::Schema &schema,
                                const reflection::Object &objectdef,
                                const Table &table) {
  // Before we can construct the table, we have to first generate any
  // subobjects, and collect their offsets.
  std::vector<uoffset_t> offsets;
  auto fielddefs = objectdef.fields();
  for (auto it = fielddefs->begin(); it != fielddefs->end(); ++it) {
    auto &fielddef = **it;
    // Skip if field is not present in the source.
    if (!table.CheckField(fielddef.offset())) continue;
    uoffset_t offset = 0;
    switch (fielddef.type()->base_type()) {
      case reflection::String: {
        offset = fbb.CreateString(GetFieldS(table, fielddef)).o;
        break;
      }
      case reflection::Obj: {
        auto &subobjectdef = *schema.objects()->Get(fielddef.type()->index());
        // Structs are stored inline and are handled in the second pass.
        if (!subobjectdef.is_struct()) {
          offset = CopyTable(fbb, schema, subobjectdef,
                             *GetFieldT(table, fielddef)).o;
        }
        break;
      }
      case reflection::Union: {
        auto &subobjectdef = GetUnionType(schema, objectdef, fielddef, table);
        offset = CopyTable(fbb, schema, subobjectdef,
                           *GetFieldT(table, fielddef)).o;
        break;
      }
      case reflection::Vector: {
        auto vec = table.GetPointer<const Vector<Offset<Table>> *>(
            fielddef.offset());
        auto element_base_type = fielddef.type()->element();
        auto elemobjectdef = element_base_type == reflection::Obj
            ? schema.objects()->Get(fielddef.type()->index())
            : nullptr;
        switch (element_base_type) {
          case reflection::String: {
            std::vector<Offset<const String *>> elements(vec->size());
            auto vec_s = reinterpret_cast<const Vector<Offset<String>> *>(vec);
            for (uoffset_t i = 0; i < vec_s->size(); i++) {
              elements[i] = fbb.CreateString(vec_s->Get(i)).o;
            }
            offset = fbb.CreateVector(elements).o;
            break;
          }
          case reflection::Obj: {
            if (!elemobjectdef->is_struct()) {
              std::vector<Offset<const Table *>> elements(vec->size());
              for (uoffset_t i = 0; i < vec->size(); i++) {
                elements[i] =
                    CopyTable(fbb, schema, *elemobjectdef, *vec->Get(i));
              }
              offset = fbb.CreateVector(elements).o;
              break;
            }
            // FALL-THRU: vectors of structs are copied bitwise below.
          }
          default: { // Scalars and structs.
            auto element_size = GetTypeSize(element_base_type);
            if (elemobjectdef && elemobjectdef->is_struct())
              element_size = elemobjectdef->bytesize();
            // Scalar/struct elements contain no offsets, so the whole
            // payload can be copied verbatim.
            fbb.StartVector(element_size, vec->size());
            fbb.PushBytes(vec->Data(), element_size * vec->size());
            offset = fbb.EndVector(vec->size());
            break;
          }
        }
        break;
      }
      default: // Scalars.
        break;
    }
    if (offset) {
      offsets.push_back(offset);
    }
  }
  // Now we can build the actual table from either offsets or scalar data.
  auto start = objectdef.is_struct()
      ? fbb.StartStruct(objectdef.minalign())
      : fbb.StartTable();
  // offsets are consumed in the same field order they were produced above.
  size_t offset_idx = 0;
  for (auto it = fielddefs->begin(); it != fielddefs->end(); ++it) {
    auto &fielddef = **it;
    if (!table.CheckField(fielddef.offset())) continue;
    auto base_type = fielddef.type()->base_type();
    switch (base_type) {
      case reflection::Obj: {
        auto &subobjectdef = *schema.objects()->Get(fielddef.type()->index());
        if (subobjectdef.is_struct()) {
          CopyInline(fbb, fielddef, table, subobjectdef.minalign(),
                     subobjectdef.bytesize());
          break;
        }
        // else: FALL-THRU: non-struct objects were serialized in pass one.
      }
      case reflection::Union:
      case reflection::String:
      case reflection::Vector:
        fbb.AddOffset(fielddef.offset(), Offset<void>(offsets[offset_idx++]));
        break;
      default: { // Scalars.
        auto size = GetTypeSize(base_type);
        CopyInline(fbb, fielddef, table, size, size);
        break;
      }
    }
  }
  assert(offset_idx == offsets.size());
  if (objectdef.is_struct()) {
    fbb.ClearOffsets();
    return fbb.EndStruct();
  } else {
    return fbb.EndTable(start, static_cast<voffset_t>(fielddefs->size()));
  }
}
468
469 } // namespace flatbuffers
323323 TEST_EQ(hp_int64, 80);
324324 auto hp_double = flatbuffers::GetAnyFieldF(root, hp_field);
325325 TEST_EQ(hp_double, 80.0);
326 auto hp_string = flatbuffers::GetAnyFieldS(root, hp_field, schema);
326 auto hp_string = flatbuffers::GetAnyFieldS(root, hp_field, &schema);
327327 TEST_EQ_STR(hp_string.c_str(), "80");
328328
329329 // We can also modify it.