16 | 16 |
#ifndef FLATBUFFERS_REFLECTION_H_
|
17 | 17 |
#define FLATBUFFERS_REFLECTION_H_
|
18 | 18 |
|
19 | |
#include "flatbuffers/util.h"
|
20 | |
|
21 | 19 |
// This is somewhat of a circular dependency because flatc (and thus this
|
22 | 20 |
// file) is needed to generate this header in the first place.
|
23 | 21 |
// Should normally not be a problem since it can be generated by the
|
|
29 | 27 |
|
30 | 28 |
namespace flatbuffers {
|
31 | 29 |
|
|
30 |
// ------------------------- GETTERS -------------------------
|
|
31 |
|
|
32 |
// Size of a basic type, don't use with structs.
|
32 | 33 |
inline size_t GetTypeSize(reflection::BaseType base_type) {
  // Lookup table indexed by BaseType; must stay in sync with that enum's
  // declaration order. Structs are not covered — see GetTypeSizeInline().
  static size_t kSizes[] = {
    0, 1, 1, 1, 1, 2, 2, 4, 4, 8, 8, 4, 8, 4, 4, 4, 4
  };
  return kSizes[base_type];
}
|
|
38 |
|
|
39 |
// Same as above, but now correctly returns the size of a struct if
|
|
40 |
// the field (or vector element) is a struct.
|
|
41 |
// Same as above, but correctly returns the size of a struct when the field
// (or vector element) is a struct, by consulting the schema.
inline size_t GetTypeSizeInline(reflection::BaseType base_type,
                                int type_index,
                                const reflection::Schema &schema) {
  if (base_type != reflection::Obj) return GetTypeSize(base_type);
  auto objectdef = schema.objects()->Get(type_index);
  return objectdef->is_struct() ? objectdef->bytesize()
                                : GetTypeSize(base_type);
}
|
37 | 51 |
|
38 | 52 |
// Get the root, regardless of what type it is.
|
|
74 | 88 |
return table.GetPointer<Vector<T> *>(field.offset());
|
75 | 89 |
}
|
76 | 90 |
|
|
91 |
// Get a field, if you know it's a vector, generically.
|
|
92 |
// To actually access elements, use the return value together with
|
|
93 |
// field.type()->element() in any of GetAnyVectorElemI below etc.
|
|
94 |
// Get a field, if you know it's a vector, generically.
// To actually access elements, use the return value together with
// field.type()->element() in any of GetAnyVectorElemI below etc.
inline VectorOfAny *GetFieldAnyV(const Table &table,
                                 const reflection::Field &field) {
  auto voffset = field.offset();
  return table.GetPointer<VectorOfAny *>(voffset);
}
|
|
98 |
|
77 | 99 |
// Get a field, if you know it's a table.
|
78 | 100 |
// Get a field, if you know it's a table.
inline Table *GetFieldT(const Table &table,
                        const reflection::Field &field) {
  auto voffset = field.offset();
  return table.GetPointer<Table *>(voffset);
}
|
84 | 106 |
|
85 | |
// Get any field as a 64bit int, regardless of what it is (bool/int/float/str).
|
|
107 |
// Raw helper functions used below: get any value in memory as a 64bit int, a
|
|
108 |
// double or a string.
|
|
109 |
// All scalars get static_cast to an int64_t, strings use strtoull, every other
|
|
110 |
// data type returns 0.
|
|
111 |
int64_t GetAnyValueI(reflection::BaseType type, const uint8_t *data);
|
|
112 |
// All scalars static cast to double, strings use strtod, every other data
|
|
113 |
// type is 0.0.
|
|
114 |
double GetAnyValueF(reflection::BaseType type, const uint8_t *data);
|
|
115 |
// All scalars converted using stringstream, strings as-is, and all other
|
|
116 |
// data types provide some level of debug-pretty-printing.
|
|
117 |
std::string GetAnyValueS(reflection::BaseType type, const uint8_t *data,
|
|
118 |
const reflection::Schema *schema,
|
|
119 |
int type_index);
|
|
120 |
|
|
121 |
// Get any table field as a 64bit int, regardless of what type it is.
|
86 | 122 |
inline int64_t GetAnyFieldI(const Table &table,
|
87 | 123 |
const reflection::Field &field) {
|
88 | |
# define FLATBUFFERS_GET(C, T) \
|
89 | |
static_cast<int64_t>(GetField##C<T>(table, field))
|
90 | |
switch (field.type()->base_type()) {
|
91 | |
case reflection::UType:
|
92 | |
case reflection::Bool:
|
93 | |
case reflection::UByte: return FLATBUFFERS_GET(I, uint8_t);
|
94 | |
case reflection::Byte: return FLATBUFFERS_GET(I, int8_t);
|
95 | |
case reflection::Short: return FLATBUFFERS_GET(I, int16_t);
|
96 | |
case reflection::UShort: return FLATBUFFERS_GET(I, uint16_t);
|
97 | |
case reflection::Int: return FLATBUFFERS_GET(I, int32_t);
|
98 | |
case reflection::UInt: return FLATBUFFERS_GET(I, uint32_t);
|
99 | |
case reflection::Long: return FLATBUFFERS_GET(I, int64_t);
|
100 | |
case reflection::ULong: return FLATBUFFERS_GET(I, uint64_t);
|
101 | |
case reflection::Float: return FLATBUFFERS_GET(F, float);
|
102 | |
case reflection::Double: return FLATBUFFERS_GET(F, double);
|
103 | |
case reflection::String: {
|
104 | |
auto s = GetFieldS(table, field);
|
105 | |
return s ? StringToInt(s->c_str()) : 0;
|
106 | |
}
|
107 | |
default: return 0;
|
108 | |
}
|
109 | |
# undef FLATBUFFERS_GET
|
110 | |
}
|
111 | |
|
112 | |
// Get any field as a double, regardless of what it is (bool/int/float/str).
|
|
124 |
auto field_ptr = table.GetAddressOf(field.offset());
|
|
125 |
return field_ptr ? GetAnyValueI(field.type()->base_type(), field_ptr)
|
|
126 |
: field.default_integer();
|
|
127 |
}
|
|
128 |
|
|
129 |
// Get any table field as a double, regardless of what type it is.
|
113 | 130 |
inline double GetAnyFieldF(const Table &table,
|
114 | 131 |
const reflection::Field &field) {
|
115 | |
switch (field.type()->base_type()) {
|
116 | |
case reflection::Float: return GetFieldF<float>(table, field);
|
117 | |
case reflection::Double: return GetFieldF<double>(table, field);
|
118 | |
case reflection::String: {
|
119 | |
auto s = GetFieldS(table, field);
|
120 | |
return s ? strtod(s->c_str(), nullptr) : 0.0;
|
121 | |
}
|
122 | |
default: return static_cast<double>(GetAnyFieldI(table, field));
|
123 | |
}
|
124 | |
}
|
125 | |
|
126 | |
// Get any field as a string, regardless of what it is (bool/int/float/str).
|
|
132 |
auto field_ptr = table.GetAddressOf(field.offset());
|
|
133 |
return field_ptr ? GetAnyValueF(field.type()->base_type(), field_ptr)
|
|
134 |
: field.default_real();
|
|
135 |
}
|
|
136 |
|
|
137 |
|
|
138 |
// Get any table field as a string, regardless of what type it is.
|
|
139 |
// You may pass nullptr for the schema if you don't care to have fields that
|
|
140 |
// are of table type pretty-printed.
|
127 | 141 |
inline std::string GetAnyFieldS(const Table &table,
|
128 | 142 |
const reflection::Field &field,
|
129 | |
const reflection::Schema &schema) {
|
130 | |
switch (field.type()->base_type()) {
|
131 | |
case reflection::Float:
|
132 | |
case reflection::Double: return NumToString(GetAnyFieldF(table, field));
|
133 | |
case reflection::String: {
|
134 | |
auto s = GetFieldS(table, field);
|
135 | |
return s ? s->c_str() : "";
|
136 | |
}
|
137 | |
case reflection::Obj: {
|
138 | |
// Convert the table to a string. This is mostly for debugging purposes,
|
139 | |
// and does NOT promise to be JSON compliant.
|
140 | |
// Also prefixes the type.
|
141 | |
auto &objectdef = *schema.objects()->Get(field.type()->index());
|
142 | |
auto s = objectdef.name()->str();
|
143 | |
if (objectdef.is_struct()) {
|
144 | |
s += "(struct)"; // TODO: implement this as well.
|
145 | |
} else {
|
146 | |
auto table_field = GetFieldT(table, field);
|
147 | |
s += " { ";
|
148 | |
auto fielddefs = objectdef.fields();
|
149 | |
for (auto it = fielddefs->begin(); it != fielddefs->end(); ++it) {
|
150 | |
auto &fielddef = **it;
|
151 | |
if (!table.CheckField(fielddef.offset())) continue;
|
152 | |
auto val = GetAnyFieldS(*table_field, fielddef, schema);
|
153 | |
if (fielddef.type()->base_type() == reflection::String)
|
154 | |
val = "\"" + val + "\""; // Doesn't deal with escape codes etc.
|
155 | |
s += fielddef.name()->str();
|
156 | |
s += ": ";
|
157 | |
s += val;
|
158 | |
s += ", ";
|
159 | |
}
|
160 | |
s += "}";
|
161 | |
}
|
162 | |
return s;
|
163 | |
}
|
164 | |
case reflection::Vector:
|
165 | |
return "[(elements)]"; // TODO: implement this as well.
|
166 | |
case reflection::Union:
|
167 | |
return "(union)"; // TODO: implement this as well.
|
168 | |
default: return NumToString(GetAnyFieldI(table, field));
|
169 | |
}
|
170 | |
}
|
|
143 |
const reflection::Schema *schema) {
|
|
144 |
auto field_ptr = table.GetAddressOf(field.offset());
|
|
145 |
return field_ptr ? GetAnyValueS(field.type()->base_type(), field_ptr, schema,
|
|
146 |
field.type()->index())
|
|
147 |
: "";
|
|
148 |
}
|
|
149 |
|
|
150 |
// Get any struct field as a 64bit int, regardless of what type it is.
|
|
151 |
inline int64_t GetAnyFieldI(const Struct &st,
|
|
152 |
const reflection::Field &field) {
|
|
153 |
return GetAnyValueI(field.type()->base_type(),
|
|
154 |
st.GetAddressOf(field.offset()));
|
|
155 |
}
|
|
156 |
|
|
157 |
// Get any struct field as a double, regardless of what type it is.
|
|
158 |
inline double GetAnyFieldF(const Struct &st,
|
|
159 |
const reflection::Field &field) {
|
|
160 |
return GetAnyValueF(field.type()->base_type(),
|
|
161 |
st.GetAddressOf(field.offset()));
|
|
162 |
}
|
|
163 |
|
|
164 |
// Get any struct field as a string, regardless of what type it is.
|
|
165 |
inline std::string GetAnyFieldS(const Struct &st,
|
|
166 |
const reflection::Field &field) {
|
|
167 |
return GetAnyValueS(field.type()->base_type(),
|
|
168 |
st.GetAddressOf(field.offset()), nullptr, -1);
|
|
169 |
}
|
|
170 |
|
|
171 |
// Get any vector element as a 64bit int, regardless of what type it is.
|
|
172 |
inline int64_t GetAnyVectorElemI(const VectorOfAny *vec,
|
|
173 |
reflection::BaseType elem_type, size_t i) {
|
|
174 |
return GetAnyValueI(elem_type, vec->Data() + GetTypeSize(elem_type) * i);
|
|
175 |
}
|
|
176 |
|
|
177 |
// Get any vector element as a double, regardless of what type it is.
|
|
178 |
inline double GetAnyVectorElemF(const VectorOfAny *vec,
|
|
179 |
reflection::BaseType elem_type, size_t i) {
|
|
180 |
return GetAnyValueF(elem_type, vec->Data() + GetTypeSize(elem_type) * i);
|
|
181 |
}
|
|
182 |
|
|
183 |
// Get any vector element as a string, regardless of what type it is.
|
|
184 |
inline std::string GetAnyVectorElemS(const VectorOfAny *vec,
|
|
185 |
reflection::BaseType elem_type, size_t i) {
|
|
186 |
return GetAnyValueS(elem_type, vec->Data() + GetTypeSize(elem_type) * i,
|
|
187 |
nullptr, -1);
|
|
188 |
}
|
|
189 |
|
|
190 |
// Get a vector element that's a table/string/vector from a generic vector.
|
|
191 |
// Pass Table/String/VectorOfAny as template parameter.
|
|
192 |
// Warning: does no typechecking.
|
|
193 |
template<typename T> T *GetAnyVectorElemPointer(const VectorOfAny *vec,
|
|
194 |
size_t i) {
|
|
195 |
auto elem_ptr = vec->Data() + sizeof(uoffset_t) * i;
|
|
196 |
return (T *)(elem_ptr + ReadScalar<uoffset_t>(elem_ptr));
|
|
197 |
}
|
|
198 |
|
|
199 |
// Get the inline-address of a vector element. Useful for Structs (pass Struct
|
|
200 |
// as template arg), or being able to address a range of scalars in-line.
|
|
201 |
// Get elem_size from GetTypeSizeInline().
|
|
202 |
// Note: little-endian data on all platforms, use EndianScalar() instead of
|
|
203 |
// raw pointer access with scalars).
|
|
204 |
template<typename T> T *GetAnyVectorElemAddressOf(const VectorOfAny *vec,
|
|
205 |
size_t i,
|
|
206 |
size_t elem_size) {
|
|
207 |
// C-cast to allow const conversion.
|
|
208 |
return (T *)(vec->Data() + elem_size * i);
|
|
209 |
}
|
|
210 |
|
|
211 |
// Similarly, for elements of tables.
|
|
212 |
template<typename T> T *GetAnyFieldAddressOf(const Table &table,
|
|
213 |
const reflection::Field &field) {
|
|
214 |
return (T *)table.GetAddressOf(field.offset());
|
|
215 |
}
|
|
216 |
|
|
217 |
// Similarly, for elements of structs.
|
|
218 |
template<typename T> T *GetAnyFieldAddressOf(const Struct &st,
|
|
219 |
const reflection::Field &field) {
|
|
220 |
return (T *)st.GetAddressOf(field.offset());
|
|
221 |
}
|
|
222 |
|
|
223 |
// ------------------------- SETTERS -------------------------
|
171 | 224 |
|
172 | 225 |
// Set any scalar field, if you know its exact type.
|
173 | 226 |
template<typename T> bool SetField(Table *table, const reflection::Field &field,
|
|
176 | 229 |
return table->SetField(field.offset(), val);
|
177 | 230 |
}
|
178 | 231 |
|
179 | |
// Set any field as a 64bit int, regardless of what it is (bool/int/float/str).
|
180 | |
inline void SetAnyFieldI(Table *table, const reflection::Field &field,
|
|
232 |
// Raw helper functions used below: set any value in memory as a 64bit int, a
|
|
233 |
// double or a string.
|
|
234 |
// These work for all scalar values, but do nothing for other data types.
|
|
235 |
// To set a string, see SetString below.
|
|
236 |
void SetAnyValueI(reflection::BaseType type, uint8_t *data, int64_t val);
|
|
237 |
void SetAnyValueF(reflection::BaseType type, uint8_t *data, double val);
|
|
238 |
void SetAnyValueS(reflection::BaseType type, uint8_t *data, const char *val);
|
|
239 |
|
|
240 |
// Set any table field as a 64bit int, regardless of what type it is.
|
|
241 |
// Fix: unresolved merge fused the old void/macro-switch body (plus the old
// SetAnyFieldF signature) into the new bool-returning implementation.
// Keep the new form, which delegates to SetAnyValueI and reports whether
// the field was actually present.
inline bool SetAnyFieldI(Table *table, const reflection::Field &field,
                         int64_t val) {
  // Only fields that are physically present in the table can be mutated
  // in place; absent (default-valued) fields return false.
  auto field_ptr = table->GetAddressOf(field.offset());
  if (!field_ptr) return false;
  SetAnyValueI(field.type()->base_type(), field_ptr, val);
  return true;
}
|
|
248 |
|
|
249 |
// Set any table field as a double, regardless of what type it is.
|
|
250 |
// Fix: unresolved merge fused the old switch-based body (plus the old
// SetAnyFieldS signature) into the new implementation. Keep the new form.
inline bool SetAnyFieldF(Table *table, const reflection::Field &field,
                         double val) {
  // Absent (default-valued) fields cannot be set in place; return false.
  auto field_ptr = table->GetAddressOf(field.offset());
  if (!field_ptr) return false;
  SetAnyValueF(field.type()->base_type(), field_ptr, val);
  return true;
}
|
|
257 |
|
|
258 |
// Set any table field as a string, regardless of what type it is.
|
|
259 |
// Set any table field as a string, regardless of what type it is.
inline bool SetAnyFieldS(Table *table, const reflection::Field &field,
                         const char *val) {
  // Absent (default-valued) fields cannot be set in place.
  auto field_ptr = table->GetAddressOf(field.offset());
  if (field_ptr == nullptr) return false;
  SetAnyValueS(field.type()->base_type(), field_ptr, val);
  return true;
}
|
|
266 |
|
|
267 |
// Set any struct field as a 64bit int, regardless of what type it is.
|
|
268 |
// Set any struct field as a 64bit int, regardless of what type it is.
inline void SetAnyFieldI(Struct *st, const reflection::Field &field,
                         int64_t val) {
  // Struct fields are always present inline, so no presence check needed.
  auto data = st->GetAddressOf(field.offset());
  SetAnyValueI(field.type()->base_type(), data, val);
}
|
|
273 |
|
|
274 |
// Set any struct field as a double, regardless of what type it is.
|
|
275 |
// Set any struct field as a double, regardless of what type it is.
inline void SetAnyFieldF(Struct *st, const reflection::Field &field,
                         double val) {
  // Struct fields are always present inline, so no presence check needed.
  auto data = st->GetAddressOf(field.offset());
  SetAnyValueF(field.type()->base_type(), data, val);
}
|
|
280 |
|
|
281 |
// Set any struct field as a string, regardless of what type it is.
|
|
282 |
// Fix: unresolved merge spliced the old Table-based switch body into this
// Struct overload — that body referenced an undeclared `table` and would
// not compile. Keep the new SetAnyValueS-based implementation.
inline void SetAnyFieldS(Struct *st, const reflection::Field &field,
                         const char *val) {
  // Struct fields are always present inline, so no presence check needed.
  SetAnyValueS(field.type()->base_type(), st->GetAddressOf(field.offset()),
               val);
}
|
|
287 |
|
|
288 |
// Set any vector element as a 64bit int, regardless of what type it is.
|
|
289 |
// Set any vector element as a 64bit int, regardless of what type it is.
inline void SetAnyVectorElemI(VectorOfAny *vec, reflection::BaseType elem_type,
                              size_t i, int64_t val) {
  auto elem_ptr = vec->Data() + GetTypeSize(elem_type) * i;
  SetAnyValueI(elem_type, elem_ptr, val);
}
|
|
293 |
|
|
294 |
// Set any vector element as a double, regardless of what type it is.
|
|
295 |
// Set any vector element as a double, regardless of what type it is.
inline void SetAnyVectorElemF(VectorOfAny *vec, reflection::BaseType elem_type,
                              size_t i, double val) {
  auto elem_ptr = vec->Data() + GetTypeSize(elem_type) * i;
  SetAnyValueF(elem_type, elem_ptr, val);
}
|
|
299 |
|
|
300 |
// Set any vector element as a string, regardless of what type it is.
|
|
301 |
// Set any vector element as a string, regardless of what type it is.
inline void SetAnyVectorElemS(VectorOfAny *vec, reflection::BaseType elem_type,
                              size_t i, const char *val) {
  auto elem_ptr = vec->Data() + GetTypeSize(elem_type) * i;
  SetAnyValueS(elem_type, elem_ptr, val);
}
|
|
305 |
|
|
306 |
|
|
307 |
// ------------------------- RESIZING SETTERS -------------------------
|
224 | 308 |
|
225 | 309 |
// "smart" pointer for use with resizing vectors: turns a pointer inside
|
226 | 310 |
// a vector into a relative offset, such that it is not affected by resizes.
|
|
264 | 348 |
return *enumval->object();
|
265 | 349 |
}
|
266 | 350 |
|
267 | |
// Resize a FlatBuffer in-place by iterating through all offsets in the buffer
|
268 | |
// and adjusting them by "delta" if they straddle the start offset.
|
269 | |
// Once that is done, bytes can now be inserted/deleted safely.
|
270 | |
// "delta" may be negative (shrinking).
|
271 | |
// Unless "delta" is a multiple of the largest alignment, you'll create a small
|
272 | |
// amount of garbage space in the buffer (usually 0..7 bytes).
|
273 | |
// If your FlatBuffer's root table is not the schema's root table, you should
|
274 | |
// pass in your root_table type as well.
|
275 | |
// Performs the offset-fixup pass for an in-place buffer resize: walks every
// reachable offset in the FlatBuffer and shifts those that straddle the
// insertion point by a (alignment-rounded) delta, then inserts/erases the
// bytes. Constructed for its side effects only.
class ResizeContext {
 public:
  ResizeContext(const reflection::Schema &schema, uoffset_t start, int delta,
                std::vector<uint8_t> *flatbuf,
                const reflection::Object *root_table = nullptr)
      : schema_(schema), startptr_(flatbuf->data() + start),
        delta_(delta), buf_(*flatbuf),
        dag_check_(flatbuf->size() / sizeof(uoffset_t), false) {
    // Round delta up to the largest scalar alignment so existing alignment
    // within the buffer is preserved (may waste 0..7 bytes).
    auto mask = static_cast<int>(sizeof(largest_scalar_t) - 1);
    delta_ = (delta_ + mask) & ~mask;
    if (!delta_) return;  // We can't shrink by less than largest_scalar_t.
    // Now change all the offsets by delta_.
    auto root = GetAnyRoot(buf_.data());
    Straddle<uoffset_t, 1>(buf_.data(), root, buf_.data());
    ResizeTable(root_table ? *root_table : *schema.root_table(), root);
    // We can now add or remove bytes at start.
    if (delta_ > 0) buf_.insert(buf_.begin() + start, delta_, 0);
    else buf_.erase(buf_.begin() + start, buf_.begin() + start - delta_);
  }

  // Check if the range between first (lower address) and second straddles
  // the insertion point. If it does, change the offset at offsetloc (of
  // type T, with direction D).
  template<typename T, int D> void Straddle(void *first, void *second,
                                            void *offsetloc) {
    if (first <= startptr_ && second >= startptr_) {
      WriteScalar<T>(offsetloc, ReadScalar<T>(offsetloc) + delta_ * D);
      DagCheck(offsetloc) = true;
    }
  }

  // This returns a boolean that records if the corresponding offset location
  // has been modified already. If so, we can't even read the corresponding
  // offset, since it is pointing to a location that is illegal until the
  // resize actually happens.
  // This must be checked for every offset, since we can't know which offsets
  // will straddle and which won't.
  uint8_t &DagCheck(void *offsetloc) {
    // Offsets are uoffset_t-aligned, so the uoffset_t index into the buffer
    // uniquely identifies the location.
    auto dag_idx = reinterpret_cast<uoffset_t *>(offsetloc) -
                   reinterpret_cast<uoffset_t *>(buf_.data());
    return dag_check_[dag_idx];
  }

  // Recursively visit a table and fix up every offset reachable from it.
  void ResizeTable(const reflection::Object &objectdef, Table *table) {
    if (DagCheck(table))
      return;  // Table already visited.
    auto vtable = table->GetVTable();
    // Check if the vtable offset points beyond the insertion point.
    Straddle<soffset_t, -1>(table, vtable, table);
    // This direction shouldn't happen because vtables that sit before tables
    // are always directly adjacent, but check just in case we ever change the
    // way flatbuffers are built.
    Straddle<soffset_t, -1>(vtable, table, table);
    // Early out: since all fields inside the table must point forwards in
    // memory, if the insertion point is before the table we can stop here.
    auto tableloc = reinterpret_cast<uint8_t *>(table);
    if (startptr_ <= tableloc) return;
    // Check each field.
    auto fielddefs = objectdef.fields();
    for (auto it = fielddefs->begin(); it != fielddefs->end(); ++it) {
      auto &fielddef = **it;
      auto base_type = fielddef.type()->base_type();
      // Ignore scalars.
      if (base_type <= reflection::Double) continue;
      // Ignore fields that are not stored.
      auto offset = table->GetOptionalFieldOffset(fielddef.offset());
      if (!offset) continue;
      // Ignore structs (stored inline, so they contain no offsets).
      auto subobjectdef = base_type == reflection::Obj ?
        schema_.objects()->Get(fielddef.type()->index()) : nullptr;
      if (subobjectdef && subobjectdef->is_struct()) continue;
      // Get this fields' offset, and read it if safe.
      auto offsetloc = tableloc + offset;
      if (DagCheck(offsetloc))
        continue;  // This offset already visited.
      auto ref = offsetloc + ReadScalar<uoffset_t>(offsetloc);
      Straddle<uoffset_t, 1>(offsetloc, ref, offsetloc);
      // Recurse.
      switch (base_type) {
        case reflection::Obj: {
          ResizeTable(*subobjectdef, reinterpret_cast<Table *>(ref));
          break;
        }
        case reflection::Vector: {
          auto elem_type = fielddef.type()->element();
          // Only vectors of offsets (tables/strings) need fixups.
          if (elem_type != reflection::Obj && elem_type != reflection::String)
            break;
          auto vec = reinterpret_cast<Vector<uoffset_t> *>(ref);
          auto elemobjectdef = elem_type == reflection::Obj
            ? schema_.objects()->Get(fielddef.type()->index())
            : nullptr;
          if (elemobjectdef && elemobjectdef->is_struct()) break;
          for (uoffset_t i = 0; i < vec->size(); i++) {
            auto loc = vec->Data() + i * sizeof(uoffset_t);
            if (DagCheck(loc))
              continue;  // This offset already visited.
            auto dest = loc + vec->Get(i);
            Straddle<uoffset_t, 1>(loc, dest ,loc);
            if (elemobjectdef)
              ResizeTable(*elemobjectdef, reinterpret_cast<Table *>(dest));
          }
          break;
        }
        case reflection::Union: {
          ResizeTable(GetUnionType(schema_, objectdef, fielddef, *table),
                      reinterpret_cast<Table *>(ref));
          break;
        }
        case reflection::String:
          break;
        default:
          assert(false);
      }
    }
  }

  // Declared but not defined: disallows assignment (pre-C++11 idiom, since
  // the class holds reference members).
  void operator=(const ResizeContext &rc);

 private:
  const reflection::Schema &schema_;   // Schema describing the buffer.
  uint8_t *startptr_;                  // Insertion point inside the buffer.
  int delta_;                          // Rounded resize amount (may be < 0).
  std::vector<uint8_t> &buf_;          // The buffer being resized.
  std::vector<uint8_t> dag_check_;     // Per-offset "already fixed" flags.
};
|
400 | |
|
401 | 351 |
// Changes the contents of a string inside a FlatBuffer. FlatBuffer must
|
402 | 352 |
// live inside a std::vector so we can resize the buffer if needed.
|
403 | 353 |
// "str" must live inside "flatbuf" and may be invalidated after this call.
|
404 | 354 |
// If your FlatBuffer's root table is not the schema's root table, you should
|
405 | 355 |
// pass in your root_table type as well.
|
406 | |
inline void SetString(const reflection::Schema &schema, const std::string &val,
                      const String *str, std::vector<uint8_t> *flatbuf,
                      const reflection::Object *root_table = nullptr) {
  // Byte-size difference between the new and old string contents.
  auto delta = static_cast<int>(val.size()) - static_cast<int>(str->Length());
  // Position of the string's character data (skipping its length field).
  auto start = static_cast<uoffset_t>(reinterpret_cast<const uint8_t *>(str) -
                                      flatbuf->data() +
                                      sizeof(uoffset_t));
  if (delta) {
    // Clear the old string, since we don't want parts of it remaining.
    memset(flatbuf->data() + start, 0, str->Length());
    // Different size, we must expand (or contract).
    // NOTE: this may reallocate flatbuf, invalidating `str` (see comment
    // above); only `start` is used from here on.
    ResizeContext(schema, start, delta, flatbuf, root_table);
  }
  // Copy new data. Safe because we created the right amount of space.
  // +1 also copies the NUL terminator.
  memcpy(flatbuf->data() + start, val.c_str(), val.size() + 1);
}
|
|
356 |
void SetString(const reflection::Schema &schema, const std::string &val,
|
|
357 |
const String *str, std::vector<uint8_t> *flatbuf,
|
|
358 |
const reflection::Object *root_table = nullptr);
|
422 | 359 |
|
423 | 360 |
// Resizes a flatbuffers::Vector inside a FlatBuffer. FlatBuffer must
|
424 | 361 |
// live inside a std::vector so we can resize the buffer if needed.
|
425 | 362 |
// "vec" must live inside "flatbuf" and may be invalidated after this call.
|
426 | 363 |
// If your FlatBuffer's root table is not the schema's root table, you should
|
427 | 364 |
// pass in your root_table type as well.
|
|
365 |
uint8_t *ResizeAnyVector(const reflection::Schema &schema, uoffset_t newsize,
|
|
366 |
const VectorOfAny *vec, uoffset_t num_elems,
|
|
367 |
uoffset_t elem_size, std::vector<uint8_t> *flatbuf,
|
|
368 |
const reflection::Object *root_table = nullptr);
|
|
369 |
|
428 | 370 |
template <typename T>
|
429 | 371 |
void ResizeVector(const reflection::Schema &schema, uoffset_t newsize, T val,
|
430 | 372 |
const Vector<T> *vec, std::vector<uint8_t> *flatbuf,
|
431 | 373 |
const reflection::Object *root_table = nullptr) {
|
432 | 374 |
auto delta_elem = static_cast<int>(newsize) - static_cast<int>(vec->size());
|
433 | |
auto delta_bytes = delta_elem * static_cast<int>(sizeof(T));
|
434 | |
auto vec_start = reinterpret_cast<const uint8_t *>(vec) - flatbuf->data();
|
435 | |
auto start = static_cast<uoffset_t>(vec_start + sizeof(uoffset_t) +
|
436 | |
sizeof(T) * vec->size());
|
437 | |
if (delta_bytes) {
|
438 | |
if (delta_elem < 0) {
|
439 | |
// Clear elements we're throwing away, since some might remain in the
|
440 | |
// buffer.
|
441 | |
memset(flatbuf->data() + start + delta_elem * sizeof(T), 0,
|
442 | |
-delta_elem * sizeof(T));
|
443 | |
}
|
444 | |
ResizeContext(schema, start, delta_bytes, flatbuf, root_table);
|
445 | |
WriteScalar(flatbuf->data() + vec_start, newsize); // Length field.
|
446 | |
// Set new elements to "val".
|
447 | |
for (int i = 0; i < delta_elem; i++) {
|
448 | |
auto loc = flatbuf->data() + start + i * sizeof(T);
|
449 | |
auto is_scalar = std::is_scalar<T>::value;
|
450 | |
if (is_scalar) {
|
451 | |
WriteScalar(loc, val);
|
452 | |
} else { // struct
|
453 | |
*reinterpret_cast<T *>(loc) = val;
|
454 | |
}
|
|
375 |
auto newelems = ResizeAnyVector(schema, newsize,
|
|
376 |
reinterpret_cast<const VectorOfAny *>(vec),
|
|
377 |
vec->size(),
|
|
378 |
static_cast<uoffset_t>(sizeof(T)), flatbuf,
|
|
379 |
root_table);
|
|
380 |
// Set new elements to "val".
|
|
381 |
for (int i = 0; i < delta_elem; i++) {
|
|
382 |
auto loc = newelems + i * sizeof(T);
|
|
383 |
auto is_scalar = std::is_scalar<T>::value;
|
|
384 |
if (is_scalar) {
|
|
385 |
WriteScalar(loc, val);
|
|
386 |
} else { // struct
|
|
387 |
*reinterpret_cast<T *>(loc) = val;
|
455 | 388 |
}
|
456 | 389 |
}
|
457 | 390 |
}
|
|
464 | 397 |
// existing one.
|
465 | 398 |
// The return value can now be set using Vector::MutateOffset or SetFieldT
|
466 | 399 |
// below.
|
467 | |
// Appends an entire FlatBuffer (minus its root offset) to an existing
// buffer, returning a pointer to the appended root table inside flatbuf.
// NOTE(review): return value points into flatbuf's storage — invalidated by
// any later reallocation of flatbuf.
inline const uint8_t *AddFlatBuffer(std::vector<uint8_t> &flatbuf,
                                    const uint8_t *newbuf, size_t newlen) {
  // Align to sizeof(uoffset_t) past sizeof(largest_scalar_t) since we're
  // going to chop off the root offset.
  while ((flatbuf.size() & (sizeof(uoffset_t) - 1)) ||
         !(flatbuf.size() & (sizeof(largest_scalar_t) - 1))) {
    flatbuf.push_back(0);
  }
  auto insertion_point = static_cast<uoffset_t>(flatbuf.size());
  // Insert the entire FlatBuffer minus the root pointer.
  flatbuf.insert(flatbuf.end(), newbuf + sizeof(uoffset_t),
                 newbuf + newlen - sizeof(uoffset_t));
  // The root offset in newbuf was relative to its own start; adjust for the
  // stripped root-offset field.
  auto root_offset = ReadScalar<uoffset_t>(newbuf) - sizeof(uoffset_t);
  return flatbuf.data() + insertion_point + root_offset;
}
|
|
400 |
const uint8_t *AddFlatBuffer(std::vector<uint8_t> &flatbuf,
|
|
401 |
const uint8_t *newbuf, size_t newlen);
|
482 | 402 |
|
483 | 403 |
// Re-point an offset-typed (table/string/vector) field at `val`.
// Returns false if the field is not present in the table.
inline bool SetFieldT(Table *table, const reflection::Field &field,
                      const uint8_t *val) {
  // Only offset-sized fields can be repointed this way.
  auto base_type = field.type()->base_type();
  assert(GetTypeSize(base_type) == sizeof(uoffset_t));
  return table->SetPointer(field.offset(), val);
}
|
|
408 |
|
|
409 |
// ------------------------- COPYING -------------------------
|
488 | 410 |
|
489 | 411 |
// Generic copying of tables from a FlatBuffer into a FlatBuffer builder.
|
490 | 412 |
// Can be used to do any kind of merging/selecting you may want to do out
|
|
494 | 416 |
// Note: this does not deal with DAGs correctly. If the table passed forms a
|
495 | 417 |
// DAG, the copy will be a tree instead (with duplicates).
|
496 | 418 |
|
497 | |
// Copy a struct (or scalar) field's raw bytes into the builder and record
// their location for the field.
inline void CopyInline(FlatBufferBuilder &fbb,
                       const reflection::Field &fielddef,
                       const Table &table,
                       size_t align, size_t size) {
  fbb.Align(align);
  auto src = table.GetStruct<const uint8_t *>(fielddef.offset());
  fbb.PushBytes(src, size);
  fbb.TrackField(fielddef.offset(), fbb.GetSize());
}
|
505 | |
|
506 | |
// Deep-copies `table` (described by `objectdef`) into `fbb` in two passes:
// pass 1 builds all sub-objects (strings, sub-tables, unions, vectors) and
// collects their offsets; pass 2 lays down the table itself, consuming those
// offsets in the same field order and copying scalars/structs inline.
inline Offset<const Table *> CopyTable(FlatBufferBuilder &fbb,
                                       const reflection::Schema &schema,
                                       const reflection::Object &objectdef,
                                       const Table &table) {
  // Before we can construct the table, we have to first generate any
  // subobjects, and collect their offsets.
  std::vector<uoffset_t> offsets;
  auto fielddefs = objectdef.fields();
  for (auto it = fielddefs->begin(); it != fielddefs->end(); ++it) {
    auto &fielddef = **it;
    // Skip if field is not present in the source.
    if (!table.CheckField(fielddef.offset())) continue;
    // offset stays 0 for scalar/inline-struct fields; only non-zero offsets
    // are collected for pass 2.
    uoffset_t offset = 0;
    switch (fielddef.type()->base_type()) {
      case reflection::String: {
        offset = fbb.CreateString(GetFieldS(table, fielddef)).o;
        break;
      }
      case reflection::Obj: {
        auto &subobjectdef = *schema.objects()->Get(fielddef.type()->index());
        // Structs are copied inline in pass 2, not as sub-objects.
        if (!subobjectdef.is_struct()) {
          offset = CopyTable(fbb, schema, subobjectdef,
                             *GetFieldT(table, fielddef)).o;
        }
        break;
      }
      case reflection::Union: {
        auto &subobjectdef = GetUnionType(schema, objectdef, fielddef, table);
        offset = CopyTable(fbb, schema, subobjectdef,
                           *GetFieldT(table, fielddef)).o;
        break;
      }
      case reflection::Vector: {
        auto vec = table.GetPointer<const Vector<Offset<Table>> *>(
                                                             fielddef.offset());
        auto element_base_type = fielddef.type()->element();
        auto elemobjectdef = element_base_type == reflection::Obj
                             ? schema.objects()->Get(fielddef.type()->index())
                             : nullptr;
        switch (element_base_type) {
          case reflection::String: {
            std::vector<Offset<const String *>> elements(vec->size());
            auto vec_s = reinterpret_cast<const Vector<Offset<String>> *>(vec);
            for (uoffset_t i = 0; i < vec_s->size(); i++) {
              elements[i] = fbb.CreateString(vec_s->Get(i)).o;
            }
            offset = fbb.CreateVector(elements).o;
            break;
          }
          case reflection::Obj: {
            if (!elemobjectdef->is_struct()) {
              std::vector<Offset<const Table *>> elements(vec->size());
              for (uoffset_t i = 0; i < vec->size(); i++) {
                elements[i] =
                  CopyTable(fbb, schema, *elemobjectdef, *vec->Get(i));
              }
              offset = fbb.CreateVector(elements).o;
              break;
            }
            // FALL-THRU: vectors of structs are copied as raw bytes below.
          }
          default: {  // Scalars and structs.
            auto element_size = GetTypeSize(element_base_type);
            if (elemobjectdef && elemobjectdef->is_struct())
              element_size = elemobjectdef->bytesize();
            fbb.StartVector(element_size, vec->size());
            fbb.PushBytes(vec->Data(), element_size * vec->size());
            offset = fbb.EndVector(vec->size());
            break;
          }
        }
        break;
      }
      default:  // Scalars.
        break;
    }
    if (offset) {
      offsets.push_back(offset);
    }
  }
  // Now we can build the actual table from either offsets or scalar data.
  auto start = objectdef.is_struct()
               ? fbb.StartStruct(objectdef.minalign())
               : fbb.StartTable();
  // Pass 2: iterate fields in the same order so offsets[] lines up.
  size_t offset_idx = 0;
  for (auto it = fielddefs->begin(); it != fielddefs->end(); ++it) {
    auto &fielddef = **it;
    if (!table.CheckField(fielddef.offset())) continue;
    auto base_type = fielddef.type()->base_type();
    switch (base_type) {
      case reflection::Obj: {
        auto &subobjectdef = *schema.objects()->Get(fielddef.type()->index());
        if (subobjectdef.is_struct()) {
          CopyInline(fbb, fielddef, table, subobjectdef.minalign(),
                     subobjectdef.bytesize());
          break;
        }
        // else: FALL-THRU:
      }
      case reflection::Union:
      case reflection::String:
      case reflection::Vector:
        fbb.AddOffset(fielddef.offset(), Offset<void>(offsets[offset_idx++]));
        break;
      default: {  // Scalars.
        auto size = GetTypeSize(base_type);
        CopyInline(fbb, fielddef, table, size, size);
        break;
      }
    }
  }
  // Every collected offset must have been consumed exactly once.
  assert(offset_idx == offsets.size());
  if (objectdef.is_struct()) {
    fbb.ClearOffsets();
    return fbb.EndStruct();
  } else {
    return fbb.EndTable(start, static_cast<voffset_t>(fielddefs->size()));
  }
}
|
|
419 |
Offset<const Table *> CopyTable(FlatBufferBuilder &fbb,
|
|
420 |
const reflection::Schema &schema,
|
|
421 |
const reflection::Object &objectdef,
|
|
422 |
const Table &table);
|
625 | 423 |
|
626 | 424 |
} // namespace flatbuffers
|
627 | 425 |
|