Update FlatBuffers source code to 23.5.9

Dmitry Kurtaev 2023-05-10 14:39:36 +03:00
parent 8baabdfb57
commit 676afdc494
13 changed files with 653 additions and 291 deletions

View File

@ -1 +1 @@
Origin: https://github.com/google/flatbuffers/tree/v23.1.21
Origin: https://github.com/google/flatbuffers/tree/v23.5.9

View File

@ -17,6 +17,7 @@
#ifndef FLATBUFFERS_ARRAY_H_
#define FLATBUFFERS_ARRAY_H_
#include <cstdint>
#include <memory>
#include "flatbuffers/base.h"
@ -37,7 +38,7 @@ template<typename T, uint16_t length> class Array {
public:
typedef uint16_t size_type;
typedef typename IndirectHelper<IndirectHelperType>::return_type return_type;
typedef VectorConstIterator<T, return_type> const_iterator;
typedef VectorConstIterator<T, return_type, uoffset_t> const_iterator;
typedef VectorReverseIterator<const_iterator> const_reverse_iterator;
// If T is a LE-scalar or a struct (!scalar_tag::value).
@ -158,11 +159,13 @@ template<typename T, uint16_t length> class Array {
// Specialization for Array[struct] with access using Offset<void> pointer.
// This specialization is used by idl_gen_text.cpp.
template<typename T, uint16_t length> class Array<Offset<T>, length> {
template<typename T, uint16_t length, template<typename> class OffsetT>
class Array<OffsetT<T>, length> {
static_assert(flatbuffers::is_same<T, void>::value, "unexpected type T");
public:
typedef const void *return_type;
typedef uint16_t size_type;
const uint8_t *Data() const { return data_; }
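
For orientation, a minimal sketch of reading such a fixed-length array field; the `Array` pointer would normally come from a schema-generated struct accessor, which is assumed here rather than shown:

#include "flatbuffers/array.h"

// Minimal sketch: sum a fixed-length array field (the accessor that yields
// the Array pointer is assumed to come from generated code).
inline int32_t SumValues(const flatbuffers::Array<int32_t, 4> *arr) {
  int32_t total = 0;
  for (uint16_t i = 0; i < arr->size(); ++i) total += arr->Get(i);
  return total;
}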

View File

@ -43,6 +43,7 @@
#include <vector>
#include <set>
#include <algorithm>
#include <limits>
#include <iterator>
#include <memory>
@ -139,8 +140,8 @@
#endif // !defined(FLATBUFFERS_LITTLEENDIAN)
#define FLATBUFFERS_VERSION_MAJOR 23
#define FLATBUFFERS_VERSION_MINOR 1
#define FLATBUFFERS_VERSION_REVISION 21
#define FLATBUFFERS_VERSION_MINOR 5
#define FLATBUFFERS_VERSION_REVISION 9
#define FLATBUFFERS_STRING_EXPAND(X) #X
#define FLATBUFFERS_STRING(X) FLATBUFFERS_STRING_EXPAND(X)
namespace flatbuffers {
@ -233,13 +234,18 @@ namespace flatbuffers {
}
#define FLATBUFFERS_HAS_STRING_VIEW 1
// Check for absl::string_view
#elif __has_include("absl/strings/string_view.h")
#elif __has_include("absl/strings/string_view.h") && \
__has_include("absl/base/config.h") && \
(__cplusplus >= 201411)
#include "absl/base/config.h"
#if !defined(ABSL_USES_STD_STRING_VIEW)
#include "absl/strings/string_view.h"
namespace flatbuffers {
typedef absl::string_view string_view;
}
#define FLATBUFFERS_HAS_STRING_VIEW 1
#endif
#endif
#endif // __has_include
#endif // !FLATBUFFERS_HAS_STRING_VIEW
@ -318,9 +324,11 @@ namespace flatbuffers {
// Also, using a consistent offset type maintains compatibility of serialized
// offset values between 32bit and 64bit systems.
typedef uint32_t uoffset_t;
typedef uint64_t uoffset64_t;
// Signed offsets for references that can go in both directions.
typedef int32_t soffset_t;
typedef int64_t soffset64_t;
// Offset/index used in v-tables, can be changed to uint8_t in
// format forks to save a bit of space if desired.
@ -329,7 +337,8 @@ typedef uint16_t voffset_t;
typedef uintmax_t largest_scalar_t;
// In 32bits, this evaluates to 2GB - 1
#define FLATBUFFERS_MAX_BUFFER_SIZE ((1ULL << (sizeof(::flatbuffers::soffset_t) * 8 - 1)) - 1)
#define FLATBUFFERS_MAX_BUFFER_SIZE std::numeric_limits<::flatbuffers::soffset_t>::max()
#define FLATBUFFERS_MAX_64_BUFFER_SIZE std::numeric_limits<::flatbuffers::soffset64_t>::max()
// The minimum size buffer that can be a valid flatbuffer.
// Includes the offset to the root table (uoffset_t), the offset to the vtable
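
A small sketch of what the new limits evaluate to, assuming only that `flatbuffers/base.h` is included; the values follow from `std::numeric_limits` over the signed offset types:

#include <cassert>

#include "flatbuffers/base.h"

// Sketch: the 32-bit cap is still 2GB - 1; the new 64-bit cap mirrors it for
// soffset64_t.
inline void CheckBufferLimits() {
  assert(FLATBUFFERS_MAX_BUFFER_SIZE == 0x7FFFFFFFLL);
  assert(FLATBUFFERS_MAX_64_BUFFER_SIZE == 0x7FFFFFFFFFFFFFFFLL);
}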

View File

@ -25,14 +25,33 @@ namespace flatbuffers {
// Wrapper for uoffset_t to allow safe template specialization.
// Value is allowed to be 0 to indicate a null object (see e.g. AddOffset).
template<typename T> struct Offset {
uoffset_t o;
template<typename T = void> struct Offset {
// The type of offset to use.
typedef uoffset_t offset_type;
offset_type o;
Offset() : o(0) {}
Offset(uoffset_t _o) : o(_o) {}
Offset<void> Union() const { return Offset<void>(o); }
Offset(const offset_type _o) : o(_o) {}
Offset<> Union() const { return o; }
bool IsNull() const { return !o; }
};
// Wrapper for uoffset64_t Offsets.
template<typename T = void> struct Offset64 {
// The type of offset to use.
typedef uoffset64_t offset_type;
offset_type o;
Offset64() : o(0) {}
Offset64(const offset_type offset) : o(offset) {}
Offset64<> Union() const { return o; }
bool IsNull() const { return !o; }
};
// Litmus check for ensuring the Offsets are the expected size.
static_assert(sizeof(Offset<>) == 4, "Offset has wrong size");
static_assert(sizeof(Offset64<>) == 8, "Offset64 has wrong size");
inline void EndianCheck() {
int endiantest = 1;
// If this fails, see FLATBUFFERS_LITTLEENDIAN above.
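
A brief usage sketch of the two wrappers (assuming `flatbuffers/flatbuffers.h` for the `String` declaration); they differ only in width and share the same `Union()`/`IsNull()` surface:

#include "flatbuffers/flatbuffers.h"

inline void OffsetWidths() {
  flatbuffers::Offset<flatbuffers::String> o32(8);    // 4-byte offset value
  flatbuffers::Offset64<flatbuffers::String> o64(8);  // 8-byte offset value
  (void)o32.Union();   // type-erased Offset<> to the same location
  (void)o64.IsNull();  // false: the stored value is non-zero
}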
@ -75,35 +94,59 @@ template<typename T> struct IndirectHelper {
typedef T return_type;
typedef T mutable_return_type;
static const size_t element_stride = sizeof(T);
static return_type Read(const uint8_t *p, uoffset_t i) {
static return_type Read(const uint8_t *p, const size_t i) {
return EndianScalar((reinterpret_cast<const T *>(p))[i]);
}
static return_type Read(uint8_t *p, uoffset_t i) {
return Read(const_cast<const uint8_t *>(p), i);
static mutable_return_type Read(uint8_t *p, const size_t i) {
return reinterpret_cast<mutable_return_type>(
Read(const_cast<const uint8_t *>(p), i));
}
};
template<typename T> struct IndirectHelper<Offset<T>> {
// For vector of Offsets.
template<typename T, template<typename> class OffsetT>
struct IndirectHelper<OffsetT<T>> {
typedef const T *return_type;
typedef T *mutable_return_type;
static const size_t element_stride = sizeof(uoffset_t);
static return_type Read(const uint8_t *p, uoffset_t i) {
p += i * sizeof(uoffset_t);
return reinterpret_cast<return_type>(p + ReadScalar<uoffset_t>(p));
typedef typename OffsetT<T>::offset_type offset_type;
static const offset_type element_stride = sizeof(offset_type);
static return_type Read(const uint8_t *const p, const offset_type i) {
// Offsets are relative to themselves, so first update the pointer to
// point to the offset location.
const uint8_t *const offset_location = p + i * element_stride;
// Then read the scalar value of the offset (which may be 32 or 64-bits) and
// then determine the relative location from the offset location.
return reinterpret_cast<return_type>(
offset_location + ReadScalar<offset_type>(offset_location));
}
static mutable_return_type Read(uint8_t *p, uoffset_t i) {
p += i * sizeof(uoffset_t);
return reinterpret_cast<mutable_return_type>(p + ReadScalar<uoffset_t>(p));
static mutable_return_type Read(uint8_t *const p, const offset_type i) {
// Offsets are relative to themselves, so first update the pointer to
// point to the offset location.
uint8_t *const offset_location = p + i * element_stride;
// Then read the scalar value of the offset (which may be 32 or 64-bits) and
// then determine the relative location from the offset location.
return reinterpret_cast<mutable_return_type>(
offset_location + ReadScalar<offset_type>(offset_location));
}
};
// For vector of structs.
template<typename T> struct IndirectHelper<const T *> {
typedef const T *return_type;
typedef T *mutable_return_type;
static const size_t element_stride = sizeof(T);
static return_type Read(const uint8_t *p, uoffset_t i) {
return reinterpret_cast<return_type>(p + i * sizeof(T));
static return_type Read(const uint8_t *const p, const size_t i) {
// Structs are stored inline, relative to the first struct pointer.
return reinterpret_cast<return_type>(p + i * element_stride);
}
static mutable_return_type Read(uint8_t *p, uoffset_t i) {
return reinterpret_cast<mutable_return_type>(p + i * sizeof(T));
static mutable_return_type Read(uint8_t *const p, const size_t i) {
// Structs are stored inline, relative to the first struct pointer.
return reinterpret_cast<mutable_return_type>(p + i * element_stride);
}
};
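
The rule the helpers above implement can be shown in isolation: a stored offset is relative to its own location, so the target address is the offset's location plus the value read there (FlatBuffers data is little-endian, which this sketch assumes):

#include <cstdint>
#include <cstring>

// Sketch: resolve a 32-bit FlatBuffers offset stored at `offset_location`.
inline const uint8_t *ResolveOffset32(const uint8_t *offset_location) {
  uint32_t relative;
  std::memcpy(&relative, offset_location, sizeof(relative));
  return offset_location + relative;  // target lies `relative` bytes ahead
}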
@ -130,23 +173,25 @@ inline bool BufferHasIdentifier(const void *buf, const char *identifier,
/// @cond FLATBUFFERS_INTERNAL
// Helpers to get a typed pointer to the root object contained in the buffer.
template<typename T> T *GetMutableRoot(void *buf) {
if (!buf) return nullptr;
EndianCheck();
return reinterpret_cast<T *>(
reinterpret_cast<uint8_t *>(buf) +
EndianScalar(*reinterpret_cast<uoffset_t *>(buf)));
}
template<typename T> T *GetMutableSizePrefixedRoot(void *buf) {
return GetMutableRoot<T>(reinterpret_cast<uint8_t *>(buf) +
sizeof(uoffset_t));
template<typename T, typename SizeT = uoffset_t>
T *GetMutableSizePrefixedRoot(void *buf) {
return GetMutableRoot<T>(reinterpret_cast<uint8_t *>(buf) + sizeof(SizeT));
}
template<typename T> const T *GetRoot(const void *buf) {
return GetMutableRoot<T>(const_cast<void *>(buf));
}
template<typename T> const T *GetSizePrefixedRoot(const void *buf) {
return GetRoot<T>(reinterpret_cast<const uint8_t *>(buf) + sizeof(uoffset_t));
template<typename T, typename SizeT = uoffset_t>
const T *GetSizePrefixedRoot(const void *buf) {
return GetRoot<T>(reinterpret_cast<const uint8_t *>(buf) + sizeof(SizeT));
}
} // namespace flatbuffers
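
A hedged sketch of the size-prefixed accessors with the new `SizeT` parameter; `RootT` stands in for a schema-generated root type:

#include "flatbuffers/flatbuffers.h"

// Sketch: skip an 8-byte length prefix before resolving the root.
template<typename RootT>
const RootT *ReadSizePrefixed64(const void *buf) {
  return flatbuffers::GetSizePrefixedRoot<RootT, flatbuffers::uoffset64_t>(buf);
}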

View File

@ -18,12 +18,15 @@
#define FLATBUFFERS_FLATBUFFER_BUILDER_H_
#include <algorithm>
#include <cstdint>
#include <functional>
#include <initializer_list>
#include <type_traits>
#include "flatbuffers/allocator.h"
#include "flatbuffers/array.h"
#include "flatbuffers/base.h"
#include "flatbuffers/buffer.h"
#include "flatbuffers/buffer_ref.h"
#include "flatbuffers/default_allocator.h"
#include "flatbuffers/detached_buffer.h"
@ -40,8 +43,9 @@ namespace flatbuffers {
// Converts a Field ID to a virtual table offset.
inline voffset_t FieldIndexToOffset(voffset_t field_id) {
// Should correspond to what EndTable() below builds up.
const int fixed_fields = 2; // Vtable size and Object Size.
return static_cast<voffset_t>((field_id + fixed_fields) * sizeof(voffset_t));
const voffset_t fixed_fields =
2 * sizeof(voffset_t); // Vtable size and Object Size.
return fixed_fields + field_id * sizeof(voffset_t);
}
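
With 16-bit `voffset_t`, the two fixed vtable slots occupy 4 bytes, so field id N maps to vtable byte offset 4 + 2 * N; a quick check under that assumption:

#include <cassert>

#include "flatbuffers/flatbuffer_builder.h"

inline void CheckFieldIndexToOffset() {
  assert(flatbuffers::FieldIndexToOffset(0) == 4);   // first field
  assert(flatbuffers::FieldIndexToOffset(3) == 10);  // fourth field
}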
template<typename T, typename Alloc = std::allocator<T>>
@ -68,8 +72,13 @@ T *data(std::vector<T, Alloc> &v) {
/// `PushElement`/`AddElement`/`EndTable`, or the builtin `CreateString`/
/// `CreateVector` functions. Do this is depth-first order to build up a tree to
/// the root. `Finish()` wraps up the buffer ready for transport.
class FlatBufferBuilder {
template<bool Is64Aware = false> class FlatBufferBuilderImpl {
public:
// This switches the size type of the builder, based on whether it is 64-bit aware
// (uoffset64_t) or not (uoffset_t).
typedef
typename std::conditional<Is64Aware, uoffset64_t, uoffset_t>::type SizeT;
/// @brief Default constructor for FlatBufferBuilder.
/// @param[in] initial_size The initial size of the buffer, in bytes. Defaults
/// to `1024`.
@ -81,13 +90,16 @@ class FlatBufferBuilder {
/// minimum alignment upon reallocation. Only needed if you intend to store
/// types with custom alignment AND you wish to read the buffer in-place
/// directly after creation.
explicit FlatBufferBuilder(
explicit FlatBufferBuilderImpl(
size_t initial_size = 1024, Allocator *allocator = nullptr,
bool own_allocator = false,
size_t buffer_minalign = AlignOf<largest_scalar_t>())
: buf_(initial_size, allocator, own_allocator, buffer_minalign),
: buf_(initial_size, allocator, own_allocator, buffer_minalign,
static_cast<SizeT>(Is64Aware ? FLATBUFFERS_MAX_64_BUFFER_SIZE
: FLATBUFFERS_MAX_BUFFER_SIZE)),
num_field_loc(0),
max_voffset_(0),
length_of_64_bit_region_(0),
nested(false),
finished(false),
minalign_(1),
@ -98,10 +110,13 @@ class FlatBufferBuilder {
}
/// @brief Move constructor for FlatBufferBuilder.
FlatBufferBuilder(FlatBufferBuilder &&other) noexcept
: buf_(1024, nullptr, false, AlignOf<largest_scalar_t>()),
FlatBufferBuilderImpl(FlatBufferBuilderImpl &&other) noexcept
: buf_(1024, nullptr, false, AlignOf<largest_scalar_t>(),
static_cast<SizeT>(Is64Aware ? FLATBUFFERS_MAX_64_BUFFER_SIZE
: FLATBUFFERS_MAX_BUFFER_SIZE)),
num_field_loc(0),
max_voffset_(0),
length_of_64_bit_region_(0),
nested(false),
finished(false),
minalign_(1),
@ -116,18 +131,19 @@ class FlatBufferBuilder {
}
/// @brief Move assignment operator for FlatBufferBuilder.
FlatBufferBuilder &operator=(FlatBufferBuilder &&other) noexcept {
FlatBufferBuilderImpl &operator=(FlatBufferBuilderImpl &&other) noexcept {
// Move construct a temporary and swap idiom
FlatBufferBuilder temp(std::move(other));
FlatBufferBuilderImpl temp(std::move(other));
Swap(temp);
return *this;
}
void Swap(FlatBufferBuilder &other) {
void Swap(FlatBufferBuilderImpl &other) {
using std::swap;
buf_.swap(other.buf_);
swap(num_field_loc, other.num_field_loc);
swap(max_voffset_, other.max_voffset_);
swap(length_of_64_bit_region_, other.length_of_64_bit_region_);
swap(nested, other.nested);
swap(finished, other.finished);
swap(minalign_, other.minalign_);
@ -136,7 +152,7 @@ class FlatBufferBuilder {
swap(string_pool, other.string_pool);
}
~FlatBufferBuilder() {
~FlatBufferBuilderImpl() {
if (string_pool) delete string_pool;
}
@ -153,12 +169,36 @@ class FlatBufferBuilder {
nested = false;
finished = false;
minalign_ = 1;
length_of_64_bit_region_ = 0;
if (string_pool) string_pool->clear();
}
/// @brief The current size of the serialized buffer, counting from the end.
/// @return Returns a `SizeT` with the current size of the buffer.
SizeT GetSize() const { return buf_.size(); }
/// @brief The current size of the serialized buffer relative to the end of
/// the 32-bit region.
/// @return Returns an `uoffset_t` with the current size of the buffer.
uoffset_t GetSize() const { return buf_.size(); }
template<bool is_64 = Is64Aware>
// Only enable this method for the 64-bit builder, as only that builder is
// concerned with the 32/64-bit boundary, and should be the one to bear any
// run-time costs.
typename std::enable_if<is_64, uoffset_t>::type GetSizeRelative32BitRegion()
const {
// [32-bit region][64-bit region]
// [XXXXXXXXXXXXXXXXXXXXXXXXXXXX] GetSize()
//                [YYYYYYYYYYYYY] length_of_64_bit_region_
// [ZZZZZZZZZZZZZ]                return size
return static_cast<uoffset_t>(GetSize() - length_of_64_bit_region_);
}
template<bool is_64 = Is64Aware>
// Only enable this method for the 32-bit builder.
typename std::enable_if<!is_64, uoffset_t>::type GetSizeRelative32BitRegion()
const {
return static_cast<uoffset_t>(GetSize());
}
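
A usage sketch of the two size queries on a 64-bit aware builder; the values diverge once data has been placed in the 64-bit tail region:

#include "flatbuffers/flatbuffer_builder.h"

inline void SizeQueries() {
  flatbuffers::FlatBufferBuilder64 fbb;
  auto tail = fbb.CreateString<flatbuffers::Offset64>("tail data");  // 64-bit region
  (void)tail;
  auto whole = fbb.GetSize();                    // uoffset64_t: whole buffer
  auto head = fbb.GetSizeRelative32BitRegion();  // uoffset_t: 32-bit region only
  (void)whole;
  (void)head;
}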
/// @brief Get the serialized buffer (after you call `Finish()`).
/// @return Returns an `uint8_t` pointer to the FlatBuffer data inside the
@ -270,14 +310,16 @@ class FlatBufferBuilder {
}
// Write a single aligned scalar to the buffer
template<typename T> uoffset_t PushElement(T element) {
template<typename T, typename ReturnT = uoffset_t>
ReturnT PushElement(T element) {
AssertScalarT<T>();
Align(sizeof(T));
buf_.push_small(EndianScalar(element));
return GetSize();
return CalculateOffset<ReturnT>();
}
template<typename T> uoffset_t PushElement(Offset<T> off) {
template<typename T, template<typename> class OffsetT = Offset>
uoffset_t PushElement(OffsetT<T> off) {
// Special case for offsets: see ReferTo below.
return PushElement(ReferTo(off.o));
}
@ -307,11 +349,16 @@ class FlatBufferBuilder {
AddElement(field, ReferTo(off.o), static_cast<uoffset_t>(0));
}
template<typename T> void AddOffset(voffset_t field, Offset64<T> off) {
if (off.IsNull()) return; // Don't store.
AddElement(field, ReferTo(off.o), static_cast<uoffset64_t>(0));
}
template<typename T> void AddStruct(voffset_t field, const T *structptr) {
if (!structptr) return; // Default, don't store.
Align(AlignOf<T>());
buf_.push_small(*structptr);
TrackField(field, GetSize());
TrackField(field, CalculateOffset<uoffset_t>());
}
void AddStructOffset(voffset_t field, uoffset_t off) {
@ -322,12 +369,29 @@ class FlatBufferBuilder {
// This function converts them to be relative to the current location
// in the buffer (when stored here), pointing upwards.
uoffset_t ReferTo(uoffset_t off) {
// Align to ensure GetSize() below is correct.
// Align to ensure GetSizeRelative32BitRegion() below is correct.
Align(sizeof(uoffset_t));
// Offset must refer to something already in buffer.
const uoffset_t size = GetSize();
// 32-bit offsets are relative to the tail of the 32-bit region of the
// buffer. For most cases (without 64-bit entities) this is equivalent to
// the size of the whole buffer (i.e. GetSize()).
return ReferTo(off, GetSizeRelative32BitRegion());
}
uoffset64_t ReferTo(uoffset64_t off) {
// Align to ensure GetSize() below is correct.
Align(sizeof(uoffset64_t));
// 64-bit offsets are relative to the tail of the whole buffer.
return ReferTo(off, GetSize());
}
template<typename T, typename T2> T ReferTo(const T off, const T2 size) {
FLATBUFFERS_ASSERT(off && off <= size);
return size - off + static_cast<uoffset_t>(sizeof(uoffset_t));
return size - off + static_cast<T>(sizeof(T));
}
template<typename T> T ReferTo(const T off, const T size) {
FLATBUFFERS_ASSERT(off && off <= size);
return size - off + static_cast<T>(sizeof(T));
}
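
The arithmetic above in isolation: an absolute position `off` (measured from the tail of the relevant region, like `GetSize()`) becomes an upward-pointing relative offset, e.g. a target at position 24 in a 64-byte region is written as 64 - 24 + 4 = 44:

#include <cassert>
#include <cstdint>

// Worked sketch of the `size - off + sizeof(offset)` relation used by ReferTo.
inline uint32_t RelativeOffset32(uint32_t off, uint32_t size) {
  assert(off && off <= size);
  return size - off + static_cast<uint32_t>(sizeof(uint32_t));
}
// RelativeOffset32(24, 64) == 44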
void NotNested() {
@ -349,7 +413,7 @@ class FlatBufferBuilder {
uoffset_t StartTable() {
NotNested();
nested = true;
return GetSize();
return GetSizeRelative32BitRegion();
}
// This finishes one serialized object by generating the vtable if it's a
@ -360,7 +424,9 @@ class FlatBufferBuilder {
FLATBUFFERS_ASSERT(nested);
// Write the vtable offset, which is the start of any Table.
// We fill its value later.
auto vtableoffsetloc = PushElement<soffset_t>(0);
// This is relative to the end of the 32-bit region.
const uoffset_t vtable_offset_loc =
static_cast<uoffset_t>(PushElement<soffset_t>(0));
// Write a vtable, which consists entirely of voffset_t elements.
// It starts with the number of offsets, followed by a type id, followed
// by the offsets themselves. In reverse:
@ -370,7 +436,7 @@ class FlatBufferBuilder {
(std::max)(static_cast<voffset_t>(max_voffset_ + sizeof(voffset_t)),
FieldIndexToOffset(0));
buf_.fill_big(max_voffset_);
auto table_object_size = vtableoffsetloc - start;
const uoffset_t table_object_size = vtable_offset_loc - start;
// Vtable use 16bit offsets.
FLATBUFFERS_ASSERT(table_object_size < 0x10000);
WriteScalar<voffset_t>(buf_.data() + sizeof(voffset_t),
@ -380,7 +446,8 @@ class FlatBufferBuilder {
for (auto it = buf_.scratch_end() - num_field_loc * sizeof(FieldLoc);
it < buf_.scratch_end(); it += sizeof(FieldLoc)) {
auto field_location = reinterpret_cast<FieldLoc *>(it);
auto pos = static_cast<voffset_t>(vtableoffsetloc - field_location->off);
const voffset_t pos =
static_cast<voffset_t>(vtable_offset_loc - field_location->off);
// If this asserts, it means you've set a field twice.
FLATBUFFERS_ASSERT(
!ReadScalar<voffset_t>(buf_.data() + field_location->id));
@ -389,7 +456,7 @@ class FlatBufferBuilder {
ClearOffsets();
auto vt1 = reinterpret_cast<voffset_t *>(buf_.data());
auto vt1_size = ReadScalar<voffset_t>(vt1);
auto vt_use = GetSize();
auto vt_use = GetSizeRelative32BitRegion();
// See if we already have generated a vtable with this exact same
// layout before. If so, make it point to the old one, remove this one.
if (dedup_vtables_) {
@ -400,23 +467,24 @@ class FlatBufferBuilder {
auto vt2_size = ReadScalar<voffset_t>(vt2);
if (vt1_size != vt2_size || 0 != memcmp(vt2, vt1, vt1_size)) continue;
vt_use = *vt_offset_ptr;
buf_.pop(GetSize() - vtableoffsetloc);
buf_.pop(GetSizeRelative32BitRegion() - vtable_offset_loc);
break;
}
}
// If this is a new vtable, remember it.
if (vt_use == GetSize()) { buf_.scratch_push_small(vt_use); }
if (vt_use == GetSizeRelative32BitRegion()) {
buf_.scratch_push_small(vt_use);
}
// Fill the vtable offset we created above.
// The offset points from the beginning of the object to where the
// vtable is stored.
// The offset points from the beginning of the object to where the vtable is
// stored.
// Offsets default direction is downward in memory for future format
// flexibility (storing all vtables at the start of the file).
WriteScalar(buf_.data_at(vtableoffsetloc),
WriteScalar(buf_.data_at(vtable_offset_loc + length_of_64_bit_region_),
static_cast<soffset_t>(vt_use) -
static_cast<soffset_t>(vtableoffsetloc));
static_cast<soffset_t>(vtable_offset_loc));
nested = false;
return vtableoffsetloc;
return vtable_offset_loc;
}
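
For orientation, the vtable that EndTable() emits can be decoded by hand from a finished buffer. A minimal sketch, assuming a little-endian, finished 32-bit FlatBuffer in `buf`: a `uoffset_t` leads to the root table, an `soffset_t` at the table start points back to its vtable, and the vtable begins with two fixed `voffset_t` slots (vtable size, table object size):

#include <cstdint>
#include <cstring>

inline void InspectRootVTable(const uint8_t *buf) {
  uint32_t root;  // uoffset_t to the root table
  std::memcpy(&root, buf, sizeof(root));
  int32_t to_vtable;  // soffset_t stored at the table start
  std::memcpy(&to_vtable, buf + root, sizeof(to_vtable));
  const uint8_t *vtable = buf + root - to_vtable;
  uint16_t vtable_size, object_size;  // the two fixed voffset_t slots
  std::memcpy(&vtable_size, vtable, sizeof(vtable_size));
  std::memcpy(&object_size, vtable + sizeof(uint16_t), sizeof(object_size));
  // The remaining (vtable_size - 4) / 2 entries are per-field offsets into
  // the table, with 0 meaning "field absent".
  (void)object_size;
}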
FLATBUFFERS_ATTRIBUTE([[deprecated("call the version above instead")]])
@ -426,14 +494,20 @@ class FlatBufferBuilder {
// This checks a required field has been set in a given table that has
// just been constructed.
template<typename T> void Required(Offset<T> table, voffset_t field);
template<typename T> void Required(Offset<T> table, voffset_t field) {
auto table_ptr = reinterpret_cast<const Table *>(buf_.data_at(table.o));
bool ok = table_ptr->GetOptionalFieldOffset(field) != 0;
// If this fails, the caller will show what field needs to be set.
FLATBUFFERS_ASSERT(ok);
(void)ok;
}
uoffset_t StartStruct(size_t alignment) {
Align(alignment);
return GetSize();
return GetSizeRelative32BitRegion();
}
uoffset_t EndStruct() { return GetSize(); }
uoffset_t EndStruct() { return GetSizeRelative32BitRegion(); }
void ClearOffsets() {
buf_.scratch_pop(num_field_loc * sizeof(FieldLoc));
@ -442,15 +516,18 @@ class FlatBufferBuilder {
}
// Aligns such that when "len" bytes are written, an object can be written
// after it with "alignment" without padding.
// after it (forward in the buffer) with "alignment" without padding.
void PreAlign(size_t len, size_t alignment) {
if (len == 0) return;
TrackMinAlign(alignment);
buf_.fill(PaddingBytes(GetSize() + len, alignment));
}
template<typename T> void PreAlign(size_t len) {
AssertScalarT<T>();
PreAlign(len, AlignOf<T>());
// Aligns such that when "len" bytes are written, an object of type `AlignT`
// can be written after it (forward in the buffer) without padding.
template<typename AlignT> void PreAlign(size_t len) {
AssertScalarT<AlignT>();
PreAlign(len, AlignOf<AlignT>());
}
/// @endcond
@ -458,34 +535,35 @@ class FlatBufferBuilder {
/// @param[in] str A const char pointer to the data to be stored as a string.
/// @param[in] len The number of bytes that should be stored from `str`.
/// @return Returns the offset in the buffer where the string starts.
Offset<String> CreateString(const char *str, size_t len) {
NotNested();
PreAlign<uoffset_t>(len + 1); // Always 0-terminated.
buf_.fill(1);
PushBytes(reinterpret_cast<const uint8_t *>(str), len);
PushElement(static_cast<uoffset_t>(len));
return Offset<String>(GetSize());
template<template<typename> class OffsetT = Offset>
OffsetT<String> CreateString(const char *str, size_t len) {
CreateStringImpl(str, len);
return OffsetT<String>(
CalculateOffset<typename OffsetT<String>::offset_type>());
}
/// @brief Store a string in the buffer, which is null-terminated.
/// @param[in] str A const char pointer to a C-string to add to the buffer.
/// @return Returns the offset in the buffer where the string starts.
Offset<String> CreateString(const char *str) {
return CreateString(str, strlen(str));
template<template<typename> class OffsetT = Offset>
OffsetT<String> CreateString(const char *str) {
return CreateString<OffsetT>(str, strlen(str));
}
/// @brief Store a string in the buffer, which is null-terminated.
/// @param[in] str A char pointer to a C-string to add to the buffer.
/// @return Returns the offset in the buffer where the string starts.
Offset<String> CreateString(char *str) {
return CreateString(str, strlen(str));
template<template<typename> class OffsetT = Offset>
OffsetT<String> CreateString(char *str) {
return CreateString<OffsetT>(str, strlen(str));
}
/// @brief Store a string in the buffer, which can contain any binary data.
/// @param[in] str A const reference to a std::string to store in the buffer.
/// @return Returns the offset in the buffer where the string starts.
Offset<String> CreateString(const std::string &str) {
return CreateString(str.c_str(), str.length());
template<template<typename> class OffsetT = Offset>
OffsetT<String> CreateString(const std::string &str) {
return CreateString<OffsetT>(str.c_str(), str.length());
}
// clang-format off
@ -493,8 +571,9 @@ class FlatBufferBuilder {
/// @brief Store a string in the buffer, which can contain any binary data.
/// @param[in] str A const string_view to copy in to the buffer.
/// @return Returns the offset in the buffer where the string starts.
Offset<String> CreateString(flatbuffers::string_view str) {
return CreateString(str.data(), str.size());
template<template <typename> class OffsetT = Offset>
OffsetT<String>CreateString(flatbuffers::string_view str) {
return CreateString<OffsetT>(str.data(), str.size());
}
#endif // FLATBUFFERS_HAS_STRING_VIEW
// clang-format on
@ -502,16 +581,21 @@ class FlatBufferBuilder {
/// @brief Store a string in the buffer, which can contain any binary data.
/// @param[in] str A const pointer to a `String` struct to add to the buffer.
/// @return Returns the offset in the buffer where the string starts
Offset<String> CreateString(const String *str) {
return str ? CreateString(str->c_str(), str->size()) : 0;
template<template<typename> class OffsetT = Offset>
OffsetT<String> CreateString(const String *str) {
return str ? CreateString<OffsetT>(str->c_str(), str->size()) : 0;
}
/// @brief Store a string in the buffer, which can contain any binary data.
/// @param[in] str A const reference to a std::string like type with support
/// of T::c_str() and T::length() to store in the buffer.
/// @return Returns the offset in the buffer where the string starts.
template<typename T> Offset<String> CreateString(const T &str) {
return CreateString(str.c_str(), str.length());
template<template<typename> class OffsetT = Offset,
// No need to explicitly declare the T type, let the compiler deduce
// it.
int &...ExplicitArgumentBarrier, typename T>
OffsetT<String> CreateString(const T &str) {
return CreateString<OffsetT>(str.c_str(), str.length());
}
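
A usage sketch of the new `OffsetT` parameter on the string overloads; note the ordering constraint enforced further down by `CanAddOffset64()`: 64-bit items must be written before any 32-bit item.

#include "flatbuffers/flatbuffer_builder.h"

inline void StringOffsets() {
  flatbuffers::FlatBufferBuilder64 fbb;
  // 64-bit offset string, placed in the tail region; must come first.
  auto big = fbb.CreateString<flatbuffers::Offset64>("payload");
  // Regular 32-bit offset string, added afterwards.
  auto small = fbb.CreateString("name");
  (void)big;
  (void)small;
}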
/// @brief Store a string in the buffer, which can contain any binary data.
@ -523,12 +607,14 @@ class FlatBufferBuilder {
/// @return Returns the offset in the buffer where the string starts.
Offset<String> CreateSharedString(const char *str, size_t len) {
FLATBUFFERS_ASSERT(FLATBUFFERS_GENERAL_HEAP_ALLOC_OK);
if (!string_pool)
if (!string_pool) {
string_pool = new StringOffsetMap(StringOffsetCompare(buf_));
auto size_before_string = buf_.size();
}
const size_t size_before_string = buf_.size();
// Must first serialize the string, since the set is all offsets into
// buffer.
auto off = CreateString(str, len);
const Offset<String> off = CreateString<Offset>(str, len);
auto it = string_pool->find(off);
// If it exists we reuse existing serialized data!
if (it != string_pool->end()) {
@ -584,21 +670,27 @@ class FlatBufferBuilder {
}
/// @cond FLATBUFFERS_INTERNAL
uoffset_t EndVector(size_t len) {
template<typename LenT = uoffset_t, typename ReturnT = uoffset_t>
ReturnT EndVector(size_t len) {
FLATBUFFERS_ASSERT(nested); // Hit if no corresponding StartVector.
nested = false;
return PushElement(static_cast<uoffset_t>(len));
return PushElement<LenT, ReturnT>(static_cast<LenT>(len));
}
template<template<typename> class OffsetT = Offset, typename LenT = uint32_t>
void StartVector(size_t len, size_t elemsize, size_t alignment) {
NotNested();
nested = true;
PreAlign<uoffset_t>(len * elemsize);
// Align to the Length type of the vector (either 32-bit or 64-bit), so
// that the length of the buffer can be added without padding.
PreAlign<LenT>(len * elemsize);
PreAlign(len * elemsize, alignment); // Just in case elemsize > uoffset_t.
}
template<typename T> void StartVector(size_t len) {
return StartVector(len, sizeof(T), AlignOf<T>());
template<typename T, template<typename> class OffsetT = Offset,
typename LenT = uint32_t>
void StartVector(size_t len) {
return StartVector<OffsetT, LenT>(len, sizeof(T), AlignOf<T>());
}
// Call this right before StartVector/CreateVector if you want to force the
@ -623,17 +715,25 @@ class FlatBufferBuilder {
/// @brief Serialize an array into a FlatBuffer `vector`.
/// @tparam T The data type of the array elements.
/// @tparam OffsetT the type of offset to return
/// @tparam VectorT the type of vector to cast to.
/// @param[in] v A pointer to the array of type `T` to serialize into the
/// buffer as a `vector`.
/// @param[in] len The number of elements to serialize.
/// @return Returns a typed `Offset` into the serialized data indicating
/// @return Returns a typed `OffsetT` into the serialized data indicating
/// where the vector is stored.
template<typename T> Offset<Vector<T>> CreateVector(const T *v, size_t len) {
template<template<typename...> class OffsetT = Offset,
template<typename...> class VectorT = Vector,
int &...ExplicitArgumentBarrier, typename T>
OffsetT<VectorT<T>> CreateVector(const T *v, size_t len) {
// The type of the length field in the vector.
typedef typename VectorT<T>::size_type LenT;
typedef typename OffsetT<VectorT<T>>::offset_type offset_type;
// If this assert hits, you're specifying a template argument that is
// causing the wrong overload to be selected, remove it.
AssertScalarT<T>();
StartVector<T>(len);
if (len == 0) { return Offset<Vector<T>>(EndVector(len)); }
StartVector<T, OffsetT, LenT>(len);
if (len > 0) {
// clang-format off
#if FLATBUFFERS_LITTLEENDIAN
PushBytes(reinterpret_cast<const uint8_t *>(v), len * sizeof(T));
@ -647,7 +747,8 @@ class FlatBufferBuilder {
}
#endif
// clang-format on
return Offset<Vector<T>>(EndVector(len));
}
return OffsetT<VectorT<T>>(EndVector<LenT, offset_type>(len));
}
/// @brief Serialize an array like object into a FlatBuffer `vector`.
@ -689,6 +790,12 @@ class FlatBufferBuilder {
return CreateVector(data(v), v.size());
}
template<template<typename...> class VectorT = Vector64,
int &...ExplicitArgumentBarrier, typename T>
Offset64<VectorT<T>> CreateVector64(const std::vector<T> &v) {
return CreateVector<Offset64, VectorT>(data(v), v.size());
}
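
A sketch of the new 64-bit vector helper on a 64-bit aware builder (scalar element type, so the data is pushed as raw bytes):

#include <cstdint>
#include <vector>

#include "flatbuffers/flatbuffer_builder.h"

inline void BigVector() {
  flatbuffers::FlatBufferBuilder64 fbb;
  std::vector<uint8_t> blob(1 << 20, 0xAB);
  // Offset64<Vector64<uint8_t>>: 64-bit offset, 64-bit length field.
  auto vec = fbb.CreateVector64(blob);
  (void)vec;
}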
// vector<bool> may be implemented using a bit-set, so we can't access it as
// an array. Instead, read elements manually.
// Background: https://isocpp.org/blog/2012/11/on-vectorbool
@ -785,47 +892,19 @@ class FlatBufferBuilder {
/// @param[in] len The number of elements to serialize.
/// @return Returns a typed `Offset` into the serialized data indicating
/// where the vector is stored.
template<typename T>
Offset<Vector<const T *>> CreateVectorOfStructs(const T *v, size_t len) {
StartVector(len * sizeof(T) / AlignOf<T>(), sizeof(T), AlignOf<T>());
template<typename T, template<typename...> class OffsetT = Offset,
template<typename...> class VectorT = Vector>
OffsetT<VectorT<const T *>> CreateVectorOfStructs(const T *v, size_t len) {
// The type of the length field in the vector.
typedef typename VectorT<T>::size_type LenT;
typedef typename OffsetT<VectorT<const T *>>::offset_type offset_type;
StartVector<OffsetT, LenT>(len * sizeof(T) / AlignOf<T>(), sizeof(T),
AlignOf<T>());
if (len > 0) {
PushBytes(reinterpret_cast<const uint8_t *>(v), sizeof(T) * len);
}
return Offset<Vector<const T *>>(EndVector(len));
}
/// @brief Serialize an array of native structs into a FlatBuffer `vector`.
/// @tparam T The data type of the struct array elements.
/// @tparam S The data type of the native struct array elements.
/// @param[in] v A pointer to the array of type `S` to serialize into the
/// buffer as a `vector`.
/// @param[in] len The number of elements to serialize.
/// @param[in] pack_func Pointer to a function to convert the native struct
/// to the FlatBuffer struct.
/// @return Returns a typed `Offset` into the serialized data indicating
/// where the vector is stored.
template<typename T, typename S>
Offset<Vector<const T *>> CreateVectorOfNativeStructs(
const S *v, size_t len, T (*const pack_func)(const S &)) {
FLATBUFFERS_ASSERT(pack_func);
auto structs = StartVectorOfStructs<T>(len);
for (size_t i = 0; i < len; i++) { structs[i] = pack_func(v[i]); }
return EndVectorOfStructs<T>(len);
}
/// @brief Serialize an array of native structs into a FlatBuffer `vector`.
/// @tparam T The data type of the struct array elements.
/// @tparam S The data type of the native struct array elements.
/// @param[in] v A pointer to the array of type `S` to serialize into the
/// buffer as a `vector`.
/// @param[in] len The number of elements to serialize.
/// @return Returns a typed `Offset` into the serialized data indicating
/// where the vector is stored.
template<typename T, typename S>
Offset<Vector<const T *>> CreateVectorOfNativeStructs(const S *v,
size_t len) {
extern T Pack(const S &);
return CreateVectorOfNativeStructs(v, len, Pack);
return OffsetT<VectorT<const T *>>(EndVector<LenT, offset_type>(len));
}
/// @brief Serialize an array of structs into a FlatBuffer `vector`.
@ -873,10 +952,52 @@ class FlatBufferBuilder {
/// serialize into the buffer as a `vector`.
/// @return Returns a typed `Offset` into the serialized data indicating
/// where the vector is stored.
template<typename T, typename Alloc = std::allocator<T>>
Offset<Vector<const T *>> CreateVectorOfStructs(
template<typename T, template<typename...> class OffsetT = Offset,
template<typename...> class VectorT = Vector,
typename Alloc = std::allocator<T>>
OffsetT<VectorT<const T *>> CreateVectorOfStructs(
const std::vector<T, Alloc> &v) {
return CreateVectorOfStructs(data(v), v.size());
return CreateVectorOfStructs<T, OffsetT, VectorT>(data(v), v.size());
}
template<template<typename...> class VectorT = Vector64, int &..., typename T>
Offset64<VectorT<const T *>> CreateVectorOfStructs64(
const std::vector<T> &v) {
return CreateVectorOfStructs<T, Offset64, VectorT>(data(v), v.size());
}
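
A hedged sketch of the struct-vector variant; `Vec3` here is a hypothetical stand-in for a schema-generated struct (any trivially copyable type of the right layout), not a real FlatBuffers type:

#include <vector>

#include "flatbuffers/flatbuffer_builder.h"

struct Vec3 {  // hypothetical stand-in for a schema-generated struct
  float x, y, z;
};

inline void StructVector64() {
  flatbuffers::FlatBufferBuilder64 fbb;
  std::vector<Vec3> points(3, Vec3{1.f, 2.f, 3.f});
  auto vec = fbb.CreateVectorOfStructs64(points);  // Offset64<Vector64<const Vec3 *>>
  (void)vec;
}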
/// @brief Serialize an array of native structs into a FlatBuffer `vector`.
/// @tparam T The data type of the struct array elements.
/// @tparam S The data type of the native struct array elements.
/// @param[in] v A pointer to the array of type `S` to serialize into the
/// buffer as a `vector`.
/// @param[in] len The number of elements to serialize.
/// @param[in] pack_func Pointer to a function to convert the native struct
/// to the FlatBuffer struct.
/// @return Returns a typed `Offset` into the serialized data indicating
/// where the vector is stored.
template<typename T, typename S>
Offset<Vector<const T *>> CreateVectorOfNativeStructs(
const S *v, size_t len, T (*const pack_func)(const S &)) {
FLATBUFFERS_ASSERT(pack_func);
auto structs = StartVectorOfStructs<T>(len);
for (size_t i = 0; i < len; i++) { structs[i] = pack_func(v[i]); }
return EndVectorOfStructs<T>(len);
}
/// @brief Serialize an array of native structs into a FlatBuffer `vector`.
/// @tparam T The data type of the struct array elements.
/// @tparam S The data type of the native struct array elements.
/// @param[in] v A pointer to the array of type `S` to serialize into the
/// buffer as a `vector`.
/// @param[in] len The number of elements to serialize.
/// @return Returns a typed `Offset` into the serialized data indicating
/// where the vector is stored.
template<typename T, typename S>
Offset<Vector<const T *>> CreateVectorOfNativeStructs(const S *v,
size_t len) {
extern T Pack(const S &);
return CreateVectorOfNativeStructs(v, len, Pack);
}
/// @brief Serialize a `std::vector` of native structs into a FlatBuffer
@ -979,14 +1100,14 @@ class FlatBufferBuilder {
/// @cond FLATBUFFERS_INTERNAL
template<typename T> struct TableKeyComparator {
TableKeyComparator(vector_downward &buf) : buf_(buf) {}
explicit TableKeyComparator(vector_downward<SizeT> &buf) : buf_(buf) {}
TableKeyComparator(const TableKeyComparator &other) : buf_(other.buf_) {}
bool operator()(const Offset<T> &a, const Offset<T> &b) const {
auto table_a = reinterpret_cast<T *>(buf_.data_at(a.o));
auto table_b = reinterpret_cast<T *>(buf_.data_at(b.o));
return table_a->KeyCompareLessThan(table_b);
}
vector_downward &buf_;
vector_downward<SizeT> &buf_;
private:
FLATBUFFERS_DELETE_FUNC(
@ -1034,7 +1155,7 @@ class FlatBufferBuilder {
NotNested();
StartVector(len, elemsize, alignment);
buf_.make_space(len * elemsize);
auto vec_start = GetSize();
const uoffset_t vec_start = GetSizeRelative32BitRegion();
auto vec_end = EndVector(len);
*buf = buf_.data_at(vec_start);
return vec_end;
@ -1085,7 +1206,8 @@ class FlatBufferBuilder {
NotNested();
Align(AlignOf<T>());
buf_.push_small(structobj);
return Offset<const T *>(GetSize());
return Offset<const T *>(
CalculateOffset<typename Offset<const T *>::offset_type>());
}
/// @brief Finish serializing a buffer by writing the root offset.
@ -1109,7 +1231,7 @@ class FlatBufferBuilder {
Finish(root.o, file_identifier, true);
}
void SwapBufAllocator(FlatBufferBuilder &other) {
void SwapBufAllocator(FlatBufferBuilderImpl &other) {
buf_.swap_allocator(other.buf_);
}
@ -1119,16 +1241,23 @@ class FlatBufferBuilder {
protected:
// You shouldn't really be copying instances of this class.
FlatBufferBuilder(const FlatBufferBuilder &);
FlatBufferBuilder &operator=(const FlatBufferBuilder &);
FlatBufferBuilderImpl(const FlatBufferBuilderImpl &);
FlatBufferBuilderImpl &operator=(const FlatBufferBuilderImpl &);
void Finish(uoffset_t root, const char *file_identifier, bool size_prefix) {
NotNested();
buf_.clear_scratch();
const size_t prefix_size = size_prefix ? sizeof(SizeT) : 0;
// Make sure we track the alignment of the size prefix.
TrackMinAlign(prefix_size);
const size_t root_offset_size = sizeof(uoffset_t);
const size_t file_id_size = file_identifier ? kFileIdentifierLength : 0;
// This will cause the whole buffer to be aligned.
PreAlign((size_prefix ? sizeof(uoffset_t) : 0) + sizeof(uoffset_t) +
(file_identifier ? kFileIdentifierLength : 0),
minalign_);
PreAlign(prefix_size + root_offset_size + file_id_size, minalign_);
if (file_identifier) {
FLATBUFFERS_ASSERT(strlen(file_identifier) == kFileIdentifierLength);
PushBytes(reinterpret_cast<const uint8_t *>(file_identifier),
@ -1144,7 +1273,7 @@ class FlatBufferBuilder {
voffset_t id;
};
vector_downward buf_;
vector_downward<SizeT> buf_;
// Accumulating offsets of table members while it is being built.
// We store these in the scratch pad of buf_, after the vtable offsets.
@ -1153,6 +1282,31 @@ class FlatBufferBuilder {
// possible vtable.
voffset_t max_voffset_;
// This is the length of the 64-bit region of the buffer. The buffer supports
// 64-bit offsets by forcing serialization of those elements in the "tail"
// region of the buffer (i.e. the "64-bit region"). To ensure that offsets
// referenced from the tail of the buffer do not overflow their type (e.g.
// Offset stores a uint32_t), the boundary between the 32- and 64-bit regions
// must be tracked.
//
// [ Complete FlatBuffer ]
// [32-bit region][64-bit region]
// ^ ^
// | Tail of the buffer.
// |
// Tail of the 32-bit region of the buffer.
//
// This keeps track of the size of the 64-bit region so that the tail of the
// 32-bit region can be calculated as `GetSize() - length_of_64_bit_region_`.
//
// This will remain 0 if no 64-bit offset types are added to the buffer.
size_t length_of_64_bit_region_;
// When true, 64-bit offsets can still be added to the builder. When false,
// only 32-bit offsets can be added, and an attempt to add a 64-bit offset
// will raise an assertion. Such a failure typically reflects an ordering
// error: 64-bit offset fields must be serialized at the tail of the buffer.
// Ensure objects are not nested.
bool nested;
@ -1166,14 +1320,15 @@ class FlatBufferBuilder {
bool dedup_vtables_;
struct StringOffsetCompare {
StringOffsetCompare(const vector_downward &buf) : buf_(&buf) {}
explicit StringOffsetCompare(const vector_downward<SizeT> &buf)
: buf_(&buf) {}
bool operator()(const Offset<String> &a, const Offset<String> &b) const {
auto stra = reinterpret_cast<const String *>(buf_->data_at(a.o));
auto strb = reinterpret_cast<const String *>(buf_->data_at(b.o));
return StringLessThan(stra->data(), stra->size(), strb->data(),
strb->size());
}
const vector_downward *buf_;
const vector_downward<SizeT> *buf_;
};
// For use with CreateSharedString. Instantiated on first use only.
@ -1181,22 +1336,122 @@ class FlatBufferBuilder {
StringOffsetMap *string_pool;
private:
void CanAddOffset64() {
// If you hit this assertion, you are attempting to add a 64-bit offset to
// a 32-bit only builder. This is because the builder has overloads that
// differ only on the offset size returned: e.g.:
//
// FlatBufferBuilder builder;
// Offset64<String> string_offset = builder.CreateString<Offset64>();
//
// Either use a 64-bit aware builder, or don't try to create an Offset64
// return type.
//
// TODO(derekbailey): we can probably do more enable_if to avoid this
// looking like it's possible to the user.
static_assert(Is64Aware, "cannot add 64-bit offset to a 32-bit builder");
// If you hit this assertion, you are attempting to add a 64-bit offset
// item after already serializing a 32-bit item. All 64-bit offsets have to be
// added to the tail of the buffer before any 32-bit items can be added.
// Otherwise some items might not be addressable due to the maximum range of
// the 32-bit offset.
FLATBUFFERS_ASSERT(GetSize() == length_of_64_bit_region_);
}
/// @brief Store a string in the buffer, which can contain any binary data.
/// @param[in] str A const char pointer to the data to be stored as a string.
/// @param[in] len The number of bytes that should be stored from `str`.
/// @return Returns the offset in the buffer where the string starts.
void CreateStringImpl(const char *str, size_t len) {
NotNested();
PreAlign<uoffset_t>(len + 1); // Always 0-terminated.
buf_.fill(1);
PushBytes(reinterpret_cast<const uint8_t *>(str), len);
PushElement(static_cast<uoffset_t>(len));
}
// Allocates space for a vector of structures.
// Must be completed with EndVectorOfStructs().
template<typename T> T *StartVectorOfStructs(size_t vector_size) {
StartVector(vector_size * sizeof(T) / AlignOf<T>(), sizeof(T), AlignOf<T>());
template<typename T, template<typename> class OffsetT = Offset>
T *StartVectorOfStructs(size_t vector_size) {
StartVector<OffsetT>(vector_size * sizeof(T) / AlignOf<T>(), sizeof(T),
AlignOf<T>());
return reinterpret_cast<T *>(buf_.make_space(vector_size * sizeof(T)));
}
// End the vector of structures in the flatbuffers.
// Vector should have previously be started with StartVectorOfStructs().
template<typename T, template<typename> class OffsetT = Offset>
OffsetT<Vector<const T *>> EndVectorOfStructs(size_t vector_size) {
return OffsetT<Vector<const T *>>(
EndVector<typename Vector<const T *>::size_type,
typename OffsetT<Vector<const T *>>::offset_type>(
vector_size));
}
template<typename T>
Offset<Vector<const T *>> EndVectorOfStructs(size_t vector_size) {
return Offset<Vector<const T *>>(EndVector(vector_size));
typename std::enable_if<std::is_same<T, uoffset_t>::value, T>::type
CalculateOffset() {
// Default to the end of the 32-bit region. This may or may not be the end
// of the buffer, depending on if any 64-bit offsets have been added.
return GetSizeRelative32BitRegion();
}
// Specializations to handle the 64-bit CalculateOffset, which is relative to
// end of the buffer.
template<typename T>
typename std::enable_if<std::is_same<T, uoffset64_t>::value, T>::type
CalculateOffset() {
// This should never be compiled in when not using a 64-bit builder.
static_assert(Is64Aware, "invalid 64-bit offset in 32-bit builder");
// Store how big the 64-bit region of the buffer is, so we can determine
// where the 32/64 bit boundary is.
length_of_64_bit_region_ = GetSize();
return length_of_64_bit_region_;
}
};
/// @}
// Hack so that `FlatBufferBuilder` means `FlatBufferBuilderImpl<false>` (i.e.
// `FlatBufferBuilderImpl<>`), sparing users the template < > syntax.
typedef FlatBufferBuilderImpl<false> FlatBufferBuilder;
typedef FlatBufferBuilderImpl<true> FlatBufferBuilder64;
// These are external due to GCC not allowing them in the class.
// See: https://stackoverflow.com/q/8061456/868247
template<>
template<>
inline Offset64<String> FlatBufferBuilder64::CreateString(const char *str,
size_t len) {
CanAddOffset64();
CreateStringImpl(str, len);
return Offset64<String>(
CalculateOffset<typename Offset64<String>::offset_type>());
}
// Used to distinguish from real Offsets.
template<typename T = void> struct EmptyOffset {};
// TODO(derekbailey): it would be nice to combine these two methods.
template<>
template<>
inline void FlatBufferBuilder64::StartVector<Offset64, uint32_t>(
size_t len, size_t elemsize, size_t alignment) {
CanAddOffset64();
StartVector<EmptyOffset, uint32_t>(len, elemsize, alignment);
}
template<>
template<>
inline void FlatBufferBuilder64::StartVector<Offset64, uint64_t>(
size_t len, size_t elemsize, size_t alignment) {
CanAddOffset64();
StartVector<EmptyOffset, uint64_t>(len, elemsize, alignment);
}
/// Helpers to get a typed pointer to objects that are currently being built.
/// @warning Creating new objects will lead to reallocations and invalidates
/// the pointer!
@ -1211,15 +1466,6 @@ const T *GetTemporaryPointer(FlatBufferBuilder &fbb, Offset<T> offset) {
return GetMutableTemporaryPointer<T>(fbb, offset);
}
template<typename T>
void FlatBufferBuilder::Required(Offset<T> table, voffset_t field) {
auto table_ptr = reinterpret_cast<const Table *>(buf_.data_at(table.o));
bool ok = table_ptr->GetOptionalFieldOffset(field) != 0;
// If this fails, the caller will show what field needs to be set.
FLATBUFFERS_ASSERT(ok);
(void)ok;
}
} // namespace flatbuffers
#endif // FLATBUFFERS_VECTOR_DOWNWARD_H_
#endif // FLATBUFFERS_FLATBUFFER_BUILDER_H_

View File

@ -76,8 +76,9 @@ inline const uint8_t *GetBufferStartFromRootPointer(const void *root) {
}
/// @brief This return the prefixed size of a FlatBuffer.
inline uoffset_t GetPrefixedSize(const uint8_t *buf) {
return ReadScalar<uoffset_t>(buf);
template<typename SizeT = uoffset_t>
inline SizeT GetPrefixedSize(const uint8_t *buf) {
return ReadScalar<SizeT>(buf);
}
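
A short sketch of the templated prefix reader; the default reads a 4-byte `uoffset_t` prefix, and the `SizeT` argument selects a 64-bit prefix instead:

#include <cstdint>

#include "flatbuffers/flatbuffers.h"

inline uint64_t PrefixedLength64(const uint8_t *buf) {
  // Read an 8-byte length prefix from a size-prefixed 64-bit buffer.
  return flatbuffers::GetPrefixedSize<flatbuffers::uoffset64_t>(buf);
}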
// Base class for native objects (FlatBuffer data de-serialized into native

View File

@ -41,15 +41,18 @@
#include <optional>
#endif
// The __cpp_lib_span is the predefined feature macro.
#ifndef FLATBUFFERS_USE_STD_SPAN
// Testing __cpp_lib_span requires including either <version> or <span>,
// both of which were added in C++20.
// See: https://en.cppreference.com/w/cpp/utility/feature_test
#if defined(__cplusplus) && __cplusplus >= 202002L
#define FLATBUFFERS_USE_STD_SPAN 1
#endif
#endif // FLATBUFFERS_USE_STD_SPAN
#if defined(FLATBUFFERS_USE_STD_SPAN)
#include <span>
#elif defined(__cpp_lib_span) && defined(__has_include)
#if __has_include(<span>)
#include <array>
#include <span>
#define FLATBUFFERS_USE_STD_SPAN
#endif
#else
// Disable non-trivial ctors if FLATBUFFERS_SPAN_MINIMAL defined.
#if !defined(FLATBUFFERS_TEMPLATES_ALIASES)
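
Since the detection above is wrapped in `#ifndef FLATBUFFERS_USE_STD_SPAN`, a build can also opt in explicitly; a sketch, assuming a C++20 standard library with <span> and that this is defined before any FlatBuffers header is included:

// Force std::span support instead of relying on the __cplusplus check.
#define FLATBUFFERS_USE_STD_SPAN 1
#include "flatbuffers/stl_emulation.h"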

View File

@ -47,14 +47,24 @@ class Table {
return field_offset ? ReadScalar<T>(data_ + field_offset) : defaultval;
}
template<typename P> P GetPointer(voffset_t field) {
template<typename P, typename OffsetSize = uoffset_t>
P GetPointer(voffset_t field) {
auto field_offset = GetOptionalFieldOffset(field);
auto p = data_ + field_offset;
return field_offset ? reinterpret_cast<P>(p + ReadScalar<uoffset_t>(p))
return field_offset ? reinterpret_cast<P>(p + ReadScalar<OffsetSize>(p))
: nullptr;
}
template<typename P> P GetPointer(voffset_t field) const {
return const_cast<Table *>(this)->GetPointer<P>(field);
template<typename P, typename OffsetSize = uoffset_t>
P GetPointer(voffset_t field) const {
return const_cast<Table *>(this)->GetPointer<P, OffsetSize>(field);
}
template<typename P> P GetPointer64(voffset_t field) {
return GetPointer<P, uoffset64_t>(field);
}
template<typename P> P GetPointer64(voffset_t field) const {
return GetPointer<P, uoffset64_t>(field);
}
template<typename P> P GetStruct(voffset_t field) const {
@ -131,15 +141,25 @@ class Table {
}
// Versions for offsets.
template<typename OffsetT = uoffset_t>
bool VerifyOffset(const Verifier &verifier, voffset_t field) const {
auto field_offset = GetOptionalFieldOffset(field);
return !field_offset || verifier.VerifyOffset(data_, field_offset);
return !field_offset || verifier.VerifyOffset<OffsetT>(data_, field_offset);
}
template<typename OffsetT = uoffset_t>
bool VerifyOffsetRequired(const Verifier &verifier, voffset_t field) const {
auto field_offset = GetOptionalFieldOffset(field);
return verifier.Check(field_offset != 0) &&
verifier.VerifyOffset(data_, field_offset);
verifier.VerifyOffset<OffsetT>(data_, field_offset);
}
bool VerifyOffset64(const Verifier &verifier, voffset_t field) const {
return VerifyOffset<uoffset64_t>(verifier, field);
}
bool VerifyOffset64Required(const Verifier &verifier, voffset_t field) const {
return VerifyOffsetRequired<uoffset64_t>(verifier, field);
}
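
A hedged sketch of how generated accessor code might use the new 64-bit variants; the vtable slot (4, i.e. field id 0) and the field's element type are illustrative only:

#include "flatbuffers/flatbuffers.h"

inline const flatbuffers::Vector64<uint8_t> *GetBigBlob(
    const flatbuffers::Table *table) {
  // Reads an 8-byte offset stored in the table field at vtable slot 4.
  return table->GetPointer64<const flatbuffers::Vector64<uint8_t> *>(4);
}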
private:

View File

@ -27,7 +27,8 @@ struct String;
// An STL compatible iterator implementation for Vector below, effectively
// calling Get() for every element.
template<typename T, typename IT, typename Data = uint8_t *>
template<typename T, typename IT, typename Data = uint8_t *,
typename SizeT = uoffset_t>
struct VectorIterator {
typedef std::random_access_iterator_tag iterator_category;
typedef IT value_type;
@ -35,8 +36,9 @@ struct VectorIterator {
typedef IT *pointer;
typedef IT &reference;
VectorIterator(Data data, uoffset_t i)
: data_(data + IndirectHelper<T>::element_stride * i) {}
static const SizeT element_stride = IndirectHelper<T>::element_stride;
VectorIterator(Data data, SizeT i) : data_(data + element_stride * i) {}
VectorIterator(const VectorIterator &other) : data_(other.data_) {}
VectorIterator() : data_(nullptr) {}
@ -63,7 +65,7 @@ struct VectorIterator {
}
difference_type operator-(const VectorIterator &other) const {
return (data_ - other.data_) / IndirectHelper<T>::element_stride;
return (data_ - other.data_) / element_stride;
}
// Note: return type is incompatible with the standard
@ -75,44 +77,42 @@ struct VectorIterator {
IT operator->() const { return IndirectHelper<T>::Read(data_, 0); }
VectorIterator &operator++() {
data_ += IndirectHelper<T>::element_stride;
data_ += element_stride;
return *this;
}
VectorIterator operator++(int) {
VectorIterator temp(data_, 0);
data_ += IndirectHelper<T>::element_stride;
data_ += element_stride;
return temp;
}
VectorIterator operator+(const uoffset_t &offset) const {
return VectorIterator(data_ + offset * IndirectHelper<T>::element_stride,
0);
VectorIterator operator+(const SizeT &offset) const {
return VectorIterator(data_ + offset * element_stride, 0);
}
VectorIterator &operator+=(const uoffset_t &offset) {
data_ += offset * IndirectHelper<T>::element_stride;
VectorIterator &operator+=(const SizeT &offset) {
data_ += offset * element_stride;
return *this;
}
VectorIterator &operator--() {
data_ -= IndirectHelper<T>::element_stride;
data_ -= element_stride;
return *this;
}
VectorIterator operator--(int) {
VectorIterator temp(data_, 0);
data_ -= IndirectHelper<T>::element_stride;
data_ -= element_stride;
return temp;
}
VectorIterator operator-(const uoffset_t &offset) const {
return VectorIterator(data_ - offset * IndirectHelper<T>::element_stride,
0);
VectorIterator operator-(const SizeT &offset) const {
return VectorIterator(data_ - offset * element_stride, 0);
}
VectorIterator &operator-=(const uoffset_t &offset) {
data_ -= offset * IndirectHelper<T>::element_stride;
VectorIterator &operator-=(const SizeT &offset) {
data_ -= offset * element_stride;
return *this;
}
@ -120,8 +120,8 @@ struct VectorIterator {
Data data_;
};
template<typename T, typename IT>
using VectorConstIterator = VectorIterator<T, IT, const uint8_t *>;
template<typename T, typename IT, typename SizeT = uoffset_t>
using VectorConstIterator = VectorIterator<T, IT, const uint8_t *, SizeT>;
template<typename Iterator>
struct VectorReverseIterator : public std::reverse_iterator<Iterator> {
@ -145,11 +145,14 @@ struct VectorReverseIterator : public std::reverse_iterator<Iterator> {
// This is used as a helper type for accessing vectors.
// Vector::data() assumes the vector elements start after the length field.
template<typename T> class Vector {
template<typename T, typename SizeT = uoffset_t> class Vector {
public:
typedef VectorIterator<T, typename IndirectHelper<T>::mutable_return_type>
typedef VectorIterator<T,
typename IndirectHelper<T>::mutable_return_type,
uint8_t *, SizeT>
iterator;
typedef VectorConstIterator<T, typename IndirectHelper<T>::return_type>
typedef VectorConstIterator<T, typename IndirectHelper<T>::return_type,
SizeT>
const_iterator;
typedef VectorReverseIterator<iterator> reverse_iterator;
typedef VectorReverseIterator<const_iterator> const_reverse_iterator;
@ -160,39 +163,41 @@ template<typename T> class Vector {
static FLATBUFFERS_CONSTEXPR bool is_span_observable =
scalar_tag::value && (FLATBUFFERS_LITTLEENDIAN || sizeof(T) == 1);
uoffset_t size() const { return EndianScalar(length_); }
SizeT size() const { return EndianScalar(length_); }
// Deprecated: use size(). Here for backwards compatibility.
FLATBUFFERS_ATTRIBUTE([[deprecated("use size() instead")]])
uoffset_t Length() const { return size(); }
SizeT Length() const { return size(); }
typedef SizeT size_type;
typedef typename IndirectHelper<T>::return_type return_type;
typedef typename IndirectHelper<T>::mutable_return_type mutable_return_type;
typedef typename IndirectHelper<T>::mutable_return_type
mutable_return_type;
typedef return_type value_type;
return_type Get(uoffset_t i) const {
return_type Get(SizeT i) const {
FLATBUFFERS_ASSERT(i < size());
return IndirectHelper<T>::Read(Data(), i);
}
return_type operator[](uoffset_t i) const { return Get(i); }
return_type operator[](SizeT i) const { return Get(i); }
// If this is a Vector of enums, T will be its storage type, not the enum
// type. This function makes it convenient to retrieve value with enum
// type E.
template<typename E> E GetEnum(uoffset_t i) const {
template<typename E> E GetEnum(SizeT i) const {
return static_cast<E>(Get(i));
}
// If this a vector of unions, this does the cast for you. There's no check
// to make sure this is the right type!
template<typename U> const U *GetAs(uoffset_t i) const {
template<typename U> const U *GetAs(SizeT i) const {
return reinterpret_cast<const U *>(Get(i));
}
// If this a vector of unions, this does the cast for you. There's no check
// to make sure this is actually a string!
const String *GetAsString(uoffset_t i) const {
const String *GetAsString(SizeT i) const {
return reinterpret_cast<const String *>(Get(i));
}
@ -226,7 +231,7 @@ template<typename T> class Vector {
// Change elements if you have a non-const pointer to this object.
// Scalars only. See reflection.h, and the documentation.
void Mutate(uoffset_t i, const T &val) {
void Mutate(SizeT i, const T &val) {
FLATBUFFERS_ASSERT(i < size());
WriteScalar(data() + i, val);
}
@ -234,15 +239,15 @@ template<typename T> class Vector {
// Change an element of a vector of tables (or strings).
// "val" points to the new table/string, as you can obtain from
// e.g. reflection::AddFlatBuffer().
void MutateOffset(uoffset_t i, const uint8_t *val) {
void MutateOffset(SizeT i, const uint8_t *val) {
FLATBUFFERS_ASSERT(i < size());
static_assert(sizeof(T) == sizeof(uoffset_t), "Unrelated types");
static_assert(sizeof(T) == sizeof(SizeT), "Unrelated types");
WriteScalar(data() + i,
static_cast<uoffset_t>(val - (Data() + i * sizeof(uoffset_t))));
static_cast<SizeT>(val - (Data() + i * sizeof(SizeT))));
}
// Get a mutable pointer to tables/strings inside this vector.
mutable_return_type GetMutableObject(uoffset_t i) const {
mutable_return_type GetMutableObject(SizeT i) const {
FLATBUFFERS_ASSERT(i < size());
return const_cast<mutable_return_type>(IndirectHelper<T>::Read(Data(), i));
}
@ -280,7 +285,7 @@ template<typename T> class Vector {
// try to construct these manually.
Vector();
uoffset_t length_;
SizeT length_;
private:
// This class is a pointer. Copying will therefore create an invalid object.
@ -299,6 +304,8 @@ template<typename T> class Vector {
}
};
template<typename T> using Vector64 = Vector<T, uoffset64_t>;
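
A small sketch of consuming a 64-bit-length vector; the pointer would come from a generated accessor such as the one sketched for `Table::GetPointer64` above:

#include <cstdint>

#include "flatbuffers/flatbuffers.h"

inline uint64_t SumBytes(const flatbuffers::Vector64<uint8_t> *v) {
  uint64_t total = 0;
  for (flatbuffers::uoffset64_t i = 0; i < v->size(); ++i) total += v->Get(i);
  return total;
}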
template<class U>
FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span<U> make_span(Vector<U> &vec)
FLATBUFFERS_NOEXCEPT {

View File

@ -17,6 +17,8 @@
#ifndef FLATBUFFERS_VECTOR_DOWNWARD_H_
#define FLATBUFFERS_VECTOR_DOWNWARD_H_
#include <cstdint>
#include <algorithm>
#include "flatbuffers/base.h"
@ -31,13 +33,15 @@ namespace flatbuffers {
// Since this vector leaves the lower part unused, we support a "scratch-pad"
// that can be stored there for temporary data, to share the allocated space.
// Essentially, this supports 2 std::vectors in a single buffer.
class vector_downward {
template<typename SizeT = uoffset_t> class vector_downward {
public:
explicit vector_downward(size_t initial_size, Allocator *allocator,
bool own_allocator, size_t buffer_minalign)
bool own_allocator, size_t buffer_minalign,
const SizeT max_size = FLATBUFFERS_MAX_BUFFER_SIZE)
: allocator_(allocator),
own_allocator_(own_allocator),
initial_size_(initial_size),
max_size_(max_size),
buffer_minalign_(buffer_minalign),
reserved_(0),
size_(0),
@ -50,6 +54,7 @@ class vector_downward {
: allocator_(other.allocator_),
own_allocator_(other.own_allocator_),
initial_size_(other.initial_size_),
max_size_(other.max_size_),
buffer_minalign_(other.buffer_minalign_),
reserved_(other.reserved_),
size_(other.size_),
@ -111,7 +116,7 @@ class vector_downward {
uint8_t *release_raw(size_t &allocated_bytes, size_t &offset) {
auto *buf = buf_;
allocated_bytes = reserved_;
offset = static_cast<size_t>(cur_ - buf_);
offset = vector_downward::offset();
// release_raw only relinquishes the buffer ownership.
// Does not deallocate or reset the allocator. Destructor will do that.
@ -136,10 +141,10 @@ class vector_downward {
size_t ensure_space(size_t len) {
FLATBUFFERS_ASSERT(cur_ >= scratch_ && scratch_ >= buf_);
if (len > static_cast<size_t>(cur_ - scratch_)) { reallocate(len); }
// Beyond this, signed offsets may not have enough range:
// (FlatBuffers > 2GB not supported).
FLATBUFFERS_ASSERT(size() < FLATBUFFERS_MAX_BUFFER_SIZE);
// If the length is larger than the unused part of the buffer, we need to
// grow.
if (len > unused_buffer_size()) { reallocate(len); }
FLATBUFFERS_ASSERT(size() < max_size_);
return len;
}
@ -147,7 +152,7 @@ class vector_downward {
if (len) {
ensure_space(len);
cur_ -= len;
size_ += static_cast<uoffset_t>(len);
size_ += static_cast<SizeT>(len);
}
return cur_;
}
@ -155,11 +160,17 @@ class vector_downward {
// Returns nullptr if using the DefaultAllocator.
Allocator *get_custom_allocator() { return allocator_; }
inline uoffset_t size() const { return size_; }
// The current offset into the buffer.
size_t offset() const { return cur_ - buf_; }
uoffset_t scratch_size() const {
return static_cast<uoffset_t>(scratch_ - buf_);
}
// The total size of the vector (both the buffer and scratch parts).
inline SizeT size() const { return size_; }
// The size of the buffer part of the vector that is currently unused.
SizeT unused_buffer_size() const { return static_cast<SizeT>(cur_ - scratch_); }
// The size of the scratch part of the vector.
SizeT scratch_size() const { return static_cast<SizeT>(scratch_ - buf_); }
size_t capacity() const { return reserved_; }
@ -211,7 +222,7 @@ class vector_downward {
void pop(size_t bytes_to_remove) {
cur_ += bytes_to_remove;
size_ -= static_cast<uoffset_t>(bytes_to_remove);
size_ -= static_cast<SizeT>(bytes_to_remove);
}
void scratch_pop(size_t bytes_to_remove) { scratch_ -= bytes_to_remove; }
@ -224,6 +235,7 @@ class vector_downward {
swap(buffer_minalign_, other.buffer_minalign_);
swap(reserved_, other.reserved_);
swap(size_, other.size_);
swap(max_size_, other.max_size_);
swap(buf_, other.buf_);
swap(cur_, other.cur_);
swap(scratch_, other.scratch_);
@ -243,9 +255,12 @@ class vector_downward {
Allocator *allocator_;
bool own_allocator_;
size_t initial_size_;
// The maximum size the vector can be.
SizeT max_size_;
size_t buffer_minalign_;
size_t reserved_;
uoffset_t size_;
SizeT size_;
uint8_t *buf_;
uint8_t *cur_; // Points at location between empty (below) and used (above).
uint8_t *scratch_; // Points to the end of the scratchpad in use.
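
For orientation only, the sketch below mimics the downward-growing layout and the new max_size_ cap in a standalone type: data is written from the high end of the allocation toward the low end, and the number of bytes written is checked against a configurable limit, mirroring the FLATBUFFERS_ASSERT(size() < max_size_) above. DownwardBuffer and its members are invented for illustration and are not part of the library.

#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

// Illustration only: a fixed-capacity buffer that fills from its high end
// toward its low end, with a configurable cap on how much may be written.
struct DownwardBuffer {
  std::vector<uint8_t> storage;
  size_t cur;       // index of the lowest written byte; decreases as data is pushed
  size_t max_size;  // cap on the total number of bytes written

  DownwardBuffer(size_t capacity, size_t max_sz)
      : storage(capacity), cur(capacity), max_size(max_sz) {}

  // Bytes written so far (the used, upper part of the storage).
  size_t size() const { return storage.size() - cur; }

  void push(const void *bytes, size_t len) {
    assert(len <= cur);         // this sketch does not reallocate
    cur -= len;
    std::memcpy(storage.data() + cur, bytes, len);
    assert(size() < max_size);  // mirrors the size() < max_size_ check above
  }
};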

View File

@ -34,12 +34,16 @@ class Verifier FLATBUFFERS_FINAL_CLASS {
bool check_alignment = true;
// If true, run verifier on nested flatbuffers
bool check_nested_flatbuffers = true;
// The maximum size of a buffer.
size_t max_size = FLATBUFFERS_MAX_BUFFER_SIZE;
// Use assertions to check for errors.
bool assert = false;
};
explicit Verifier(const uint8_t *const buf, const size_t buf_len,
const Options &opts)
: buf_(buf), size_(buf_len), opts_(opts) {
FLATBUFFERS_ASSERT(size_ < FLATBUFFERS_MAX_BUFFER_SIZE);
FLATBUFFERS_ASSERT(size_ < opts.max_size);
}
// Deprecated API, please construct with Verifier::Options.
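
With the new Options fields, callers can raise the size cap and decide whether verification failures should trip FLATBUFFERS_ASSERT (the assert option only takes effect when FLATBUFFERS_DEBUG_VERIFICATION_FAILURE is defined, per the Check() change below). A minimal sketch, with T standing in for any schema-generated table type and buf/len for a buffer received from elsewhere; the 8 GiB cap is an arbitrary example value and assumes a 64-bit size_t.

#include <cstddef>
#include <cstdint>

#include "flatbuffers/flatbuffers.h"

// Sketch only: T is a placeholder for a generated table type (it must provide
// the Verify() hook emitted by flatc).
template<typename T>
bool CheckBuffer(const uint8_t *buf, size_t len) {
  flatbuffers::Verifier::Options opts;
  opts.max_size = size_t(1) << 33;  // example: allow buffers up to 8 GiB
  opts.assert = true;               // assert on failure in debug verification builds
  flatbuffers::Verifier verifier(buf, len, opts);
  return verifier.VerifyBuffer<T>(nullptr);  // nullptr: skip the file identifier check
}
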
@ -58,7 +62,7 @@ class Verifier FLATBUFFERS_FINAL_CLASS {
bool Check(const bool ok) const {
// clang-format off
#ifdef FLATBUFFERS_DEBUG_VERIFICATION_FAILURE
FLATBUFFERS_ASSERT(ok);
if (opts_.assert) { FLATBUFFERS_ASSERT(ok); }
#endif
#ifdef FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE
if (!ok)
@ -113,41 +117,43 @@ class Verifier FLATBUFFERS_FINAL_CLASS {
}
// Verify a pointer (may be NULL) of any vector type.
template<typename T> bool VerifyVector(const Vector<T> *const vec) const {
return !vec || VerifyVectorOrString(reinterpret_cast<const uint8_t *>(vec),
sizeof(T));
template<int &..., typename T, typename LenT>
bool VerifyVector(const Vector<T, LenT> *const vec) const {
return !vec || VerifyVectorOrString<LenT>(
reinterpret_cast<const uint8_t *>(vec), sizeof(T));
}
// Verify a pointer (may be NULL) of a vector to struct.
template<typename T>
bool VerifyVector(const Vector<const T *> *const vec) const {
return VerifyVector(reinterpret_cast<const Vector<T> *>(vec));
template<int &..., typename T, typename LenT>
bool VerifyVector(const Vector<const T *, LenT> *const vec) const {
return VerifyVector(reinterpret_cast<const Vector<T, LenT> *>(vec));
}
// Verify a pointer (may be NULL) to string.
bool VerifyString(const String *const str) const {
size_t end;
return !str || (VerifyVectorOrString(reinterpret_cast<const uint8_t *>(str),
1, &end) &&
return !str || (VerifyVectorOrString<uoffset_t>(
reinterpret_cast<const uint8_t *>(str), 1, &end) &&
Verify(end, 1) && // Must have terminator
Check(buf_[end] == '\0')); // Terminating byte must be 0.
}
// Common code between vectors and strings.
template<typename LenT = uoffset_t>
bool VerifyVectorOrString(const uint8_t *const vec, const size_t elem_size,
size_t *const end = nullptr) const {
const auto veco = static_cast<size_t>(vec - buf_);
const auto vec_offset = static_cast<size_t>(vec - buf_);
// Check we can read the size field.
if (!Verify<uoffset_t>(veco)) return false;
if (!Verify<LenT>(vec_offset)) return false;
// Check the whole array. If this is a string, the byte past the array must
// be 0.
const auto size = ReadScalar<uoffset_t>(vec);
const auto max_elems = FLATBUFFERS_MAX_BUFFER_SIZE / elem_size;
const LenT size = ReadScalar<LenT>(vec);
const auto max_elems = opts_.max_size / elem_size;
if (!Check(size < max_elems))
return false; // Protect against byte_size overflowing.
const auto byte_size = sizeof(size) + elem_size * size;
if (end) *end = veco + byte_size;
return Verify(veco, byte_size);
const auto byte_size = sizeof(LenT) + elem_size * size;
if (end) *end = vec_offset + byte_size;
return Verify(vec_offset, byte_size);
}
// Special case for string contents, after the above has been called.
@ -203,7 +209,7 @@ class Verifier FLATBUFFERS_FINAL_CLASS {
}
// Call T::Verify, which must be in the generated code for this type.
const auto o = VerifyOffset(start);
const auto o = VerifyOffset<uoffset_t>(start);
return Check(o != 0) &&
reinterpret_cast<const T *>(buf_ + start + o)->Verify(*this)
// clang-format off
@ -214,8 +220,8 @@ class Verifier FLATBUFFERS_FINAL_CLASS {
// clang-format on
}
template<typename T>
bool VerifyNestedFlatBuffer(const Vector<uint8_t> *const buf,
template<typename T, int &..., typename SizeT>
bool VerifyNestedFlatBuffer(const Vector<uint8_t, SizeT> *const buf,
const char *const identifier) {
// Caller opted out of this.
if (!opts_.check_nested_flatbuffers) return true;
@ -226,7 +232,7 @@ class Verifier FLATBUFFERS_FINAL_CLASS {
// If there is a nested buffer, it must be greater than the min size.
if (!Check(buf->size() >= FLATBUFFERS_MIN_BUFFER_SIZE)) return false;
Verifier nested_verifier(buf->data(), buf->size());
Verifier nested_verifier(buf->data(), buf->size(), opts_);
return nested_verifier.VerifyBuffer<T>(identifier);
}
@ -237,29 +243,30 @@ class Verifier FLATBUFFERS_FINAL_CLASS {
return VerifyBufferFromStart<T>(identifier, 0);
}
template<typename T>
template<typename T, typename SizeT = uoffset_t>
bool VerifySizePrefixedBuffer(const char *const identifier) {
return Verify<uoffset_t>(0U) &&
Check(ReadScalar<uoffset_t>(buf_) == size_ - sizeof(uoffset_t)) &&
VerifyBufferFromStart<T>(identifier, sizeof(uoffset_t));
return Verify<SizeT>(0U) &&
Check(ReadScalar<SizeT>(buf_) == size_ - sizeof(SizeT)) &&
VerifyBufferFromStart<T>(identifier, sizeof(SizeT));
}
uoffset_t VerifyOffset(const size_t start) const {
if (!Verify<uoffset_t>(start)) return 0;
const auto o = ReadScalar<uoffset_t>(buf_ + start);
template<typename OffsetT = uoffset_t, typename SOffsetT = soffset_t>
size_t VerifyOffset(const size_t start) const {
if (!Verify<OffsetT>(start)) return 0;
const auto o = ReadScalar<OffsetT>(buf_ + start);
// May not point to itself.
if (!Check(o != 0)) return 0;
// Can't wrap around / buffers are max 2GB.
if (!Check(static_cast<soffset_t>(o) >= 0)) return 0;
// Can't wrap around: the offset may not be larger than the max buffer size.
if (!Check(static_cast<SOffsetT>(o) >= 0)) return 0;
// Must be inside the buffer to create a pointer from it (pointer outside
// buffer is UB).
if (!Verify(start + o, 1)) return 0;
return o;
}
uoffset_t VerifyOffset(const uint8_t *const base,
const voffset_t start) const {
return VerifyOffset(static_cast<size_t>(base - buf_) + start);
template<typename OffsetT = uoffset_t>
size_t VerifyOffset(const uint8_t *const base, const voffset_t start) const {
return VerifyOffset<OffsetT>(static_cast<size_t>(base - buf_) + start);
}
// Called at the start of a table to increase counters measuring data
@ -312,6 +319,12 @@ class Verifier FLATBUFFERS_FINAL_CLASS {
std::vector<uint8_t> *flex_reuse_tracker_ = nullptr;
};
// Specialization for 64-bit offsets.
template<>
inline size_t Verifier::VerifyOffset<uoffset64_t>(const size_t start) const {
return VerifyOffset<uoffset64_t, soffset64_t>(start);
}
} // namespace flatbuffers
#endif // FLATBUFFERS_VERIFIER_H_
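
The size-prefixed path follows the same pattern, with the prefix width selectable through the new SizeT parameter and the uoffset64_t specialization above routing the sign check through soffset64_t. Another hedged sketch, with T again a placeholder for a generated table type; using FLATBUFFERS_MAX_64_BUFFER_SIZE as the cap assumes a 64-bit size_t.

#include <cstddef>
#include <cstdint>

#include "flatbuffers/flatbuffers.h"

// Sketch only: verifies a buffer whose length prefix is a 64-bit uoffset64_t
// rather than the default 32-bit uoffset_t.
template<typename T>
bool CheckSizePrefixed64(const uint8_t *buf, size_t len) {
  flatbuffers::Verifier::Options opts;
  opts.max_size = FLATBUFFERS_MAX_64_BUFFER_SIZE;  // lift the default 2GB-1 cap
  flatbuffers::Verifier verifier(buf, len, opts);
  return verifier.VerifySizePrefixedBuffer<T, flatbuffers::uoffset64_t>(nullptr);
}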

View File

@ -1,6 +1,6 @@
if(WITH_FLATBUFFERS)
set(HAVE_FLATBUFFERS 1)
set(flatbuffers_VERSION "23.1.21")
set(flatbuffers_VERSION "23.5.9")
ocv_install_3rdparty_licenses(flatbuffers "${OpenCV_SOURCE_DIR}/3rdparty/flatbuffers/LICENSE.txt")
ocv_add_external_target(flatbuffers "${OpenCV_SOURCE_DIR}/3rdparty/flatbuffers/include" "" "HAVE_FLATBUFFERS=1")
set(CUSTOM_STATUS_flatbuffers " Flatbuffers:" "builtin/3rdparty (${flatbuffers_VERSION})")

View File

@ -9,8 +9,8 @@
// Ensure the included flatbuffers.h is the same version as when this file was
// generated, otherwise it may not be compatible.
static_assert(FLATBUFFERS_VERSION_MAJOR == 23 &&
FLATBUFFERS_VERSION_MINOR == 1 &&
FLATBUFFERS_VERSION_REVISION == 21,
FLATBUFFERS_VERSION_MINOR == 5 &&
FLATBUFFERS_VERSION_REVISION == 9,
"Non-compatible flatbuffers version included");
namespace opencv_tflite {