From 624cf03cf78053acb70ef49b845eca46849c48a1 Mon Sep 17 00:00:00 2001 From: TFLM-bot Date: Thu, 16 Oct 2025 14:02:59 +0000 Subject: [PATCH 1/4] Sync from upstream TF. --- .../compiler/mlir/lite/core/c/tflite_types.h | 1 + .../compiler/mlir/lite/schema/schema.fbs | 1 + .../lite/core/api/flatbuffer_conversions.cc | 3 + tensorflow/lite/core/c/common.cc | 2 + .../kernels/internal/portable_tensor_utils.cc | 98 ++++++++++++++++--- .../kernels/internal/portable_tensor_utils.h | 33 +++++-- tensorflow/lite/python/schema_py_generated.py | 1 + tensorflow/lite/schema/schema_generated.h | 13 ++- tensorflow/lite/tools/visualize.py | 2 +- 9 files changed, 127 insertions(+), 27 deletions(-) diff --git a/tensorflow/compiler/mlir/lite/core/c/tflite_types.h b/tensorflow/compiler/mlir/lite/core/c/tflite_types.h index 068facb1076..f09923dda5f 100644 --- a/tensorflow/compiler/mlir/lite/core/c/tflite_types.h +++ b/tensorflow/compiler/mlir/lite/core/c/tflite_types.h @@ -64,6 +64,7 @@ typedef enum { kTfLiteUInt16 = 17, kTfLiteInt4 = 18, kTfLiteBFloat16 = 19, + kTfLiteInt2 = 20, } TfLiteType; // LINT.ThenChange(//tensorflow/lite/profiling/proto/model_runtime_info.proto:EdgeDataType) diff --git a/tensorflow/compiler/mlir/lite/schema/schema.fbs b/tensorflow/compiler/mlir/lite/schema/schema.fbs index dcf82e38a96..d9da8f0a331 100644 --- a/tensorflow/compiler/mlir/lite/schema/schema.fbs +++ b/tensorflow/compiler/mlir/lite/schema/schema.fbs @@ -59,6 +59,7 @@ enum TensorType : byte { UINT16 = 16, INT4 = 17, BFLOAT16 = 18, + INT2 = 19, } // Custom quantization parameters for experimenting with new quantization diff --git a/tensorflow/lite/core/api/flatbuffer_conversions.cc b/tensorflow/lite/core/api/flatbuffer_conversions.cc index 882b839049c..cc31fb44714 100644 --- a/tensorflow/lite/core/api/flatbuffer_conversions.cc +++ b/tensorflow/lite/core/api/flatbuffer_conversions.cc @@ -1088,6 +1088,9 @@ TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type, case TensorType_INT4: *type = kTfLiteInt4; return kTfLiteOk; + case TensorType_INT2: + *type = kTfLiteInt2; + return kTfLiteOk; default: *type = kTfLiteNoType; TF_LITE_REPORT_ERROR(error_reporter, diff --git a/tensorflow/lite/core/c/common.cc b/tensorflow/lite/core/c/common.cc index 4f404c93a18..5d483bdf977 100644 --- a/tensorflow/lite/core/c/common.cc +++ b/tensorflow/lite/core/c/common.cc @@ -509,6 +509,8 @@ const char* TfLiteTypeGetName(TfLiteType type) { return "VARIANT"; case kTfLiteInt4: return "INT4"; + case kTfLiteInt2: + return "INT2"; } return "Unknown type"; } diff --git a/tensorflow/lite/kernels/internal/portable_tensor_utils.cc b/tensorflow/lite/kernels/internal/portable_tensor_utils.cc index 0928d4b0d0d..efc6ba5a9c0 100644 --- a/tensorflow/lite/kernels/internal/portable_tensor_utils.cc +++ b/tensorflow/lite/kernels/internal/portable_tensor_utils.cc @@ -18,6 +18,7 @@ limitations under the License. #include "tensorflow/lite/kernels/internal/portable_tensor_utils.h" #include +#include #include #include @@ -92,23 +93,90 @@ void UnpackDenseInt4IntoInt8(const int8_t* src_buffer, int num_elements, } } -void PackInt8IntoDenseInt4(const int8_t* src_buffer, int num_elements, - int8_t* dst_buffer) { - // num_elements means the number of elements regardless of packed or unpacked. - // For example, 3 elements means both - // 1) Packed: 3 int4's = 12 bit -> 16 bits (padded) = 2 bytes. - // stored in src_buffer[0] and src_buffer[1] (i = 0..1) - // 2) Unpacked: 3 int8's = 3 bytes. 
- // stored in dst_buffer[0], dst_buffer[1] and dst_buffer[2] (j = 0..2) - for (int i = 0; i < num_elements - 1; i += 2) { - dst_buffer[i / 2] = src_buffer[i] & 0x0F; - dst_buffer[i / 2] |= src_buffer[i + 1] << 4; +void UnpackPackedIntToInt8(const int8_t* src_buffer, int num_elements, + int bit_width, int8_t* dst_buffer) { + assert(bit_width == 2 || bit_width == 4); + if (bit_width == 4) { + // num_elements means the number of elements regardless of packed or + // unpacked. For example, 3 elements means both + // 1) Packed: 3 int4's = 12 bit -> 16 bits (padded) = 2 bytes. + // stored in src_buffer[0] and src_buffer[1] (i = 0..1) + // 2) Unpacked: 3 int8's = 3 bytes. + //. stored in dst_buffer[0], dst_buffer[1] and dst_buffer[2] (j = 0..2) + for (int i = 0; i < num_elements / 2; i++) { + int8_t byte = src_buffer[i]; + // Shift left first so that sign is properly extended when shifted right + int8_t lower = static_cast(byte << 4) >> 4; + int8_t higher = byte >> 4; + dst_buffer[2 * i] = lower; + dst_buffer[2 * i + 1] = higher; + } + + // If the buffer size is odd, extract the final lower nibble. + if (num_elements % 2 != 0) { + dst_buffer[num_elements - 1] = + static_cast(src_buffer[num_elements / 2] << 4) >> 4; + } + } else if (bit_width == 2) { + for (int i = 0; i < num_elements / 4; i++) { + int8_t byte = src_buffer[i]; + // Shift left first so that sign is properly extended when shifted right + int8_t val1 = static_cast(byte << 6) >> 6; + int8_t val2 = static_cast((byte << 4) & 0xFF) >> 6; + int8_t val3 = static_cast((byte << 2) & 0xFF) >> 6; + int8_t val4 = byte >> 6; + dst_buffer[4 * i] = val1; + dst_buffer[4 * i + 1] = val2; + dst_buffer[4 * i + 2] = val3; + dst_buffer[4 * i + 3] = val4; + } + + // Handle the remaining elements. + int remaining_elements = num_elements % 4; + if (remaining_elements > 0) { + int8_t byte = src_buffer[num_elements / 4]; + for (int i = 0; i < remaining_elements; i++) { + dst_buffer[num_elements - remaining_elements + i] = + static_cast((byte << (6 - 2 * i)) & 0xFF) >> 6; + } + } } - auto packed_size = (num_elements + 1) / 2; +} - // Copy the final nibble if the buffer is odd-lengthed - if (num_elements % 2 != 0) { - dst_buffer[packed_size - 1] = src_buffer[num_elements - 1] & 0x0F; +void PackInt8IntoDenseInt(const int8_t* src_buffer, int num_elements, + int bit_width, int8_t* dst_buffer) { + assert(bit_width == 2 || bit_width == 4); + if (bit_width == 4) { + // num_elements means the number of elements regardless of packed or + // unpacked. For example, 3 elements means both + // 1) Unpacked: 3 int8's = 3 bytes. + // stored in src_buffer[0], src_buffer[1] and src_buffer[2] (j = 0..2) + // 2) Packed: 3 int4's = 12 bit -> 16 bits (padded) = 2 bytes. + // stored in dst_buffer[0] and dst_buffer[1] (i = 0..1) + for (int i = 0; i < num_elements / 2; ++i) { + dst_buffer[i] = (src_buffer[2 * i] & 0x0F) | (src_buffer[2 * i + 1] << 4); + } + // If the buffer size is odd, pack the final nibble. + if (num_elements % 2 != 0) { + dst_buffer[num_elements / 2] = src_buffer[num_elements - 1] & 0x0F; + } + } else if (bit_width == 2) { + for (int i = 0; i < num_elements / 4; ++i) { + dst_buffer[i] = (src_buffer[4 * i] & 0x03) | + ((src_buffer[4 * i + 1] & 0x03) << 2) | + ((src_buffer[4 * i + 2] & 0x03) << 4) | + ((src_buffer[4 * i + 3] & 0x03) << 6); + } + // Handle the remaining elements. 
+ int remaining_elements = num_elements % 4; + if (remaining_elements > 0) { + int8_t packed_val = 0; + for (int i = 0; i < remaining_elements; ++i) { + packed_val |= (src_buffer[num_elements - remaining_elements + i] & 0x03) + << (i * 2); + } + dst_buffer[num_elements / 4] = packed_val; + } } } diff --git a/tensorflow/lite/kernels/internal/portable_tensor_utils.h b/tensorflow/lite/kernels/internal/portable_tensor_utils.h index a361a2d0e5d..c70ac94db5f 100644 --- a/tensorflow/lite/kernels/internal/portable_tensor_utils.h +++ b/tensorflow/lite/kernels/internal/portable_tensor_utils.h @@ -618,20 +618,41 @@ void ApplySignbitToVector(const float* __restrict__ vector, int v_size, void UnpackDenseInt4IntoInt8(const int8_t* src_buffer, int num_elements, int8_t* dst_buffer); -// Pack `src_buffer` into a densely packed buffer of int4 values. +// Unpack or inflate `src_buffer` by taking each byte and splitting it into +// multiple elements into `dst_buffer`. Supports 2-bit and 4-bit packed integers. // Parameters: -// src_buffer : Buffer containing int4 values stored in int8 memory. +// src_buffer : Densely packed buffer containing int2 or int4 values. +// num_elements : Number of unpacked elements to be read from the buffer. +// This should be equal to the size of `dst_buffer`. +// bit_width : The bit width of the packed elements (either 2 or 4). +// dst_buffer : Buffer to unpack into. Should be allocated by the caller. +// Size should be at least `num_elements`. +// Notes: +// For 4-bit unpacking: e.g., `src_buffer = {0x12, 0x34};` (num_elements = 4) +// will return `dst_buffer = {0x02, 0x01, 0x04, 0x03}`. +// For 2-bit unpacking: e.g., `src_buffer = {0x12};` (num_elements = 4) +// will return `dst_buffer = {0xFE, 0x00, 0x01, 0x00}` (sign extended). +void UnpackPackedIntToInt8(const int8_t* src_buffer, int num_elements, + int bit_width, int8_t* dst_buffer); + +// Pack `src_buffer` into a densely packed buffer of int2 or int4 values. +// Parameters: +// src_buffer : Buffer containing int2 or int4 values stored in int8 +// memory. // num_elements : Number of elements stored in the buffer. Note that this can // be smaller than the size of `src_buffer` by 1 if it's odd, // in which case the last nibble in `src_buffer` is ignored. // This should be equal to the size of `dst_buffer`. +// bit_width : The bit width of the packed elements (either 2 or 4). // dst_buffer : Buffer to pack into. Should be allocated by the caller. // Size should be at least `num_elements`. // Notes: -// For example, given `src_buffer = {0x02, 0x01, 0x04, 0x03}`, calling this -// function will return `dst_buffer = {0x12, 0x34}`. -void PackInt8IntoDenseInt4(const int8_t* src_buffer, int num_elements, - int8_t* dst_buffer); +// For 4-bit packing: e.g., given `src_buffer = {0x02, 0x01, 0x04, 0x03}`, +// calling this function will return `dst_buffer = {0x12, 0x34}`. +// For 2-bit packing: e.g., given `src_buffer = {0x00, 0x01, 0x00, 0x02}`, +// calling this function will return `dst_buffer = {0x84}`.
+void PackInt8IntoDenseInt(const int8_t* src_buffer, int num_elements, + int bit_width, int8_t* dst_buffer); } // namespace tensor_utils } // namespace tflite diff --git a/tensorflow/lite/python/schema_py_generated.py b/tensorflow/lite/python/schema_py_generated.py index 5fb12737d43..648cfd043af 100755 --- a/tensorflow/lite/python/schema_py_generated.py +++ b/tensorflow/lite/python/schema_py_generated.py @@ -27,6 +27,7 @@ class TensorType(object): UINT16 = 16 INT4 = 17 BFLOAT16 = 18 + INT2 = 19 class QuantizationDetails(object): diff --git a/tensorflow/lite/schema/schema_generated.h b/tensorflow/lite/schema/schema_generated.h index 1f055d2045f..35dd124b367 100755 --- a/tensorflow/lite/schema/schema_generated.h +++ b/tensorflow/lite/schema/schema_generated.h @@ -703,11 +703,12 @@ enum TensorType : int8_t { TensorType_UINT16 = 16, TensorType_INT4 = 17, TensorType_BFLOAT16 = 18, + TensorType_INT2 = 19, TensorType_MIN = TensorType_FLOAT32, - TensorType_MAX = TensorType_BFLOAT16 + TensorType_MAX = TensorType_INT2 }; -inline const TensorType (&EnumValuesTensorType())[19] { +inline const TensorType (&EnumValuesTensorType())[20] { static const TensorType values[] = { TensorType_FLOAT32, TensorType_FLOAT16, @@ -727,13 +728,14 @@ inline const TensorType (&EnumValuesTensorType())[19] { TensorType_UINT32, TensorType_UINT16, TensorType_INT4, - TensorType_BFLOAT16 + TensorType_BFLOAT16, + TensorType_INT2 }; return values; } inline const char * const *EnumNamesTensorType() { - static const char * const names[20] = { + static const char * const names[21] = { "FLOAT32", "FLOAT16", "INT32", @@ -753,13 +755,14 @@ inline const char * const *EnumNamesTensorType() { "UINT16", "INT4", "BFLOAT16", + "INT2", nullptr }; return names; } inline const char *EnumNameTensorType(TensorType e) { - if (::flatbuffers::IsOutRange(e, TensorType_FLOAT32, TensorType_BFLOAT16)) return ""; + if (::flatbuffers::IsOutRange(e, TensorType_FLOAT32, TensorType_INT2)) return ""; const size_t index = static_cast(e); return EnumNamesTensorType()[index]; } diff --git a/tensorflow/lite/tools/visualize.py b/tensorflow/lite/tools/visualize.py index de7ef820079..cd4bcfa7aaf 100644 --- a/tensorflow/lite/tools/visualize.py +++ b/tensorflow/lite/tools/visualize.py @@ -33,7 +33,7 @@ from tflite_micro.tensorflow.lite.python import schema_py_generated as schema_fb else: # This file is part of tflite_runtime package. 
- from tflite_runtime import schema_py_generated as schema_fb + from tflite_micro.tensorflow.lite_runtime import schema_py_generated as schema_fb # A CSS description for making the visualizer _CSS = """ From 22013c4188a23f526884ac9fea5758c3f31560d8 Mon Sep 17 00:00:00 2001 From: Esun Kim Date: Thu, 16 Oct 2025 10:25:29 -0700 Subject: [PATCH 2/4] Manual change from cl/819821231 --- python/tflite_micro/numpy_utils.cc | 3 + tensorflow/lite/micro/tools/layer_by_layer.cc | 3 + .../micro/tools/layer_by_layer_schema.fbs | 1 + .../tools/layer_by_layer_schema_generated.h | 218 +----------------- 4 files changed, 15 insertions(+), 210 deletions(-) diff --git a/python/tflite_micro/numpy_utils.cc b/python/tflite_micro/numpy_utils.cc index 0daabf00347..fb728a18a1e 100644 --- a/python/tflite_micro/numpy_utils.cc +++ b/python/tflite_micro/numpy_utils.cc @@ -58,6 +58,9 @@ int TfLiteTypeToPyArrayType(TfLiteType tf_lite_type) { case kTfLiteInt4: // TODO(b/246806634): NPY_INT4 currently doesn't exist return NPY_BYTE; + case kTfLiteInt2: + // TODO(b/246806634): NPY_INT2 currently doesn't exist + return NPY_BYTE; case kTfLiteInt8: return NPY_INT8; case kTfLiteInt64: diff --git a/tensorflow/lite/micro/tools/layer_by_layer.cc b/tensorflow/lite/micro/tools/layer_by_layer.cc index 91d325e51c7..5964c1f4ad8 100644 --- a/tensorflow/lite/micro/tools/layer_by_layer.cc +++ b/tensorflow/lite/micro/tools/layer_by_layer.cc @@ -120,6 +120,9 @@ TfLiteStatus ConvertTensorType(TfLiteType type, TensorTypes& tensor_type) { case kTfLiteInt4: tensor_type = TensorTypes_INT4; return kTfLiteOk; + case kTfLiteInt2: + tensor_type = TensorTypes_INT2; + return kTfLiteOk; case kTfLiteNoType: MicroPrintf("Unsupported data type %d in tensor\n", tensor_type); return kTfLiteError; diff --git a/tensorflow/lite/micro/tools/layer_by_layer_schema.fbs b/tensorflow/lite/micro/tools/layer_by_layer_schema.fbs index 4183c9cf52c..b788399a839 100644 --- a/tensorflow/lite/micro/tools/layer_by_layer_schema.fbs +++ b/tensorflow/lite/micro/tools/layer_by_layer_schema.fbs @@ -35,6 +35,7 @@ enum TensorTypes : byte { UINT16 = 16, INT4 = 17, BFLOAT16 = 18, + INT2 = 19, } table TensorData { diff --git a/tensorflow/lite/micro/tools/layer_by_layer_schema_generated.h b/tensorflow/lite/micro/tools/layer_by_layer_schema_generated.h index 67a2caa7850..122dff74537 100644 --- a/tensorflow/lite/micro/tools/layer_by_layer_schema_generated.h +++ b/tensorflow/lite/micro/tools/layer_by_layer_schema_generated.h @@ -1,17 +1,3 @@ -/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ // automatically generated by the FlatBuffers compiler, do not modify @@ -29,15 +15,12 @@ static_assert(FLATBUFFERS_VERSION_MAJOR == 23 && struct TensorData; struct TensorDataBuilder; -struct TensorDataT; struct SubgraphData; struct SubgraphDataBuilder; -struct SubgraphDataT; struct ModelTestData; struct ModelTestDataBuilder; -struct ModelTestDataT; enum TensorTypes : int8_t { TensorTypes_FLOAT32 = 0, @@ -59,11 +42,12 @@ enum TensorTypes : int8_t { TensorTypes_UINT16 = 16, TensorTypes_INT4 = 17, TensorTypes_BFLOAT16 = 18, + TensorTypes_INT2 = 19, TensorTypes_MIN = TensorTypes_FLOAT32, - TensorTypes_MAX = TensorTypes_BFLOAT16 + TensorTypes_MAX = TensorTypes_INT2 }; -inline const TensorTypes (&EnumValuesTensorTypes())[19] { +inline const TensorTypes (&EnumValuesTensorTypes())[20] { static const TensorTypes values[] = { TensorTypes_FLOAT32, TensorTypes_FLOAT16, @@ -83,13 +67,14 @@ inline const TensorTypes (&EnumValuesTensorTypes())[19] { TensorTypes_UINT32, TensorTypes_UINT16, TensorTypes_INT4, - TensorTypes_BFLOAT16 + TensorTypes_BFLOAT16, + TensorTypes_INT2 }; return values; } inline const char * const *EnumNamesTensorTypes() { - static const char * const names[20] = { + static const char * const names[21] = { "FLOAT32", "FLOAT16", "INT32", @@ -109,30 +94,19 @@ inline const char * const *EnumNamesTensorTypes() { "UINT16", "INT4", "BFLOAT16", + "INT2", nullptr }; return names; } inline const char *EnumNameTensorTypes(TensorTypes e) { - if (::flatbuffers::IsOutRange(e, TensorTypes_FLOAT32, TensorTypes_BFLOAT16)) return ""; + if (::flatbuffers::IsOutRange(e, TensorTypes_FLOAT32, TensorTypes_INT2)) return ""; const size_t index = static_cast(e); return EnumNamesTensorTypes()[index]; } -struct TensorDataT : public ::flatbuffers::NativeTable { - typedef TensorData TableType; - uint32_t input_index = 0; - uint32_t tensor_index = 0; - std::vector shape{}; - TensorTypes dtype = TensorTypes_FLOAT32; - std::vector data{}; - uint32_t num_bytes = 0; - uint32_t layer_number = 0; -}; - struct TensorData FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { - typedef TensorDataT NativeTableType; typedef TensorDataBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_INPUT_INDEX = 4, @@ -177,9 +151,6 @@ struct TensorData FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { VerifyField(verifier, VT_LAYER_NUMBER, 4) && verifier.EndTable(); } - TensorDataT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(TensorDataT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; - static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const TensorDataT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; struct TensorDataBuilder { @@ -260,20 +231,7 @@ inline ::flatbuffers::Offset CreateTensorDataDirect( layer_number); } -::flatbuffers::Offset CreateTensorData(::flatbuffers::FlatBufferBuilder &_fbb, const TensorDataT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SubgraphDataT : public ::flatbuffers::NativeTable { - typedef SubgraphData TableType; - std::vector> outputs{}; - uint32_t subgraph_index = 0; - SubgraphDataT() = default; - SubgraphDataT(const SubgraphDataT &o); - SubgraphDataT(SubgraphDataT&&) FLATBUFFERS_NOEXCEPT = default; - SubgraphDataT &operator=(SubgraphDataT o) FLATBUFFERS_NOEXCEPT; -}; - struct SubgraphData FLATBUFFERS_FINAL_CLASS : private 
::flatbuffers::Table { - typedef SubgraphDataT NativeTableType; typedef SubgraphDataBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_OUTPUTS = 4, @@ -293,9 +251,6 @@ struct SubgraphData FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { VerifyField(verifier, VT_SUBGRAPH_INDEX, 4) && verifier.EndTable(); } - SubgraphDataT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SubgraphDataT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; - static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SubgraphDataT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; struct SubgraphDataBuilder { @@ -340,20 +295,7 @@ inline ::flatbuffers::Offset CreateSubgraphDataDirect( subgraph_index); } -::flatbuffers::Offset CreateSubgraphData(::flatbuffers::FlatBufferBuilder &_fbb, const SubgraphDataT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ModelTestDataT : public ::flatbuffers::NativeTable { - typedef ModelTestData TableType; - std::vector> input_data{}; - std::vector> subgraph_data{}; - ModelTestDataT() = default; - ModelTestDataT(const ModelTestDataT &o); - ModelTestDataT(ModelTestDataT&&) FLATBUFFERS_NOEXCEPT = default; - ModelTestDataT &operator=(ModelTestDataT o) FLATBUFFERS_NOEXCEPT; -}; - struct ModelTestData FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { - typedef ModelTestDataT NativeTableType; typedef ModelTestDataBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_INPUT_DATA = 4, @@ -375,9 +317,6 @@ struct ModelTestData FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { verifier.VerifyVectorOfTables(subgraph_data()) && verifier.EndTable(); } - ModelTestDataT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ModelTestDataT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; - static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ModelTestDataT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; struct ModelTestDataBuilder { @@ -423,135 +362,6 @@ inline ::flatbuffers::Offset CreateModelTestDataDirect( subgraph_data__); } -::flatbuffers::Offset CreateModelTestData(::flatbuffers::FlatBufferBuilder &_fbb, const ModelTestDataT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); - -inline TensorDataT *TensorData::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { - auto _o = std::unique_ptr(new TensorDataT()); - UnPackTo(_o.get(), _resolver); - return _o.release(); -} - -inline void TensorData::UnPackTo(TensorDataT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = input_index(); _o->input_index = _e; } - { auto _e = tensor_index(); _o->tensor_index = _e; } - { auto _e = shape(); if (_e) { _o->shape.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->shape[_i] = _e->Get(_i); } } else { _o->shape.resize(0); } } - { auto _e = dtype(); _o->dtype = _e; } - { auto _e = data(); if (_e) { _o->data.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->data.begin()); } } - { auto _e = num_bytes(); _o->num_bytes = _e; } - { auto _e = layer_number(); _o->layer_number = _e; } -} - -inline ::flatbuffers::Offset TensorData::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const TensorDataT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) 
{ - return CreateTensorData(_fbb, _o, _rehasher); -} - -inline ::flatbuffers::Offset CreateTensorData(::flatbuffers::FlatBufferBuilder &_fbb, const TensorDataT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const TensorDataT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _input_index = _o->input_index; - auto _tensor_index = _o->tensor_index; - auto _shape = _o->shape.size() ? _fbb.CreateVector(_o->shape) : 0; - auto _dtype = _o->dtype; - auto _data = _o->data.size() ? _fbb.CreateVector(_o->data) : 0; - auto _num_bytes = _o->num_bytes; - auto _layer_number = _o->layer_number; - return CreateTensorData( - _fbb, - _input_index, - _tensor_index, - _shape, - _dtype, - _data, - _num_bytes, - _layer_number); -} - -inline SubgraphDataT::SubgraphDataT(const SubgraphDataT &o) - : subgraph_index(o.subgraph_index) { - outputs.reserve(o.outputs.size()); - for (const auto &outputs_ : o.outputs) { outputs.emplace_back((outputs_) ? new TensorDataT(*outputs_) : nullptr); } -} - -inline SubgraphDataT &SubgraphDataT::operator=(SubgraphDataT o) FLATBUFFERS_NOEXCEPT { - std::swap(outputs, o.outputs); - std::swap(subgraph_index, o.subgraph_index); - return *this; -} - -inline SubgraphDataT *SubgraphData::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { - auto _o = std::unique_ptr(new SubgraphDataT()); - UnPackTo(_o.get(), _resolver); - return _o.release(); -} - -inline void SubgraphData::UnPackTo(SubgraphDataT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = outputs(); if (_e) { _o->outputs.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->outputs[_i]) { _e->Get(_i)->UnPackTo(_o->outputs[_i].get(), _resolver); } else { _o->outputs[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->outputs.resize(0); } } - { auto _e = subgraph_index(); _o->subgraph_index = _e; } -} - -inline ::flatbuffers::Offset SubgraphData::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SubgraphDataT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { - return CreateSubgraphData(_fbb, _o, _rehasher); -} - -inline ::flatbuffers::Offset CreateSubgraphData(::flatbuffers::FlatBufferBuilder &_fbb, const SubgraphDataT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SubgraphDataT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _outputs = _o->outputs.size() ? _fbb.CreateVector<::flatbuffers::Offset> (_o->outputs.size(), [](size_t i, _VectorArgs *__va) { return CreateTensorData(*__va->__fbb, __va->__o->outputs[i].get(), __va->__rehasher); }, &_va ) : 0; - auto _subgraph_index = _o->subgraph_index; - return CreateSubgraphData( - _fbb, - _outputs, - _subgraph_index); -} - -inline ModelTestDataT::ModelTestDataT(const ModelTestDataT &o) { - input_data.reserve(o.input_data.size()); - for (const auto &input_data_ : o.input_data) { input_data.emplace_back((input_data_) ? new TensorDataT(*input_data_) : nullptr); } - subgraph_data.reserve(o.subgraph_data.size()); - for (const auto &subgraph_data_ : o.subgraph_data) { subgraph_data.emplace_back((subgraph_data_) ? 
new SubgraphDataT(*subgraph_data_) : nullptr); } -} - -inline ModelTestDataT &ModelTestDataT::operator=(ModelTestDataT o) FLATBUFFERS_NOEXCEPT { - std::swap(input_data, o.input_data); - std::swap(subgraph_data, o.subgraph_data); - return *this; -} - -inline ModelTestDataT *ModelTestData::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { - auto _o = std::unique_ptr(new ModelTestDataT()); - UnPackTo(_o.get(), _resolver); - return _o.release(); -} - -inline void ModelTestData::UnPackTo(ModelTestDataT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = input_data(); if (_e) { _o->input_data.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->input_data[_i]) { _e->Get(_i)->UnPackTo(_o->input_data[_i].get(), _resolver); } else { _o->input_data[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->input_data.resize(0); } } - { auto _e = subgraph_data(); if (_e) { _o->subgraph_data.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->subgraph_data[_i]) { _e->Get(_i)->UnPackTo(_o->subgraph_data[_i].get(), _resolver); } else { _o->subgraph_data[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->subgraph_data.resize(0); } } -} - -inline ::flatbuffers::Offset ModelTestData::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ModelTestDataT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { - return CreateModelTestData(_fbb, _o, _rehasher); -} - -inline ::flatbuffers::Offset CreateModelTestData(::flatbuffers::FlatBufferBuilder &_fbb, const ModelTestDataT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ModelTestDataT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _input_data = _o->input_data.size() ? _fbb.CreateVector<::flatbuffers::Offset> (_o->input_data.size(), [](size_t i, _VectorArgs *__va) { return CreateTensorData(*__va->__fbb, __va->__o->input_data[i].get(), __va->__rehasher); }, &_va ) : 0; - auto _subgraph_data = _o->subgraph_data.size() ? 
_fbb.CreateVector<::flatbuffers::Offset> (_o->subgraph_data.size(), [](size_t i, _VectorArgs *__va) { return CreateSubgraphData(*__va->__fbb, __va->__o->subgraph_data[i].get(), __va->__rehasher); }, &_va ) : 0; - return CreateModelTestData( - _fbb, - _input_data, - _subgraph_data); -} - inline const ModelTestData *GetModelTestData(const void *buf) { return ::flatbuffers::GetRoot(buf); } @@ -582,16 +392,4 @@ inline void FinishSizePrefixedModelTestDataBuffer( fbb.FinishSizePrefixed(root); } -inline std::unique_ptr UnPackModelTestData( - const void *buf, - const ::flatbuffers::resolver_function_t *res = nullptr) { - return std::unique_ptr(GetModelTestData(buf)->UnPack(res)); -} - -inline std::unique_ptr UnPackSizePrefixedModelTestData( - const void *buf, - const ::flatbuffers::resolver_function_t *res = nullptr) { - return std::unique_ptr(GetSizePrefixedModelTestData(buf)->UnPack(res)); -} - #endif // FLATBUFFERS_GENERATED_LAYERBYLAYERSCHEMA_H_ From f3a56bd076d905880c85f5ee5fe6106fab8bd1f9 Mon Sep 17 00:00:00 2001 From: Esun Kim Date: Thu, 16 Oct 2025 11:22:26 -0700 Subject: [PATCH 3/4] Fix --- tensorflow/lite/micro/tools/layer_by_layer.cc | 2 +- .../tools/layer_by_layer_schema_generated.h | 202 ++++++++++++++++++ 2 files changed, 203 insertions(+), 1 deletion(-) diff --git a/tensorflow/lite/micro/tools/layer_by_layer.cc b/tensorflow/lite/micro/tools/layer_by_layer.cc index 5964c1f4ad8..b72517523fa 100644 --- a/tensorflow/lite/micro/tools/layer_by_layer.cc +++ b/tensorflow/lite/micro/tools/layer_by_layer.cc @@ -120,7 +120,7 @@ TfLiteStatus ConvertTensorType(TfLiteType type, TensorTypes& tensor_type) { case kTfLiteInt4: tensor_type = TensorTypes_INT4; return kTfLiteOk; - case kTfLiteInt2: + case kTfLiteInt2: tensor_type = TensorTypes_INT2; return kTfLiteOk; case kTfLiteNoType: diff --git a/tensorflow/lite/micro/tools/layer_by_layer_schema_generated.h b/tensorflow/lite/micro/tools/layer_by_layer_schema_generated.h index 122dff74537..b68d0e89228 100644 --- a/tensorflow/lite/micro/tools/layer_by_layer_schema_generated.h +++ b/tensorflow/lite/micro/tools/layer_by_layer_schema_generated.h @@ -1,3 +1,14 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ // automatically generated by the FlatBuffers compiler, do not modify @@ -15,12 +26,15 @@ static_assert(FLATBUFFERS_VERSION_MAJOR == 23 && struct TensorData; struct TensorDataBuilder; +struct TensorDataT; struct SubgraphData; struct SubgraphDataBuilder; +struct SubgraphDataT; struct ModelTestData; struct ModelTestDataBuilder; +struct ModelTestDataT; enum TensorTypes : int8_t { TensorTypes_FLOAT32 = 0, @@ -106,7 +120,19 @@ inline const char *EnumNameTensorTypes(TensorTypes e) { return EnumNamesTensorTypes()[index]; } +struct TensorDataT : public ::flatbuffers::NativeTable { + typedef TensorData TableType; + uint32_t input_index = 0; + uint32_t tensor_index = 0; + std::vector shape{}; + TensorTypes dtype = TensorTypes_FLOAT32; + std::vector data{}; + uint32_t num_bytes = 0; + uint32_t layer_number = 0; +}; + struct TensorData FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef TensorDataT NativeTableType; typedef TensorDataBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_INPUT_INDEX = 4, @@ -151,6 +177,9 @@ struct TensorData FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { VerifyField(verifier, VT_LAYER_NUMBER, 4) && verifier.EndTable(); } + TensorDataT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(TensorDataT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const TensorDataT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; struct TensorDataBuilder { @@ -231,7 +260,20 @@ inline ::flatbuffers::Offset CreateTensorDataDirect( layer_number); } +::flatbuffers::Offset CreateTensorData(::flatbuffers::FlatBufferBuilder &_fbb, const TensorDataT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SubgraphDataT : public ::flatbuffers::NativeTable { + typedef SubgraphData TableType; + std::vector> outputs{}; + uint32_t subgraph_index = 0; + SubgraphDataT() = default; + SubgraphDataT(const SubgraphDataT &o); + SubgraphDataT(SubgraphDataT&&) FLATBUFFERS_NOEXCEPT = default; + SubgraphDataT &operator=(SubgraphDataT o) FLATBUFFERS_NOEXCEPT; +}; + struct SubgraphData FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef SubgraphDataT NativeTableType; typedef SubgraphDataBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_OUTPUTS = 4, @@ -251,6 +293,9 @@ struct SubgraphData FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { VerifyField(verifier, VT_SUBGRAPH_INDEX, 4) && verifier.EndTable(); } + SubgraphDataT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SubgraphDataT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SubgraphDataT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; struct SubgraphDataBuilder { @@ -295,7 +340,20 @@ inline ::flatbuffers::Offset CreateSubgraphDataDirect( subgraph_index); } +::flatbuffers::Offset CreateSubgraphData(::flatbuffers::FlatBufferBuilder &_fbb, const SubgraphDataT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ModelTestDataT : public ::flatbuffers::NativeTable { + typedef ModelTestData TableType; + std::vector> input_data{}; + std::vector> subgraph_data{}; + ModelTestDataT() = 
default; + ModelTestDataT(const ModelTestDataT &o); + ModelTestDataT(ModelTestDataT&&) FLATBUFFERS_NOEXCEPT = default; + ModelTestDataT &operator=(ModelTestDataT o) FLATBUFFERS_NOEXCEPT; +}; + struct ModelTestData FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef ModelTestDataT NativeTableType; typedef ModelTestDataBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_INPUT_DATA = 4, @@ -317,6 +375,9 @@ struct ModelTestData FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { verifier.VerifyVectorOfTables(subgraph_data()) && verifier.EndTable(); } + ModelTestDataT *UnPack(const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ModelTestDataT *_o, const ::flatbuffers::resolver_function_t *_resolver = nullptr) const; + static ::flatbuffers::Offset Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ModelTestDataT* _o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); }; struct ModelTestDataBuilder { @@ -362,6 +423,135 @@ inline ::flatbuffers::Offset CreateModelTestDataDirect( subgraph_data__); } +::flatbuffers::Offset CreateModelTestData(::flatbuffers::FlatBufferBuilder &_fbb, const ModelTestDataT *_o, const ::flatbuffers::rehasher_function_t *_rehasher = nullptr); + +inline TensorDataT *TensorData::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new TensorDataT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void TensorData::UnPackTo(TensorDataT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = input_index(); _o->input_index = _e; } + { auto _e = tensor_index(); _o->tensor_index = _e; } + { auto _e = shape(); if (_e) { _o->shape.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->shape[_i] = _e->Get(_i); } } else { _o->shape.resize(0); } } + { auto _e = dtype(); _o->dtype = _e; } + { auto _e = data(); if (_e) { _o->data.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->data.begin()); } } + { auto _e = num_bytes(); _o->num_bytes = _e; } + { auto _e = layer_number(); _o->layer_number = _e; } +} + +inline ::flatbuffers::Offset TensorData::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const TensorDataT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateTensorData(_fbb, _o, _rehasher); +} + +inline ::flatbuffers::Offset CreateTensorData(::flatbuffers::FlatBufferBuilder &_fbb, const TensorDataT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const TensorDataT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _input_index = _o->input_index; + auto _tensor_index = _o->tensor_index; + auto _shape = _o->shape.size() ? _fbb.CreateVector(_o->shape) : 0; + auto _dtype = _o->dtype; + auto _data = _o->data.size() ? _fbb.CreateVector(_o->data) : 0; + auto _num_bytes = _o->num_bytes; + auto _layer_number = _o->layer_number; + return CreateTensorData( + _fbb, + _input_index, + _tensor_index, + _shape, + _dtype, + _data, + _num_bytes, + _layer_number); +} + +inline SubgraphDataT::SubgraphDataT(const SubgraphDataT &o) + : subgraph_index(o.subgraph_index) { + outputs.reserve(o.outputs.size()); + for (const auto &outputs_ : o.outputs) { outputs.emplace_back((outputs_) ? 
new TensorDataT(*outputs_) : nullptr); } +} + +inline SubgraphDataT &SubgraphDataT::operator=(SubgraphDataT o) FLATBUFFERS_NOEXCEPT { + std::swap(outputs, o.outputs); + std::swap(subgraph_index, o.subgraph_index); + return *this; +} + +inline SubgraphDataT *SubgraphData::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SubgraphDataT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void SubgraphData::UnPackTo(SubgraphDataT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = outputs(); if (_e) { _o->outputs.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->outputs[_i]) { _e->Get(_i)->UnPackTo(_o->outputs[_i].get(), _resolver); } else { _o->outputs[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->outputs.resize(0); } } + { auto _e = subgraph_index(); _o->subgraph_index = _e; } +} + +inline ::flatbuffers::Offset SubgraphData::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const SubgraphDataT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateSubgraphData(_fbb, _o, _rehasher); +} + +inline ::flatbuffers::Offset CreateSubgraphData(::flatbuffers::FlatBufferBuilder &_fbb, const SubgraphDataT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const SubgraphDataT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _outputs = _o->outputs.size() ? _fbb.CreateVector<::flatbuffers::Offset> (_o->outputs.size(), [](size_t i, _VectorArgs *__va) { return CreateTensorData(*__va->__fbb, __va->__o->outputs[i].get(), __va->__rehasher); }, &_va ) : 0; + auto _subgraph_index = _o->subgraph_index; + return CreateSubgraphData( + _fbb, + _outputs, + _subgraph_index); +} + +inline ModelTestDataT::ModelTestDataT(const ModelTestDataT &o) { + input_data.reserve(o.input_data.size()); + for (const auto &input_data_ : o.input_data) { input_data.emplace_back((input_data_) ? new TensorDataT(*input_data_) : nullptr); } + subgraph_data.reserve(o.subgraph_data.size()); + for (const auto &subgraph_data_ : o.subgraph_data) { subgraph_data.emplace_back((subgraph_data_) ? 
new SubgraphDataT(*subgraph_data_) : nullptr); } +} + +inline ModelTestDataT &ModelTestDataT::operator=(ModelTestDataT o) FLATBUFFERS_NOEXCEPT { + std::swap(input_data, o.input_data); + std::swap(subgraph_data, o.subgraph_data); + return *this; +} + +inline ModelTestDataT *ModelTestData::UnPack(const ::flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new ModelTestDataT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void ModelTestData::UnPackTo(ModelTestDataT *_o, const ::flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = input_data(); if (_e) { _o->input_data.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->input_data[_i]) { _e->Get(_i)->UnPackTo(_o->input_data[_i].get(), _resolver); } else { _o->input_data[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->input_data.resize(0); } } + { auto _e = subgraph_data(); if (_e) { _o->subgraph_data.resize(_e->size()); for (::flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->subgraph_data[_i]) { _e->Get(_i)->UnPackTo(_o->subgraph_data[_i].get(), _resolver); } else { _o->subgraph_data[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); }; } } else { _o->subgraph_data.resize(0); } } +} + +inline ::flatbuffers::Offset ModelTestData::Pack(::flatbuffers::FlatBufferBuilder &_fbb, const ModelTestDataT* _o, const ::flatbuffers::rehasher_function_t *_rehasher) { + return CreateModelTestData(_fbb, _o, _rehasher); +} + +inline ::flatbuffers::Offset CreateModelTestData(::flatbuffers::FlatBufferBuilder &_fbb, const ModelTestDataT *_o, const ::flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { ::flatbuffers::FlatBufferBuilder *__fbb; const ModelTestDataT* __o; const ::flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _input_data = _o->input_data.size() ? _fbb.CreateVector<::flatbuffers::Offset> (_o->input_data.size(), [](size_t i, _VectorArgs *__va) { return CreateTensorData(*__va->__fbb, __va->__o->input_data[i].get(), __va->__rehasher); }, &_va ) : 0; + auto _subgraph_data = _o->subgraph_data.size() ? 
_fbb.CreateVector<::flatbuffers::Offset> (_o->subgraph_data.size(), [](size_t i, _VectorArgs *__va) { return CreateSubgraphData(*__va->__fbb, __va->__o->subgraph_data[i].get(), __va->__rehasher); }, &_va ) : 0; + return CreateModelTestData( + _fbb, + _input_data, + _subgraph_data); +} + inline const ModelTestData *GetModelTestData(const void *buf) { return ::flatbuffers::GetRoot(buf); } @@ -392,4 +582,16 @@ inline void FinishSizePrefixedModelTestDataBuffer( fbb.FinishSizePrefixed(root); } +inline std::unique_ptr UnPackModelTestData( + const void *buf, + const ::flatbuffers::resolver_function_t *res = nullptr) { + return std::unique_ptr(GetModelTestData(buf)->UnPack(res)); +} + +inline std::unique_ptr UnPackSizePrefixedModelTestData( + const void *buf, + const ::flatbuffers::resolver_function_t *res = nullptr) { + return std::unique_ptr(GetSizePrefixedModelTestData(buf)->UnPack(res)); +} + #endif // FLATBUFFERS_GENERATED_LAYERBYLAYERSCHEMA_H_ From 0ea35a1663163ade23f2f242d3cf333a74f9fb7f Mon Sep 17 00:00:00 2001 From: Esun Kim Date: Thu, 16 Oct 2025 13:26:23 -0700 Subject: [PATCH 4/4] Copyright --- tensorflow/lite/micro/tools/layer_by_layer_schema_generated.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tensorflow/lite/micro/tools/layer_by_layer_schema_generated.h b/tensorflow/lite/micro/tools/layer_by_layer_schema_generated.h index b68d0e89228..25f101bc183 100644 --- a/tensorflow/lite/micro/tools/layer_by_layer_schema_generated.h +++ b/tensorflow/lite/micro/tools/layer_by_layer_schema_generated.h @@ -1,8 +1,11 @@ /* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.