diff --git a/tensorflow/lite/micro/kernels/arc_mli/mli_interface.h b/tensorflow/lite/micro/kernels/arc_mli/mli_interface.h
index e260774570f..b4087f3b87b 100644
--- a/tensorflow/lite/micro/kernels/arc_mli/mli_interface.h
+++ b/tensorflow/lite/micro/kernels/arc_mli/mli_interface.h
@@ -33,7 +33,7 @@ class MliTensorInterface {
  public:
   // Make sure that lifetime of MliTensorInterface instance isn't bigger than
   // related mli_tensor.
-  MliTensorInterface(mli_tensor* tensor) : tensor_(tensor) {};
+  MliTensorInterface(mli_tensor* tensor) : tensor_(tensor){};
   MliTensorInterface() = default;
   ~MliTensorInterface() = default;
 
diff --git a/tensorflow/lite/micro/kernels/reduce_common.cc b/tensorflow/lite/micro/kernels/reduce_common.cc
index cdc95d89424..8aeb529d757 100644
--- a/tensorflow/lite/micro/kernels/reduce_common.cc
+++ b/tensorflow/lite/micro/kernels/reduce_common.cc
@@ -120,7 +120,7 @@ enum MinMaxEvalType { kEvalMin, kEvalMax };
 template <typename T>
 struct MinMaxReducerCompare {
   MinMaxReducerCompare() = delete;
-  MinMaxReducerCompare(MinMaxEvalType evalType) : type_(evalType) {};
+  MinMaxReducerCompare(MinMaxEvalType evalType) : type_(evalType){};
 
   constexpr T initialValue() const {
     return (type_ == kEvalMin) ? std::numeric_limits<T>::max()
diff --git a/tensorflow/lite/micro/micro_allocator.cc b/tensorflow/lite/micro/micro_allocator.cc
index 8c8df8cd067..b6a482863e5 100644
--- a/tensorflow/lite/micro/micro_allocator.cc
+++ b/tensorflow/lite/micro/micro_allocator.cc
@@ -230,6 +230,19 @@ TfLiteStatus InitializeTfLiteTensorFromFlatbuffer(
     TfLiteTensor* result) {
   TFLITE_DCHECK(result != nullptr);
 
+  // Validate buffer index before any dereference of the buffers vector.
+  if (buffers == nullptr) {
+    MicroPrintf("Model buffers vector is null\n");
+    return kTfLiteError;
+  }
+  const uint32_t buffer_index = flatbuffer_tensor.buffer();
+  if (buffer_index >= buffers->size()) {
+    MicroPrintf(
+        "Tensor references invalid buffer index %u, model has only %u buffers\n",
+        buffer_index, buffers->size());
+    return kTfLiteError;
+  }
+
   *result = {};
   // Make sure the serialized type is one we know how to deal with, and convert
   // it from a flatbuffer enum into a constant used by the kernel C API.
@@ -347,6 +360,20 @@ TfLiteStatus InitializeTfLiteEvalTensorFromFlatbuffer(
     const flatbuffers::Vector<flatbuffers::Offset<Buffer>>* buffers,
     TfLiteEvalTensor* result) {
   *result = {};
+
+  // Validate buffer index before any dereference of the buffers vector.
+  if (buffers == nullptr) {
+    MicroPrintf("Model buffers vector is null\n");
+    return kTfLiteError;
+  }
+  const uint32_t buffer_index = flatbuffer_tensor.buffer();
+  if (buffer_index >= buffers->size()) {
+    MicroPrintf(
+        "Tensor references invalid buffer index %u, model has only %u buffers\n",
+        buffer_index, buffers->size());
+    return kTfLiteError;
+  }
+
   // Make sure the serialized type is one we know how to deal with, and convert
   // it from a flatbuffer enum into a constant used by the kernel C API.
   TF_LITE_ENSURE_STATUS(
@@ -1107,11 +1134,43 @@ TfLiteStatus MicroAllocator::PopulateTfLiteTensorFromFlatbuffer(
   // TODO(b/162311891): This method serves as a stub to ensure quantized
   // allocations in the tail can be recorded. Once the interpreter has APIs for
   // accessing buffers on TfLiteEvalTensor this method can be dropped.
+
+  // Validate subgraph and tensor indices before dereferencing FlatBuffer
+  // vectors to avoid out-of-bounds access.
+  if (subgraph_idx < 0 ||
+      static_cast<size_t>(subgraph_idx) >= model->subgraphs()->size()) {
+    MicroPrintf("Invalid subgraph index %d, model has only %u subgraphs\n",
+                subgraph_idx, model->subgraphs()->size());
+    return kTfLiteError;
+  }
+  const SubGraph* subgraph = model->subgraphs()->Get(subgraph_idx);
+  if (subgraph == nullptr || subgraph->tensors() == nullptr) {
+    MicroPrintf("Subgraph %d has no tensors vector\n", subgraph_idx);
+    return kTfLiteError;
+  }
+  if (tensor_index < 0 ||
+      static_cast<size_t>(tensor_index) >= subgraph->tensors()->size()) {
+    MicroPrintf(
+        "Invalid tensor index %d for subgraph %d, subgraph has only %u tensors\n",
+        tensor_index, subgraph_idx, subgraph->tensors()->size());
+    return kTfLiteError;
+  }
+
+  const tflite::Tensor* flatbuffer_tensor =
+      subgraph->tensors()->Get(tensor_index);
+
+  // Buffer index validation is performed in
+  // InitializeTfLiteTensorFromFlatbuffer but we also ensure model->buffers() is
+  // non-null here for completeness.
+  if (model->buffers() == nullptr) {
+    MicroPrintf("Model buffers vector is null\n");
+    return kTfLiteError;
+  }
+
+  // Populate the TfLiteTensor fields from the flatbuffer.
   return internal::InitializeTfLiteTensorFromFlatbuffer(
       persistent_buffer_allocator_, non_persistent_buffer_allocator_,
-      allocate_temp,
-      *model->subgraphs()->Get(subgraph_idx)->tensors()->Get(tensor_index),
-      model->buffers(), tensor);
+      allocate_temp, *flatbuffer_tensor, model->buffers(), tensor);
 }
 
 TfLiteStatus MicroAllocator::CommitStaticMemoryPlan(
diff --git a/tensorflow/lite/micro/micro_allocator.h b/tensorflow/lite/micro/micro_allocator.h
index 215bffc6a8c..af688e865dc 100644
--- a/tensorflow/lite/micro/micro_allocator.h
+++ b/tensorflow/lite/micro/micro_allocator.h
@@ -49,6 +49,13 @@ TfLiteStatus InitializeTfLiteTensorFromFlatbuffer(
     const flatbuffers::Vector<flatbuffers::Offset<Buffer>>* buffers,
     TfLiteTensor* result);
 
+// Initializes a TfLiteEvalTensor from a flatbuffer tensor and buffers vector.
+// This is the eval-phase counterpart to InitializeTfLiteTensorFromFlatbuffer.
+TfLiteStatus InitializeTfLiteEvalTensorFromFlatbuffer(
+    const tflite::Tensor& flatbuffer_tensor,
+    const flatbuffers::Vector<flatbuffers::Offset<Buffer>>* buffers,
+    TfLiteEvalTensor* result);
+
 // Holds placeholder information for a scratch buffer request from a kernel.
 // This struct is only used during the model prepare stage. Each request from a
 // kernel is stored in the head section. During the prepare stage, the head
diff --git a/tensorflow/lite/micro/micro_allocator_test.cc b/tensorflow/lite/micro/micro_allocator_test.cc
index 9e19e271daf..2ed08f98ad0 100644
--- a/tensorflow/lite/micro/micro_allocator_test.cc
+++ b/tensorflow/lite/micro/micro_allocator_test.cc
@@ -25,6 +25,7 @@ limitations under the License.
 #include "tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.h"
 #include "tensorflow/lite/micro/micro_allocator.h"
 #include "tensorflow/lite/micro/micro_arena_constants.h"
+#include "tensorflow/lite/micro/micro_interpreter.h"
 #include "tensorflow/lite/micro/test_helpers.h"
 #include "tensorflow/lite/micro/testing/micro_test.h"
 #include "tensorflow/lite/micro/testing/test_conv_model.h"
@@ -1384,4 +1385,101 @@ TF_LITE_MICRO_TEST(TestMultiSubgraphNumScratchAllocations) {
       used_bytes + sizeof(tflite::ScratchBufferHandle) * 2);
 }
 
+// New tests validating invalid buffer index guards in tensor initialization
+// and model allocation paths.
+
+TF_LITE_MICRO_TEST(TestInitializeTensorInvalidBufferIndex) {
+  // Arena and allocator for temporary/persistent allocations used by init.
+  constexpr size_t arena_size = 1024;
+  uint8_t arena[arena_size];
+  tflite::SingleArenaBufferAllocator* simple_allocator =
+      tflite::SingleArenaBufferAllocator::Create(arena, arena_size);
+
+  // Build a flatbuffer Tensor that references a non-existent buffer index.
+  flatbuffers::FlatBufferBuilder builder;
+  const int32_t dims_data[] = {1};
+  auto dims_vec = builder.CreateVector(dims_data, 1);
+  // Use an invalid buffer index (>= buffers->size()).
+  const uint32_t kInvalidBufferIndex = 5;
+  auto name_str = builder.CreateString("invalid_buffer_tensor");
+  auto tensor_offset = tflite::CreateTensor(
+      builder, /*shape=*/dims_vec, tflite::TensorType_INT32,
+      /*buffer=*/kInvalidBufferIndex, /*name=*/name_str,
+      /*quantization=*/0, /*is_variable=*/false, /*sparsity=*/0);
+  builder.Finish(tensor_offset);
+  const tflite::Tensor* bad_tensor =
+      flatbuffers::GetRoot<tflite::Tensor>(builder.GetBufferPointer());
+
+  // Create a buffers vector with a single empty buffer.
+  const flatbuffers::Vector<flatbuffers::Offset<tflite::Buffer>>* buffers =
+      tflite::testing::CreateFlatbufferBuffers();
+
+  TfLiteTensor out_tensor;
+  TfLiteEvalTensor out_eval_tensor;
+
+  // Expect kTfLiteError due to invalid buffer index.
+  TF_LITE_MICRO_EXPECT_EQ(
+      kTfLiteError,
+      tflite::internal::InitializeTfLiteTensorFromFlatbuffer(
+          simple_allocator, simple_allocator, /*allocate_temp=*/false,
+          *bad_tensor, buffers, &out_tensor));
+  TF_LITE_MICRO_EXPECT_EQ(
+      kTfLiteError, tflite::internal::InitializeTfLiteEvalTensorFromFlatbuffer(
+                        *bad_tensor, buffers, &out_eval_tensor));
+}
+
+TF_LITE_MICRO_TEST(TestModelAllocationSubgraphInvalidBufferIndex) {
+  // Build a minimal model with a single tensor that references an invalid
+  // buffer index. Make it an output tensor to mimic output-table invalid case
+  // but the allocator will validate all tensors equally, so this also
+  // simulates intermediate cases.
+  flatbuffers::FlatBufferBuilder fbb;
+
+  // One empty buffer at index 0 in Model.buffers().
+  flatbuffers::Offset<tflite::Buffer> buffers_arr[1] = {
+      tflite::CreateBuffer(fbb)};
+  auto buffers_fb = fbb.CreateVector(buffers_arr, 1);
+
+  // Tensor with invalid buffer index.
+  const int32_t dims_data[] = {1};
+  auto dims_vec = fbb.CreateVector(dims_data, 1);
+  const uint32_t kInvalidBufferIndex = 3;  // >= buffers_vec.size()
+  auto t_name = fbb.CreateString("out_tensor_invalid_buf");
+  auto tensor = tflite::CreateTensor(
+      fbb, dims_vec, tflite::TensorType_INT32, kInvalidBufferIndex, t_name,
+      /*quantization=*/0, /*is_variable=*/false, /*sparsity=*/0);
+  auto tensors_vec = fbb.CreateVector(&tensor, 1);
+
+  // Subgraph with the tensor as an output; no operators required.
+  const int32_t outputs_idx[] = {0};
+  auto outputs = fbb.CreateVector(outputs_idx, 1);
+  auto inputs = fbb.CreateVector<int32_t>({});
+  auto ops = fbb.CreateVector<flatbuffers::Offset<tflite::Operator>>({});
+  auto subgraph = tflite::CreateSubGraph(fbb, tensors_vec, inputs, outputs, ops,
+                                         fbb.CreateString("sg0"));
+  auto subgraphs = fbb.CreateVector(&subgraph, 1);
+
+  // Minimal model (no operator codes needed as there are no operators).
+  auto model =
+      tflite::CreateModel(fbb, /*version=*/TFLITE_SCHEMA_VERSION,
+                          /*operator_codes=*/0, subgraphs,
+                          fbb.CreateString("invalid_buf_model"), buffers_fb);
+  tflite::FinishModelBuffer(fbb, model);
+  const tflite::Model* m =
+      flatbuffers::GetRoot<tflite::Model>(fbb.GetBufferPointer());
+
+  // Allocate an arena and create the allocator.
+  constexpr size_t arena_size = 2048;
+  uint8_t arena[arena_size];
+  tflite::MicroAllocator* allocator =
+      tflite::MicroAllocator::Create(arena, arena_size);
+  TF_LITE_MICRO_EXPECT(nullptr != allocator);
+
+  // StartModelAllocation should fail (return nullptr) because initializing
+  // any eval tensor with an invalid buffer index returns kTfLiteError.
+  tflite::SubgraphAllocations* subgraph_allocations =
+      allocator->StartModelAllocation(m);
+  TF_LITE_MICRO_EXPECT(nullptr == subgraph_allocations);
+}
+
 TF_LITE_MICRO_TESTS_END