diff --git a/tensorflow_lite_support/cc/task/vision/BUILD b/tensorflow_lite_support/cc/task/vision/BUILD index ab6d7b948..c8c24b898 100644 --- a/tensorflow_lite_support/cc/task/vision/BUILD +++ b/tensorflow_lite_support/cc/task/vision/BUILD @@ -173,3 +173,31 @@ cc_library_with_tflite( "@org_tensorflow//tensorflow/lite/core/api:op_resolver", ], ) + +# IMPORTANT: in order to use hardware acceleration delegates, configurable through the +# `compute_settings` field of the ImageClassifierOptions, you must additionally link to +# the appropriate delegate plugin target (e.g. `gpu_plugin` for GPU) from: +# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/experimental/acceleration/configuration/BUILD +# To use EDGETPU_CORAL, link to `edgetpu_coral_plugin` from: +# https://github.com/tensorflow/tflite-support/blob/a58a4f9225c411fa9ba29f821523e6e283988d23/tensorflow_lite_support/acceleration/configuration/BUILD#L11 +cc_library_with_tflite( + name = "image_transformer", + srcs = ["image_transformer.cc"], + hdrs = ["image_transformer.h"], + tflite_deps = [ + "@org_tensorflow//tensorflow/lite/core/shims:builtin_ops", + "//tensorflow_lite_support/cc/task/core:task_api_factory", + "//tensorflow_lite_support/cc/task/vision/core:base_vision_task_api", + "//tensorflow_lite_support/cc/task/processor:image_postprocessor", + ], + deps = [ + "//tensorflow_lite_support/cc/port:integral_types", + "//tensorflow_lite_support/cc/port:status_macros", + "//tensorflow_lite_support/cc/port:statusor", + "//tensorflow_lite_support/cc/task/core:external_file_handler", + "//tensorflow_lite_support/cc/task/vision/proto:image_transformer_options_proto_inc", + "@com_google_absl//absl/strings:str_format", + "@flatbuffers", + "@org_tensorflow//tensorflow/lite/core/api", + ], +) diff --git a/tensorflow_lite_support/cc/task/vision/image_transformer.cc b/tensorflow_lite_support/cc/task/vision/image_transformer.cc new file mode 100644 index 000000000..33b7b2d74 --- /dev/null +++ 
b/tensorflow_lite_support/cc/task/vision/image_transformer.cc @@ -0,0 +1,123 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow_lite_support/cc/task/vision/image_transformer.h" + +#include "external/com_google_absl/absl/strings/str_format.h" +#include "external/com_google_absl/absl/strings/string_view.h" +#include "flatbuffers/flatbuffers.h" // from @flatbuffers +#include "tensorflow_lite_support/cc/port/status_macros.h" +#include "tensorflow_lite_support/cc/task/core/task_api_factory.h" + +namespace tflite { +namespace task { +namespace vision { + +namespace { + +using ::absl::StatusCode; +using ::tflite::support::CreateStatusWithPayload; +using ::tflite::support::StatusOr; +using ::tflite::support::TfLiteSupportStatus; +using ::tflite::task::core::AssertAndReturnTypedTensor; +using ::tflite::task::core::TaskAPIFactory; +using ::tflite::task::core::TfLiteEngine; +using ::tflite::task::vision::FrameBuffer; +} // namespace + +/* static */ +StatusOr> ImageTransformer::CreateFromOptions( + const ImageTransformerOptions& options, + std::unique_ptr resolver) { + RETURN_IF_ERROR(SanityCheckOptions(options)); + + // Copy options to ensure the ExternalFile outlives the constructed object. 
+ auto options_copy = absl::make_unique(options); + + std::unique_ptr image_transformer; + + ASSIGN_OR_RETURN(image_transformer, + TaskAPIFactory::CreateFromBaseOptions( + &options_copy->base_options(), std::move(resolver))); + + RETURN_IF_ERROR(image_transformer->Init(std::move(options_copy))); + return image_transformer; +} + +/* static */ +absl::Status ImageTransformer::SanityCheckOptions( + const ImageTransformerOptions& options) { + // Nothing to do. + return absl::OkStatus(); +} + +absl::Status ImageTransformer::Init( + std::unique_ptr options) { + // Set options. + options_ = std::move(options); + + // Perform pre-initialization actions (by default, sets the process engine for + // image pre-processing to kLibyuv as a sane default). + RETURN_IF_ERROR(PreInit()); + + // Sanity check and set inputs and outputs. + RETURN_IF_ERROR(CheckAndSetInputs()); + RETURN_IF_ERROR(CheckAndSetOutputs()); + + RETURN_IF_ERROR(PostInit()); + + ASSIGN_OR_RETURN(postprocessor_, processor::ImagePostprocessor::Create( + GetTfLiteEngine(), {0}, {0})); + + return absl::OkStatus(); +} + +absl::Status ImageTransformer::PreInit() { + SetProcessEngine(FrameBufferUtils::ProcessEngine::kLibyuv); + return absl::OkStatus(); +} + +absl::Status ImageTransformer::PostInit() { + // Nothing to do. + return absl::OkStatus(); +} + +absl::Status ImageTransformer::CheckAndSetOutputs() { + // Nothing to do. 
+ return absl::OkStatus(); +} + +StatusOr ImageTransformer::Transform( + const FrameBuffer& frame_buffer) { + BoundingBox roi; + roi.set_width(frame_buffer.dimension().width); + roi.set_height(frame_buffer.dimension().height); + return Transform(frame_buffer, roi); +} + +StatusOr ImageTransformer::Transform( + const FrameBuffer& frame_buffer, const BoundingBox& roi) { + return InferWithFallback(frame_buffer, roi); +} + +StatusOr ImageTransformer::Postprocess( + const std::vector& /*output_tensors*/, + const FrameBuffer& /*frame_buffer*/, const BoundingBox& /*roi*/) { + ASSIGN_OR_RETURN(auto postprocessed_output, postprocessor_->Postprocess()); + return postprocessed_output; +} +} // namespace vision +} // namespace task +} // namespace tflite diff --git a/tensorflow_lite_support/cc/task/vision/image_transformer.h b/tensorflow_lite_support/cc/task/vision/image_transformer.h new file mode 100644 index 000000000..5339f95b8 --- /dev/null +++ b/tensorflow_lite_support/cc/task/vision/image_transformer.h @@ -0,0 +1,138 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_LITE_SUPPORT_CC_TASK_VISION_IMAGE_TRANSFORMER_H_ +#define TENSORFLOW_LITE_SUPPORT_CC_TASK_VISION_IMAGE_TRANSFORMER_H_ + +#include +#include + +#include "tensorflow/lite/core/api/op_resolver.h" +#include "tensorflow/lite/core/shims/cc/kernels/register.h" +#include "tensorflow_lite_support/cc/port/statusor.h" +#include "tensorflow_lite_support/cc/task/core/external_file_handler.h" +#include "tensorflow_lite_support/cc/task/vision/core/base_vision_task_api.h" +#include "tensorflow_lite_support/cc/task/vision/proto/image_transformer_options_proto_inc.h" +#include "tensorflow_lite_support/cc/task/processor/image_postprocessor.h" + +namespace tflite { +namespace task { +namespace vision { + +// Performs transformation on images. +// +// The API expects a TFLite model with optional, but strongly recommended, +// TFLite Model Metadata. +// +// Input tensor: +// (kTfLiteUInt8/kTfLiteFloat32) +// - image input of size `[batch x height x width x channels]`. +// - batch inference is not supported (`batch` is required to be 1). +// - only RGB inputs are supported (`channels` is required to be 3). +// - if type is kTfLiteFloat32, NormalizationOptions are required to be +// attached to the metadata for input normalization. +// At least one output tensor with: +// (kTfLiteUInt8/kTfLiteFloat32) +// - `N `classes and either 2 or 4 dimensions, i.e. `[1 x N]` or +// `[1 x 1 x 1 x N]` +// - optional (but recommended) label map(s) as AssociatedFile-s with type +// TENSOR_AXIS_LABELS, containing one label per line. The first such +// AssociatedFile (if any) is used to fill the `class_name` field of the +// results. The `display_name` field is filled from the AssociatedFile (if +// any) whose locale matches the `display_names_locale` field of the +// `ImageTransformerOptions` used at creation time ("en" by default, i.e. +// English). 
If none of these are available, only the `index` field of the
+// results will be filled.
+//
+// An example of such model can be found at:
+// https://tfhub.dev/captain-pool/esrgan-tf2/1
+//
+// A CLI demo tool is available for easily trying out this API, and provides
+// example usage. See:
+// examples/task/vision/desktop/image_classifier_demo.cc
+class ImageTransformer : public BaseVisionTaskApi {
+ public:
+  using BaseVisionTaskApi::BaseVisionTaskApi;
+
+  // Creates an ImageTransformer from the provided options. A non-default
+  // OpResolver can be specified in order to support custom Ops or specify a
+  // subset of built-in Ops.
+  static tflite::support::StatusOr<std::unique_ptr<ImageTransformer>>
+  CreateFromOptions(
+      const ImageTransformerOptions& options,
+      std::unique_ptr<tflite::OpResolver> resolver =
+          absl::make_unique<tflite_shims::ops::builtin::BuiltinOpResolver>());
+
+  // Performs actual transformation on the provided FrameBuffer.
+  //
+  // The FrameBuffer can be of any size and any of the supported formats, i.e.
+  // RGBA, RGB, NV12, NV21, YV12, YV21. It is automatically pre-processed before
+  // inference in order to (and in this order):
+  // - resize it (with bilinear interpolation, aspect-ratio *not* preserved) to
+  //   the dimensions of the model input tensor,
+  // - convert it to the colorspace of the input tensor (i.e. RGB, which is the
+  //   only supported colorspace for now),
+  // - rotate it according to its `Orientation` so that inference is performed
+  //   on an "upright" image.
+  tflite::support::StatusOr<FrameBuffer> Transform(
+      const FrameBuffer& frame_buffer);
+
+  // Same as above, except that the transformation is performed based on the
+  // input region of interest. Cropping according to this region of interest is
+  // prepended to the pre-processing operations.
+  //
+  // IMPORTANT: as a consequence of cropping occurring first, the provided
+  // region of interest is expressed in the unrotated frame of reference
+  // coordinates system, i.e.
in `[0, frame_buffer.width) x [0, + // frame_buffer.height)`, which are the dimensions of the underlying + // `frame_buffer` data before any `Orientation` flag gets applied. Also, the + // region of interest is not clamped, so this method will return a non-ok + // status if the region is out of these bounds. + tflite::support::StatusOr Transform( + const FrameBuffer& frame_buffer, const BoundingBox& roi); + + protected: + // The options used to build this ImageTransformer. + std::unique_ptr options_; + + // Post-processing to transform the raw model outputs into image results. + tflite::support::StatusOr Postprocess( + const std::vector& output_tensors, + const FrameBuffer& frame_buffer, const BoundingBox& roi) override; + + // Performs sanity checks on the provided ImageTransformerOptions. + static absl::Status SanityCheckOptions(const ImageTransformerOptions& options); + + // Initializes the ImageTransformer from the provided ImageTransformerOptions, + // whose ownership is transferred to this object. + absl::Status Init(std::unique_ptr options); + + // Performs pre-initialization actions. + virtual absl::Status PreInit(); + // Performs post-initialization actions. + virtual absl::Status PostInit(); + + private: + // Performs sanity checks on the model outputs and extracts their metadata. + absl::Status CheckAndSetOutputs(); + + std::unique_ptr postprocessor_; +}; + +} // namespace vision +} // namespace task +} // namespace tflite + +#endif // TENSORFLOW_LITE_SUPPORT_CC_TASK_VISION_IMAGE_TRANSFORMER_H_ diff --git a/tensorflow_lite_support/cc/task/vision/proto/BUILD b/tensorflow_lite_support/cc/task/vision/proto/BUILD index 16ea0cd3c..b089366fd 100644 --- a/tensorflow_lite_support/cc/task/vision/proto/BUILD +++ b/tensorflow_lite_support/cc/task/vision/proto/BUILD @@ -242,3 +242,28 @@ cc_library( hdrs = ["embeddings_proto_inc.h"], deps = [":embeddings_cc_proto"], ) + +# ImageTransformer protos. 
+
+proto_library(
+    name = "image_transformer_options_proto",
+    srcs = ["image_transformer_options.proto"],
+    deps = [
+        "//tensorflow_lite_support/cc/task/core/proto:base_options_proto",
+    ],
+)
+
+cc_proto_library(
+    name = "image_transformer_options_cc_proto",
+    deps = [
+        ":image_transformer_options_proto",
+    ],
+)
+
+cc_library(
+    name = "image_transformer_options_proto_inc",
+    hdrs = ["image_transformer_options_proto_inc.h"],
+    deps = [
+        ":image_transformer_options_cc_proto",
+    ],
+)
diff --git a/tensorflow_lite_support/cc/task/vision/proto/image_transformer_options.proto b/tensorflow_lite_support/cc/task/vision/proto/image_transformer_options.proto
new file mode 100644
index 000000000..246980641
--- /dev/null
+++ b/tensorflow_lite_support/cc/task/vision/proto/image_transformer_options.proto
@@ -0,0 +1,28 @@
+/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+syntax = "proto2";
+
+package tflite.task.vision;
+
+import "tensorflow_lite_support/cc/task/core/proto/base_options.proto";
+
+// Options for setting up an ImageTransformer.
+// Next Id: 2.
+message ImageTransformerOptions {
+  // Base options for configuring Task library, such as specifying the TfLite
+  // model file with metadata, accelerator options, etc.
+ optional tflite.task.core.BaseOptions base_options = 1; +} diff --git a/tensorflow_lite_support/cc/task/vision/proto/image_transformer_options_proto_inc.h b/tensorflow_lite_support/cc/task/vision/proto/image_transformer_options_proto_inc.h new file mode 100644 index 000000000..828e1cd3a --- /dev/null +++ b/tensorflow_lite_support/cc/task/vision/proto/image_transformer_options_proto_inc.h @@ -0,0 +1,23 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_LITE_SUPPORT_CC_TASK_VISION_PROTO_IMAGE_TRANSFORMER_OPTIONS_PROTO_INC_H_ +#define TENSORFLOW_LITE_SUPPORT_CC_TASK_VISION_PROTO_IMAGE_TRANSFORMER_OPTIONS_PROTO_INC_H_ + +#include "tensorflow_lite_support/cc/task/core/proto/base_options_proto_inc.h" +#include "tensorflow_lite_support/cc/task/core/proto/external_file_proto_inc.h" + +#include "tensorflow_lite_support/cc/task/vision/proto/image_transformer_options.pb.h" +#endif // TENSORFLOW_LITE_SUPPORT_CC_TASK_VISION_PROTO_IMAGE_TRANSFORMER_OPTIONS_PROTO_INC_H_ diff --git a/tensorflow_lite_support/cc/test/task/vision/BUILD b/tensorflow_lite_support/cc/test/task/vision/BUILD index 24a4b9108..438064199 100644 --- a/tensorflow_lite_support/cc/test/task/vision/BUILD +++ b/tensorflow_lite_support/cc/test/task/vision/BUILD @@ -173,3 +173,22 @@ cc_test_with_tflite( "@org_tensorflow//tensorflow/lite/kernels:builtin_ops", ], ) + +cc_test_with_tflite( + name = "image_transformer_test", + srcs = ["image_transformer_test.cc"], + data = [ + "//tensorflow_lite_support/cc/test/testdata/task/vision:test_images", + "//tensorflow_lite_support/cc/test/testdata/task/vision:test_models", + ], + deps = [ + "//tensorflow_lite_support/cc/task/vision:image_transformer", + "@org_tensorflow//tensorflow/lite/core/shims:cc_shims_test_util", + "//tensorflow_lite_support/cc/port:gtest_main", + "//tensorflow_lite_support/cc/task/vision/utils:frame_buffer_common_utils", + "//tensorflow_lite_support/cc/task/vision/utils:frame_buffer_utils", + "//tensorflow_lite_support/cc/test:test_utils", + "//tensorflow_lite_support/examples/task/vision/desktop/utils:image_utils", + "@com_google_absl//absl/flags:flag", + ], +) diff --git a/tensorflow_lite_support/cc/test/task/vision/image_transformer_test.cc b/tensorflow_lite_support/cc/test/task/vision/image_transformer_test.cc new file mode 100644 index 000000000..1cf8a288d --- /dev/null +++ 
b/tensorflow_lite_support/cc/test/task/vision/image_transformer_test.cc @@ -0,0 +1,145 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow_lite_support/cc/task/vision/image_transformer.h" + +#include + +#include "absl/status/status.h" // from @com_google_absl +#include "tensorflow/lite/core/shims/cc/shims_test_util.h" +#include "tensorflow_lite_support/cc/port/gmock.h" +#include "tensorflow_lite_support/cc/port/gtest.h" +#include "tensorflow_lite_support/cc/port/status_matchers.h" +#include "tensorflow_lite_support/cc/task/core/task_utils.h" +#include "tensorflow_lite_support/cc/task/vision/utils/frame_buffer_common_utils.h" +#include "tensorflow_lite_support/cc/test/test_utils.h" +#include "tensorflow_lite_support/examples/task/vision/desktop/utils/image_utils.h" + +namespace tflite { +namespace task { +namespace vision { +namespace { + +using ::tflite::support::StatusOr; +using ::tflite::task::JoinPath; +using ::tflite::task::core::TfLiteEngine; + + +constexpr char kTestDataDirectory[] = + "/tensorflow_lite_support/cc/test/testdata/task/" + "vision/"; + +constexpr char kESRGANModelWithInputAndOutputMetaData[] = "esrgan_with_input_and_output_metadata.tflite"; +constexpr char kESRGANModelWithInputMetaData[] = "esrgan_with_input_metadata.tflite"; + +StatusOr LoadImage(std::string image_name) { + return 
DecodeImageFromFile(JoinPath("./" /*test src dir*/, + kTestDataDirectory, image_name)); +} + +class PostprocessorTest : public tflite_shims::testing::Test {}; + +TEST_F(PostprocessorTest, FloatSucceedsWithFullMetadata) { + SUPPORT_ASSERT_OK_AND_ASSIGN(ImageData rgb_image, LoadImage("husky_downsampled.jpg")); + + std::unique_ptr frame_buffer = CreateFromRgbRawBuffer( + rgb_image.pixel_data, + FrameBuffer::Dimension{rgb_image.width, rgb_image.height}); + ImageTransformerOptions options; + options.mutable_base_options()->mutable_model_file()->set_file_name( + JoinPath("./" /*test src dir*/, kTestDataDirectory, + kESRGANModelWithInputAndOutputMetaData)); + SUPPORT_ASSERT_OK_AND_ASSIGN(std::unique_ptr image_transformer, + ImageTransformer::CreateFromOptions(options)); + + StatusOr result_or = + image_transformer->Transform(*frame_buffer); + ImageDataFree(&rgb_image); + SUPPORT_ASSERT_OK(result_or); +} + +TEST_F(PostprocessorTest, FloatSucceedsWithPartialMetadata) { + SUPPORT_ASSERT_OK_AND_ASSIGN(ImageData rgb_image, LoadImage("husky_downsampled.jpg")); + + std::unique_ptr frame_buffer = CreateFromRgbRawBuffer( + rgb_image.pixel_data, + FrameBuffer::Dimension{rgb_image.width, rgb_image.height}); + ImageTransformerOptions options; + options.mutable_base_options()->mutable_model_file()->set_file_name( + JoinPath("./" /*test src dir*/, kTestDataDirectory, + kESRGANModelWithInputMetaData)); + + SUPPORT_ASSERT_OK_AND_ASSIGN(std::unique_ptr image_transformer, + ImageTransformer::CreateFromOptions(options)); + + StatusOr result_or = + image_transformer->Transform(*frame_buffer); + ImageDataFree(&rgb_image); + SUPPORT_ASSERT_OK(result_or); +} + +class SuperResolutionTest : public tflite_shims::testing::Test {}; + +// Calculate the peak signal-to-noise ratio. +// Original code: https://www.geeksforgeeks.org/python-peak-signal-to-noise-ratio-psnr/. 
+double PSNR(const FrameBuffer& enhancedImage, const FrameBuffer& testImage) {
+  int imageSize = testImage.dimension().width * testImage.dimension().height;
+  const uint8* enhancedImagePtr = enhancedImage.plane(0).buffer;
+  const uint8* testImagePtr = testImage.plane(0).buffer;
+  double mse = 0.0;
+  for (int i = 0; i < imageSize; ++i, ++enhancedImagePtr, ++testImagePtr) {
+    mse += std::pow(static_cast<double>(*enhancedImagePtr) - static_cast<double>(*testImagePtr), 2);
+  }
+  mse /= imageSize;
+
+  // Zero MSE means no noise is present in the signal.
+  double psnr = mse == 0 ? 100.0 : 20 * std::log10(255.0 / std::sqrt(mse));
+
+  return psnr;
+}
+
+// Use a bi-cubically downsampled image as input to the model and compare
+// the model output with the original image.
+TEST_F(SuperResolutionTest, GoldenImageComparisonTest) {
+  SUPPORT_ASSERT_OK_AND_ASSIGN(ImageData husky_downsampled, LoadImage("husky_downsampled.jpg"));
+  SUPPORT_ASSERT_OK_AND_ASSIGN(ImageData husky_original, LoadImage("husky_original.jpg"));
+
+  std::unique_ptr<FrameBuffer> husky_downsampled_buffer = CreateFromRgbRawBuffer(
+      husky_downsampled.pixel_data,
+      FrameBuffer::Dimension{husky_downsampled.width, husky_downsampled.height});
+
+  std::unique_ptr<FrameBuffer> husky_original_buffer = CreateFromRgbRawBuffer(
+      husky_original.pixel_data,
+      FrameBuffer::Dimension{husky_original.width, husky_original.height});
+
+  ImageTransformerOptions options;
+  options.mutable_base_options()->mutable_model_file()->set_file_name(
+      JoinPath("./" /*test src dir*/, kTestDataDirectory,
+               kESRGANModelWithInputAndOutputMetaData));
+  SUPPORT_ASSERT_OK_AND_ASSIGN(std::unique_ptr<ImageTransformer> image_transformer,
+                       ImageTransformer::CreateFromOptions(options));
+
+  StatusOr<FrameBuffer> result_or =
+      image_transformer->Transform(*husky_downsampled_buffer);
+  SUPPORT_ASSERT_OK(result_or);
+  EXPECT_DOUBLE_EQ(PSNR(result_or.value(), *husky_original_buffer), 25.073790631326489);
+  ImageDataFree(&husky_downsampled);
+  ImageDataFree(&husky_original);
+}
+
+}  // namespace
+}  // namespace vision
+} // namespace task +} // namespace tflite diff --git a/tensorflow_lite_support/cc/test/testdata/task/vision/esrgan_with_input_and_output_metadata.tflite b/tensorflow_lite_support/cc/test/testdata/task/vision/esrgan_with_input_and_output_metadata.tflite new file mode 100644 index 000000000..2b2d79b36 Binary files /dev/null and b/tensorflow_lite_support/cc/test/testdata/task/vision/esrgan_with_input_and_output_metadata.tflite differ diff --git a/tensorflow_lite_support/cc/test/testdata/task/vision/esrgan_with_input_metadata.tflite b/tensorflow_lite_support/cc/test/testdata/task/vision/esrgan_with_input_metadata.tflite new file mode 100644 index 000000000..4999e85fc Binary files /dev/null and b/tensorflow_lite_support/cc/test/testdata/task/vision/esrgan_with_input_metadata.tflite differ diff --git a/tensorflow_lite_support/cc/test/testdata/task/vision/husky_downsampled.jpg b/tensorflow_lite_support/cc/test/testdata/task/vision/husky_downsampled.jpg new file mode 100644 index 000000000..99bfd2049 Binary files /dev/null and b/tensorflow_lite_support/cc/test/testdata/task/vision/husky_downsampled.jpg differ diff --git a/tensorflow_lite_support/cc/test/testdata/task/vision/husky_original.jpg b/tensorflow_lite_support/cc/test/testdata/task/vision/husky_original.jpg new file mode 100644 index 000000000..c9f636b9d Binary files /dev/null and b/tensorflow_lite_support/cc/test/testdata/task/vision/husky_original.jpg differ