
Commit b95b47b

Merge branch 'main' into reduce-min
2 parents ea84530 + 97166cf


50 files changed: 1794 additions & 153 deletions

.github/mergify.yml

Lines changed: 3 additions & 0 deletions
@@ -1,3 +1,6 @@
+merge_queue:
+  max_parallel_checks: 1
+
 queue_rules:
   - name: default
     checks_timeout: 2 h

python/tflite_micro/BUILD

Lines changed: 4 additions & 0 deletions
@@ -130,6 +130,7 @@ py_package(
     # in the tflm tree.
     packages = [
         "python.tflite_micro",
+        "tensorflow.lite.micro.compression",
         "tensorflow.lite.micro.tools.generate_test_for_model",
         "tensorflow.lite.python",
         "tensorflow.lite.tools.flatbuffer_utils",

@@ -138,6 +139,7 @@ py_package(
         ":postinstall_check",
         ":runtime",
         ":version",
+        "//tensorflow/lite/micro/compression",
     ],
 )

@@ -223,8 +225,10 @@ py_wheel(
         ":local": "py3",
     }),
     requires = [
+        "bitarray",
        "flatbuffers",
        "numpy",
+        "pyyaml",
        "tensorflow",
     ],
     stamp = 1,  # 1 == always stamp

python/tflite_micro/__init__.py

Lines changed: 4 additions & 1 deletion
@@ -24,5 +24,8 @@
 # Unambiguously identify the source used to build the package.
 from tflite_micro.python.tflite_micro._version import __version__

-# Ordered after `runtime` to avoid a circular dependency
+# Provide a convenient alias for the compression module
+from tflite_micro.tensorflow.lite.micro import compression
+
+# Ordered after `runtime` and `compression` to avoid circular dependencies
 from tflite_micro.python.tflite_micro import postinstall_check
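As a usage note, here is a minimal sketch of what the new alias enables once the wheel is installed. The snippet is not part of the commit; the package and attribute names are taken from the diff above.

    # Minimal sketch: after installing the wheel, the compression tooling is
    # reachable from the package root thanks to the alias added in __init__.py.
    import tflite_micro

    # Equivalent to `from tflite_micro.tensorflow.lite.micro import compression`.
    compression = tflite_micro.compression
    print(callable(compression.compress))  # Expected: True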

python/tflite_micro/postinstall_check.py

Lines changed: 27 additions & 1 deletion
@@ -19,13 +19,17 @@
 # in the Python installation environment rather than to locations in the tflm
 # source tree.
 from tflite_micro import runtime
+from tflite_micro import compression

 import numpy as np
 import pkg_resources
 import sys
+import tempfile
+import os


-def passed():
+def runtime_test():
+  """Test the runtime interpreter functionality."""
   # Create an interpreter with a sine model
   model = pkg_resources.resource_filename(__name__, "sine_float.tflite")
   interpreter = runtime.Interpreter.from_file(model)

@@ -49,5 +53,27 @@ def infer(x):
   return np.allclose(outputs, goldens, atol=0.05)


+def compression_test():
+  """Test that the compression module is available and functional."""
+
+  # Test that compress function is available
+  # We don't actually compress here as it requires a properly structured model
+  # with compressible tensors, but we verify the function is importable
+  assert callable(compression.compress)
+
+  # Test availability of the SpecBuilder
+  _ = (compression.SpecBuilder().add_tensor(
+      subgraph=0, tensor=0).with_lut(index_bitwidth=4).build())
+
+  return True
+
+
+def passed():
+  """Run all postinstall checks."""
+  runtime_passed = runtime_test()
+  compression_passed = compression_test()
+  return runtime_passed and compression_passed
+
+
 if __name__ == "__main__":
   sys.exit(0 if passed() else 1)
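The check above only verifies that the entry points are importable. For illustration, a hedged sketch of how the same two entry points might be combined on a real model; the exact `compress()` signature, the spec format, and the file names are assumptions, not confirmed by this diff.

    # Hedged sketch (not part of the commit): compress one tensor of an
    # existing .tflite model with a 4-bit look-up table. The assumption that
    # compress() takes (model_bytes, spec) and returns compressed model bytes
    # is inferred, not taken from this diff.
    from tflite_micro import compression

    spec = (compression.SpecBuilder()
            .add_tensor(subgraph=0, tensor=0)
            .with_lut(index_bitwidth=4)
            .build())

    with open("model.tflite", "rb") as f:  # hypothetical input model
        model_bytes = f.read()

    compressed = compression.compress(model_bytes, spec)

    with open("model.compressed.tflite", "wb") as f:  # hypothetical output
        f.write(compressed)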

python/tflite_micro/pypi_build.sh

Lines changed: 5 additions & 3 deletions
@@ -101,13 +101,15 @@ docker run \
 }

 # Build the wheel via bazel, using the Python compatibility tag matching the
-# build environment.
+# build environment. Enable compression support for the official package.
 call_bazel build //python/tflite_micro:whl.dist \
-    --//python/tflite_micro:compatibility_tag=\$PY_COMPATIBILITY
+    --//python/tflite_micro:compatibility_tag=\$PY_COMPATIBILITY \
+    --//:with_compression=true

 # Test, in the container environment.
 call_bazel test //python/tflite_micro:whl_test \
-    --//python/tflite_micro:compatibility_tag=\$PY_COMPATIBILITY
+    --//python/tflite_micro:compatibility_tag=\$PY_COMPATIBILITY \
+    --//:with_compression=true
 EOF

 # Make the output directory tree writable so it can be removed easily by the

python/tflite_micro/python_ops_resolver.cc

Lines changed: 1 addition & 0 deletions
@@ -105,6 +105,7 @@ PythonOpsResolver::PythonOpsResolver() {
   AddReshape();
   AddResizeBilinear();
   AddResizeNearestNeighbor();
+  AddReverseV2();
   AddRfft();
   AddRound();
   AddRsqrt();

tensorflow/lite/core/c/common.cc

Lines changed: 44 additions & 31 deletions
@@ -104,6 +104,41 @@ void TfLiteVarArrayFree(T* a) {

 #ifndef TF_LITE_STATIC_MEMORY

+TfLiteSparsity TfLiteSparsityClone(const TfLiteSparsity& src) {
+  TfLiteSparsity dst = src;
+  dst.traversal_order = TfLiteIntArrayCopy(src.traversal_order);
+  dst.block_map = TfLiteIntArrayCopy(src.block_map);
+  if (src.dim_metadata) {
+    dst.dim_metadata = reinterpret_cast<TfLiteDimensionMetadata*>(
+        calloc(1, sizeof(TfLiteDimensionMetadata) * src.dim_metadata_size));
+    for (int i = 0; i < src.dim_metadata_size; ++i) {
+      dst.dim_metadata[i] = src.dim_metadata[i];
+      dst.dim_metadata[i].array_segments =
+          TfLiteIntArrayCopy(src.dim_metadata[i].array_segments);
+      dst.dim_metadata[i].array_indices =
+          TfLiteIntArrayCopy(src.dim_metadata[i].array_indices);
+    }
+  }
+  return dst;
+}
+
+// Clones the source sparsity to a newly allocated object.
+TfLiteSparsity* TfLiteSparsityClone(const TfLiteSparsity* const src) {
+  if (!src) {
+    return nullptr;
+  }
+  TfLiteSparsity* dst =
+      reinterpret_cast<TfLiteSparsity*>(calloc(1, sizeof(TfLiteSparsity)));
+  *dst = TfLiteSparsityClone(*src);
+  return dst;
+}
+
+#endif  // TF_LITE_STATIC_MEMORY
+
+}  // namespace
+
+#ifndef TF_LITE_STATIC_MEMORY
+
 TfLiteQuantization TfLiteQuantizationClone(const TfLiteQuantization& src) {
   TfLiteQuantization dst;
   dst.type = src.type;

@@ -136,39 +171,8 @@ TfLiteQuantization TfLiteQuantizationClone(const TfLiteQuantization& src) {
   return dst;
 }

-TfLiteSparsity TfLiteSparsityClone(const TfLiteSparsity& src) {
-  TfLiteSparsity dst = src;
-  dst.traversal_order = TfLiteIntArrayCopy(src.traversal_order);
-  dst.block_map = TfLiteIntArrayCopy(src.block_map);
-  if (src.dim_metadata) {
-    dst.dim_metadata = reinterpret_cast<TfLiteDimensionMetadata*>(
-        calloc(1, sizeof(TfLiteDimensionMetadata) * src.dim_metadata_size));
-    for (int i = 0; i < src.dim_metadata_size; ++i) {
-      dst.dim_metadata[i] = src.dim_metadata[i];
-      dst.dim_metadata[i].array_segments =
-          TfLiteIntArrayCopy(src.dim_metadata[i].array_segments);
-      dst.dim_metadata[i].array_indices =
-          TfLiteIntArrayCopy(src.dim_metadata[i].array_indices);
-    }
-  }
-  return dst;
-}
-
-// Clones the source sparsity to a newly allocated object.
-TfLiteSparsity* TfLiteSparsityClone(const TfLiteSparsity* const src) {
-  if (!src) {
-    return nullptr;
-  }
-  TfLiteSparsity* dst =
-      reinterpret_cast<TfLiteSparsity*>(calloc(1, sizeof(TfLiteSparsity)));
-  *dst = TfLiteSparsityClone(*src);
-  return dst;
-}
-
 #endif  // TF_LITE_STATIC_MEMORY

-}  // namespace
-
 extern "C" {

 size_t TfLiteIntArrayGetSizeInBytes(int size) {

@@ -247,6 +251,11 @@ void TfLiteQuantizationFree(TfLiteQuantization* quantization) {
     }
     free(q_params);
   }
+  if (quantization->type == kTfLiteBlockwiseQuantization) {
+    TfLiteBlockwiseQuantization* q_params =
+        reinterpret_cast<TfLiteBlockwiseQuantization*>(quantization->params);
+    free(q_params);
+  }
   quantization->params = nullptr;
   quantization->type = kTfLiteNoQuantization;
 }

@@ -640,4 +649,8 @@ TfLiteRunStep TfLiteTensorGetShapeKnownStep(const TfLiteTensor* t) {
   return kTfLiteRunStepUnknown;
 }

+// Returns a sentinel value to be used as the user_data field of a TfLiteNode
+// when the kernel initialization fails.
+void* TfLiteKernelInitFailed() { return reinterpret_cast<void*>(-1); }
+
 }  // extern "C"

tensorflow/lite/core/c/common.h

Lines changed: 13 additions & 0 deletions
@@ -788,6 +788,7 @@ TfLiteStatus TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor);
 /// If all dimensions are known, this is the same as `t->dims`.
 /// (`dims_signature` is NULL or empty if all dimensions are known.)
 const TfLiteIntArray* TfLiteTensorGetDimsSignature(const TfLiteTensor* t);
+
 #endif  // TF_LITE_STATIC_MEMORY

 /// WARNING: This is an experimental interface that is subject to change.

@@ -1160,6 +1161,11 @@ typedef struct TfLiteRegistration {
   /// NOTE: if the data is already in the desired format, simply implement this
   /// function to return `nullptr` and implement the free function to be a
   /// no-op.
+  ///
+  /// NOTE: For a Delegate kernel, returns `TfLiteKernelInitFailed()` if it
+  /// fails on the initialization. This eventually causes user's API call to
+  /// InterpreterBuilder::operator() or Interpreter::ModifyGraphWithDelegate()
+  /// to return an error.
   void* (*init)(TfLiteContext* context, const char* buffer, size_t length);

   /// The pointer `buffer` is the data previously returned by an init

@@ -1498,6 +1504,10 @@ TfLiteRunStep TfLiteTensorGetDataKnownStep(const TfLiteTensor* t);
 /// operations.
 TfLiteRunStep TfLiteTensorGetShapeKnownStep(const TfLiteTensor* t);

+/// Returns a sentinel value to be used as the user_data field of a TfLiteNode
+/// when the kernel initialization fails.
+void* TfLiteKernelInitFailed();
+
 /** @} */
 // Ends `\addtogroup`, it's important for the doc generator that this doesn't
 // include the CC code below.

@@ -1633,5 +1643,8 @@ TfLiteStatus TfLiteTensorVariantRealloc(TfLiteTensor* t,
   return kTfLiteOk;
 }

+// Returns a copy of the quantization parameters of the tensor.
+TfLiteQuantization TfLiteQuantizationClone(const TfLiteQuantization& src);
+
 #endif  // __cplusplus
 #endif  // TENSORFLOW_LITE_CORE_C_COMMON_H_
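Since the header now documents the failure path for delegate kernels, here is a hedged sketch of an `init` callback that uses the new sentinel. Only the callback signature and `TfLiteKernelInitFailed()` come from this diff; `ExampleDelegateKernel` and its `Setup()` method are hypothetical.

    #include <new>

    #include "tensorflow/lite/core/c/common.h"

    // Hypothetical per-node state for an illustrative delegate kernel.
    struct ExampleDelegateKernel {
      bool Setup(const char* buffer, size_t length) {
        // Hypothetical check: reject empty init buffers.
        return buffer != nullptr && length > 0;
      }
    };

    // `init` callback matching TfLiteRegistration::init. Returning the
    // sentinel on failure eventually makes InterpreterBuilder::operator() or
    // Interpreter::ModifyGraphWithDelegate() return an error to the caller.
    void* ExampleDelegateKernelInit(TfLiteContext* context, const char* buffer,
                                    size_t length) {
      auto* kernel = new (std::nothrow) ExampleDelegateKernel();
      if (kernel == nullptr || !kernel->Setup(buffer, length)) {
        delete kernel;
        return TfLiteKernelInitFailed();
      }
      return kernel;
    }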

tensorflow/lite/kernels/internal/portable_tensor_utils.h

Lines changed: 1 addition & 0 deletions
@@ -170,6 +170,7 @@ inline void BatchQuantizeFloats(const float* float_data_ptr, int n_batch,
       tensor_utils::SymmetricQuantizeFloats(
           float_data_ptr + offset, n_data, quantized_data_ptr + offset,
           &unused_min, &unused_max, &scaling_factors[b]);
+      if (zero_points) zero_points[b] = 0;
     }
   }
 }

tensorflow/lite/kernels/internal/reference/batch_matmul.h

Lines changed: 1 addition & 0 deletions
@@ -21,6 +21,7 @@ limitations under the License.
 #include "tensorflow/lite/kernels/internal/common.h"
 #include "tensorflow/lite/kernels/internal/compatibility.h"
 #include "tensorflow/lite/kernels/internal/portable_tensor_utils.h"
+#include "tensorflow/lite/kernels/internal/runtime_shape.h"
 #include "tensorflow/lite/kernels/internal/types.h"

 namespace tflite {
