@@ -99,6 +99,18 @@ inline void Div(const ArithmeticParams& params,
   DivElementwise(flat_size, params, input1_data, input2_data, output_data);
 }
 
+inline void Div(const ArithmeticParams& params,
+                const RuntimeShape& input1_shape, const int16_t* input1_data,
+                const RuntimeShape& input2_shape, const int16_t* input2_data,
+                const RuntimeShape& output_shape, int16_t* output_data) {
+  TFLITE_DCHECK_LE(params.quantized_activation_min,
+                   params.quantized_activation_max);
+  const int flat_size =
+      MatchingElementsSize(input1_shape, input2_shape, output_shape);
+
+  DivElementwise(flat_size, params, input1_data, input2_data, output_data);
+}
+
 template <typename T, int N = 5>
 inline void BroadcastDivSlowQuantized(
     const ArithmeticParams& params, const RuntimeShape& unextended_input1_shape,
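The overload added above is the straight element-wise path for int16 tensors: it checks the activation range, requires the three shapes to match, and hands the flat buffer to DivElementwise, mirroring the existing uint8/int8 overload. A minimal calling sketch follows; it assumes the ArithmeticParams fields from tensorflow/lite/kernels/internal/types.h, the function name and the quantization values are illustrative placeholders, and a real kernel would derive output_multiplier/output_shift from the tensor scales in its Prepare step rather than hard-code them.

```cpp
#include <cstdint>
#include <limits>

#include "tensorflow/lite/kernels/internal/reference/div.h"
#include "tensorflow/lite/kernels/internal/types.h"

// Illustrative sketch only: the quantization parameters below are
// placeholders, not values computed from real tensor scales.
void Int16DivSketch() {
  const tflite::RuntimeShape shape({1, 2, 2, 1});
  const int16_t input1[4] = {120, -240, 360, -480};
  const int16_t input2[4] = {12, 12, 12, 12};
  int16_t output[4] = {};

  tflite::ArithmeticParams op_params;
  // int16 tensors in TFLite are symmetrically quantized, so all offsets are 0.
  op_params.input1_offset = 0;
  op_params.input2_offset = 0;
  op_params.output_offset = 0;
  // Placeholder rescale; a real kernel computes these from
  // input1_scale / (input2_scale * output_scale).
  op_params.output_multiplier = 1 << 30;
  op_params.output_shift = 0;
  op_params.quantized_activation_min = std::numeric_limits<int16_t>::min();
  op_params.quantized_activation_max = std::numeric_limits<int16_t>::max();

  // Resolves to the int16_t overload added in this change.
  tflite::reference_ops::Div(op_params, shape, input1, shape, input2, shape,
                             output);
}
```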
@@ -177,6 +189,19 @@ inline void BroadcastDivSlow(const ArithmeticParams& params,
       input2_data, unextended_output_shape, output_data);
 }
 
+template <int N = 5>
+inline void BroadcastDivSlow(const ArithmeticParams& params,
+                             const RuntimeShape& unextended_input1_shape,
+                             const int16_t* input1_data,
+                             const RuntimeShape& unextended_input2_shape,
+                             const int16_t* input2_data,
+                             const RuntimeShape& unextended_output_shape,
+                             int16_t* output_data) {
+  BroadcastDivSlowQuantized<int16_t, N>(
+      params, unextended_input1_shape, input1_data, unextended_input2_shape,
+      input2_data, unextended_output_shape, output_data);
+}
+
 // TODO(jiawen): We can implement BroadcastDiv on buffers of arbitrary
 // dimensionality if the runtime code does a single loop over one dimension
 // that handles broadcasting as the base case. The code generator would then
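For the broadcast path, the new int16_t BroadcastDivSlow simply forwards to BroadcastDivSlowQuantized<int16_t, N>, the same pattern used by the existing uint8/int8 overloads. A hedged sketch of a scalar-broadcast call is below; as above, the function name and quantization values are illustrative assumptions, not part of this change.

```cpp
#include <cstdint>
#include <limits>

#include "tensorflow/lite/kernels/internal/reference/div.h"
#include "tensorflow/lite/kernels/internal/types.h"

// Divide a {1, 2, 2, 1} int16 tensor by a broadcast scalar. Illustrative only.
void Int16BroadcastDivSketch() {
  const tflite::RuntimeShape input1_shape({1, 2, 2, 1});
  const tflite::RuntimeShape input2_shape({1, 1, 1, 1});
  const tflite::RuntimeShape output_shape({1, 2, 2, 1});
  const int16_t input1[4] = {120, -240, 360, -480};
  const int16_t input2[1] = {12};
  int16_t output[4] = {};

  tflite::ArithmeticParams op_params;
  op_params.input1_offset = 0;  // int16 quantization is symmetric (zero point 0).
  op_params.input2_offset = 0;
  op_params.output_offset = 0;
  op_params.output_multiplier = 1 << 30;  // placeholder rescale values
  op_params.output_shift = 0;
  op_params.quantized_activation_min = std::numeric_limits<int16_t>::min();
  op_params.quantized_activation_max = std::numeric_limits<int16_t>::max();

  // Picks the new int16_t overload, which forwards to
  // BroadcastDivSlowQuantized<int16_t, 5>.
  tflite::reference_ops::BroadcastDivSlow(op_params, input1_shape, input1,
                                          input2_shape, input2, output_shape,
                                          output);
}
```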