
Commit 0247ed2

Skylion007 authored and pytorchmergebot committed
Apply Clang-Tidy readability-container-size-empty (pytorch#93236)
Not only is this change usually shorter and more readable, it can also yield better performance: size() is not always a constant-time operation (for example, on linked lists), but empty() always is.

Pull Request resolved: pytorch#93236
Approved by: https://github.com/malfet
1 parent 239afa0 commit 0247ed2
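As a rough illustration of the rewrite this check applies throughout the commit (a minimal sketch with made-up names, not code from the diff below):

#include <list>

// For some containers (notably pre-C++11 std::list), size() may have to walk
// every node, while empty() is always a constant-time query and states the
// intent more directly.
bool has_pending_work(const std::list<int>& queue) {
  // Before: return queue.size() != 0;
  // After, as readability-container-size-empty suggests:
  return !queue.empty();
}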

File tree

216 files changed, +518 -525 lines

.clang-tidy (+1)

@@ -39,6 +39,7 @@ modernize-*,
 performance-*,
 -performance-noexcept-move-constructor,
 -performance-unnecessary-value-param,
+readability-container-size-empty,
 '
 HeaderFilterRegex: '^(c10/(?!test)|torch/csrc/(?!deploy/interpreter/cpython)).*$'
 AnalyzeTemporaryDtors: false

aten/src/ATen/CPUApplyUtils.h (+1 -1)

@@ -106,7 +106,7 @@ struct strided_tensor_iter {
 };
 
 inline bool _all_equal_numel(at::ArrayRef<Tensor> tensors) {
-  if (tensors.size() == 0)
+  if (tensors.empty())
     return true;
   int64_t all_numel = tensors[0].numel();
   for (const auto i : c10::irange(1, tensors.size())) {

aten/src/ATen/FunctionalStorageImpl.cpp (+3 -2)

@@ -43,9 +43,10 @@ ViewMeta ViewMeta::to_out_idx(int64_t out_idx) {
 const Tensor apply_update(const FunctionalStorageImpl::Update& update, const Tensor& base) {
   at::Tensor t = update.new_val;
   TORCH_INTERNAL_ASSERT(!at::functionalization::impl::isFunctionalTensor(t));
-  if (update.view_metas.size() == 0) return t;
+  if (update.view_metas.empty()) return t;
 
   std::vector<at::Tensor> tmp_values({base});
+  tmp_values.reserve(update.view_metas.size());
   for (size_t i = 0; i < update.view_metas.size() - 1; ++i) {
     at::Tensor next_view = update.view_metas[i].forward_fn(tmp_values.back(), update.view_metas[i].out_index);
     // NB: We only actually need tmp_values for ops like select/slice/diagonal/squeeze/as_strided
@@ -113,7 +114,7 @@ bool FunctionalStorageImpl::apply_updates() {
   // It adds the Functionalize key into TLS before redispatching to the functionalization kernels,
   // which means that we need to explicitly exclude it here before doing any other work underneath the pass.
   at::AutoDispatchSkipFunctionalize guard;
-  bool any_updates = updates_.size() > 0;
+  bool any_updates = !updates_.empty();
   for (auto& update_data: updates_) {
     base_ = apply_update(update_data, base_);
   }
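Besides the empty() rewrite, the hunk above also adds a reserve() call before a push_back loop. A minimal sketch of that pattern, with illustrative names rather than code from the commit:

#include <vector>

// Reserving the final capacity up front means the push_back calls below never
// trigger a reallocation (and copy/move of already-inserted elements).
std::vector<int> squares(int n) {
  std::vector<int> out;
  out.reserve(n);  // one allocation for all n elements
  for (int i = 0; i < n; ++i) {
    out.push_back(i * i);
  }
  return out;
}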

aten/src/ATen/FunctionalTensorWrapper.cpp (+3 -3)

@@ -132,7 +132,7 @@ FunctionalTensorWrapper::FunctionalTensorWrapper(const Tensor& view_value, const
 {
   set_constructor_metadata();
   // Copy the original tensor's ViewMeta vector and push the current one.
-  if (base->view_metas_.size() > 0) {
+  if (!base->view_metas_.empty()) {
     view_metas_ = base->view_metas_; // copy
   }
   view_metas_.push_back(meta);
@@ -238,7 +238,7 @@ void FunctionalTensorWrapper::maybe_replace_storage(const Tensor& other) {
   //
   // Given all of the above, for now we're just banning the above usage.
   TORCH_CHECK(storage().use_count() == 1, "Attempted to resize a view tensor to a larger size. This is not allowed in the functionalization pass");
-  TORCH_CHECK(view_metas_.size() == 0, "Attempted to resize a view tensor to a larger size. This is not allowed in the functionalization pass");
+  TORCH_CHECK(view_metas_.empty(), "Attempted to resize a view tensor to a larger size. This is not allowed in the functionalization pass");
   // If this tensor is not a view (and has no outstanding views taken out on it),
   // Then it's safe to throw out the old storage and replace it with the new, larger one.
   storage_ = c10::Storage(c10::make_intrusive<functionalization::FunctionalStorageImpl>(other));
@@ -508,7 +508,7 @@ bool isFunctionalTensor(const c10::optional<Tensor>& t) {
 }
 
 bool isFunctionalTensor(const c10::List<c10::optional<Tensor>>& t_list) {
-  if (t_list.size() == 0) return false;
+  if (t_list.empty()) return false;
   auto functional_count = 0;
   for (const auto i : c10::irange(t_list.size())) {
     if (!t_list[i].has_value() || !t_list[i]->defined()) continue;

aten/src/ATen/LegacyBatchedFallback.cpp (+2 -2)

@@ -156,7 +156,7 @@ void batchedTensorInplaceForLoopFallback(const c10::OperatorHandle& op, torch::j
     batched_tensor_inputs.push_back(tensor);
     batched_tensor_inputs_position.push_back(idx);
   }
-  TORCH_INTERNAL_ASSERT(batched_tensor_inputs.size() > 0);
+  TORCH_INTERNAL_ASSERT(!batched_tensor_inputs.empty());
 
   // MultiBatchVmapTransform the BatchedTensor arguments. This returns
   // VmapPhysicalViews that contain all of the batch dimensions.
@@ -290,7 +290,7 @@ void batchedTensorForLoopFallback(const c10::OperatorHandle& op, torch::jit::Sta
     batched_tensor_inputs.push_back(tensor);
     batched_tensor_inputs_position.push_back(idx);
   }
-  TORCH_INTERNAL_ASSERT(batched_tensor_inputs.size() > 0);
+  TORCH_INTERNAL_ASSERT(!batched_tensor_inputs.empty());
 
   // MultiBatchVmapTransform the BatchedTensor arguments. This returns
   // VmapPhysicalViews that contain all of the batch dimensions.

aten/src/ATen/LegacyBatchingRegistrations.cpp (+4 -4)

@@ -69,7 +69,7 @@ Tensor sum_batching_rule(const Tensor& self, OptionalIntArrayRef opt_dims, bool
     // >>> x = torch.randn(B0)  # the per-examples are all scalars
     // >>> vmap(partial(torch.sum, dim=0), x)
     // then we replicate the behavior of sum(scalar_tensor, dim=0).
-    if (/*logical*/self.dim() == 0 && (dims.size() == 0 || (dims.size() == 1 && is_allowed_dim_on_scalar_tensor(dims[0])))) {
+    if (/*logical*/self.dim() == 0 && (dims.empty() || (dims.size() == 1 && is_allowed_dim_on_scalar_tensor(dims[0])))) {
       return self.clone();
     }
   }
@@ -477,7 +477,7 @@ Tensor view_batching_rule(const Tensor& self, IntArrayRef size) {
 Tensor view_as_complex_batching_rule(const Tensor& self) {
   // guard against the user passing in a batch of scalar tensors with batch
   // size equal to 2.
-  TORCH_CHECK(self.sizes().size() != 0, "Input tensor must have one or more dimensions");
+  TORCH_CHECK(!self.sizes().empty(), "Input tensor must have one or more dimensions");
   auto self_physical = MultiBatchVmapTransform::logicalToPhysical(self);
   auto result = at::view_as_complex(self_physical.tensor());
   return self_physical.getPhysicalToLogicalMap().apply(result);
@@ -931,7 +931,7 @@ Tensor cat_batching_rule(const ITensorListRef& tensors, int64_t dim) {
   auto physical_tensors = fmap(
       physical_views, [](const VmapPhysicalView& view) -> Tensor { return view.tensor(); });
   TORCH_INTERNAL_ASSERT(
-      tensors.size() > 0, "The dispatcher should not have dispatched here otherwise.");
+      !tensors.empty(), "The dispatcher should not have dispatched here otherwise.");
   auto result = at::cat(physical_tensors, physical_views[0].getPhysicalDim(dim));
   return physical_views[0].getPhysicalToLogicalMap().apply(result);
 }
@@ -941,7 +941,7 @@ Tensor stack_batching_rule(TensorList tensors, int64_t dim) {
   auto physical_tensors = fmap(
       physical_views, [](const VmapPhysicalView& view) -> Tensor { return view.tensor(); });
   TORCH_INTERNAL_ASSERT(
-      tensors.size() > 0, "The dispatcher should not have dispatched here otherwise.");
+      !tensors.empty(), "The dispatcher should not have dispatched here otherwise.");
   // NB: stack wraps the dimensionality to (logical dim + 1), so we have to
   // manually handle that here.
   auto dim_physical =

aten/src/ATen/LegacyVmapTransforms.cpp (+1 -1)

@@ -239,7 +239,7 @@ MultiBatchVmapTransform::logicalToPhysical(ITensorListRef logical_tensors) {
 
 static std::pair<std::bitset<kVmapNumLevels>,int64_t>
 getLevelsAndLargestLogicalDim(TensorList logical_tensors) {
-  TORCH_INTERNAL_ASSERT(logical_tensors.size() > 0);
+  TORCH_INTERNAL_ASSERT(!logical_tensors.empty());
   std::bitset<kVmapNumLevels> levels;
   int64_t largest_logical_dim = -1;
   for (const auto& tensor : logical_tensors) {

aten/src/ATen/NamedTensorUtils.cpp (+3 -3)

@@ -207,7 +207,7 @@ void propagate_names_for_reduction(const Tensor& result, const Tensor& src, IntA
     return;
   }
   // This actually means "full reduction"
-  if (reduced_dims.size() == 0) {
+  if (reduced_dims.empty()) {
     return;
   }
   propagate_names_except(result, src, reduced_dims);
@@ -303,7 +303,7 @@ static int64_t num_batch_dims(DimnameList names) {
 static std::vector<Dimname> compute_matmul_outnames(
     DimnameList self_names,
     DimnameList other_names) {
-  TORCH_CHECK(self_names.size() >= 1 && other_names.size() >= 1,
+  TORCH_CHECK(!self_names.empty() && !other_names.empty(),
       "both arguments to matmul need to be at least 1D, but they are ",
       self_names.size(), "D and ", other_names.size(), "D");
 
@@ -430,7 +430,7 @@ std::vector<Dimname> compute_cat_outnames(const MaterializedITensorListRef& tens
   std::vector<Dimname> result;
   for (const Tensor& tensor : tensors) {
     const auto tensor_names = tensor.names();
-    TORCH_CHECK(tensor_names.size() > 0, "zero-dimensional tensor cannot be concatenated");
+    TORCH_CHECK(!tensor_names.empty(), "zero-dimensional tensor cannot be concatenated");
     TORCH_CHECK(result.empty() || tensor_names.size() == result.size(),
         "Tensors must have same number of dimensions: got ", result.size(),
         " and ", tensor_names.size());

aten/src/ATen/PythonTorchFunctionTLS.cpp (+1 -1)

@@ -11,7 +11,7 @@ void PythonTorchFunctionTLS::push_onto_stack(std::shared_ptr<SafePyObject> mode)
 }
 
 const std::shared_ptr<SafePyObject> PythonTorchFunctionTLS::pop_stack() {
-  TORCH_CHECK(pythonTorchFunctionState.stack_.size() > 0, "trying to pop from empty mode stack");
+  TORCH_CHECK(!pythonTorchFunctionState.stack_.empty(), "trying to pop from empty mode stack");
   auto out = pythonTorchFunctionState.stack_.back();
   pythonTorchFunctionState.stack_.pop_back();
   return out;

aten/src/ATen/SavedTensorHooks.cpp (+1 -1)

@@ -26,7 +26,7 @@ bool SavedTensorDefaultHooks::is_enabled() {
 
 void SavedTensorDefaultHooks::disable(const std::string& message) {
   tls.disabled_error_message = message;
-  if (tls.stack.size() > 0) {
+  if (!tls.stack.empty()) {
     assertSavedTensorHooksNotDisabled();
   }
 }

aten/src/ATen/TensorIndexing.cpp (+3 -3)

@@ -65,7 +65,7 @@ static inline void set_item(const Tensor& self, ArrayRef<TensorIndex> indices, c
 } // namespace indexing
 
 Tensor Tensor::index(ArrayRef<at::indexing::TensorIndex> indices) const {
-  TORCH_CHECK(indices.size() > 0, "Passing an empty index list to Tensor::index() is not valid syntax");
+  TORCH_CHECK(!indices.empty(), "Passing an empty index list to Tensor::index() is not valid syntax");
   OptionalDeviceGuard device_guard(device_of(*this));
   return at::indexing::get_item(*this, indices);
 }
@@ -74,13 +74,13 @@ Tensor Tensor::index(std::initializer_list<at::indexing::TensorIndex> indices) c
 }
 
 Tensor & Tensor::index_put_(ArrayRef<at::indexing::TensorIndex> indices, Tensor const & rhs) {
-  TORCH_CHECK(indices.size() > 0, "Passing an empty index list to Tensor::index_put_() is not valid syntax");
+  TORCH_CHECK(!indices.empty(), "Passing an empty index list to Tensor::index_put_() is not valid syntax");
   OptionalDeviceGuard device_guard(device_of(*this));
   at::indexing::set_item(*this, indices, rhs);
   return *this;
 }
 Tensor & Tensor::index_put_(ArrayRef<at::indexing::TensorIndex> indices, const Scalar& v) {
-  TORCH_CHECK(indices.size() > 0, "Passing an empty index list to Tensor::index_put_() is not valid syntax");
+  TORCH_CHECK(!indices.empty(), "Passing an empty index list to Tensor::index_put_() is not valid syntax");
   OptionalDeviceGuard device_guard(device_of(*this));
   at::indexing::set_item(*this, indices, v);
   return *this;

aten/src/ATen/TensorIndexing.h (+1 -1)

@@ -237,7 +237,7 @@ static inline Tensor applySelect(
   // See NOTE [nested tensor size for indexing]
   if (self_sizes.has_value()) {
     TORCH_CHECK_INDEX(
-        !(index == 0 && dim == 0 && self_sizes->size() == 0),
+        !(index == 0 && dim == 0 && self_sizes->empty()),
         "invalid index of a 0-dim tensor. ",
         "Use `tensor.item()` in Python or `tensor.item<T>()` in C++ to convert a 0-dim tensor to a number");
 

aten/src/ATen/TensorIterator.cpp (+4 -4)

@@ -163,7 +163,7 @@ TensorIteratorConfig& TensorIteratorConfig::declare_static_shape(IntArrayRef sha
 
 TensorIteratorConfig& TensorIteratorConfig::declare_static_shape(IntArrayRef shape, IntArrayRef squash_dims) {
   declare_static_shape(shape);
-  if (!static_shape_->size()) return *this;
+  if (static_shape_->empty()) return *this;
   for (const auto& squash_dim : squash_dims) {
     TORCH_CHECK(squash_dim >= 0 && squash_dim < static_cast<int64_t>(static_shape_->size()),
                 "squash_dim ", squash_dim, " must be in [0, ", static_shape_->size(), ").");
@@ -715,7 +715,7 @@ void TensorIteratorBase::permute_dimensions(IntArrayRef perm) {
   // Update shape and strides
   shape_ = reorder(shape_);
   for (auto& op : operands_) {
-    if (op.stride_bytes.size() > 0) {
+    if (!op.stride_bytes.empty()) {
       op.stride_bytes = reorder(op.stride_bytes);
     }
   }
@@ -1225,7 +1225,7 @@ void TensorIteratorBase::compute_shape(const TensorIteratorConfig& config) {
         "TensorIterator does not support symbolic shapes; please implement this operator in torch/_refs "
         "using the elementwise or reduction helpers (look at backtrace to find out what operator this is)");
     auto shape = op.tensor_base().sizes();
-    if (shape.size() == 0) {
+    if (shape.empty()) {
      has_scalars = true;
    } else {
      has_tensors = true;
@@ -1724,7 +1724,7 @@ void DimCounter::increment(const std::array<int64_t, 2>& step) {
 std::array<int64_t, 2> DimCounter::max_2d_step() const {
   int64_t step0 = std::min(shape[0] - values[0], range.end - offset);
   int64_t step1 = 1;
-  if (step0 == shape[0] && shape.size() >= 1) {
+  if (step0 == shape[0] && !shape.empty()) {
     step1 = std::min(shape[1] - values[1], (range.end - offset) / shape[0]);
   }
   return {step0, step1};

aten/src/ATen/WrapDimUtils.h (+2 -2)

@@ -19,7 +19,7 @@ inline int64_t maybe_wrap_dim(int64_t dim, TensorImpl* tensor) {
 }
 
 inline int64_t maybe_wrap_dim(int64_t dim, TensorList tensors) {
-  if (tensors.size() == 0) {
+  if (tensors.empty()) {
     // can't wrap empty TensorList; rely on underlying implementation to throw
     // error if necessary.
     return dim;
@@ -30,7 +30,7 @@ inline int64_t maybe_wrap_dim(int64_t dim, TensorList tensors) {
 inline int64_t maybe_wrap_dim(
     int64_t dim,
     const std::vector<std::vector<int64_t>>& tensor_sizes) {
-  if (tensor_sizes.size() == 0) {
+  if (tensor_sizes.empty()) {
     // can't wrap empty list; rely on underlying implementation to throw error
     // if necessary
    return dim;

aten/src/ATen/code_template.h (+2 -2)

@@ -192,14 +192,14 @@ struct CodeTemplate {
       const string_list& strings,
       bool comma_before,
       bool comma_after) const {
-    if (comma_before && strings.size() > 0)
+    if (comma_before && !strings.empty())
       out << ", ";
     for (const auto i : c10::irange(strings.size())) {
       if (i > 0)
         out << ", ";
       out << strings[i];
     }
-    if (comma_after && strings.size() > 0)
+    if (comma_after && !strings.empty())
       out << ", ";
   }
   // These indentation functions follow the convention that they never emit

aten/src/ATen/core/boxing/impl/boxing.h (+1 -1)

@@ -234,7 +234,7 @@ struct BoxedKernelWrapper<
     [&] {
       // op returns void, boxed kernel has pushed nothing onto stack.
       TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
-        stack.size() == 0,
+        stack.empty(),
        "Boxed kernel was expected to return no values on the stack, ",
        "but instead returned ", stack.size(), " values."
      );

aten/src/ATen/core/class_type.cpp (+3 -3)

@@ -152,7 +152,7 @@ void checkForwardHookInputArguments(
   if (forward_args.size() == 1) {
     // check for empty forward case
     TORCH_CHECK(
-        input_tuple_types.size() == 0,
+        input_tuple_types.empty(),
         hook_id,
         "was expecting Tuple[()] as the input type. Received type: '",
         input_arg.type()->annotation_str(),
@@ -213,7 +213,7 @@ void ClassType::checkForwardPreHookSchema(
   // or the contained single type if the input was a tuple containing a single
   // type.
   TORCH_CHECK(
-      pre_hook_schema.returns().size() != 0,
+      !pre_hook_schema.returns().empty(),
      hook_id,
      "is missing a return annotation. Return annotations are required, please add one.\n",
      pre_hook_err_msg
@@ -254,7 +254,7 @@ void ClassType::checkForwardPreHookSchema(
   // check for edge case of Tuple[()] for when forward has no arguments
   if (forward_args.size() == 1) {
     TORCH_CHECK(
-        return_tuple_types.size() == 0,
+        return_tuple_types.empty(),
        wrong_type_returned_err_msg,
        " Was expecting either 'None' or 'Tuple[()]' since forward had ",
        "no arguments.\n",

aten/src/ATen/core/dispatch/OperatorEntry.cpp (+4 -4)

@@ -145,7 +145,7 @@ OperatorEntry::AnnotatedKernelContainerIterator OperatorEntry::registerKernel(
 #ifdef C10_DISPATCHER_ONE_KERNEL_PER_DISPATCH_KEY
   if (k[0].kernel.isValid()) {
 #else
-  if (k.size() > 0) {
+  if (!k.empty()) {
 #endif
     // Suppress the warning for Meta key as we are overriding C++ meta functions with python meta functions
     // for some ops
@@ -221,12 +221,12 @@ bool OperatorEntry::hasKernelForDispatchKey(DispatchKey k) const {
   TORCH_INTERNAL_ASSERT(kernels_.find(DispatchKey::Undefined) == kernels_.end());
   auto it = kernels_.find(k);
   if (it == kernels_.end()) return false;
-  return it->second.size() > 0;
+  return !it->second.empty();
 }
 
 const KernelFunction& OperatorEntry::kernelForDispatchKey(DispatchKey k) const {
   auto it = kernels_.find(k);
-  TORCH_CHECK(it != kernels_.end() && it->second.size(), "no kernel for ", k, " on ", name_);
+  TORCH_CHECK(it != kernels_.end() && !it->second.empty(), "no kernel for ", k, " on ", name_);
   auto jt = it->second.begin();
   TORCH_INTERNAL_ASSERT(jt->kernel.isValid())
   return jt->kernel;
@@ -462,7 +462,7 @@ void OperatorEntry::checkInvariants() const {
   }
   TORCH_INTERNAL_ASSERT(kernels_.find(DispatchKey::Undefined) == kernels_.end(), dumpState());
   for (const auto& kv : kernels_) {
-    TORCH_INTERNAL_ASSERT(kv.second.size() > 0, dumpState());
+    TORCH_INTERNAL_ASSERT(!kv.second.empty(), dumpState());
   }
   for (auto k : DispatchKeySet(DispatchKeySet::FULL)) {
     auto expected_k = computeDispatchTableEntry(c10::Dispatcher::singleton(), k);

aten/src/ATen/core/dynamic_type.cpp (+1 -1)

@@ -38,7 +38,7 @@ std::string DynamicType::str() const {
   std::string ret = "Dynamic<";
   ret += std::to_string(static_cast<DynamicTypeBits>(tag_));
   ret += ">";
-  if (tag_ != Tag::Class && arguments_.elems.size() > 0) {
+  if (tag_ != Tag::Class && !arguments_.elems.empty()) {
     ret += "[";
     for (const auto& arg : arguments_.elems) {
       if (arg.label) {

aten/src/ATen/core/function_schema.cpp (+2 -2)

@@ -109,7 +109,7 @@ c10::optional<AliasTypeSet> FunctionSchema::mapTypeToAliasTypeSet(const TypePtr&
             (*maybe_inner_types).end());
       }
     }
-    if (mutable_types.size() == 0) {
+    if (mutable_types.empty()) {
      return c10::nullopt;
    }
    return mutable_types;
@@ -130,7 +130,7 @@ c10::optional<AliasTypeSet> FunctionSchema::mapTypeToAliasTypeSet(const TypePtr&
             (*maybe_inner_types).end());
       }
     }
-    if (mutable_types.size() == 0) {
+    if (mutable_types.empty()) {
      return c10::nullopt;
    }
    return {AliasTypeSet{TupleType::create(std::move(mutable_types))}};
