Skip to content

Commit 9e77113

Browse files
r-barnes authored and facebook-github-bot committed
irange-ify 11 (pytorch#62121)
Summary: Pull Request resolved: pytorch#62121 Test Plan: Sandcastle Reviewed By: ngimel Differential Revision: D29879701 fbshipit-source-id: 5c51879c88fa6a5790db241c8b33ec0dc4b177ca
1 parent b5867a1 commit 9e77113

9 files changed

+33
-22
lines changed

torch/csrc/jit/python/init.cpp

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -98,6 +98,7 @@
9898
#include <torch/csrc/jit/tensorexpr/tensorexpr_init.h>
9999

100100
#include <c10/macros/Export.h>
101+
#include <c10/util/irange.h>
101102
#include <c10/util/signal_handler.h>
102103
#include <caffe2/serialize/inline_container.h>
103104

@@ -475,7 +476,7 @@ void initJITBindings(PyObject* module) {
475476
// we want full shape specialization. The alternative would be to
476477
// have a "complete type inference" function in ArguemntSpecCreator.
477478
auto g_inputs = graph->inputs();
478-
for (size_t i = 0; i < inputs.size(); ++i) {
479+
for (const auto i : c10::irange(inputs.size())) {
479480
if (stack[i].isTensor()) {
480481
g_inputs[i]->setType(stack[i].type());
481482
}
@@ -491,7 +492,7 @@ void initJITBindings(PyObject* module) {
491492
stack.push_back(toTypeInferredIValue(obj));
492493
}
493494
auto g_inputs = graph->inputs();
494-
for (size_t i = 0; i < inputs.size(); ++i) {
495+
for (const auto i : c10::irange(inputs.size())) {
495496
if (stack[i].isTensor()) {
496497
g_inputs[i]->setType(stack[i].type());
497498
}
@@ -1164,7 +1165,7 @@ void initJITBindings(PyObject* module) {
11641165
[operations, symbol](py::args args, py::kwargs kwargs) {
11651166
std::vector<py::handle> overloaded_args;
11661167
size_t total_arg_num = args.size() + kwargs.size();
1167-
for (size_t i = 0; i < args.size(); ++i) {
1168+
for (const auto i : c10::irange(args.size())) {
11681169
is_tensor_and_append_overloaded(
11691170
args[i].ptr(), &overloaded_args);
11701171
is_tensor_list_and_append_overloaded(
@@ -1380,7 +1381,7 @@ void initJITBindings(PyObject* module) {
13801381
py::function f = py::cast<py::function>(args[0]);
13811382
py::tuple args_tup(args.size() - 1);
13821383

1383-
for (size_t i = 1; i < args.size(); ++i) {
1384+
for (const auto i : c10::irange(1, args.size())) {
13841385
args_tup[i - 1] = args[i];
13851386
}
13861387

torch/csrc/jit/python/pybind.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44

55
#include <ATen/core/interned_strings.h>
66
#include <ATen/core/ivalue.h>
7+
#include <c10/util/irange.h>
78
#include <torch/csrc/DynamicTypes.h>
89
#include <torch/csrc/THP.h>
910
#include <torch/csrc/autograd/variable.h>
@@ -214,7 +215,7 @@ namespace jit {
214215

215216
static inline py::tuple tuple_tail(const py::tuple& tup) {
216217
py::tuple r(tup.size() - 1);
217-
for (size_t i = 1; i < tup.size(); i++) {
218+
for (const auto i : c10::irange(1, tup.size())) {
218219
r[i - 1] = tup[i];
219220
}
220221
return r;

torch/csrc/jit/python/pybind_utils.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -82,7 +82,7 @@ IValue toIValue(py::handle obj, const TypePtr& type, c10::optional<int32_t> N) {
8282
}
8383
std::vector<IValue> values;
8484
values.reserve(tuple_size);
85-
for (size_t i = 0; i < tuple_size; ++i) {
85+
for (const auto i : c10::irange(tuple_size)) {
8686
values.push_back(toIValue(tuple[i], elem_types[i]));
8787
}
8888
return tuple_type->name()

torch/csrc/jit/python/pybind_utils.h

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -38,6 +38,7 @@
3838
#endif
3939
#include <c10/util/Exception.h>
4040
#include <c10/util/Optional.h>
41+
#include <c10/util/irange.h>
4142

4243
#include <algorithm>
4344
#include <cstddef>
@@ -706,7 +707,7 @@ inline py::object toPyObject(IValue ivalue) {
706707
} else if (ivalue.isList()) {
707708
auto list = std::move(ivalue).toList();
708709
py::list t{list.size()};
709-
for (size_t i = 0; i < list.size(); ++i) {
710+
for (const auto i : c10::irange(list.size())) {
710711
t[i] = toPyObject(IValue{list.get(i)});
711712
}
712713
return std::move(t);
@@ -715,7 +716,7 @@ inline py::object toPyObject(IValue ivalue) {
715716
const auto& elements = tuple->elements();
716717

717718
py::tuple t{elements.size()};
718-
for (size_t i = 0; i < elements.size(); ++i) {
719+
for (const auto i : c10::irange(elements.size())) {
719720
t[i] = toPyObject(IValue{elements.at(i)});
720721
}
721722

@@ -783,7 +784,7 @@ inline py::object toPyObject(IValue ivalue) {
783784

784785
const auto numAttrs = classType->numAttributes();
785786

786-
for (size_t slot = 0; slot < numAttrs; slot++) {
787+
for (const auto slot : c10::irange(numAttrs)) {
787788
const auto& attrName = classType->getAttributeName(slot);
788789
IValue v = obj->getSlot(slot);
789790
py::setattr(pyObj, attrName.c_str(), toPyObject(std::move(v)));
@@ -916,7 +917,7 @@ inline py::object createPyObjectForStack(Stack&& stack) {
916917

917918
// If there is more than one return value, pop them into a py::tuple.
918919
py::tuple return_values(stack.size());
919-
for (size_t ret = 0; ret < return_values.size(); ++ret) {
920+
for (const auto ret : c10::irange(return_values.size())) {
920921
return_values[ret] = toPyObject(std::move(stack[ret]));
921922
}
922923

@@ -935,7 +936,7 @@ inline Stack evilDeprecatedBadCreateStackDoNotUse(
935936
}
936937
Stack result;
937938
result.reserve(tuple.size() + reserve_extra_space);
938-
for (size_t i = 0; i < inputs.size(); ++i) {
939+
for (const auto i : c10::irange(inputs.size())) {
939940
result.push_back(toIValue(std::move(tuple[i]), inputs[i]->type()));
940941
}
941942
return result;

torch/csrc/jit/python/python_arg_flatten.cpp

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
#include <c10/util/irange.h>
12
#include <torch/csrc/jit/python/python_arg_flatten.h>
23
#include <torch/csrc/utils/python_strings.h>
34
#include <torch/csrc/utils/six.h>
@@ -33,8 +34,9 @@ template <typename T>
3334
py::object cast_handle_sequence(std::vector<py::handle> objs) {
3435
auto num_objs = objs.size();
3536
T sequence{num_objs};
36-
for (size_t i = 0; i < num_objs; ++i)
37+
for (const auto i : c10::irange(num_objs)) {
3738
sequence[i] = py::reinterpret_borrow<py::object>(objs[i]);
39+
}
3840
return sequence;
3941
}
4042

@@ -109,15 +111,16 @@ template <typename T>
109111
py::object cast_sequence(std::vector<py::object> objs) {
110112
auto num_objs = objs.size();
111113
T sequence{num_objs};
112-
for (size_t i = 0; i < num_objs; ++i)
114+
for (const auto i : c10::irange(num_objs)) {
113115
sequence[i] = std::move(objs[i]);
116+
}
114117
return std::move(sequence);
115118
}
116119

117120
py::object cast_dict(std::vector<py::object> objs) {
118121
auto num_objs = objs.size();
119122
py::dict sequence = {};
120-
for (size_t i = 0; i < num_objs; ++i) {
123+
for (const auto i : c10::irange(num_objs)) {
121124
py::tuple obj = py::reinterpret_borrow<py::tuple>(objs[i]);
122125
sequence[obj[0]] = obj[1];
123126
}

torch/csrc/jit/python/python_arg_flatten.h

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
#pragma once
22

33
#include <c10/util/hash.h>
4+
#include <c10/util/irange.h>
45
#include <torch/csrc/autograd/variable.h>
56
#include <torch/csrc/jit/python/pybind.h>
67

@@ -75,7 +76,7 @@ static inline std::ostream& operator<<(
7576
out << ", device=" << meta_device.index();
7677
}
7778
out << ") {";
78-
for (size_t i = 0; i < meta.sizes.size(); ++i) {
79+
for (const auto i : c10::irange(meta.sizes.size())) {
7980
if (i > 0)
8081
out << ", ";
8182
out << meta.sizes[i];
@@ -89,7 +90,7 @@ static inline std::ostream& operator<<(
8990
const IODescriptor& desc) {
9091
out << desc.structure << "\n";
9192
out << " with grad_enabled=" << desc.grad_enabled << "\n";
92-
for (size_t i = 0; i < desc.metadata.size(); ++i) {
93+
for (const auto i : c10::irange(desc.metadata.size())) {
9394
out << " with v" << i << " having type " << desc.metadata[i] << "\n";
9495
}
9596
return out;

torch/csrc/jit/python/python_list.cpp

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
#include <ATen/core/ivalue.h>
2+
#include <c10/util/irange.h>
23
#include <pybind11/detail/common.h>
34
#include <pybind11/pytypes.h>
45
#include <torch/csrc/jit/python/pybind_utils.h>
@@ -133,7 +134,8 @@ void initScriptListBindings(PyObject* module) {
133134

134135
auto seq = std::make_shared<ScriptList>(self->type());
135136

136-
for (size_t i = 0; i < slicelength; ++i) {
137+
for (const auto i : c10::irange(slicelength)) {
138+
(void)i; // Suppress unused variable warning
137139
seq->append(self->getItem(start));
138140
start += step;
139141
}
@@ -172,7 +174,7 @@ void initScriptListBindings(PyObject* module) {
172174
"Left and right hand size of slice assignment have different sizes");
173175
}
174176

175-
for (size_t i = 0; i < slicelength; ++i) {
177+
for (const auto i : c10::irange(slicelength)) {
176178
try {
177179
self->setItem(
178180
start, toIValue(value[i], self->type()->getElementType()));

torch/csrc/jit/python/python_tracer.cpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@
1010
#include <torch/csrc/utils/python_strings.h>
1111

1212
#include <c10/util/Exception.h>
13+
#include <c10/util/irange.h>
1314

1415
#include <sstream>
1516

@@ -89,7 +90,7 @@ std::pair<std::shared_ptr<Graph>, Stack> createGraphByTracing(
8990
[&func](Stack inputs) -> Stack {
9091
size_t num_func_inputs = inputs.size();
9192
py::tuple py_inputs(num_func_inputs);
92-
for (size_t i = 0; i < num_func_inputs; ++i) {
93+
for (const auto i : c10::irange(num_func_inputs)) {
9394
py_inputs[i] = py::cast(inputs[i]);
9495
}
9596
auto out = func(*py_inputs);

torch/csrc/jit/python/script_init.cpp

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,7 @@
1818
#include <torch/csrc/jit/testing/file_check.h>
1919

2020
#include <c10/util/intrusive_ptr.h>
21+
#include <c10/util/irange.h>
2122
#include <torch/csrc/jit/frontend/parser.h>
2223
#include <torch/csrc/jit/frontend/tracer.h>
2324
#include <torch/csrc/jit/ir/constants.h>
@@ -175,7 +176,7 @@ void checkOverloadDecl(const Decl& new_decl, const Decl& old_decl) {
175176
"Overload must have same number of parameters\n",
176177
new_decl.range(),
177178
old_decl.range());
178-
for (size_t i = 0; i < new_decl.params().size(); ++i) {
179+
for (const auto i : c10::irange(new_decl.params().size())) {
179180
TORCH_INTERNAL_ASSERT(
180181
new_params[i].ident().name() == old_params[i].ident().name(),
181182
"Overload parameters must have the same names\n",
@@ -311,7 +312,7 @@ static Decl mergeDefaultsAndExtraParametersToOverloadDecl(
311312
overload_decl.range(),
312313
impl_decl.range());
313314

314-
for (size_t i = 0; i < overload_params.size(); ++i) {
315+
for (const auto i : c10::irange(overload_params.size())) {
315316
auto overload_name = overload_params[i].ident().name();
316317
auto impl_name = impl_params[i].ident().name();
317318
if (overload_name != impl_name) {
@@ -586,7 +587,7 @@ bool ivalue_tags_match(const Module& lhs, const Module& rhs) {
586587
} else if (item.a.isList()) {
587588
auto al = item.a.toList();
588589
auto bl = item.b.toList();
589-
for (size_t i = 0; i < al.size(); ++i) {
590+
for (const auto i : c10::irange(al.size())) {
590591
work.emplace_back(Work{al.get(i), bl.get(i)});
591592
}
592593
} else if (item.a.isGenericDict()) {

0 commit comments

Comments
 (0)