
Commit 48dc24d

Skylion007 authored and pytorchmergebot committed
Fix: [ATen] Add some missing moves (pytorch#88514)
Related to pytorch#88512, but for ATen. This should reduce a number of copies and inefficient atomic smart-pointer reference-count increments.

Pull Request resolved: pytorch#88514
Approved by: https://github.com/jgong5, https://github.com/ezyang
1 parent 9eabcc3 commit 48dc24d
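For context, a minimal sketch (not part of the commit) of the pattern the diff applies: when a function takes a reference-counted value by value and only needs to hand it off, std::move transfers the existing reference instead of copying it, so the atomic increment/decrement pair is skipped. RefCountedInt and Sink below are hypothetical stand-ins for intrusive_ptr-backed types such as c10::SymInt, not the real c10 implementation.

#include <memory>
#include <utility>

// Hypothetical stand-in for an intrusive_ptr-backed value (e.g. c10::SymInt).
// Copying it bumps an atomic reference count; moving it only steals the pointer.
struct RefCountedInt {
  std::shared_ptr<long> node;  // shared_ptr models the atomic refcount here
};

struct Sink {
  RefCountedInt stored;
  // Takes its argument by value, like Storage::set_nbytes(c10::SymInt) in this diff.
  void set(RefCountedInt v) {
    stored = std::move(v);  // move out of the by-value parameter; no extra refcount bump
  }
};

int main() {
  RefCountedInt n{std::make_shared<long>(42)};
  Sink s;
  s.set(n);             // copy into the parameter: one atomic increment
  s.set(std::move(n));  // move into the parameter: no atomic traffic; n is left empty
  return 0;
}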

File tree

10 files changed: +17 -13 lines

aten/src/ATen/InferSize.h (+1 -1)

@@ -80,7 +80,7 @@ inline at::SymDimVector infer_size_dv(
     c10::SymInt numel) {
   auto res = at::SymDimVector(shape);
   infer_size_impl<c10::SymIntArrayRef, c10::SymInt, at::SymDimVector>(
-      shape, numel, res);
+      shape, std::move(numel), res);
   return res;
 }

aten/src/ATen/core/Formatting.cpp (+2 -2)

@@ -13,7 +13,7 @@ std::ostream& operator<<(std::ostream & out, Backend b) {
   return out << toString(b);
 }
 
-std::ostream& operator<<(std::ostream & out, Scalar s) {
+std::ostream& operator<<(std::ostream & out, const Scalar& s) {
   if (s.isFloatingPoint()) {
     return out << s.toDouble();
   }
@@ -35,7 +35,7 @@ std::ostream& operator<<(std::ostream & out, Scalar s) {
   throw std::logic_error("Unknown type in Scalar");
 }
 
-std::string toString(Scalar s) {
+std::string toString(const Scalar& s) {
   std::stringstream out;
   out << s;
   return out.str();

aten/src/ATen/core/Formatting.h (+2 -2)

@@ -8,8 +8,8 @@
 
 namespace c10 {
 TORCH_API std::ostream& operator<<(std::ostream& out, Backend b);
-TORCH_API std::ostream& operator<<(std::ostream & out, Scalar s);
-TORCH_API std::string toString(Scalar s);
+TORCH_API std::ostream& operator<<(std::ostream & out, const Scalar& s);
+TORCH_API std::string toString(const Scalar& s);
 }
 namespace at {
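The Formatting changes use a related idiom: a function that only reads its argument takes it by const reference instead of by value, avoiding a copy at every call site. A hedged illustration, with Printable as a hypothetical stand-in for c10::Scalar:

#include <iostream>
#include <string>

// Hypothetical type that may be non-trivial to copy (standing in for c10::Scalar).
struct Printable {
  std::string payload;
};

// Before: `std::ostream& operator<<(std::ostream& out, Printable p)` would copy
// the argument on every call. After: pass by const reference, since the operator
// only reads it.
std::ostream& operator<<(std::ostream& out, const Printable& p) {
  return out << p.payload;
}

int main() {
  Printable p{"forty-two"};
  std::cout << p << '\n';  // no copy of p is made
  return 0;
}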

aten/src/ATen/native/TensorShape.cpp (+3 -2)

@@ -204,10 +204,11 @@
 #include <ATen/ops/zeros_native.h>
 #endif
 
+#include <c10/util/StringUtil.h>
 #include <algorithm>
 #include <cstdint>
+#include <utility>
 #include <vector>
-#include <c10/util/StringUtil.h>
 
 namespace at {
 namespace meta {
@@ -416,7 +417,7 @@ Tensor& set_storage_meta__symint(Tensor& result, Storage storage, c10::SymInt st
     const auto itemsize = result.dtype().itemsize();
     c10::SymInt size_bytes = at::detail::computeStorageNbytes(
         size, stride, itemsize, storage_offset);
-    storage.set_nbytes(size_bytes);
+    storage.set_nbytes(std::move(size_bytes));
   }
   return result;
 }

aten/src/ATen/native/quantized/cpu/qconv_prepack.cpp (+2 -1)

@@ -1,4 +1,5 @@
 #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
+#include <utility>
 #include <vector>
 
 #include <ATen/core/Tensor.h>
@@ -444,7 +445,7 @@ c10::intrusive_ptr<ConvPackedParamsBase<kSpatialDim>> PackedConvWeightsOnednn<
   exp_wgt.init(w_desc);
   exp_wgt.set_scale(wgt_scales); // Also for feed_from()
   exp_wgt.feed_from(wgt, transpose); // expect wgt to be in [OC IC KH KW] format
-  ideep::tensor * packed_weight_p = new ideep::tensor(exp_wgt);
+  ideep::tensor * packed_weight_p = new ideep::tensor(std::move(exp_wgt));
   packed_weight_p->set_scale(wgt_scales);
   packed_weight_p->set_zero_point(wgt_zero_points);
   std::unique_ptr<ideep::tensor> weight_ptr(packed_weight_p);

aten/src/ATen/native/quantized/cpu/qlinear_prepack.cpp (+2 -1)

@@ -23,6 +23,7 @@
 #include <c10/util/irange.h>
 
 #include <algorithm>
+#include <utility>
 #include <vector>
 
 int register_linear_params();
@@ -249,7 +250,7 @@ c10::intrusive_ptr<LinearPackedParamsBase> PackedLinearWeightsOnednn::prepack(
       dnnl::memory::data_type::u8);
   ideep::tensor exp_wgt(w_desc);
   exp_wgt.feed_from(wgt);
-  ideep::tensor * packed_weight_p = new ideep::tensor(exp_wgt);
+  ideep::tensor * packed_weight_p = new ideep::tensor(std::move(exp_wgt));
   packed_weight_p->set_scale(wgt_scales);
   packed_weight_p->set_zero_point(wgt_zero_points);
   std::unique_ptr<ideep::tensor> weight_ptr(packed_weight_p);

c10/core/Storage.h (+1 -1)

@@ -76,7 +76,7 @@ struct C10_API Storage {
   }
 
   void set_nbytes(c10::SymInt size_bytes) const {
-    storage_impl_.get()->set_nbytes(size_bytes);
+    storage_impl_.get()->set_nbytes(std::move(size_bytes));
   }
 
   bool resizable() const {

c10/core/StorageImpl.h (+1 -1)

@@ -112,7 +112,7 @@ struct C10_API StorageImpl : public c10::intrusive_ptr_target {
   }
 
   void set_nbytes(c10::SymInt size_bytes) {
-    size_bytes_ = size_bytes;
+    size_bytes_ = std::move(size_bytes);
  }
 
   bool resizable() const {

c10/core/WrapDimMinimal.cpp (+2 -1)

@@ -14,7 +14,8 @@ T maybe_wrap_dim_slow(T dim, T dim_post_expr, bool wrap_scalar) {
         "Dimension specified as ",
         dim,
         " but tensor has no dimensions");
-    return c10::maybe_wrap_dim(dim, /*dim_post_expr=*/1, /*wrap_scalar=*/false);
+    return c10::maybe_wrap_dim(
+        std::move(dim), /*dim_post_expr=*/1, /*wrap_scalar=*/false);
   }
 
   T min = dim_post_expr * -1;

c10/core/WrapDimMinimal.h (+1 -1)

@@ -38,7 +38,7 @@ inline c10::SymInt maybe_wrap_dim(
     c10::SymInt dim,
     c10::SymInt dim_post_expr,
     bool wrap_scalar = true) {
-  return _maybe_wrap_dim(dim, dim_post_expr, wrap_scalar);
+  return _maybe_wrap_dim(std::move(dim), std::move(dim_post_expr), wrap_scalar);
 }
 
 } // namespace c10
