
Commit cc3618c

gchanan authored and facebook-github-bot committed
Move _cumsum and _cumprod to _th_ prefixes.
Summary:
Pull Request resolved: pytorch#13311
Reviewed By: ezyang
Differential Revision: D12839706
Pulled By: gchanan
fbshipit-source-id: 79e20b31c6ca2f22229ad3903aacf70dc674c25c
1 parent ce469e6 commit cc3618c
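
This commit only renames the internal TH-backed functions; the user-facing at::cumsum and at::cumprod entry points keep their names and behavior. A minimal sketch of a caller that is unaffected by the rename (assuming a standard ATen include and build; this snippet is not part of the commit):

#include <ATen/ATen.h>

int main() {
  // Public API is unchanged: these calls still resolve to at::cumsum /
  // at::cumprod, which now forward internally to the _th_-prefixed names.
  at::Tensor t = at::arange(1, 5, at::kFloat);   // [1, 2, 3, 4]
  at::Tensor s = at::cumsum(t, /*dim=*/0);       // [1, 3, 6, 10]
  at::Tensor p = at::cumprod(t, /*dim=*/0);      // [1, 2, 6, 24]
  return 0;
}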

File tree

3 files changed, +8 -8 lines changed


aten/src/ATen/Declarations.cwrap

+2 -2

@@ -1972,7 +1972,7 @@
     - THTensor* self
 ]]
 [[
-  name: _cumsum
+  name: _th_cumsum
   cname: cumsum
   variants: function
   return: argument 0
@@ -1984,7 +1984,7 @@
   wrap_dim: self
 ]]
 [[
-  name: _cumprod
+  name: _th_cumprod
   cname: cumprod
   variants: function
   return: argument 0
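
In Declarations.cwrap, the name field is what ATen's code generator exposes under the at:: namespace, while cname stays the underlying TH kernel name (cumsum/cumprod), so only the generated binding is renamed here. A sketch of the declarations these entries correspond to after the change (the argument list is taken from the derivatives.yaml signatures further down; treat the exact prototypes as illustrative):

namespace at {
// Generated from the cwrap entries above: the TH kernels are unchanged,
// only the ATen-level names gain the _th_ prefix.
Tensor _th_cumsum(const Tensor& self, int64_t dim);   // was at::_cumsum
Tensor _th_cumprod(const Tensor& self, int64_t dim);  // was at::_cumprod
}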

aten/src/ATen/native/ReduceOps.cpp

+4 -4

@@ -102,7 +102,7 @@ static std::unique_ptr<TensorIterator> make_reduction(
 }
 
 static inline Tensor cumsum(const Tensor& self, int64_t dim, optional<ScalarType> dtype) {
-  return at::_cumsum(integer_upcast(self, dtype), dim);
+  return at::_th_cumsum(integer_upcast(self, dtype), dim);
 }
 
 Tensor cumsum(const Tensor& self, int64_t dim, ScalarType dtype) {
@@ -122,7 +122,7 @@ static inline Tensor& cumsum_out(Tensor& result, const Tensor& self, int64_t dim
       " and ",
       at::toString(dtype.value()),
       ".");
-  return at::_cumsum_out(result, self.toType(result.type().scalarType()), dim);
+  return at::_th_cumsum_out(result, self.toType(result.type().scalarType()), dim);
 }
 
 Tensor& cumsum_out(Tensor& result, const Tensor& self, int64_t dim, ScalarType dtype) {
@@ -134,7 +134,7 @@ Tensor& cumsum_out(Tensor& result, const Tensor& self, int64_t dim) {
 }
 
 static inline Tensor cumprod(const Tensor& self, int64_t dim, optional<ScalarType> dtype) {
-  return at::_cumprod(integer_upcast(self, dtype), dim);
+  return at::_th_cumprod(integer_upcast(self, dtype), dim);
 }
 
 Tensor cumprod(const Tensor& self, int64_t dim, ScalarType dtype) {
@@ -154,7 +154,7 @@ static inline Tensor& cumprod_out(Tensor& result, const Tensor& self, int64_t di
       " and ",
       at::toString(dtype.value()),
       ".");
-  return at::_cumprod_out(result, self.toType(result.type().scalarType()), dim);
+  return at::_th_cumprod_out(result, self.toType(result.type().scalarType()), dim);
 }
 
 Tensor& cumprod_out(Tensor& result, const Tensor& self, int64_t dim, ScalarType dtype) {
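
The wrappers in ReduceOps.cpp keep their dtype-promotion step and only change which internal name they forward to. A minimal sketch of that pattern, with the upcast helper paraphrased as an assumption (integer_upcast_sketch and cumsum_sketch are illustrative stand-ins, not ATen's actual integer_upcast or cumsum):

#include <ATen/ATen.h>

// Illustrative stand-in for ATen's integer_upcast: promote integral inputs so
// cumulative ops accumulate in a wider type, or honor an explicit dtype.
static at::Tensor integer_upcast_sketch(const at::Tensor& self,
                                        at::optional<at::ScalarType> dtype) {
  at::ScalarType st = self.type().scalarType();
  at::ScalarType target =
      dtype.value_or(at::isIntegralType(st) ? at::kLong : st);
  return self.toType(target);
}

// The wrapper pattern after this commit: upcast, then forward to the
// _th_-prefixed internal function instead of the old underscore-only name.
static at::Tensor cumsum_sketch(const at::Tensor& self, int64_t dim,
                                at::optional<at::ScalarType> dtype) {
  return at::_th_cumsum(integer_upcast_sketch(self, dtype), dim);
}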

tools/autograd/derivatives.yaml

+2 -2

@@ -211,10 +211,10 @@
   self: other.cross(grad, dim)
   other: grad.cross(self, dim)
 
-- name: _cumprod(Tensor self, int64_t dim)
+- name: _th_cumprod(Tensor self, int64_t dim)
   self: cumprod_backward(grad, self, dim)
 
-- name: _cumsum(Tensor self, int64_t dim)
+- name: _th_cumsum(Tensor self, int64_t dim)
   self: cumsum_backward(grad, dim)
 
 - name: conv_tbc(Tensor self, Tensor weight, Tensor bias, int64_t pad)
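
derivatives.yaml keys backward formulas on the function signature, so these entries must be renamed along with the functions or autograd codegen would no longer attach cumsum_backward and cumprod_backward to them. For intuition, the cumsum gradient is a reversed cumulative sum of the incoming gradient; a sketch of that identity (an illustrative reimplementation, not PyTorch's cumsum_backward):

#include <ATen/ATen.h>

// For y = cumsum(x, dim), grad_x[i] = sum over j >= i of grad_y[j] along dim,
// i.e. the incoming gradient flipped, cumulatively summed, and flipped back.
at::Tensor cumsum_backward_sketch(const at::Tensor& grad, int64_t dim) {
  return grad.flip({dim}).cumsum(dim).flip({dim});
}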
