Skip to content

Commit ce469e6

Browse files
jerryzh168 authored and facebook-github-bot committed
dims() to sizes() remaining part
Summary: Made the clangr rule more robust and it discovered more callsites.

Reviewed By: smessmer

Differential Revision: D12825017

fbshipit-source-id: 3be1eeb7ea697b36ef89e78ba64c0ee1259439c4
1 parent 9af18d8 commit ce469e6

File tree

4 files changed

+9
-9
lines changed

4 files changed

+9
-9
lines changed

caffe2/contrib/nccl/cuda_nccl_gpu.cc

+5-5
Original file line numberDiff line numberDiff line change
@@ -205,7 +205,7 @@ void NCCL<T>::AllReduce(const NCCLExecution& ex) {
205205
return runNCCL<T>(
206206
ex,
207207
[](const NCCLElement& ctx) {
208-
ctx.dst->Resize(ctx.src->dims());
208+
ctx.dst->Resize(ctx.src->sizes());
209209
ctx.dst->template mutable_data<T>();
210210
},
211211
[](const NCCLElement& ctx, ncclComm_t comm, cudaStream_t stream) {
@@ -225,7 +225,7 @@ void NCCL<T>::Broadcast(const NCCLExecution& ex) {
225225
return runNCCL<T>(
226226
ex,
227227
[](const NCCLElement& ctx) {
228-
ctx.dst->Resize(ctx.src->dims());
228+
ctx.dst->Resize(ctx.src->sizes());
229229
ctx.dst->template mutable_data<T>();
230230
},
231231
[&ex](const NCCLElement& ctx, ncclComm_t comm, cudaStream_t stream) {
@@ -245,7 +245,7 @@ void NCCL<T>::Reduce(const NCCLExecution& ex) {
245245
ex,
246246
[](const NCCLElement& ctx) {
247247
if (ctx.dst) {
248-
ctx.dst->Resize(ctx.src->dims());
248+
ctx.dst->Resize(ctx.src->sizes());
249249
ctx.dst->template mutable_data<T>();
250250
}
251251
},
@@ -272,7 +272,7 @@ void NCCL<T>::AllGather(const NCCLExecution& ex) {
272272
std::vector<int64_t> dims;
273273
dims.reserve(ctx.src->ndim() + 1);
274274
dims.push_back(n);
275-
for (auto d : ctx.src->dims()) {
275+
for (auto d : ctx.src->sizes()) {
276276
dims.push_back(d);
277277
}
278278
ctx.dst->Resize(dims);
@@ -306,7 +306,7 @@ void NCCL<T>::ReduceScatter(const NCCLExecution& ex) {
306306
ex,
307307
[](const NCCLElement& ctx) {
308308
CAFFE_ENFORCE_NE(ctx.src, ctx.dst);
309-
const auto& srcDims = ctx.src->dims();
309+
const auto& srcDims = ctx.src->sizes();
310310
std::vector<int64_t> dstDims(srcDims.begin() + 1, srcDims.end());
311311
ctx.dst->Resize(dstDims);
312312
ctx.dst->template mutable_data<T>();

caffe2/operators/dataset_ops.cc

+2-2
Original file line numberDiff line numberDiff line change
@@ -156,7 +156,7 @@ void TreeWalker::advance() {
156156
}
157157

158158
std::vector<int64_t> TreeWalker::fieldDim(int fieldId) const {
159-
auto tensorDim = input(fieldId).dims().vec();
159+
auto tensorDim = input(fieldId).sizes().vec();
160160
tensorDim[0] = sizes_[lengthIdx(fieldId)];
161161
return tensorDim;
162162
}
@@ -185,7 +185,7 @@ void TreeWalker::gatherSizeLimits() {
185185
for (auto fieldId = 0; fieldId < cursor_.it.fields().size(); ++fieldId) {
186186
auto lengthFieldIdx = lengthIdx(fieldId);
187187
limits_[lengthFieldIdx] =
188-
std::min(limits_[lengthFieldIdx], (TOffset)input(fieldId).dims()[0]);
188+
std::min(limits_[lengthFieldIdx], (TOffset)input(fieldId).sizes()[0]);
189189
}
190190
}
191191

caffe2/operators/generate_proposals_op.cc

+1-1
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,7 @@ utils::ConstTensorView<T> GetSubTensorView(
3737
auto st_idx = ComputeStartIndex(tensor, start_dims);
3838
auto ptr = tensor.data<T>() + st_idx;
3939

40-
auto input_dims = tensor.dims();
40+
auto input_dims = tensor.sizes();
4141
std::vector<int> ret_dims(input_dims.begin() + 1, input_dims.end());
4242

4343
utils::ConstTensorView<T> ret(ptr, ret_dims);

caffe2/python/pybind_state.cc

+1-1
Original file line numberDiff line numberDiff line change
@@ -460,7 +460,7 @@ void addObjectMethods(py::module& m) {
460460
"Initialize this tensor to given shape and data type. "
461461
"Fail if the given data type cannot be accessed from python.")
462462
.def_property_readonly(
463-
"_shape", [](const TensorCPU& t) { return t.dims().vec(); })
463+
"_shape", [](const TensorCPU& t) { return t.sizes().vec(); })
464464
.def("_reshape", [](TensorCPU* t, std::vector<int64_t> dims) {
465465
t->Resize(dims);
466466
});

0 commit comments

Comments (0)