
Commit e292ddf

cyyever authored and pytorchmergebot committed
More clang-tidy fixes (pytorch#92944)
Pull Request resolved: pytorch#92944
Approved by: https://github.com/Skylion007
1 parent 4e67332 · commit e292ddf


51 files changed (+98, -280 lines)
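
For orientation, here is a minimal sketch of the recurring clang-tidy patterns in this commit: C++17 static_assert without a message string, nullptr instead of integer-literal pointer initializers, and false instead of 0 when the target type is bool (the commit also deletes unused locals and using-declarations, which has no counterpart in the sketch). All names below are hypothetical and do not come from the PyTorch sources.

// Illustrative only; hypothetical names, not from the files touched by this commit.
#include <cstdint>
#include <optional>
#include <vector>

// C++17: static_assert no longer needs an empty message string.
static_assert(sizeof(int32_t) == 4);
static_assert(sizeof(int64_t) == 8);

bool is_unbiased_flag(std::optional<bool> unbiased) {
  // Use `false`, not `0`, when the fallback value is a bool.
  return unbiased.has_value() ? unbiased.value() : false;
}

int main() {
  // Use nullptr rather than 0 or (T*)0 to initialize pointers.
  std::vector<int*> blocks(8, nullptr);
  return (is_unbiased_flag(std::nullopt) || blocks.empty()) ? 1 : 0;
}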

aten/src/ATen/EmptyTensor.cpp (+5, -10)

@@ -242,8 +242,7 @@ TensorBase empty_cpu(
     c10::optional<Device> device_opt,
     c10::optional<bool> pin_memory_opt,
     c10::optional<c10::MemoryFormat> memory_format_opt) {
-  auto device = device_or_default(device_opt);
-  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device.type() == DeviceType::CPU);
+  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device_or_default(device_opt).type() == DeviceType::CPU);
   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided);

   auto pin_memory = pinned_memory_or_default(pin_memory_opt);
@@ -277,8 +276,7 @@ TensorBase empty_strided_cpu(
     c10::optional<Layout> layout_opt,
     c10::optional<Device> device_opt,
     c10::optional<bool> pin_memory_opt) {
-  auto device = device_or_default(device_opt);
-  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device.type() == DeviceType::CPU);
+  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device_or_default(device_opt).type() == DeviceType::CPU);
   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided);

   auto pin_memory = pinned_memory_or_default(pin_memory_opt);
@@ -335,8 +333,7 @@ TensorBase empty_meta(
     c10::optional<bool> pin_memory_opt,
     c10::optional<c10::MemoryFormat> memory_format_opt
 ) {
-  auto device = device_or_default(device_opt);
-  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device.type() == DeviceType::Meta);
+  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device_or_default(device_opt).type() == DeviceType::Meta);
   // NB: because there is no SparseMeta (yet), non-strided layout is
   // exerciseable
   TORCH_CHECK_NOT_IMPLEMENTED(
@@ -388,8 +385,7 @@ TensorBase empty_strided_meta(
     c10::optional<Layout> layout_opt,
     c10::optional<Device> device_opt,
     c10::optional<bool> pin_memory_opt) {
-  auto device = device_or_default(device_opt);
-  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device.type() == DeviceType::Meta);
+  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device_or_default(device_opt).type() == DeviceType::Meta);
   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided);

   auto dtype = dtype_or_default(dtype_opt);
@@ -424,8 +420,7 @@ TensorBase empty_strided_symint_meta(
     c10::optional<Layout> layout_opt,
     c10::optional<Device> device_opt,
     c10::optional<bool> pin_memory_opt) {
-  auto device = device_or_default(device_opt);
-  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device.type() == DeviceType::Meta);
+  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device_or_default(device_opt).type() == DeviceType::Meta);
   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(layout_or_default(layout_opt) == Layout::Strided);

   auto dtype = dtype_or_default(dtype_opt);

aten/src/ATen/FunctionalizeFallbackKernel.cpp (+1, -1)

@@ -128,7 +128,7 @@ const at::Tensor & resize__functionalization(c10::DispatchKeySet dispatchKeySet,
   // Case 1: arguments are not functional tensors, so we no-op and redispatch.
   if (!at::functionalization::impl::isFunctionalTensor(self)) {
     at::AutoDispatchSkipFunctionalize guard;
-    at::Tensor tmp_output = self_.resize_(size, memory_format);
+    self_.resize_(size, memory_format);
     return self;
   }

aten/src/ATen/PythonTorchFunctionTLS.cpp (+1, -1)

@@ -12,7 +12,7 @@ void PythonTorchFunctionTLS::push_onto_stack(std::shared_ptr<SafePyObject> mode)

 const std::shared_ptr<SafePyObject> PythonTorchFunctionTLS::pop_stack() {
   TORCH_CHECK(pythonTorchFunctionState.stack_.size() > 0, "trying to pop from empty mode stack");
-  const auto out = pythonTorchFunctionState.stack_.back();
+  auto out = pythonTorchFunctionState.stack_.back();
   pythonTorchFunctionState.stack_.pop_back();
   return out;
 }

aten/src/ATen/TensorIterator.cpp (+2, -2)

@@ -69,8 +69,8 @@ static OptionalTensorRef make_otr(const TensorBase &tensor) {
 namespace internal {

 OpaqueOptionalTensorRef::OpaqueOptionalTensorRef() {
-  static_assert(alignof(OptionalTensorRef) == alignof(TensorBase), "");
-  static_assert(sizeof(OptionalTensorRef) == sizeof(TensorBase), "");
+  static_assert(alignof(OptionalTensorRef) == alignof(TensorBase));
+  static_assert(sizeof(OptionalTensorRef) == sizeof(TensorBase));
   new (data_.data()) OptionalTensorRef();
 }

aten/src/ATen/core/VariableFallbackKernel.cpp (-5)

@@ -19,12 +19,7 @@
 // TODO This whole file should be deleted and replaced with the mechanism
 // described in https://github.com/pytorch/pytorch/issues/29548

-using c10::OperatorHandle;
 using c10::Stack;
-using c10::DispatchKey;
-using c10::DispatchKeySet;
-using c10::Dispatcher;
-using c10::KernelFunction;

 namespace {

aten/src/ATen/functorch/DynamicLayer.cpp (+2, -2)

@@ -253,10 +253,10 @@ int64_t initAndPushDynamicLayer(
   const auto& dynamicLayerStack = dynamicLayerStackAccessor();
   const auto layerId = 1 + dynamicLayerStack.size();
   DynamicLayer new_layer(transform_type, layerId, batch_size, randomness, prev_grad_mode, prev_fwd_grad_mode, functionalize_add_back_views);
-  pushDynamicLayer(std::move(new_layer));
-
   // NB: this function should be called while holding the GIL to avoid races
   new_layer.interpreter().set_is_alive(true);
+  pushDynamicLayer(std::move(new_layer));
+

   if (transform_type == TransformType::Grad) {
     TORCH_INTERNAL_ASSERT(prev_grad_mode.has_value());
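
The DynamicLayer.cpp hunk above reorders statements so that new_layer is no longer touched after being std::move'd into the stack, the pattern flagged by checks like clang-tidy's bugprone-use-after-move. Below is a minimal sketch of the corrected ordering, with hypothetical stand-ins for the functorch types.

// Illustrative only; hypothetical stand-ins, not the functorch classes.
#include <utility>
#include <vector>

struct Interpreter {
  void set_is_alive(bool alive) { alive_ = alive; }
  bool is_alive() const { return alive_; }
 private:
  bool alive_ = false;
};

struct Layer {
  Interpreter& interpreter() { return interp_; }
 private:
  Interpreter interp_;
};

static std::vector<Layer> g_stack;

void push_layer(Layer layer) { g_stack.push_back(std::move(layer)); }

void init_and_push() {
  Layer new_layer;
  // Mutate first, move last: the pre-fix code pushed (moved) new_layer onto
  // the stack and only then called set_is_alive on the moved-from object.
  new_layer.interpreter().set_is_alive(true);
  push_layer(std::move(new_layer));
}

int main() {
  init_and_push();
  return (g_stack.size() == 1 && g_stack.back().interpreter().is_alive()) ? 0 : 1;
}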

aten/src/ATen/native/Convolution.cpp (-3)

@@ -1164,9 +1164,6 @@ at::Tensor convolution_overrideable(
     const Tensor& input, const Tensor& weight, const c10::optional<Tensor>& bias_opt,
     IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation,
     bool transposed, IntArrayRef output_padding, int64_t groups) {
-  // See [Note: hacky wrapper removal for optional tensor]
-  c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt);
-
   TORCH_CHECK_NOT_IMPLEMENTED(false, "convolution_overrideable not implemented. You are likely triggering this with tensor backend other than CPU/CUDA/MKLDNN, if this is intended, please use TORCH_LIBRARY_IMPL to override this function ");
 }

aten/src/ATen/native/TensorConversions.cpp (+3, -3)

@@ -1097,7 +1097,7 @@ Tensor sparse_compressed_to_flipped(
   // performance.
   const auto batch_nnz_offset = [&]() -> Tensor {
     const auto wrapped_nnz = at::tensor({nnz}, compressed_indices.options());
-    const auto offset = wrapped_nnz
+    auto offset = wrapped_nnz
       .expand({batch_numel_nonzero})
       .cumsum(-1).sub_(wrapped_nnz)
       .reshape(batch_sizes_nonempty);
@@ -1152,7 +1152,7 @@ Tensor sparse_compressed_to_flipped(
   // To CSC/BSC inputs these indices will appear "transposed".
   const auto is_transposed_indices = layout == at::kSparseCsc || layout == at::kSparseBsc;
   const auto coo_indices_2d_transposed = [&]() -> Tensor {
-    const auto coo_indices_2d = _convert_indices_from_csr_to_coo(
+    auto coo_indices_2d = _convert_indices_from_csr_to_coo(
       compressed_indices_2d,
       plain_indices_2d,
       is_out_int32,
@@ -1415,7 +1415,7 @@ void _csr_to_block_csr_cpu_kernel(
   // value lives within them. Otherwise they're not.

   // Allocate pointers for all possible column blocks plus 1
-  std::vector<T*> blocks(n_col / C + 1, (T*)0);
+  std::vector<T*> blocks(n_col / C + 1, nullptr);

   assert(n_row % R == 0);
   assert(n_col % C == 0);

aten/src/ATen/native/mkl/SparseCsrLinearAlgebra.cpp (+1, -1)

@@ -53,7 +53,7 @@ static constexpr ScalarType TORCH_INT_TYPE = at::kInt;

 class SparseCsrMKLInterface {
  private:
-  sparse_matrix_t A = 0;
+  sparse_matrix_t A{nullptr};
   matrix_descr desc;

  public:

aten/src/ATen/native/quantized/cpu/ReduceOps.cpp (+1, -1)

@@ -200,7 +200,7 @@ inline bool is_std_inner_dim_fast_path(
   auto all_dims = std::vector<int64_t>(self.dim());
   std::iota(all_dims.begin(), all_dims.end(), 0);
   dims = dims.empty() ? all_dims : dims;
-  bool is_unbiased = unbiased.has_value() ? unbiased.value() : 0;
+  bool is_unbiased = unbiased.has_value() ? unbiased.value() : false;
   int64_t num_ele = 1;
   for (auto d : dims) {
     num_ele *= self.size(d);

aten/src/ATen/native/quantized/cpu/UpSampleNearest3d.cpp (-3)

@@ -215,9 +215,6 @@ Tensor _upsample_nearest3d_quantized_cpu(
   }
 }

-using at::native::upsample::compute_output_size;
-using at::native::upsample::get_scale_value;
-
 Tensor upsample_nearest3d_quantized_cpu(
     const Tensor& input,
     IntArrayRef osize,

aten/src/ATen/native/quantized/cpu/qnnpack/src/q8dwconv/mp8x25-sse2-per-channel.c (-3)

@@ -418,7 +418,6 @@ void pytorch_q8dwconv_ukernel_mp8x25_per_channel__sse2(
       _mm_storeu_si128((__m128i*)outacc, vacc_lo);
       outacc += 4;
       _mm_storeu_si128((__m128i*)outacc, vacc_hi);
-      outacc += 4;
     }
   }
   {
@@ -806,7 +805,6 @@ void pytorch_q8dwconv_ukernel_mp8x25_per_channel__sse2(
       _mm_storeu_si128((__m128i*)outacc, vacc_lo);
       outacc += 4;
       _mm_storeu_si128((__m128i*)outacc, vacc_hi);
-      outacc += 4;
     }
   }
   {
@@ -1043,7 +1041,6 @@ void pytorch_q8dwconv_ukernel_mp8x25_per_channel__sse2(
       vacc_lo = _mm_add_epi32(vacc_lo, _mm_loadu_si128((__m128i*)outacc));
       vacc_hi =
           _mm_add_epi32(vacc_hi, _mm_loadu_si128((__m128i*)(outacc + 4)));
-      outacc += 8;

       const __m128 vmultiplier_lo =
           _mm_loadu_ps(&quantization_params->sse2.requantization_scales[channels - c]);