
Commit 996cc1c

malfet authored and pytorchmergebot committed
Fix Win+CUDA builds using VS2017 (pytorch#94091)
Summary: Follow-up after pytorch#93267. Generated by running:
```
for i in *.cu; do sed -i -e "s/constexpr char/CONSTEXPR_EXCEPT_WIN_CUDA char/" $i; done
```
Otherwise, attempts to compile using VS-15.9 result in:
```
D:\pytorch\aten\src\aten\native\cuda\laguerre_polynomial_l.cu(17): fatal error C1001: An internal error has occurred in the compiler.
(compiler file 'msc1.cpp', line 1518)
To work around this problem, try simplifying or changing the program near the locations listed above.
Please choose the Technical Support command on the Visual C++ Help menu, or open the Technical Support help file for more information
Internal Compiler Error in D:\VC\Tools\MSVC\14.16.27023\bin\Hostx64\x64\cl.exe. You will be prompted to send an error report to Microsoft later.
INTERNAL COMPILER ERROR in 'D:\VC\Tools\MSVC\14.16.27023\bin\Hostx64\x64\cl.exe'
    Please choose the Technical Support command on the Visual C++ Help menu, or open the Technical Support help file for more information
```

Test Plan: CI

Differential Revision: D43011140

Pull Request resolved: pytorch#94091
Approved by: https://github.com/seemethere
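For context: the substituted macro lives in c10/macros/Macros.h and degrades `constexpr` to plain `const` only when building CUDA sources with MSVC, which is what sidesteps the C1001 internal compiler error. A minimal sketch of that intent (the preprocessor guards shown here are an assumption; the real header may be more involved):

```cpp
// Hedged sketch of the CONSTEXPR_EXCEPT_WIN_CUDA fallback; the exact guards
// in c10/macros/Macros.h are assumed, not quoted verbatim.
#if defined(_MSC_VER) && defined(__CUDACC__)
// Older MSVC toolchains (e.g. VS2017 / 15.9) ICE on constexpr char arrays
// in .cu translation units, so fall back to plain const there.
#define CONSTEXPR_EXCEPT_WIN_CUDA const
#else
#define CONSTEXPR_EXCEPT_WIN_CUDA constexpr
#endif
```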
1 parent 2064fa9 commit 996cc1c


52 files changed (+82 −82 lines)

aten/src/ATen/native/cuda/AbsKernel.cu

Lines changed: 1 addition & 1 deletion
```diff
@@ -15,7 +15,7 @@ struct AbsFunctor {
   }
 };
 
-constexpr char abs_name[] = "abs_kernel";
+CONSTEXPR_EXCEPT_WIN_CUDA char abs_name[] = "abs_kernel";
 void abs_kernel_cuda(TensorIteratorBase& iter) {
   auto dtype = iter.dtype();
   if (at::isComplexType(dtype)) {
```
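These `*_name` character arrays are used as non-type template arguments on the jiterator launch path, which is why they are file-scope char arrays rather than `std::string`s. A rough, illustrative sketch of that usage (modeled on the code around this hunk; the helper name, dispatch macro, and `jitted_gpu_kernel` template arguments below are assumptions, not a quote of AbsKernel.cu):

```cpp
// Illustrative sketch only: shows why abs_name must be a static char array.
// The dispatch macro and jitted_gpu_kernel template arguments are assumed.
#if AT_USE_JITERATOR()
static const auto abs_string = jiterator_stringify(
    template <typename T> T abs_kernel(T v) { return std::abs(v); });

void abs_complex_kernel_cuda(TensorIteratorBase& iter) {  // hypothetical helper name
  AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, iter.common_dtype(), "abs_cuda", [&]() {
    // abs_name is passed as a non-type template parameter, so it needs static
    // storage and a compile-time-usable value; CONSTEXPR_EXCEPT_WIN_CUDA keeps
    // that property everywhere except the MSVC+CUDA combination that ICEs.
    jitted_gpu_kernel</*name=*/abs_name, /*return_dtype=*/scalar_t,
                      /*common_dtype=*/scalar_t, /*arity=*/1>(iter, abs_string);
  });
}
#endif
```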

aten/src/ATen/native/cuda/BinaryDivTrueKernel.cu

Lines changed: 1 addition & 1 deletion
```diff
@@ -16,7 +16,7 @@
 namespace at::native {
 namespace binary_internal {
 
-constexpr char div_name[] = "div_kernel";
+CONSTEXPR_EXCEPT_WIN_CUDA char div_name[] = "div_kernel";
 void div_true_kernel_cuda(TensorIteratorBase& iter) {
   auto common_dtype = iter.common_dtype();
   if (iter.common_dtype() == kComplexHalf) {
```

aten/src/ATen/native/cuda/BinaryLogicalOpsKernels.cu

Lines changed: 3 additions & 3 deletions
```diff
@@ -11,7 +11,7 @@
 
 namespace at::native {
 
-constexpr char logical_and_name[] = "logical_and_kernel";
+CONSTEXPR_EXCEPT_WIN_CUDA char logical_and_name[] = "logical_and_kernel";
 void logical_and_kernel_cuda(TensorIterator& iter) {
   auto dtype = iter.common_dtype();
   if (at::isComplexType(dtype)) {
@@ -48,7 +48,7 @@ void logical_and_kernel_cuda(TensorIterator& iter) {
   }
 }
 
-constexpr char logical_or_name[] = "logical_or_kernel";
+CONSTEXPR_EXCEPT_WIN_CUDA char logical_or_name[] = "logical_or_kernel";
 void logical_or_kernel_cuda(TensorIterator& iter) {
   auto dtype = iter.common_dtype();
   if (at::isComplexType(dtype)) {
@@ -84,7 +84,7 @@ void logical_or_kernel_cuda(TensorIterator& iter) {
   }
 }
 
-constexpr char logical_xor_name[] = "logical_xor_kernel";
+CONSTEXPR_EXCEPT_WIN_CUDA char logical_xor_name[] = "logical_xor_kernel";
 void logical_xor_kernel_cuda(TensorIterator& iter) {
   auto dtype = iter.common_dtype();
   if (at::isComplexType(dtype)) {
```

aten/src/ATen/native/cuda/BinaryMiscBackwardOpsKernels.cu

Lines changed: 2 additions & 2 deletions
```diff
@@ -15,7 +15,7 @@
 
 namespace at::native {
 
-constexpr char sigmoid_backward_name[] = "sigmoid_backward";
+CONSTEXPR_EXCEPT_WIN_CUDA char sigmoid_backward_name[] = "sigmoid_backward";
 void sigmoid_backward_kernel_cuda(TensorIteratorBase& iter) {
   auto dtype = iter.dtype();
   if(isComplexType(dtype)) {
@@ -86,7 +86,7 @@ void logit_backward_kernel_cuda(TensorIteratorBase& iter, const Scalar& eps_scal
   });
 }
 
-constexpr char tanh_backward_name[] = "tanh_backward";
+CONSTEXPR_EXCEPT_WIN_CUDA char tanh_backward_name[] = "tanh_backward";
 void tanh_backward_kernel_cuda(TensorIteratorBase& iter) {
   auto dtype = iter.dtype();
   if(isComplexType(dtype)) {
```

aten/src/ATen/native/cuda/BinaryMulKernel.cu

Lines changed: 1 addition & 1 deletion
```diff
@@ -18,7 +18,7 @@
 
 namespace at::native {
 
-constexpr char mul_name[] = "mul_kernel";
+CONSTEXPR_EXCEPT_WIN_CUDA char mul_name[] = "mul_kernel";
 void mul_kernel_cuda(TensorIteratorBase& iter) {
   auto common_dtype = iter.common_dtype();
   if (common_dtype == kComplexHalf) {
```

aten/src/ATen/native/cuda/GcdLcmKernel.cu

Lines changed: 2 additions & 2 deletions
```diff
@@ -14,7 +14,7 @@
 namespace at::native {
 
 // See note [Jiterator]
-constexpr char gcd_name[] = "gcd";
+CONSTEXPR_EXCEPT_WIN_CUDA char gcd_name[] = "gcd";
 void gcd_kernel_cuda(TensorIteratorBase& iter) {
 #if AT_USE_JITERATOR()
   AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), "gcd_cuda", [&]() {
@@ -33,7 +33,7 @@ void gcd_kernel_cuda(TensorIteratorBase& iter) {
 }
 
 // See note [Jiterator]
-constexpr char lcm_name[] = "lcm";
+CONSTEXPR_EXCEPT_WIN_CUDA char lcm_name[] = "lcm";
 void lcm_kernel_cuda(TensorIteratorBase& iter) {
 #if AT_USE_JITERATOR()
   AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), "lcm_cuda", [&]() {
```

aten/src/ATen/native/cuda/Lerp.cu

Lines changed: 2 additions & 2 deletions
```diff
@@ -9,7 +9,7 @@
 namespace at::native {
 namespace {
 
-constexpr char lerp_tensor_name[] = "lerp_tensor";
+CONSTEXPR_EXCEPT_WIN_CUDA char lerp_tensor_name[] = "lerp_tensor";
 void lerp_tensor_kernel(at::TensorIteratorBase& iter) {
   auto dtype = iter.common_dtype();
   if(at::isComplexType(dtype)) {
@@ -63,7 +63,7 @@ void lerp_tensor_kernel(at::TensorIteratorBase& iter) {
   }
 }
 
-constexpr char lerp_scalar_name[] = "lerp_scalar";
+CONSTEXPR_EXCEPT_WIN_CUDA char lerp_scalar_name[] = "lerp_scalar";
 void lerp_scalar_kernel(at::TensorIteratorBase& iter, const c10::Scalar& weight) {
   auto dtype = iter.common_dtype();
   if (at::isComplexType(dtype)) {
```

aten/src/ATen/native/cuda/PointwiseOpsKernel.cu

Lines changed: 2 additions & 2 deletions
```diff
@@ -11,7 +11,7 @@
 
 namespace at::native {
 
-constexpr char addcmul_name[] = "addcmul";
+CONSTEXPR_EXCEPT_WIN_CUDA char addcmul_name[] = "addcmul";
 void addcmul_cuda_kernel(TensorIteratorBase& iter, const Scalar& value) {
   auto dtype = iter.common_dtype();
   if (at::isComplexType(dtype)) {
@@ -56,7 +56,7 @@ void addcmul_cuda_kernel(TensorIteratorBase& iter, const Scalar& value) {
 }
 
 // return a + alpha * (b / static_cast<accscalar_t>(c));
-constexpr char addcdiv_name[] = "addcdiv";
+CONSTEXPR_EXCEPT_WIN_CUDA char addcdiv_name[] = "addcdiv";
 void addcdiv_cuda_kernel(TensorIteratorBase& iter, const Scalar& value) {
   auto dtype = iter.common_dtype();
   if (at::isComplexType(dtype)) {
```

aten/src/ATen/native/cuda/PowKernel.cu

Lines changed: 2 additions & 2 deletions
```diff
@@ -38,7 +38,7 @@ void pow_scalar_tensor_impl(TensorIteratorBase& iter, c10::complex<value_t> base
 }
 
 /* complex<Half> support impl */
-constexpr char pow_scalar_base_name[] = "pow_scalar_base_kernel";
+CONSTEXPR_EXCEPT_WIN_CUDA char pow_scalar_base_name[] = "pow_scalar_base_kernel";
 template <>
 void pow_scalar_tensor_impl(TensorIteratorBase& iter, c10::complex<at::Half> base) {
   using scalar_t = c10::complex<at::Half>;
@@ -68,7 +68,7 @@ namespace {
 
 #if AT_USE_JITERATOR()
 /* complex<Half> support impl */
-constexpr char pow_name[] = "pow_kernel";
+CONSTEXPR_EXCEPT_WIN_CUDA char pow_name[] = "pow_kernel";
 static const auto pow_kernel_string =
     jiterator_stringify(template <typename T> T pow_kernel(T base, T exp) {
       return std::pow(base, exp);
```

aten/src/ATen/native/cuda/ReduceSumProdKernel.cu

Lines changed: 2 additions & 2 deletions
```diff
@@ -21,7 +21,7 @@ struct sum_functor {
 };
 
 // jiterated specialization for `complex<Half>`
-constexpr char sum_name[] = "sum";
+CONSTEXPR_EXCEPT_WIN_CUDA char sum_name[] = "sum";
 template <>
 struct sum_functor<c10::complex<at::Half>> {
   // jiterator reduction fails on windows
@@ -57,7 +57,7 @@ struct nansum_functor {
   }
 };
 
-constexpr char prod_name[] = "prod";
+CONSTEXPR_EXCEPT_WIN_CUDA char prod_name[] = "prod";
 
 template <typename scalar_t, typename acc_t = scalar_t, typename out_t = scalar_t>
 struct prod_functor {
```

aten/src/ATen/native/cuda/UnaryComplexKernels.cu

Lines changed: 2 additions & 2 deletions
```diff
@@ -25,7 +25,7 @@ __host__ __device__ static inline c10::complex<T> angle_wrapper(c10::complex<T>
   return c10::complex<T>{std::arg(v), 0};
 }
 
-constexpr char angle_name[] = "angle_kernel";
+CONSTEXPR_EXCEPT_WIN_CUDA char angle_name[] = "angle_kernel";
 void angle_kernel_cuda(TensorIteratorBase& iter) {
   auto dtype = iter.common_dtype();
   if (at::isComplexType(dtype)) {
@@ -60,7 +60,7 @@ void angle_kernel_cuda(TensorIteratorBase& iter) {
 }
 
 // NB: Ignores the negative bit on tensors
-constexpr char conj_name[] = "conj_kernel";
+CONSTEXPR_EXCEPT_WIN_CUDA char conj_name[] = "conj_kernel";
 void conj_kernel_cuda(TensorIteratorBase& iter) {
   auto conj_chalf = [&] {
     using scalar_t = c10::complex<at::Half>;
```

aten/src/ATen/native/cuda/UnaryGammaKernels.cu

Lines changed: 4 additions & 4 deletions
```diff
@@ -13,7 +13,7 @@
 namespace at::native {
 
 // See note [Jiterator]
-constexpr char digamma_name[] = "digamma";
+CONSTEXPR_EXCEPT_WIN_CUDA char digamma_name[] = "digamma";
 void digamma_kernel_cuda(TensorIteratorBase& iter) {
 #if AT_USE_JITERATOR()
   AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "digamma_cuda", [&]() {
@@ -32,7 +32,7 @@ void digamma_kernel_cuda(TensorIteratorBase& iter) {
 }
 
 // See note [Jiterator]
-constexpr char trigamma_name[] = "trigamma";
+CONSTEXPR_EXCEPT_WIN_CUDA char trigamma_name[] = "trigamma";
 void trigamma_kernel_cuda(TensorIteratorBase& iter) {
 #if AT_USE_JITERATOR()
   AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "trigamma_cuda", [&]() {
@@ -50,7 +50,7 @@ void trigamma_kernel_cuda(TensorIteratorBase& iter) {
 #endif // AT_USE_JITERATOR()
 }
 
-constexpr char polygamma_name[] = "polygamma";
+CONSTEXPR_EXCEPT_WIN_CUDA char polygamma_name[] = "polygamma";
 void polygamma_kernel_cuda(TensorIteratorBase& iter, int64_t n) {
   if (n == 0) {
     digamma_kernel_cuda(iter);
@@ -83,7 +83,7 @@ void polygamma_kernel_cuda(TensorIteratorBase& iter, int64_t n) {
   }
 }
 
-constexpr char lgamma_name[] = "lgamma_kernel";
+CONSTEXPR_EXCEPT_WIN_CUDA char lgamma_name[] = "lgamma_kernel";
 void lgamma_kernel_cuda(TensorIteratorBase& iter) {
 #if AT_USE_JITERATOR()
   AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "lgamma_cuda", [&]() {
```

aten/src/ATen/native/cuda/UnaryGeometricAcosKernel.cu

Lines changed: 1 addition & 1 deletion
```diff
@@ -11,7 +11,7 @@
 
 namespace at::native {
 
-constexpr char acos_name[] = "acos";
+CONSTEXPR_EXCEPT_WIN_CUDA char acos_name[] = "acos";
 void acos_kernel_cuda(TensorIteratorBase& iter) {
   auto common_dtype = iter.common_dtype();
   if (at::isComplexType(common_dtype)) {
```

aten/src/ATen/native/cuda/UnaryGeometricAcoshKernel.cu

Lines changed: 1 addition & 1 deletion
```diff
@@ -11,7 +11,7 @@
 
 namespace at::native {
 
-constexpr char acosh_name[] = "acosh";
+CONSTEXPR_EXCEPT_WIN_CUDA char acosh_name[] = "acosh";
 void acosh_kernel_cuda(TensorIteratorBase& iter) {
   auto common_dtype = iter.common_dtype();
   if(at::isComplexType(common_dtype)) {
```

aten/src/ATen/native/cuda/UnaryGeometricAsinKernel.cu

Lines changed: 1 addition & 1 deletion
```diff
@@ -11,7 +11,7 @@
 
 namespace at::native {
 
-constexpr char asin_name[] = "asin";
+CONSTEXPR_EXCEPT_WIN_CUDA char asin_name[] = "asin";
 void asin_kernel_cuda(TensorIteratorBase& iter) {
   auto common_dtype = iter.common_dtype();
   if (at::isComplexType(common_dtype)) {
```

aten/src/ATen/native/cuda/UnaryGeometricAsinhKernel.cu

Lines changed: 1 addition & 1 deletion
```diff
@@ -11,7 +11,7 @@
 
 namespace at::native {
 
-constexpr char asinh_name[] = "asinh";
+CONSTEXPR_EXCEPT_WIN_CUDA char asinh_name[] = "asinh";
 void asinh_kernel_cuda(TensorIteratorBase& iter) {
   auto common_dtype = iter.common_dtype();
   if (at::isComplexType(common_dtype)) {
```

aten/src/ATen/native/cuda/UnaryGeometricAtanKernel.cu

Lines changed: 1 addition & 1 deletion
```diff
@@ -11,7 +11,7 @@
 
 namespace at::native {
 
-constexpr char atan_name[] = "atan";
+CONSTEXPR_EXCEPT_WIN_CUDA char atan_name[] = "atan";
 void atan_kernel_cuda(TensorIteratorBase& iter) {
   auto common_dtype = iter.common_dtype();
   if (at::isComplexType(common_dtype)) {
```

aten/src/ATen/native/cuda/UnaryGeometricAtanhKernel.cu

Lines changed: 1 addition & 1 deletion
```diff
@@ -11,7 +11,7 @@
 
 namespace at::native {
 
-constexpr char atanh_name[] = "atanh";
+CONSTEXPR_EXCEPT_WIN_CUDA char atanh_name[] = "atanh";
 void atanh_kernel_cuda(TensorIteratorBase& iter) {
   auto common_dtype = iter.common_dtype();
   if (at::isComplexType(common_dtype)) {
```

aten/src/ATen/native/cuda/UnaryGeometricCosKernel.cu

Lines changed: 1 addition & 1 deletion
```diff
@@ -11,7 +11,7 @@
 
 namespace at::native {
 
-constexpr char cos_name[] = "cos";
+CONSTEXPR_EXCEPT_WIN_CUDA char cos_name[] = "cos";
 void cos_kernel_cuda(TensorIteratorBase& iter) {
   auto common_dtype = iter.common_dtype();
   if (at::isComplexType(common_dtype)) {
```

aten/src/ATen/native/cuda/UnaryGeometricCoshKernel.cu

Lines changed: 1 addition & 1 deletion
```diff
@@ -11,7 +11,7 @@
 
 namespace at::native {
 
-constexpr char cosh_name[] = "cosh";
+CONSTEXPR_EXCEPT_WIN_CUDA char cosh_name[] = "cosh";
 void cosh_kernel_cuda(TensorIteratorBase& iter) {
   auto common_dtype = iter.common_dtype();
   if (at::isComplexType(common_dtype)) {
```

aten/src/ATen/native/cuda/UnaryGeometricSinKernel.cu

Lines changed: 1 addition & 1 deletion
```diff
@@ -11,7 +11,7 @@
 
 namespace at::native {
 
-constexpr char sin_name[] = "sin";
+CONSTEXPR_EXCEPT_WIN_CUDA char sin_name[] = "sin";
 void sin_kernel_cuda(TensorIteratorBase& iter) {
   auto common_dtype = iter.common_dtype();
   if (at::isComplexType(common_dtype)) {
```

aten/src/ATen/native/cuda/UnaryGeometricSinhKernel.cu

Lines changed: 1 addition & 1 deletion
```diff
@@ -11,7 +11,7 @@
 
 namespace at::native {
 
-constexpr char sinh_name[] = "sinh";
+CONSTEXPR_EXCEPT_WIN_CUDA char sinh_name[] = "sinh";
 void sinh_kernel_cuda(TensorIteratorBase& iter) {
   auto common_dtype = iter.common_dtype();
   if (at::isComplexType(common_dtype)) {
```

aten/src/ATen/native/cuda/UnaryGeometricTanKernel.cu

Lines changed: 1 addition & 1 deletion
```diff
@@ -11,7 +11,7 @@
 
 namespace at::native {
 
-constexpr char tan_name[] = "tan";
+CONSTEXPR_EXCEPT_WIN_CUDA char tan_name[] = "tan";
 void tan_kernel_cuda(TensorIteratorBase& iter) {
   auto common_dtype = iter.common_dtype();
   if (at::isComplexType(common_dtype)) {
```

aten/src/ATen/native/cuda/UnaryGeometricTanhKernel.cu

Lines changed: 1 addition & 1 deletion
```diff
@@ -11,7 +11,7 @@
 
 namespace at::native {
 
-constexpr char tanh_name[] = "tanh";
+CONSTEXPR_EXCEPT_WIN_CUDA char tanh_name[] = "tanh";
 void tanh_kernel_cuda(TensorIteratorBase& iter) {
   auto common_dtype = iter.common_dtype();
   if (at::isComplexType(common_dtype)) {
```

aten/src/ATen/native/cuda/UnaryLogKernels.cu

Lines changed: 3 additions & 3 deletions
```diff
@@ -12,7 +12,7 @@
 
 namespace at::native {
 
-constexpr char log_name[] = "log_kernel";
+CONSTEXPR_EXCEPT_WIN_CUDA char log_name[] = "log_kernel";
 void log_kernel_cuda(TensorIteratorBase& iter) {
   auto common_dtype = iter.common_dtype();
   if (at::isComplexType(common_dtype)) {
@@ -44,7 +44,7 @@ void log_kernel_cuda(TensorIteratorBase& iter) {
   }
 }
 
-constexpr char log10_name[] = "log10_kernel";
+CONSTEXPR_EXCEPT_WIN_CUDA char log10_name[] = "log10_kernel";
 void log10_kernel_cuda(TensorIteratorBase& iter) {
   auto common_dtype = iter.common_dtype();
   if (at::isComplexType(common_dtype)) {
@@ -81,7 +81,7 @@ void log1p_kernel_cuda(TensorIteratorBase& iter) {
   });
 }
 
-constexpr char log2_name[] = "log2_kernel";
+CONSTEXPR_EXCEPT_WIN_CUDA char log2_name[] = "log2_kernel";
 void log2_kernel_cuda(TensorIteratorBase& iter) {
   auto common_dtype = iter.common_dtype();
   if (at::isComplexType(common_dtype)) {
```

aten/src/ATen/native/cuda/UnaryOpsKernel.cu

Lines changed: 3 additions & 3 deletions
```diff
@@ -34,7 +34,7 @@ void bitwise_not_kernel_cuda(TensorIteratorBase& iter) {
   }
 }
 
-constexpr char exp_name[] = "exp_kernel";
+CONSTEXPR_EXCEPT_WIN_CUDA char exp_name[] = "exp_kernel";
 void exp_kernel_cuda(TensorIteratorBase& iter) {
   auto common_dtype = iter.common_dtype();
   if (at::isComplexType(common_dtype)) {
@@ -92,7 +92,7 @@ C10_HOST_DEVICE static inline c10::complex<T> rsqrt_wrapper(c10::complex<T> v) {
   return one / ::sqrt(v);
 }
 
-constexpr char rsqrt_name[] = "rsqrt_kernel";
+CONSTEXPR_EXCEPT_WIN_CUDA char rsqrt_name[] = "rsqrt_kernel";
 void rsqrt_kernel_cuda(TensorIteratorBase& iter) {
   auto common_dtype = iter.common_dtype();
   if (at::isComplexType(common_dtype)) {
@@ -131,7 +131,7 @@ void rsqrt_kernel_cuda(TensorIteratorBase& iter) {
   }
 }
 
-constexpr char sqrt_name[] = "sqrt_kernel";
+CONSTEXPR_EXCEPT_WIN_CUDA char sqrt_name[] = "sqrt_kernel";
 void sqrt_kernel_cuda(TensorIteratorBase& iter) {
   auto common_dtype = iter.common_dtype();
   if (at::isComplexType(common_dtype)) {
```

aten/src/ATen/native/cuda/UnarySignKernels.cu

Lines changed: 2 additions & 2 deletions
```diff
@@ -25,7 +25,7 @@ void logical_not_kernel_cuda(TensorIteratorBase& iter) {
 }
 
 // NB: Ignores the negative bit on tensors
-constexpr char neg_name[] = "neg_kernel";
+CONSTEXPR_EXCEPT_WIN_CUDA char neg_name[] = "neg_kernel";
 void neg_kernel_cuda(TensorIteratorBase& iter) {
   auto dtype = iter.dtype();
   if (at::isComplexType(dtype)) {
@@ -96,7 +96,7 @@ C10_HOST_DEVICE static inline c10::complex<T> sgn_wrapper(c10::complex<T> z) {
   }
 }
 
-constexpr char sgn_name[] = "sgn_kernel";
+CONSTEXPR_EXCEPT_WIN_CUDA char sgn_name[] = "sgn_kernel";
 void sgn_kernel_cuda(TensorIteratorBase& iter){
   auto dtype = iter.dtype();
 #if AT_USE_JITERATOR()
```
