Commit fba13d9

IvanYashchuk authored and pytorchmergebot committed
Remove deprecated torch.symeig (pytorch#70988)
The time has come to remove deprecated linear algebra related functions. This PR removes `torch.symeig`.

- [x] XLA PR: pytorch/xla#4498

Pull Request resolved: pytorch#70988
Approved by: https://github.com/lezcano, https://github.com/kit1980, https://github.com/malfet
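For anyone still calling the removed API, the replacement is the one spelled out in the deprecation warning deleted below: `torch.linalg.eigvalsh` / `torch.linalg.eigh`. A minimal migration sketch, not part of this PR; the matrix `A` and the `upper` flag are illustrative:

```python
import torch

# Illustrative symmetric input; any symmetric (or Hermitian) matrix works.
A = torch.randn(4, 4)
A = A @ A.T
upper = True  # which triangle the old torch.symeig call read from

# Old (removed by this PR):
#   L, _ = torch.symeig(A, upper=upper)
#   L, V = torch.symeig(A, eigenvectors=True, upper=upper)

# New, following the removed deprecation message:
L = torch.linalg.eigvalsh(A, UPLO='U' if upper else 'L')  # eigenvalues only
L, V = torch.linalg.eigh(A, UPLO='U' if upper else 'L')   # eigenvalues and eigenvectors
```

Note that `torch.symeig` defaulted to the upper triangle (`upper=True`), while `torch.linalg.eigh` and `torch.linalg.eigvalsh` default to the lower triangle, so pass `UPLO` explicitly when porting.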
1 parent ec2461b commit fba13d9

33 files changed: +32 / -532 lines

.github/ci_commit_pins/xla.txt

+1-1
@@ -1 +1 @@
-5714e03fdd9d86b9bd9ca684631e95ea2cf65c4f
+021a1cc2173138548481342c1863fcd3f177dca5

aten/src/ATen/autocast_mode.cpp

-1
@@ -601,7 +601,6 @@ TORCH_LIBRARY_IMPL(aten, AutocastCPU, m) {
   KERNEL_CPU(_lu_with_info, fp32)
   KERNEL_CPU(qr, fp32)
   KERNEL_CPU(svd, fp32)
-  KERNEL_CPU(symeig, fp32)
   KERNEL_CPU(triangular_solve, fp32)
   KERNEL_CPU(fractional_max_pool2d, fp32)
   KERNEL_CPU(fractional_max_pool3d, fp32)

aten/src/ATen/functorch/BatchRulesLinearAlgebra.cpp

-1
@@ -595,7 +595,6 @@ LINALG_CHECK_MATRIX_BINARY_ONE_OUT(linalg_solve_triangular, linalg.solve_triangu
 
 LINALG_CHECK_MATRIX_UNARY_TWO_OUT(geqrf, geqrf);
 LINALG_CHECK_MATRIX_UNARY_ONE_OUT(logdet, logdet);
-LINALG_CHECK_MATRIX_UNARY_TWO_OUT(symeig, symeig);
 LINALG_CHECK_MATRIX_BINARY_TWO_OUT(triangular_solve, triangular_solve);
 LINALG_CHECK_MATRIX_UNARY_THREE_OUT(_linalg_det, linalg.det);
 LINALG_CHECK_MATRIX_UNARY_TWO_OUT(_linalg_eigh, linalg.eigh);

aten/src/ATen/native/BatchLinearAlgebra.cpp

-156
@@ -34,8 +34,6 @@
 #include <ATen/ops/_linalg_svd_meta.h>
 #include <ATen/ops/_linalg_svd_native.h>
 #include <ATen/ops/_lu_with_info_native.h>
-#include <ATen/ops/_symeig_helper.h>
-#include <ATen/ops/_symeig_helper_native.h>
 #include <ATen/ops/all.h>
 #include <ATen/ops/arange.h>
 #include <ATen/ops/cat.h>
@@ -110,8 +108,6 @@
 #include <ATen/ops/resize_as_native.h>
 #include <ATen/ops/sum.h>
 #include <ATen/ops/svd_native.h>
-#include <ATen/ops/symeig.h>
-#include <ATen/ops/symeig_native.h>
 #include <ATen/ops/triangular_solve_meta.h>
 #include <ATen/ops/triangular_solve_native.h>
 #include <ATen/ops/tril.h>
@@ -289,12 +285,6 @@ extern "C" void cunmqr_(char *side, char *trans, int *m, int *n, int *k, std::co
 extern "C" void dormqr_(char *side, char *trans, int *m, int *n, int *k, double *a, int *lda, double *tau, double *c, int *ldc, double *work, int *lwork, int *info);
 extern "C" void sormqr_(char *side, char *trans, int *m, int *n, int *k, float *a, int *lda, float *tau, float *c, int *ldc, float *work, int *lwork, int *info);
 
-// syev
-extern "C" void zheev_(char *jobz, char *uplo, int *n, std::complex<double> *a, int *lda, double *w, std::complex<double> *work, int *lwork, double *rwork, int *info);
-extern "C" void cheev_(char *jobz, char *uplo, int *n, std::complex<float> *a, int *lda, float *w, std::complex<float> *work, int *lwork, float *rwork, int *info);
-extern "C" void dsyev_(char *jobz, char *uplo, int *n, double *a, int *lda, double *w, double *work, int *lwork, int *info);
-extern "C" void ssyev_(char *jobz, char *uplo, int *n, float *a, int *lda, float *w, float *work, int *lwork, int *info);
-
 // syevd
 extern "C" void zheevd_(char *jobz, char *uplo, int *n, std::complex<double> *a, int *lda, double *w, std::complex<double> *work, int *lwork, double *rwork, int *lrwork, int *iwork, int *liwork, int *info);
 extern "C" void cheevd_(char *jobz, char *uplo, int *n, std::complex<float> *a, int *lda, float *w, std::complex<float> *work, int *lwork, float *rwork, int *lrwork, int *iwork, int *liwork, int *info);
@@ -910,24 +900,6 @@ template<> void lapackOrmqr<float>(char side, char trans, int m, int n, int k, f
   sormqr_(&side, &trans, &m, &n, &k, a, &lda, tau, c, &ldc, work, &lwork, info);
 }
 
-template<> void lapackSymeig<c10::complex<double>, double>(char jobz, char uplo, int n, c10::complex<double> *a, int lda, double *w, c10::complex<double> *work, int lwork, double *rwork, int *info) {
-  zheev_(&jobz, &uplo, &n, reinterpret_cast<std::complex<double>*>(a), &lda, w, reinterpret_cast<std::complex<double>*>(work), &lwork, rwork, info);
-}
-
-template<> void lapackSymeig<c10::complex<float>, float>(char jobz, char uplo, int n, c10::complex<float> *a, int lda, float *w, c10::complex<float> *work, int lwork, float *rwork, int *info) {
-  cheev_(&jobz, &uplo, &n, reinterpret_cast<std::complex<float>*>(a), &lda, w, reinterpret_cast<std::complex<float>*>(work), &lwork, rwork, info);
-}
-
-template<> void lapackSymeig<double>(char jobz, char uplo, int n, double *a, int lda, double *w, double *work, int lwork, double* rwork, int *info) {
-  (void)rwork; // unused
-  dsyev_(&jobz, &uplo, &n, a, &lda, w, work, &lwork, info);
-}
-
-template<> void lapackSymeig<float>(char jobz, char uplo, int n, float *a, int lda, float *w, float *work, int lwork, float* rwork, int *info) {
-  (void)rwork; // unused
-  ssyev_(&jobz, &uplo, &n, a, &lda, w, work, &lwork, info);
-}
-
 template<> void lapackSyevd<c10::complex<double>, double>(char jobz, char uplo, int n, c10::complex<double> *a, int lda, double *w, c10::complex<double> *work, int lwork, double *rwork, int lrwork, int *iwork, int liwork, int *info) {
   zheevd_(&jobz, &uplo, &n, reinterpret_cast<std::complex<double>*>(a), &lda, w, reinterpret_cast<std::complex<double>*>(work), &lwork, rwork, &lrwork, iwork, &liwork, info);
 }
@@ -2815,134 +2787,6 @@ Tensor& linalg_eigvalsh_out(const Tensor& A, c10::string_view uplo, Tensor& L) {
   return L;
 }
 
-// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-template <typename scalar_t>
-static void apply_symeig(Tensor& self, Tensor& eigvals, bool eigenvectors, bool upper, int* infos) {
-#if !AT_BUILD_WITH_LAPACK()
-  AT_ERROR("symeig: LAPACK library not found in compilation");
-#else
-  using value_t = typename c10::scalar_value_type<scalar_t>::type;
-  auto self_data = self.data_ptr<scalar_t>();
-  auto eigvals_data = eigvals.data_ptr<value_t>();
-  auto self_matrix_stride = matrixStride(self);
-  auto eigvals_stride = eigvals.size(-1);
-  auto batch_size = batchCount(self);
-  auto n = self.size(-1);
-
-  char uplo = upper ? 'U' : 'L';
-  char jobz = eigenvectors ? 'V' : 'N';
-
-  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-  int info;
-  // Run once, first to get the optimum work size.
-  // Since we deal with batches of matrices with the same dimensions, doing this outside
-  // the loop saves (batch_size - 1) workspace queries which would provide the same result
-  // and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
-  int lwork = -1;
-  scalar_t wkopt;
-
-  Tensor rwork;
-  value_t* rwork_data = nullptr;
-  if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
-    int64_t lrwork = std::max(int64_t(1), 3 * n - 2);
-    ScalarType dtype = toRealValueType(typeMetaToScalarType(self.dtype()));
-    rwork = at::empty({lrwork}, self.options().dtype(dtype));
-    rwork_data = rwork.data_ptr<value_t>();
-  }
-
-  lapackSymeig<scalar_t, value_t>(jobz, uplo, n, self_data, n, eigvals_data, &wkopt, lwork, rwork_data, &info);
-  lwork = std::max<int>(1, real_impl<scalar_t, value_t>(wkopt));
-  Tensor work = at::empty({lwork}, self.options());
-
-  for (const auto i : c10::irange(batch_size)) {
-    scalar_t* self_working_ptr = &self_data[i * self_matrix_stride];
-    value_t* eigvals_working_ptr = &eigvals_data[i * eigvals_stride];
-
-    // now compute the eigenvalues and the eigenvectors (optionally)
-    lapackSymeig<scalar_t, value_t>(jobz, uplo, n, self_working_ptr, n, eigvals_working_ptr, work.data_ptr<scalar_t>(), lwork, rwork_data, &info);
-    infos[i] = info;
-    if (info != 0) {
-      return;
-    }
-  }
-#endif
-}
-
-std::tuple<Tensor, Tensor> _symeig_helper_cpu(const Tensor& self, bool eigenvectors, bool upper) {
-  auto infos = at::zeros({batchCount(self)}, self.options().dtype(kInt));
-
-  auto self_sizes = self.sizes().vec();
-  self_sizes.pop_back();
-  ScalarType dtype = toRealValueType(typeMetaToScalarType(self.dtype()));
-  auto eigvals = at::empty(self_sizes, self.options().dtype(dtype));
-
-  if (self.numel() == 0) {
-    return std::tuple<Tensor, Tensor>(eigvals, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT));
-  }
-
-  auto self_working_copy = cloneBatchedColumnMajor(self);
-  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "symeig_cpu", [&]{
-    apply_symeig<scalar_t>(self_working_copy, eigvals, eigenvectors, upper, infos.data_ptr<int>());
-  });
-
-  at::_linalg_check_errors(infos, "symeig", self.dim() == 2);
-  if (eigenvectors) {
-    return std::tuple<Tensor, Tensor>(eigvals, self_working_copy);
-  } else {
-    return std::tuple<Tensor, Tensor>(eigvals, at::empty({0}, self.options()));
-  }
-}
-
-std::tuple<Tensor, Tensor> symeig(const Tensor& self, bool eigenvectors, bool upper) {
-  TORCH_WARN_ONCE(
-    "torch.symeig is deprecated in favor of torch.linalg.eigh and will be removed in a future ",
-    "PyTorch release.\n",
-    "The default behavior has changed from using the upper triangular portion of the matrix by default ",
-    "to using the lower triangular portion.\n",
-    "L, _ = torch.symeig(A, upper=upper)\n",
-    "should be replaced with\n",
-    "L = torch.linalg.eigvalsh(A, UPLO='U' if upper else 'L')\n",
-    "and\n",
-    "L, V = torch.symeig(A, eigenvectors=True)\n"
-    "should be replaced with\n",
-    "L, V = torch.linalg.eigh(A, UPLO='U' if upper else 'L')"
-  );
-  squareCheckInputs(self, "linalg.symeig");
-  return at::_symeig_helper(self, eigenvectors, upper);
-}
-
-std::tuple<Tensor&, Tensor&> symeig_out(const Tensor& self, bool eigenvectors, bool upper, Tensor& vals, Tensor& vecs) {
-  TORCH_WARN_ONCE(
-    "torch.symeig is deprecated in favor of torch.linalg.eigh and will be removed in a future ",
-    "PyTorch release.\n",
-    "The default behavior has changed from using the upper triangular portion of the matrix by default ",
-    "to using the lower triangular portion.\n",
-    "L, _ = torch.symeig(A, upper=upper)\n",
-    "should be replaced with\n",
-    "L = torch.linalg.eigvalsh(A, UPLO='U' if upper else 'L')\n",
-    "and\n",
-    "L, V = torch.symeig(A, eigenvectors=True)\n"
-    "should be replaced with\n",
-    "L, V = torch.linalg.eigh(A, UPLO='U' if upper else 'L')"
-  );
-  checkSameDevice("symeig", vals, self, "eigenvalues");
-  checkSameDevice("symeig", vecs, self, "eigenvectors");
-  checkLinalgCompatibleDtype("symeig", vecs, self, "eigenvectors");
-  // eigenvalues are always real-valued here
-  ScalarType real_dtype = toRealValueType(self.scalar_type());
-  checkLinalgCompatibleDtype("symeig", vals.scalar_type(), real_dtype, "eigenvalues");
-
-  Tensor vals_tmp, vecs_tmp;
-  std::tie(vals_tmp, vecs_tmp) = at::symeig(self, eigenvectors, upper);
-
-  at::native::resize_output(vals, vals_tmp.sizes());
-  at::native::resize_output(vecs, vecs_tmp.sizes());
-  vals.copy_(vals_tmp);
-  vecs.copy_(vecs_tmp);
-  return std::tuple<Tensor&, Tensor&>(vals, vecs);
-}
-
 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ linalg_eig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 // This function returns complex-valued eigenvectors that is obtained from LAPACK GEEV's real-valued output

aten/src/ATen/native/cuda/LinearAlgebraStubs.cpp

+1-8
@@ -32,8 +32,7 @@ struct MagmaInitializer {
 namespace at::native {
 #if defined(BUILD_LAZY_CUDA_LINALG)
 namespace {
-cuda::detail::LinalgDispatch disp = {_symeig_helper_cuda,
-                                     _cholesky_solve_helper_cuda};
+cuda::detail::LinalgDispatch disp = {_cholesky_solve_helper_cuda};
 
 at::DynamicLibrary& getTorchLinalgLibrary() {
   static at::DynamicLibrary lib("libtorch_cuda_linalg.so", nullptr, true);
@@ -174,12 +173,6 @@ Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upp
   return disp.cholesky_solve_helper(self, A, upper);
 }
 
-std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) {
-  getTorchLinalgLibrary();
-  TORCH_CHECK(disp.symeig_helper != _symeig_helper_cuda, "Can't find _symeig_helper_cuda");
-  return disp.symeig_helper(self, eigenvectors, upper);
-}
-
 #endif /*defined(BUILD_LAZY_CUDA_LINALG)*/
 
 } // namespace at::native

aten/src/ATen/native/cuda/linalg/BatchLinearAlgebra.cpp

+1-38
@@ -24,7 +24,6 @@
 #include <ATen/NativeFunctions.h>
 #else
 #include <ATen/ops/_cholesky_solve_helper_native.h>
-#include <ATen/ops/_symeig_helper_native.h>
 #include <ATen/ops/arange.h>
 #include <ATen/ops/empty.h>
 #include <ATen/ops/empty_like.h>
@@ -1873,8 +1872,6 @@ void geqrf_kernel(const Tensor& input, const Tensor& tau) {
 
 REGISTER_CUDA_DISPATCH(geqrf_stub, &geqrf_kernel);
 
-// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
 template <typename scalar_t>
 static void apply_magma_eigh(const Tensor& values, const Tensor& vectors, const Tensor& infos, bool upper, bool compute_eigenvectors) {
 #if !AT_MAGMA_ENABLED()
@@ -1949,39 +1946,6 @@ static void apply_magma_eigh(const Tensor& values, const Tensor& vectors, const
 #endif
 }
 
-std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) {
-  Tensor infos = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt).device(at::kCPU));
-
-  auto eigvals_shape = IntArrayRef(self.sizes().data(), self.dim()-1); // self.shape[:-1]
-  ScalarType real_dtype = toRealValueType(self.scalar_type());
-
-  // magmaSyevd uses a hybrid CPU-GPU algorithm to compute the eigenvalues and eigenvectors.
-  // The driver routine magma_(d/s)syev_gpu accepts a tensor on the CPU for eigvalenvalues.
-  // The data is later moved to the appropriate device.
-  // In the case where self.numel() == 0, we just return an empty tensor of
-  // dimensions on the CUDA (to avoid the unnecessary "to(at::kCUDA)")
-  auto eigvals_working_copy = self.numel() == 0
-      ? at::empty(eigvals_shape, self.options().dtype(real_dtype))
-      : at::empty(eigvals_shape, self.options().dtype(real_dtype).device(at::kCPU));
-
-  if (self.numel() == 0) {
-    return std::tuple<Tensor, Tensor>(eigvals_working_copy, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT));
-  }
-
-  auto self_working_copy = cloneBatchedColumnMajor(self);
-  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "symeig_cuda", [&]{
-    apply_magma_eigh<scalar_t>(eigvals_working_copy, self_working_copy, infos, upper, eigenvectors);
-  });
-
-  at::_linalg_check_errors(infos, "symeig", self.dim() == 2);
-
-  if (eigenvectors) {
-    return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), self_working_copy);
-  } else {
-    return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), at::empty({0}, self.options()));
-  }
-}
-
 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ linalg_eigh ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 // This is a type dispatch function for 'apply_magma_eigh'
@@ -2796,8 +2760,7 @@ REGISTER_CUDA_DISPATCH(lstsq_stub, &lstsq_kernel);
 #if defined(BUILD_LAZY_CUDA_LINALG)
 struct DispatchInitializer {
   DispatchInitializer() {
-    cuda::detail::LinalgDispatch disp{ _symeig_helper_cuda,
-        _cholesky_solve_helper_cuda};
+    cuda::detail::LinalgDispatch disp{_cholesky_solve_helper_cuda};
     cuda::detail::registerLinalgDispatch(disp);
   };
 } initializer;

aten/src/ATen/native/cuda/linalg/BatchLinearAlgebraLib.h

-1
@@ -84,7 +84,6 @@ namespace cuda { namespace detail {
 // This is only used for an old-style dispatches
 // Please do not add any new entires to it
 struct LinalgDispatch {
-  std::tuple<Tensor, Tensor> (*symeig_helper)(const Tensor& self, bool eigenvectors, bool upper);
   Tensor (*cholesky_solve_helper)(const Tensor& self, const Tensor& A, bool upper);
 };
 C10_EXPORT void registerLinalgDispatch(const LinalgDispatch&);

aten/src/ATen/native/native_functions.yaml

-16
@@ -8699,22 +8699,6 @@
 - func: linalg_vander(Tensor x, *, int? N=None) -> Tensor
   python_module: linalg
 
-- func: symeig.e(Tensor self, bool eigenvectors=False, bool upper=True, *, Tensor(a!) e, Tensor(b!) V) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
-  dispatch:
-    CompositeExplicitAutograd: symeig_out
-
-- func: symeig(Tensor self, bool eigenvectors=False, bool upper=True) -> (Tensor eigenvalues, Tensor eigenvectors)
-  variants: method, function
-  dispatch:
-    CompositeExplicitAutograd: symeig
-
-- func: _symeig_helper(Tensor self, bool eigenvectors, bool upper) -> (Tensor, Tensor)
-  variants: function
-  dispatch:
-    CPU: _symeig_helper_cpu
-    CUDA: _symeig_helper_cuda
-  autogen: _symeig_helper.out
-
 - func: svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V)
 
 - func: svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V)

docs/source/tensors.rst

-1
@@ -650,7 +650,6 @@ Tensor class reference
     Tensor.svd
     Tensor.swapaxes
     Tensor.swapdims
-    Tensor.symeig
    Tensor.t
     Tensor.t_
     Tensor.tensor_split

docs/source/torch.rst

-1
@@ -589,7 +589,6 @@ BLAS and LAPACK Operations
     svd
     svd_lowrank
     pca_lowrank
-    symeig
    lobpcg
     trapz
     trapezoid

test/cpp/lazy/test_lazy_ops.cpp

-33
@@ -1028,39 +1028,6 @@ TEST_F(LazyOpsTest, TestQR) {
   }
 }
 
-TEST_F(LazyOpsTest, TestSymEig) {
-  static const int dims[] = {4, 7};
-  for (auto m : dims) {
-    for (bool eigenvectors : {true, false}) {
-      for (bool upper : {true, false}) {
-        torch::Tensor a = torch::rand(
-            {m, m},
-            torch::TensorOptions(torch::kFloat).device(DefaultDevice()));
-        torch::Tensor sym_a = a.mm(a.t());
-        auto b = torch::symeig(sym_a, eigenvectors, upper);
-        ForEachDevice([&](const torch::Device& device) {
-          torch::Tensor lazy_a = CopyToDevice(sym_a, device);
-          auto lazy_b = torch::symeig(lazy_a, eigenvectors, upper);
-          AllClose(
-              std::get<0>(b),
-              std::get<0>(lazy_b),
-              /*rtol=*/3e-2,
-              /*atol=*/1e-2);
-          if (eigenvectors) {
-            AllClose(
-                std::get<1>(b).abs(),
-                std::get<1>(lazy_b).abs(),
-                /*rtol=*/3e-2,
-                /*atol=*/1e-2);
-          } else {
-            EXPECT_EQ(std::get<1>(b).sizes(), std::get<1>(lazy_b).sizes());
-          }
-        });
-      }
-    }
-  }
-}
-
 TEST_F(LazyOpsTest, TestCholesky) {
   static const int dims[] = {4, 7};
   for (auto m : dims) {

test/distributed/_tensor/test_dtensor_ops.py

-1
@@ -476,7 +476,6 @@ def wrapped(fn):
     xfail("stft"),
     xfail("svd"),
     xfail("svd_lowrank"),
-    xfail("symeig"),
    xfail("t"),
     xfail("take_along_dim"),
     xfail("take"),

0 commit comments