@@ -17,7 +17,8 @@
 from torch.testing._internal.common_utils import \
     (TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
      TEST_WITH_ASAN, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU,
-     iter_indices, gradcheck, gradgradcheck)
+     iter_indices, gradcheck, gradgradcheck,
+     make_fullrank_matrices_with_distinct_singular_values)
 from torch.testing._internal.common_device_type import \
     (instantiate_device_type_tests, dtypes,
      onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
@@ -3213,7 +3214,8 @@ def test_cholesky_solve_out_errors_and_warnings(self, device, dtype):
     @precisionOverride({torch.float32: 2e-3, torch.complex64: 2e-3,
                         torch.float64: 1e-8, torch.complex128: 1e-8})
     def test_inverse(self, device, dtype):
-        from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
+        make_fullrank = make_fullrank_matrices_with_distinct_singular_values
+        make_arg = partial(make_fullrank, device=device, dtype=dtype)
 
         def run_test(torch_inverse, matrix, batches, n):
             matrix_inverse = torch_inverse(matrix)
@@ -3265,15 +3267,15 @@ def test_inv_ex(input, out=None):
             [[], [0], [2], [2, 1]],
             [0, 5]
         ):
-            matrices = random_fullrank_matrix_distinct_singular_value(n, *batches, dtype=dtype, device=device)
+            matrices = make_arg(*batches, n, n)
             run_test(torch_inverse, matrices, batches, n)
 
             # test non-contiguous input
             run_test(torch_inverse, matrices.mT, batches, n)
             if n > 0:
                 run_test(
                     torch_inverse,
-                    random_fullrank_matrix_distinct_singular_value(n * 2, *batches, dtype=dtype, device=device)
+                    make_arg(*batches, 2 * n, 2 * n)
                     .view(-1, n * 2, n * 2)[:, ::2, ::2].view(*batches, n, n),
                     batches, n
                 )
@@ -3321,10 +3323,11 @@ def test_inv_ex_singular(self, device, dtype):
     @precisionOverride({torch.float32: 2e-3, torch.complex64: 2e-3,
                         torch.float64: 1e-5, torch.complex128: 1e-5})
     def test_inverse_many_batches(self, device, dtype):
-        from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
+        make_fullrank = make_fullrank_matrices_with_distinct_singular_values
+        make_arg = partial(make_fullrank, device=device, dtype=dtype)
 
         def test_inverse_many_batches_helper(torch_inverse, b, n):
-            matrices = random_fullrank_matrix_distinct_singular_value(b, n, n, dtype=dtype, device=device)
+            matrices = make_arg(b, n, n)
             matrices_inverse = torch_inverse(matrices)
 
             # Compare against NumPy output
@@ -3542,10 +3545,11 @@ def run_test_singular_input(batch_dim, n):
             self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
 
     def solve_test_helper(self, A_dims, b_dims, device, dtype):
-        from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
+        make_fullrank = make_fullrank_matrices_with_distinct_singular_values
+        make_A = partial(make_fullrank, device=device, dtype=dtype)
 
         b = torch.randn(*b_dims, dtype=dtype, device=device)
-        A = random_fullrank_matrix_distinct_singular_value(*A_dims, dtype=dtype, device=device)
+        A = make_A(*A_dims)
         return b, A
 
     @skipCUDAIfNoMagma
@@ -3554,7 +3558,7 @@ def solve_test_helper(self, A_dims, b_dims, device, dtype):
     @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3})
     def test_solve(self, device, dtype):
         def run_test(n, batch, rhs):
-            A_dims = (n, *batch)
+            A_dims = (*batch, n, n)
             b_dims = (*batch, n, *rhs)
             b, A = self.solve_test_helper(A_dims, b_dims, device, dtype)
 
@@ -3600,8 +3604,10 @@ def run_test(n, batch, rhs):
     @dtypes(*floating_and_complex_types())
     @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3})
     def test_solve_batched_non_contiguous(self, device, dtype):
-        from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
-        A = random_fullrank_matrix_distinct_singular_value(2, 2, dtype=dtype, device=device).permute(1, 0, 2)
+        make_fullrank = make_fullrank_matrices_with_distinct_singular_values
+        make_A = partial(make_fullrank, device=device, dtype=dtype)
+
+        A = make_A(2, 2, 2).permute(1, 0, 2)
         b = torch.randn(2, 2, 2, dtype=dtype, device=device).permute(2, 1, 0)
         self.assertFalse(A.is_contiguous())
         self.assertFalse(b.is_contiguous())
@@ -3680,7 +3686,7 @@ def run_test_singular_input(batch_dim, n):
     @dtypes(*floating_and_complex_types())
     def test_old_solve(self, device, dtype):
         for (k, n) in zip([2, 3, 5], [3, 5, 7]):
-            b, A = self.solve_test_helper((n,), (n, k), device, dtype)
+            b, A = self.solve_test_helper((n, n), (n, k), device, dtype)
             x = torch.solve(b, A)[0]
             self.assertEqual(b, np.matmul(A.cpu(), x.cpu()))
 
@@ -3700,15 +3706,18 @@ def solve_batch_helper(A_dims, b_dims):
             self.assertEqual(b, Ax)
 
         for batchsize in [1, 3, 4]:
-            solve_batch_helper((5, batchsize), (batchsize, 5, 10))
+            solve_batch_helper((batchsize, 5, 5), (batchsize, 5, 10))
 
     @skipCUDAIfNoMagma
     @skipCPUIfNoLapack
     @dtypes(*floating_and_complex_types())
     def test_old_solve_batched_non_contiguous(self, device, dtype):
         from numpy.linalg import solve
-        from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
-        A = random_fullrank_matrix_distinct_singular_value(2, 2, dtype=dtype, device=device).permute(1, 0, 2)
+
+        make_fullrank = make_fullrank_matrices_with_distinct_singular_values
+        make_A = partial(make_fullrank, device=device, dtype=dtype)
+
+        A = make_A(2, 2, 2).permute(1, 0, 2)
         b = torch.randn(2, 2, 2, dtype=dtype, device=device).permute(2, 1, 0)
         x, _ = torch.solve(b, A)
         x_exp = solve(A.cpu().numpy(), b.cpu().numpy())
@@ -3719,7 +3728,7 @@ def test_old_solve_batched_non_contiguous(self, device, dtype):
     @skipCPUIfNoLapack
     @dtypes(*floating_and_complex_types())
     def test_old_solve_batched_many_batches(self, device, dtype):
-        for A_dims, b_dims in zip([(5, 256, 256), (3, )], [(5, 1), (512, 512, 3, 1)]):
+        for A_dims, b_dims in zip([(256, 256, 5, 5), (3, 3)], [(5, 1), (512, 512, 3, 1)]):
             b, A = self.solve_test_helper(A_dims, b_dims, device, dtype)
             x, _ = torch.solve(b, A)
             Ax = torch.matmul(A, x)
@@ -3734,7 +3743,7 @@ def test_old_solve_batched_broadcasting(self, device, dtype):
         def run_test(A_dims, b_dims):
             A_matrix_size = A_dims[-1]
             A_batch_dims = A_dims[:-2]
-            b, A = self.solve_test_helper((A_matrix_size,) + A_batch_dims, b_dims, device, dtype)
+            b, A = self.solve_test_helper(A_batch_dims + (A_matrix_size, A_matrix_size), b_dims, device, dtype)
             x, _ = torch.solve(b, A)
             x_exp = solve(A.cpu().numpy(), b.cpu().numpy())
             self.assertEqual(x, x_exp)
@@ -4196,26 +4205,27 @@ def run_test_atol(shape0, shape1, batch):
     @skipCPUIfNoLapack
     @dtypes(torch.float64)
     def test_matrix_rank_atol_rtol(self, device, dtype):
-        from torch.testing._internal.common_utils import make_fullrank_matrices_with_distinct_singular_values
+        make_fullrank = make_fullrank_matrices_with_distinct_singular_values
+        make_arg = partial(make_fullrank, device=device, dtype=dtype)
 
-        # creates a matrix with singular values arange(1/(n+1), 1, 1/(n+1)) and rank=n
+        # creates a matrix with rank=n and singular values in range [2/3, 3/2]
+        # the singular values are 1 + 1/2, 1 - 1/3, 1 + 1/4, 1 - 1/5, ...
         n = 9
-        a = make_fullrank_matrices_with_distinct_singular_values(n, n, dtype=dtype, device=device)
+        a = make_arg(n, n)
 
         # test float and tensor variants
-        for tol_value in [0.51, torch.tensor(0.51, device=device)]:
-            # using rtol (relative tolerance) takes into account the largest singular value (0.9 in this case)
+        for tol_value in [0.81, torch.tensor(0.81, device=device)]:
+            # using rtol (relative tolerance) takes into account the largest singular value (1.5 in this case)
             result = torch.linalg.matrix_rank(a, rtol=tol_value)
-            self.assertEqual(result, 5)  # there are 5 singular values above 0.9 * 0.51 = 0.459
+            self.assertEqual(result, 2)  # there are 2 singular values above 1.5 * 0.81 = 1.215
 
             # atol is used directly to compare with singular values
             result = torch.linalg.matrix_rank(a, atol=tol_value)
-            self.assertEqual(result, 4)  # there are 4 singular values above 0.51
+            self.assertEqual(result, 7)  # there are 7 singular values above 0.81
 
             # when both are specified the maximum tolerance is used
             result = torch.linalg.matrix_rank(a, atol=tol_value, rtol=tol_value)
-            self.assertEqual(result, 4)  # there are 4 singular values above max(0.51, 0.9*0.51)
-
+            self.assertEqual(result, 2)  # there are 2 singular values above max(0.81, 1.5*0.81)
 
     @skipCUDAIfNoMagma
     @skipCPUIfNoLapack
@@ -6832,7 +6842,8 @@ def test_solve_methods_arg_device(self, device):
     @skipCPUIfNoLapack
     @dtypes(*floating_and_complex_types())
     def test_pinverse(self, device, dtype):
-        from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value as fullrank
+        make_fullrank = make_fullrank_matrices_with_distinct_singular_values
+        make_arg = partial(make_fullrank, device=device, dtype=dtype)
 
         def run_test(M):
             # Testing against definition for pseudo-inverses
@@ -6857,7 +6868,7 @@ def run_test(M):
         for sizes in [(5, 5), (3, 5, 5), (3, 7, 5, 5)]:
             matsize = sizes[-1]
             batchdims = sizes[:-2]
-            M = fullrank(matsize, *batchdims, dtype=dtype, device=device)
+            M = make_arg(*batchdims, matsize, matsize)
             self.assertEqual(torch.eye(matsize, dtype=dtype, device=device).expand(sizes), M.pinverse().matmul(M),
                              atol=1e-7, rtol=0, msg='pseudo-inverse for invertible matrix')
 
@@ -6884,21 +6895,22 @@ def check(*size, noncontiguous=False):
     @skipCUDAIfNoMagmaAndNoCusolver
     @dtypes(torch.double, torch.cdouble)
     def test_matrix_power_negative(self, device, dtype):
-        from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
+        make_fullrank = make_fullrank_matrices_with_distinct_singular_values
+        make_arg = partial(make_fullrank, device=device, dtype=dtype)
 
         def check(*size):
-            t = random_fullrank_matrix_distinct_singular_value(*size, dtype=dtype, device=device)
+            t = make_arg(*size)
             for n in range(-7, 0):
                 res = torch.linalg.matrix_power(t, n)
                 ref = np.linalg.matrix_power(t.cpu().numpy(), n)
                 self.assertEqual(res.cpu(), torch.from_numpy(ref))
 
-        check(0)
-        check(5)
-        check(0, 2)
-        check(3, 0)
-        check(3, 2)
-        check(5, 2, 3)
+        check(0, 0)
+        check(5, 5)
+        check(2, 0, 0)
+        check(0, 3, 3)
+        check(2, 3, 3)
+        check(2, 3, 5, 5)
 
     @skipCUDAIfNoMagma
     @skipCPUIfNoLapack
@@ -7761,9 +7773,10 @@ def maybe_squeeze_result(l, r, result):
     @skipCPUIfNoLapack
     @dtypes(*floating_and_complex_types())
     def test_lu_solve_batched_non_contiguous(self, device, dtype):
-        from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
+        make_fullrank = make_fullrank_matrices_with_distinct_singular_values
+        make_A = partial(make_fullrank, device=device, dtype=dtype)
 
-        A = random_fullrank_matrix_distinct_singular_value(2, 2, dtype=dtype, device=device)
+        A = make_A(2, 2, 2)
         b = torch.randn(2, 2, 2, dtype=dtype, device=device)
         x_exp = np.linalg.solve(A.cpu().permute(0, 2, 1).numpy(), b.cpu().permute(2, 1, 0).numpy())
         A = A.permute(0, 2, 1)
@@ -7774,10 +7787,11 @@ def test_lu_solve_batched_non_contiguous(self, device, dtype):
         self.assertEqual(x, x_exp)
 
     def lu_solve_test_helper(self, A_dims, b_dims, pivot, device, dtype):
-        from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
+        make_fullrank = make_fullrank_matrices_with_distinct_singular_values
+        make_A = partial(make_fullrank, device=device, dtype=dtype)
 
         b = torch.randn(*b_dims, dtype=dtype, device=device)
-        A = random_fullrank_matrix_distinct_singular_value(*A_dims, dtype=dtype, device=device)
+        A = make_A(*A_dims)
         LU_data, LU_pivots, info = torch.lu(A, get_infos=True, pivot=pivot)
         self.assertEqual(info, torch.zeros_like(info))
         return b, A, LU_data, LU_pivots
@@ -7790,7 +7804,7 @@ def lu_solve_test_helper(self, A_dims, b_dims, pivot, device, dtype):
     def test_lu_solve(self, device, dtype):
         def sub_test(pivot):
             for k, n in zip([2, 3, 5], [3, 5, 7]):
-                b, A, LU_data, LU_pivots = self.lu_solve_test_helper((n,), (n, k), pivot, device, dtype)
+                b, A, LU_data, LU_pivots = self.lu_solve_test_helper((n, n), (n, k), pivot, device, dtype)
                 x = torch.lu_solve(b, LU_data, LU_pivots)
                 self.assertEqual(b, np.matmul(A.cpu(), x.cpu()))
 
@@ -7817,7 +7831,7 @@ def lu_solve_batch_test_helper(A_dims, b_dims, pivot):
                 self.assertEqual(b, Ax)
 
             for batchsize in [1, 3, 4]:
-                lu_solve_batch_test_helper((5, batchsize), (batchsize, 5, 10), pivot)
+                lu_solve_batch_test_helper((batchsize, 5, 5), (batchsize, 5, 10), pivot)
 
             # Tests tensors with 0 elements
             b = torch.randn(3, 0, 3, dtype=dtype, device=device)
@@ -7840,19 +7854,20 @@ def run_test(A_dims, b_dims):
             Ax = torch.matmul(A, x)
             self.assertEqual(Ax, b.expand_as(Ax))
 
-        run_test((5, 65536), (65536, 5, 10))
-        run_test((5, 262144), (262144, 5, 10))
+        run_test((65536, 5, 5), (65536, 5, 10))
+        run_test((262144, 5, 5), (262144, 5, 10))
 
     @skipCUDAIfNoMagma
     @skipCPUIfNoLapack
     @dtypes(*floating_and_complex_types())
     def test_lu_solve_batched_broadcasting(self, device, dtype):
-        from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value
+        make_fullrank = make_fullrank_matrices_with_distinct_singular_values
+        make_A = partial(make_fullrank, device=device, dtype=dtype)
 
         def run_test(A_dims, b_dims, pivot=True):
            A_matrix_size = A_dims[-1]
            A_batch_dims = A_dims[:-2]
-            A = random_fullrank_matrix_distinct_singular_value(A_matrix_size, *A_batch_dims, dtype=dtype, device=device)
+            A = make_A(*A_batch_dims, A_matrix_size, A_matrix_size)
             b = make_tensor(b_dims, dtype=dtype, device=device)
             x_exp = np.linalg.solve(A.cpu(), b.cpu())
             LU_data, LU_pivots = torch.lu(A, pivot=pivot)
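Every hunk above applies the same argument-convention change, summarized in this minimal sketch (not part of the diff; the 'cpu'/torch.float64 arguments are arbitrary examples chosen for illustration). The removed helper took the matrix size first followed by the batch dimensions, while make_fullrank_matrices_with_distinct_singular_values takes the full output shape (*batches, n, n) positionally plus dtype/device keywords, which is why the tests bind those keywords once with functools.partial:

import torch
from functools import partial
from torch.testing._internal.common_utils import \
    make_fullrank_matrices_with_distinct_singular_values

# Bind device/dtype once, as the updated tests do.
make_A = partial(make_fullrank_matrices_with_distinct_singular_values,
                 device='cpu', dtype=torch.float64)

# old: random_fullrank_matrix_distinct_singular_value(5, 3, dtype=..., device=...)
# new: pass the full output shape (*batches, n, n) positionally
A = make_A(3, 5, 5)  # a batch of 3 full-rank 5x5 matrices, shape (3, 5, 5)
assert A.shape == (3, 5, 5)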