27 changes: 18 additions & 9 deletions psydac/linalg/tests/test_kron_direct_solver.py
@@ -647,8 +647,11 @@ def get_A_fun(n=1, m=1, A0=1e04):
# assert rhs_iterative is within the tolerance close to rhs, and so is rhs_direct
rhs_iterative = M1 @ x_iterative
rhs_direct = M1 @ x_direct
assert np.linalg.norm((rhs-rhs_iterative).toarray()) < tol
assert np.linalg.norm((rhs-rhs_direct).toarray()) < tol
# Define 2-norm of vector using inner
norm2 = lambda v: np.sqrt(v.inner(v))

assert norm2(rhs-rhs_iterative) < tol
assert norm2(rhs-rhs_direct) < tol


#===============================================================================
@@ -740,8 +743,10 @@ def get_A_fun_scalar(n=1, m=1, A0=1e04):
# assert rhs_iterative is within the tolerance close to rhs, and so is rhs_direct
rhs_iterative = M0 @ x_iterative
rhs_direct = M0 @ x_direct
assert np.linalg.norm((rhs-rhs_iterative).toarray()) < tol
assert np.linalg.norm((rhs-rhs_direct).toarray()) < tol
norm2 = lambda v: np.sqrt(v.inner(v))

assert norm2(rhs-rhs_iterative) < tol
assert norm2(rhs-rhs_direct) < tol

x1 = P1(get_A_fun_vec()).coeffs
rhs = M1 @ x1
@@ -753,9 +758,11 @@ def get_A_fun_scalar(n=1, m=1, A0=1e04):
# assert rhs_iterative is within the tolerance close to rhs, and so is rhs_direct
rhs_iterative = M1 @ x_iterative
rhs_direct = M1 @ x_direct
assert np.linalg.norm((rhs-rhs_iterative).toarray()) < tol
assert np.linalg.norm((rhs-rhs_direct).toarray()) < tol

norm2 = lambda v: np.sqrt(v.inner(v))

assert norm2(rhs-rhs_iterative) < tol
assert norm2(rhs-rhs_direct) < tol

x2 = P2(get_A_fun_scalar()).coeffs
rhs = M2 @ x2

@@ -766,8 +773,10 @@ def get_A_fun_scalar(n=1, m=1, A0=1e04):
# assert rhs_iterative is within the tolerance close to rhs, and so is rhs_direct
rhs_iterative = M2 @ x_iterative
rhs_direct = M2 @ x_direct
assert np.linalg.norm((rhs-rhs_iterative).toarray()) < tol
assert np.linalg.norm((rhs-rhs_direct).toarray()) < tol
norm2 = lambda v: np.sqrt(v.inner(v))

assert norm2(rhs-rhs_iterative) < tol
assert norm2(rhs-rhs_direct) < tol

#===============================================================================

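Throughout this diff, assertions of the form `np.linalg.norm((a - b).toarray()) < tol` are replaced by a 2-norm built from the vector's `inner` method. Below is a minimal sketch of the equivalence, using a hypothetical NumPy-backed stand-in for a psydac vector (the real StencilVector/BlockVector objects provide `inner` and `toarray` themselves; the class here is only illustrative):

```python
import numpy as np

class ToyVector:
    """Hypothetical stand-in for a psydac vector exposing `inner` and `toarray`."""
    def __init__(self, data):
        self._data = np.asarray(data, dtype=float)

    def inner(self, other):
        # Euclidean inner product of the underlying coefficient arrays.
        return float(np.dot(self._data, other._data))

    def toarray(self):
        return self._data.copy()

    def __sub__(self, other):
        return ToyVector(self._data - other._data)

# The norm used in the updated tests:
norm2 = lambda v: np.sqrt(v.inner(v))

a = ToyVector([1.0, 2.0, 3.0])
b = ToyVector([1.0, 2.5, 2.0])

# Both expressions compute the same Euclidean norm of the difference.
assert np.isclose(norm2(a - b), np.linalg.norm((a - b).toarray()))
```

One plausible motivation (an assumption, not stated in the diff) is that `inner` can be evaluated on the distributed vector directly, without gathering it into a dense array via `toarray`.
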
158 changes: 83 additions & 75 deletions psydac/linalg/tests/test_linalg.py
@@ -778,64 +778,67 @@ def test_operator_evaluation(n1, n2, p1, p2):
###

### 2.1 PowerLO test
Bmat = B.toarray()
# Define 2-norm of vector using inner
norm2 = lambda v: np.sqrt(v.inner(v))

### 2.1 PowerLO test

assert_pos_def(B)
uarr = u.toarray()
b0 = ( B**0 @ u ).toarray()
b1 = ( B**1 @ u ).toarray()
b2 = ( B**2 @ u ).toarray()
assert np.array_equal(uarr, b0)
assert np.linalg.norm( np.dot(Bmat, uarr) - b1 ) < 1e-10
assert np.linalg.norm( np.dot(Bmat, np.dot(Bmat, uarr)) - b2 ) < 1e-10

bi0 = ( B_ILO**0 @ u ).toarray()
bi1 = ( B_ILO**1 @ u ).toarray()
bi2 = ( B_ILO**2 @ u ).toarray()
B_inv_mat = np.linalg.inv(Bmat)
b_inv_arr = np.matrix.flatten(B_inv_mat)
error_est = 2 + n1 * n2 * np.max( [ np.abs(b_inv_arr[i]) for i in range(len(b_inv_arr)) ] )
assert np.array_equal(uarr, bi0)
bi12 = np.linalg.solve(Bmat, uarr)
bi22 = np.linalg.solve(Bmat, bi12)
assert np.linalg.norm( (Bmat @ bi12) - uarr ) < tol
assert np.linalg.norm( (Bmat @ bi22) - bi12 ) < error_est * tol

zeros = U.zeros().toarray()
z0 = ( Z**0 @ u ).toarray()
z1 = ( Z**1 @ u ).toarray()
z2 = ( Z**2 @ u ).toarray()
assert np.array_equal(uarr, z0)
assert np.array_equal(zeros, z1)
assert np.array_equal(zeros, z2)

Smat = S.toarray()

b0 = ( B**0 @ u )
b1 = ( B**1 @ u )
b2 = ( B**2 @ u )
assert norm2(u-b0) < 1e-10
assert norm2(B @ u - b1) < 1e-10
assert norm2(B @ (B @ u) - b2) < 1e-10

bi0 = ( B_ILO**0 @ u )
bi1 = ( B_ILO**1 @ u )
bi2 = ( B_ILO**2 @ u )
error_est = 2 + n1 * n2 * 10 # Rough error estimate based on dimensions and operator norm bounds
assert norm2(u - bi0) < 1e-10
bi12 = B_ILO @ u
bi22 = B_ILO @ bi12

assert norm2((B @ bi12) - u) < tol
assert norm2((B @ bi22) - bi12 ) < error_est * tol
Comment on lines +800 to +804

We want to test the interaction of a PowerLinearOperator with an InverseLinearOperator here, so we have to check whether bi1 and bi2 have been computed correctly.

Suggested change
- bi12 = B_ILO @ u
- bi22 = B_ILO @ bi12
- assert norm2((B @ bi12) - u) < tol
- assert norm2((B @ bi22) - bi12 ) < error_est * tol
+ assert norm2((B @ bi1) - u) < tol
+ assert norm2((B @ B @ bi2) - u) < error_est * tol
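For intuition, here is a minimal dense-matrix sketch (plain NumPy, not psydac's API) of the identity the suggested asserts rely on: if `bi1` and `bi2` really are the first and second inverse powers applied to `u`, then multiplying back by `B` once or twice must recover `u`.

```python
import numpy as np

# Small SPD matrix standing in for B; solving with it plays the role of B_ILO.
B = np.array([[2.0, 1.0],
              [1.0, 3.0]])
u = np.array([1.0, -1.0])

bi1 = np.linalg.solve(B, u)    # analogue of B_ILO**1 @ u
bi2 = np.linalg.solve(B, bi1)  # analogue of B_ILO**2 @ u

# Verifying the inverse powers without forming B**-1 explicitly:
assert np.allclose(B @ bi1, u)
assert np.allclose(B @ (B @ bi2), u)
```
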


zeros = U.zeros()
z0 = ( Z**0 @ u )
z1 = ( Z**1 @ u )
z2 = ( Z**2 @ u )
assert norm2(u - z0) < 1e-10
assert norm2(zeros - z1) < 1e-10
assert norm2(zeros - z2) < 1e-10

assert_pos_def(S)
varr = v.toarray()
s0 = ( S**0 @ v ).toarray()
s1 = ( S**1 @ v ).toarray()
s2 = ( S**2 @ v ).toarray()
assert np.array_equal(varr, s0)
assert np.linalg.norm( np.dot(Smat, varr) - s1 ) < 1e-10
assert np.linalg.norm( np.dot(Smat, np.dot(Smat, varr)) - s2 ) < 1e-10

si0 = ( S_ILO**0 @ v ).toarray()
si1 = ( S_ILO**1 @ v ).toarray()
si2 = ( S_ILO**2 @ v ).toarray()
S_inv_mat = np.linalg.inv(Smat)
s_inv_arr = np.matrix.flatten(S_inv_mat)
error_est = 2 + n1 * n2 * np.max( [ np.abs(s_inv_arr[i]) for i in range(len(s_inv_arr)) ] )
assert np.array_equal(varr, si0)
si12 = np.linalg.solve(Smat, varr)
si22 = np.linalg.solve(Smat, si12)
assert np.linalg.norm( (Smat @ si12) - varr ) < tol
assert np.linalg.norm( (Smat @ si22) - si12 ) < error_est * tol

i0 = ( I**0 @ v ).toarray()
i1 = ( I**1 @ v ).toarray()
i2 = ( I**2 @ v ).toarray()
assert np.array_equal(varr, i0)
assert np.array_equal(varr, i1)
assert np.array_equal(varr, i2)
s0 = ( S**0 @ v )
s1 = ( S**1 @ v )
s2 = ( S**2 @ v )
assert norm2(v-s0) < 1e-10

assert norm2((S @ v) - s1) < 1e-10
assert norm2(S @ (S @ v ) - s2) < 1e-10


si0 = ( S_ILO**0 @ v )
si1 = ( S_ILO**1 @ v )
si2 = ( S_ILO**2 @ v )
error_est = 2 + n1 * n2 * 10 # Rough error estimate based on dimensions and operator norm bounds
assert norm2(v - si0) < 1e-10
si12 = S_ILO @ v
si22 = S_ILO @ si12

assert norm2((S @ si12) - v) < tol
assert norm2( (S @ si22) - si12 ) < error_est * tol
Comment on lines +829 to +833

Same thing here

Suggested change
- si12 = S_ILO @ v
- si22 = S_ILO @ si12
- assert norm2((S @ si12) - v) < tol
- assert norm2( (S @ si22) - si12 ) < error_est * tol
+ assert norm2((S @ si1) - v) < tol
+ assert norm2((S @ S @ si2) - v) < error_est * tol



i0 = ( I**0 @ v )
i1 = ( I**1 @ v )
i2 = ( I**2 @ v )
assert norm2(v- i0) < 1e-10
assert norm2(v - i1) <1e-10
assert norm2(v - i2) < 1e-10
Comment on lines +839 to +841

Suggested change
- assert norm2(v- i0) < 1e-10
- assert norm2(v - i1) <1e-10
- assert norm2(v - i2) < 1e-10
+ assert norm2(v - i0) < 1e-10
+ assert norm2(v - i1) < 1e-10
+ assert norm2(v - i2) < 1e-10


### 2.2 SumLO tests
Sum1 = B + B_ILO + B + B_ILO
@@ -844,16 +847,19 @@ def test_operator_evaluation(n1, n2, p1, p2):
sum2 = Sum2 @ v
u_approx = B @ (0.5*(sum1 - 2*B@u))
v_approx = S @ (0.5*(sum2 - 2*S@v))
assert np.linalg.norm( (u_approx - u).toarray() ) < tol
assert np.linalg.norm( (v_approx - v).toarray() ) < tol
# Define 2-norm of vector using inner
norm2 = lambda v: np.sqrt(v.inner(v))

assert norm2((u_approx - u)) < tol
assert norm2( (v_approx - v)) < tol
Comment on lines +853 to +854

Suggested change
- assert norm2((u_approx - u)) < tol
- assert norm2( (v_approx - v)) < tol
+ assert norm2((u_approx - u)) < tol
+ assert norm2((v_approx - v)) < tol


### 2.3 CompLO tests
C1 = B @ (-B)
C2 = S @ (-S)
c1 = ( C1 @ u ).toarray()
c2 = ( C2 @ v ).toarray()
assert np.array_equal(-c1, b2)
assert np.array_equal(-c2, s2)
c1 = ( C1 @ u )
c2 = ( C2 @ v )
assert norm2(-c1 - b2) < 1e-10
assert norm2(-c2 - s2) < 1e-10

### 2.4 Huge composition
ZV = ZeroOperator(V, V)
@@ -863,7 +869,8 @@ def test_operator_evaluation(n1, n2, p1, p2):
H4 = 2 * (S**1 @ S**0)
H5 = ZV @ I
H = H1 @ ( H2 + H3 - H4 + H5 ).T
assert np.linalg.norm( (H @ v).toarray() - v.toarray() ) < 10 * tol

assert norm2((H @ v)- v) < 10 * tol

### 2.5 InverseLO test

@@ -893,25 +900,26 @@ def test_operator_evaluation(n1, n2, p1, p2):
# Several break criteria in the LSMR algorithm require a different way to determine success
# than asserting rnorm < tol, as that bound is not guaranteed. Even though it should be?

assert np.linalg.norm( (S @ xs_cg - v).toarray() ) < tol
assert np.linalg.norm( (S @ xs_pcg - v).toarray() ) < tol
assert np.linalg.norm( (S @ xs_bicg - v).toarray() ) < tol
assert norm2((S @ xs_cg - v)) < tol
assert norm2( (S @ xs_pcg - v)) < tol
assert norm2((S @ xs_bicg - v)) < tol
Comment on lines +903 to +905

Suggested change
- assert norm2((S @ xs_cg - v)) < tol
- assert norm2( (S @ xs_pcg - v)) < tol
- assert norm2((S @ xs_bicg - v)) < tol
+ assert norm2((S @ xs_cg - v)) < tol
+ assert norm2((S @ xs_pcg - v)) < tol
+ assert norm2((S @ xs_bicg - v)) < tol

assert S_lsmr.get_success() == True
assert np.linalg.norm( (S @ xs_mr - v).toarray() ) < tol

assert np.linalg.norm( (B @ xb_cg - u).toarray() ) < tol
assert np.linalg.norm( (B @ xb_pcg - u).toarray() ) < tol
assert np.linalg.norm( (B @ xb_bicg - u).toarray() ) < tol
assert norm2((S @ xs_mr - v)) < tol
assert norm2((B @ xb_cg - u)) < tol
assert norm2((B @ xb_pcg - u)) < tol
assert norm2((B @ xb_bicg - u)) < tol
assert B_lsmr.get_success() == True
assert np.linalg.norm( (B @ xb_mr - u).toarray() ) < tol
assert norm2((B @ xb_mr - u)) < tol

#===============================================================================

def test_internal_storage():

# Create LinearOperator Z = A @ A.T @ A @ A.T @ A, where the domain and codomain of A have different dimensions.
# Prior to a fix, the operator would not have had enough preallocated storage defined.

# Define 2-norm of vector using inner
norm2 = lambda v: np.sqrt(v.inner(v))

n1=2
n2=1
p1=1
@@ -956,8 +964,8 @@ def test_internal_storage():
assert len(Z2_1.tmp_vectors) == 3
assert len(Z2_2.tmp_vectors) == 3
assert len(Z2_3.tmp_vectors) == 3
assert np.array_equal( y1_1.toarray(), y1_2.toarray() ) & np.array_equal( y1_2.toarray(), y1_3.toarray() )
assert np.array_equal( y2_1.toarray(), y2_2.toarray() ) & np.array_equal( y2_2.toarray(), y2_3.toarray() )
assert (norm2( y1_1- y1_2 ) < 1e-10) & (norm2( y1_2 - y1_3) < 1e-10)
assert (norm2( y2_1 - y2_2 ) < 1e-10) & (norm2( y2_2 - y2_3) < 1e-10)
Comment on lines +967 to +968

Suggested change
- assert (norm2( y1_1- y1_2 ) < 1e-10) & (norm2( y1_2 - y1_3) < 1e-10)
- assert (norm2( y2_1 - y2_2 ) < 1e-10) & (norm2( y2_2 - y2_3) < 1e-10)
+ assert (norm2(y1_1 - y1_2) < 1e-10) & (norm2(y1_2 - y1_3) < 1e-10)
+ assert (norm2(y2_1 - y2_2) < 1e-10) & (norm2(y2_2 - y2_3) < 1e-10)


#===============================================================================
@pytest.mark.parametrize('solver', ['cg', 'pcg', 'bicg', 'minres', 'lsmr'])
22 changes: 12 additions & 10 deletions psydac/linalg/tests/test_matrix_free.py
@@ -10,6 +10,8 @@

from psydac.linalg.tests.test_linalg import get_StencilVectorSpace, get_positive_definite_StencilMatrix, assert_pos_def

norm2 = lambda v: np.sqrt(v.inner(v))

def get_random_StencilMatrix(domain, codomain):

np.random.seed(2)
@@ -67,19 +69,19 @@ def test_fake_matrix_free(n1, n2, p1, p2):
V2 = get_StencilVectorSpace([m1, m2], [q1, q2], [P1, P2])
S = get_random_StencilMatrix(codomain=V2, domain=V1)
O = MatrixFreeLinearOperator(codomain=V2, domain=V1, dot=lambda v: S @ v)

print(f'O.domain = {O.domain}')
print(f'S.domain = {S.domain}')
print(f'V1: = {V1}')
v = get_random_StencilVector(V1)
tol = 1e-10
y = S.dot(v)
x = O.dot(v)
print(f'error = {np.linalg.norm( (x - y).toarray() )}')
assert np.linalg.norm( (x - y).toarray() ) < tol
print(f'error = {norm2(x-y)}')
assert norm2(x-y) < tol

O.dot(v, out=x)
print(f'error = {np.linalg.norm( (x - y).toarray() )}')
assert np.linalg.norm( (x - y).toarray() ) < tol
print(f'error = {norm2(x-y)}')
assert norm2(x-y) < tol

@pytest.mark.parametrize('solver', ['cg', 'pcg', 'bicg', 'minres', 'lsmr'])

@@ -108,16 +110,16 @@ def test_solvers_matrix_free(solver):
A_inv = inverse(A, solver, pc=inv_diagonal, tol=tol)
else:
A_inv = inverse(A, solver, tol=tol)

AA = A_inv._A
xx = AA.dot(b)
print(f'norm(xx) = {np.linalg.norm( xx.toarray() )}')
print(f'norm(x) = {np.linalg.norm( x.toarray() )}')
print(f'norm(xx) = {norm2(xx)}')
print(f'norm(x) = {norm2(x)}')

# Apply inverse and check
y = A_inv @ x
error = np.linalg.norm( (b - y).toarray())
assert np.linalg.norm( (b - y).toarray() ) < tol
error = norm2(b-y)
assert error < tol

#===============================================================================
# SCRIPT FUNCTIONALITY