2 changes: 2 additions & 0 deletions .github/workflows/FormatCheck.yml
@@ -7,3 +7,5 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: julia-actions/julia-format@v4
with:
version: '1'
3 changes: 2 additions & 1 deletion docs/src/basics/diagnostics_api.md
@@ -70,7 +70,8 @@ cache.timer
Let's try another solver:

```@example diagnostics_example
cache = NLS.init(prob, NLS.DFSane(); show_trace = Val(true), trace_level = NLS.TraceMinimal(50));
cache = NLS.init(
prob, NLS.DFSane(); show_trace = Val(true), trace_level = NLS.TraceMinimal(50));
NLS.solve!(cache)
cache.timer
```
24 changes: 16 additions & 8 deletions docs/src/tutorials/large_systems.md
@@ -145,7 +145,8 @@ nothing # hide
import SparseConnectivityTracer

prob_brusselator_2d_autosparse = NLS.NonlinearProblem(
NLS.NonlinearFunction(brusselator_2d_loop; sparsity = SparseConnectivityTracer.TracerSparsityDetector()),
NLS.NonlinearFunction(
brusselator_2d_loop; sparsity = SparseConnectivityTracer.TracerSparsityDetector()),
u0, p; abstol = 1e-10, reltol = 1e-10
)

@@ -186,7 +187,8 @@ import ADTypes

f! = (du, u) -> brusselator_2d_loop(du, u, p)
du0 = similar(u0)
jac_sparsity = ADTypes.jacobian_sparsity(f!, du0, u0, SparseConnectivityTracer.TracerSparsityDetector())
jac_sparsity = ADTypes.jacobian_sparsity(
f!, du0, u0, SparseConnectivityTracer.TracerSparsityDetector())
```

Notice that Julia gives a nice print out of the sparsity pattern. That's neat, and would be
@@ -207,7 +209,8 @@ Now let's see how the version with sparsity compares to the version without:
```@example ill_conditioned_nlprob
BenchmarkTools.@btime NLS.solve(prob_brusselator_2d, NLS.NewtonRaphson());
BenchmarkTools.@btime NLS.solve(prob_brusselator_2d_sparse, NLS.NewtonRaphson());
BenchmarkTools.@btime NLS.solve(prob_brusselator_2d_sparse, NLS.NewtonRaphson(linsolve = LS.KLUFactorization()));
BenchmarkTools.@btime NLS.solve(
prob_brusselator_2d_sparse, NLS.NewtonRaphson(linsolve = LS.KLUFactorization()));
nothing # hide
```

@@ -223,7 +226,8 @@ Krylov method. To swap the linear solver out, we use the `linsolve` command and
GMRES linear solver.

```@example ill_conditioned_nlprob
BenchmarkTools.@btime NLS.solve(prob_brusselator_2d, NLS.NewtonRaphson(linsolve = LS.KrylovJL_GMRES()));
BenchmarkTools.@btime NLS.solve(
prob_brusselator_2d, NLS.NewtonRaphson(linsolve = LS.KrylovJL_GMRES()));
nothing # hide
```

@@ -255,7 +259,8 @@ import IncompleteLU
incompletelu(W, p = nothing) = IncompleteLU.ilu(W, τ = 50.0), LinearAlgebra.I

BenchmarkTools.@btime NLS.solve(prob_brusselator_2d_sparse,
NLS.NewtonRaphson(linsolve = LS.KrylovJL_GMRES(precs = incompletelu), concrete_jac = true)
NLS.NewtonRaphson(
linsolve = LS.KrylovJL_GMRES(precs = incompletelu), concrete_jac = true)
);
nothing # hide
```
@@ -280,7 +285,8 @@ which is more automatic. The setup is very similar to before:
import AlgebraicMultigrid

function algebraicmultigrid(W, p = nothing)
return AlgebraicMultigrid.aspreconditioner(AlgebraicMultigrid.ruge_stuben(convert(AbstractMatrix, W))),
return AlgebraicMultigrid.aspreconditioner(AlgebraicMultigrid.ruge_stuben(convert(
AbstractMatrix, W))),
LinearAlgebra.I
end

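The benchmark call that actually uses this constructor is collapsed in the diff. As a sketch, it would mirror the IncompleteLU call shown earlier; the problem name and `NLS`/`LS`/`BenchmarkTools` bindings are the ones already used in this tutorial, and the keyword choices are assumed rather than taken from the diff:

```julia
# Sketch only: pass the AMG constructor as `precs`, mirroring the
# `incompletelu` benchmark above (same problem, same imports assumed).
BenchmarkTools.@btime NLS.solve(prob_brusselator_2d_sparse,
    NLS.NewtonRaphson(
        linsolve = LS.KrylovJL_GMRES(precs = algebraicmultigrid), concrete_jac = true)
);
nothing # hide
```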
@@ -324,11 +330,13 @@ import DifferentiationInterface
import SparseConnectivityTracer

prob_brusselator_2d_exact_tracer = NLS.NonlinearProblem(
NLS.NonlinearFunction(brusselator_2d_loop; sparsity = SparseConnectivityTracer.TracerSparsityDetector()),
NLS.NonlinearFunction(
brusselator_2d_loop; sparsity = SparseConnectivityTracer.TracerSparsityDetector()),
u0, p; abstol = 1e-10, reltol = 1e-10)
prob_brusselator_2d_approx_di = NLS.NonlinearProblem(
NLS.NonlinearFunction(brusselator_2d_loop;
sparsity = DifferentiationInterface.DenseSparsityDetector(ADTypes.AutoForwardDiff(); atol = 1e-4)),
sparsity = DifferentiationInterface.DenseSparsityDetector(
ADTypes.AutoForwardDiff(); atol = 1e-4)),
u0, p; abstol = 1e-10, reltol = 1e-10)

BenchmarkTools.@btime NLS.solve(prob_brusselator_2d_exact_tracer, NLS.NewtonRaphson());
3 changes: 2 additions & 1 deletion docs/src/tutorials/nonlinear_solve_gpus.md
@@ -95,7 +95,8 @@ import AMDGPU # For if you have an AMD GPU
import Metal # For if you have a Mac M-series device and want to use the built-in GPU
import OneAPI # For if you have an Intel GPU

KernelAbstractions.@kernel function parallel_nonlinearsolve_kernel!(result, @Const(prob), @Const(alg))
KernelAbstractions.@kernel function parallel_nonlinearsolve_kernel!(
result, @Const(prob), @Const(alg))
i = @index(Global)
prob_i = SciMLBase.remake(prob; p = prob.p[i])
sol = NLS.solve(prob_i, alg)
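The code that launches this kernel is collapsed in the diff. A minimal launch sketch follows, assuming `result` is a GPU array with one slot per parameter set, `prob.p` already lives on the device, and `alg` is an immutable solver such as `NLS.SimpleNewtonRaphson()`; these variable names are illustrative, not taken from the diff:

```julia
# Minimal sketch: instantiate the kernel for whatever backend `result` lives on,
# run one nonlinear solve per parameter set, then wait for the device to finish.
backend = KernelAbstractions.get_backend(result)
kernel! = parallel_nonlinearsolve_kernel!(backend)
kernel!(result, prob, alg; ndrange = length(prob.p))
KernelAbstractions.synchronize(backend)
```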
3 changes: 2 additions & 1 deletion docs/src/tutorials/optimizing_parameterized_ode.md
@@ -47,7 +47,8 @@ end

p_init = zeros(4)

nlls_prob = NLS.NonlinearLeastSquaresProblem(loss_function, p_init, vec(reduce(hcat, sol.u)))
nlls_prob = NLS.NonlinearLeastSquaresProblem(
loss_function, p_init, vec(reduce(hcat, sol.u)))
```

Now, we can use any NLLS solver to solve this problem.
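The solve call itself is collapsed below this point; a minimal sketch of that next step, with `NLS.LevenbergMarquardt()` chosen purely as an illustrative NLLS-capable algorithm (any other least-squares solver is called the same way):

```julia
# Sketch: solve the NonlinearLeastSquaresProblem defined above.
res = NLS.solve(nlls_prob, NLS.LevenbergMarquardt(); maxiters = 1000)
res.u  # recovered parameter estimates, same shape as p_init
```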
2 changes: 1 addition & 1 deletion ext/NonlinearSolveNLSolversExt.jl
@@ -34,7 +34,7 @@ function SciMLBase.__solve(
)

fj_scalar = @closure (Jx,
x) -> begin
x) -> begin
return DifferentiationInterface.value_and_derivative(
prob.f, prep, autodiff, x, Constant(prob.p)
)
16 changes: 8 additions & 8 deletions ext/NonlinearSolvePETScExt.jl
@@ -50,8 +50,8 @@ function SciMLBase.__solve(
nf = Ref{Int}(0)

f! = @closure (cfx,
cx,
user_ctx) -> begin
cx,
user_ctx) -> begin
nf[] += 1
fx = cfx isa Ptr{Nothing} ? PETSc.unsafe_localarray(T, cfx; read = false) : cfx
x = cx isa Ptr{Nothing} ? PETSc.unsafe_localarray(T, cx; write = false) : cx
@@ -90,9 +90,9 @@ function SciMLBase.__solve(
if J_init isa AbstractSparseMatrix
PJ = PETSc.MatSeqAIJ(J_init)
jac_fn! = @closure (cx,
J,
_,
user_ctx) -> begin
J,
_,
user_ctx) -> begin
njac[] += 1
x = cx isa Ptr{Nothing} ? PETSc.unsafe_localarray(T, cx; write = false) : cx
if J isa PETSc.AbstractMat
@@ -110,9 +110,9 @@
else
PJ = PETSc.MatSeqDense(J_init)
jac_fn! = @closure (cx,
J,
_,
user_ctx) -> begin
J,
_,
user_ctx) -> begin
njac[] += 1
x = cx isa Ptr{Nothing} ? PETSc.unsafe_localarray(T, cx; write = false) : cx
jac!(J, x)
@@ -8,8 +8,8 @@ using SciMLBase: SciMLBase, IntervalNonlinearProblem
using BracketingNonlinearSolve: Bisection, Brent, Alefeld, Falsi, ITP, Ridder

const DualIntervalNonlinearProblem{T,
V,
P} = IntervalNonlinearProblem{
V,
P} = IntervalNonlinearProblem{
uType, iip, <:Union{<:Dual{T, V, P}, <:AbstractArray{<:Dual{T, V, P}}}
} where {uType, iip}

8 changes: 4 additions & 4 deletions lib/BracketingNonlinearSolve/test/rootfind_tests.jl
@@ -18,7 +18,7 @@ end

@testset for p in 1.1:0.1:100.0
@test g(p)≈sqrt(p) atol=1e-3 rtol=1e-3
@test ForwardDiff.derivative(g, p)≈1/(2*sqrt(p)) atol=1e-3 rtol=1e-3
@test ForwardDiff.derivative(g, p)≈1 / (2 * sqrt(p)) atol=1e-3 rtol=1e-3
end

t = (p) -> [sqrt(p[2] / p[1])]
@@ -30,7 +30,7 @@ end
return [sol.u]
end

@test g2(p)≈[sqrt(p[2]/p[1])] atol=1e-3 rtol=1e-3
@test g2(p)≈[sqrt(p[2] / p[1])] atol=1e-3 rtol=1e-3
@test ForwardDiff.jacobian(g2, p)≈ForwardDiff.jacobian(t, p) atol=1e-3 rtol=1e-3

probB = IntervalNonlinearProblem{false}(quadratic_f, (1.0, 2.0), 2.0)
@@ -50,8 +50,8 @@ end
end

@testitem "Tolerance Tests Interval Methods" setup=[RootfindingTestSnippet] tags=[:core] begin
prob=IntervalNonlinearProblem(quadratic_f, (1.0, 20.0), 2.0)
ϵ=eps(Float64) # least possible tol for all methods
prob = IntervalNonlinearProblem(quadratic_f, (1.0, 20.0), 2.0)
ϵ = eps(Float64) # least possible tol for all methods

@testset for alg in (Bisection(), Falsi(), ITP(), Muller(), nothing)
@testset for abstol in [0.1, 0.01, 0.001, 0.0001, 1e-5, 1e-6]
2 changes: 1 addition & 1 deletion lib/NonlinearSolveBase/src/NonlinearSolveBase.jl
@@ -71,7 +71,7 @@ include("forward_diff.jl")
@compat(public, (construct_jacobian_cache,))
@compat(public,
(assert_extension_supported_termination_condition,
construct_extension_function_wrapper, construct_extension_jac))
construct_extension_function_wrapper, construct_extension_jac))

export TraceMinimal, TraceWithJacobianConditionNumber, TraceAll

12 changes: 6 additions & 6 deletions lib/NonlinearSolveBase/src/autodiff.jl
@@ -119,7 +119,7 @@ function nlls_generate_vjp_function(prob::NonlinearLeastSquaresProblem, sol, uu)
if SciMLBase.has_vjp(prob.f)
if SciMLBase.isinplace(prob)
return @closure (
du, u, p) -> begin
du, u, p) -> begin
resid = Utils.safe_similar(du, length(sol.resid))
prob.f(resid, u, p)
prob.f.vjp(du, resid, u, p)
@@ -128,15 +128,15 @@ function nlls_generate_vjp_function(prob::NonlinearLeastSquaresProblem, sol, uu)
end
else
return @closure (
u, p) -> begin
u, p) -> begin
resid = prob.f(u, p)
return reshape(2 .* prob.f.vjp(resid, u, p), size(u))
end
end
elseif SciMLBase.has_jac(prob.f)
if SciMLBase.isinplace(prob)
return @closure (
du, u, p) -> begin
du, u, p) -> begin
J = Utils.safe_similar(du, length(sol.resid), length(u))
prob.f.jac(J, u, p)
resid = Utils.safe_similar(du, length(sol.resid))
@@ -146,7 +146,7 @@ function nlls_generate_vjp_function(prob::NonlinearLeastSquaresProblem, sol, uu)
end
else
return @closure (u,
p) -> begin
p) -> begin
return reshape(2 .* vec(prob.f(u, p))' * prob.f.jac(u, p), size(u))
end
end
@@ -157,7 +157,7 @@ function nlls_generate_vjp_function(prob::NonlinearLeastSquaresProblem, sol, uu)

if SciMLBase.isinplace(prob)
return @closure (
du, u, p) -> begin
du, u, p) -> begin
resid = Utils.safe_similar(du, length(sol.resid))
prob.f(resid, u, p)
# Using `Constant` lead to dual ordering issues
@@ -169,7 +169,7 @@ function nlls_generate_vjp_function(prob::NonlinearLeastSquaresProblem, sol, uu)
end
else
return @closure (u,
p) -> begin
p) -> begin
v = prob.f(u, p)
# Using `Constant` lead to dual ordering issues
res = only(DI.pullback(Base.Fix2(prob.f, p), autodiff, u, (v,)))
1 change: 0 additions & 1 deletion lib/NonlinearSolveBase/src/public.jl
@@ -87,7 +87,6 @@ for name in (:Norm, :RelNorm, :AbsNorm)
end

for norm_type in (:RelNorm, :AbsNorm), safety in (:Safe, :SafeBest)

struct_name = Symbol(norm_type, safety, :TerminationMode)
supertype_name = Symbol(:Abstract, safety, :NonlinearTerminationMode)

4 changes: 1 addition & 3 deletions lib/NonlinearSolveFirstOrder/src/NonlinearSolveFirstOrder.jl
@@ -56,7 +56,7 @@ include("forward_diff.jl")
(NonlinearFunction{false, NoSpecialize}((u, p) -> (u .^ 2 .- p)[1:1]), [0.1, 0.0]),
(
NonlinearFunction{false, NoSpecialize}((
u, p) -> vcat(u .* u .- p, u .* u .- p)),
u, p) -> vcat(u .* u .- p, u .* u .- p)),
[0.1, 0.1]
),
(
@@ -84,12 +84,10 @@ include("forward_diff.jl")
@compile_workload begin
@sync begin
for prob in nonlinear_problems, alg in nlp_algs

Threads.@spawn CommonSolve.solve(prob, alg; abstol = 1e-2, verbose = false)
end

for prob in nlls_problems, alg in nlls_algs

Threads.@spawn CommonSolve.solve(prob, alg; abstol = 1e-2, verbose = false)
end
end
3 changes: 2 additions & 1 deletion lib/NonlinearSolveFirstOrder/src/solve.jl
@@ -125,7 +125,8 @@ function SciMLBase.__init(
stats = NLStats(0, 0, 0, 0, 0), alias_u0 = false, maxiters = 1000,
abstol = nothing, reltol = nothing, maxtime = nothing,
termination_condition = nothing, internalnorm::IN = L2_NORM,
linsolve_kwargs = (;), initializealg = NonlinearSolveBase.NonlinearSolveDefaultInit(), kwargs...
linsolve_kwargs = (;), initializealg = NonlinearSolveBase.NonlinearSolveDefaultInit(),
kwargs...
) where {IN}
@set! alg.autodiff = NonlinearSolveBase.select_jacobian_autodiff(prob, alg.autodiff)
provided_jvp_autodiff = alg.jvp_autodiff !== nothing
6 changes: 2 additions & 4 deletions lib/NonlinearSolveFirstOrder/test/least_squares_tests.jl
@@ -20,7 +20,6 @@ for linsolve in [nothing, LUFactorization(), KrylovJL_GMRES(), KrylovJL_LSMR()]
vjp_autodiffs = linsolve isa KrylovJL ? [nothing, AutoZygote(), AutoFiniteDiff()] :
[nothing]
for linesearch in linesearches, vjp_autodiff in vjp_autodiffs

push!(solvers, GaussNewton(; linsolve, linesearch, vjp_autodiff))
end
end
@@ -47,11 +46,10 @@ end
@testitem "General NLLS Solvers" setup=[CoreNLLSTesting] tags=[:core] begin
using LinearAlgebra

nlls_problems=[prob_oop, prob_iip, prob_oop_vjp, prob_iip_vjp]
nlls_problems = [prob_oop, prob_iip, prob_oop_vjp, prob_iip_vjp]

for prob in nlls_problems, solver in solvers

sol=solve(prob, solver; maxiters = 10000, abstol = 1e-6)
sol = solve(prob, solver; maxiters = 10000, abstol = 1e-6)
@test SciMLBase.successful_retcode(sol)
@test norm(sol.resid, 2) < 1e-6
end