diff --git a/.github/workflows/FormatCheck.yml b/.github/workflows/FormatCheck.yml
index 72dc42de6..abae25248 100644
--- a/.github/workflows/FormatCheck.yml
+++ b/.github/workflows/FormatCheck.yml
@@ -7,3 +7,5 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: julia-actions/julia-format@v4
+        with:
+          version: '1'
diff --git a/docs/src/basics/diagnostics_api.md b/docs/src/basics/diagnostics_api.md
index 716c7faeb..8d9c9ae8f 100644
--- a/docs/src/basics/diagnostics_api.md
+++ b/docs/src/basics/diagnostics_api.md
@@ -70,7 +70,8 @@ cache.timer
 Let's try for some other solver:
 
 ```@example diagnostics_example
-cache = NLS.init(prob, NLS.DFSane(); show_trace = Val(true), trace_level = NLS.TraceMinimal(50));
+cache = NLS.init(
+    prob, NLS.DFSane(); show_trace = Val(true), trace_level = NLS.TraceMinimal(50));
 NLS.solve!(cache)
 cache.timer
 ```
diff --git a/docs/src/tutorials/large_systems.md b/docs/src/tutorials/large_systems.md
index af9f3e2b6..dbea751b6 100644
--- a/docs/src/tutorials/large_systems.md
+++ b/docs/src/tutorials/large_systems.md
@@ -145,7 +145,8 @@ nothing # hide
 import SparseConnectivityTracer
 
 prob_brusselator_2d_autosparse = NLS.NonlinearProblem(
-    NLS.NonlinearFunction(brusselator_2d_loop; sparsity = SparseConnectivityTracer.TracerSparsityDetector()),
+    NLS.NonlinearFunction(
+        brusselator_2d_loop; sparsity = SparseConnectivityTracer.TracerSparsityDetector()),
     u0, p; abstol = 1e-10, reltol = 1e-10
 )
@@ -186,7 +187,8 @@ import ADTypes
 f! = (du, u) -> brusselator_2d_loop(du, u, p)
 du0 = similar(u0)
-jac_sparsity = ADTypes.jacobian_sparsity(f!, du0, u0, SparseConnectivityTracer.TracerSparsityDetector())
+jac_sparsity = ADTypes.jacobian_sparsity(
+    f!, du0, u0, SparseConnectivityTracer.TracerSparsityDetector())
 ```
 
 Notice that Julia gives a nice print out of the sparsity pattern. That's neat, and would be
@@ -207,7 +209,8 @@ Now let's see how the version with sparsity compares to the version without:
 ```@example ill_conditioned_nlprob
 BenchmarkTools.@btime NLS.solve(prob_brusselator_2d, NLS.NewtonRaphson());
 BenchmarkTools.@btime NLS.solve(prob_brusselator_2d_sparse, NLS.NewtonRaphson());
-BenchmarkTools.@btime NLS.solve(prob_brusselator_2d_sparse, NLS.NewtonRaphson(linsolve = LS.KLUFactorization()));
+BenchmarkTools.@btime NLS.solve(
+    prob_brusselator_2d_sparse, NLS.NewtonRaphson(linsolve = LS.KLUFactorization()));
 nothing # hide
 ```
@@ -223,7 +226,8 @@ Krylov method. To swap the linear solver out, we use the `linsolve` command and
 GMRES linear solver.
 
 ```@example ill_conditioned_nlprob
-BenchmarkTools.@btime NLS.solve(prob_brusselator_2d, NLS.NewtonRaphson(linsolve = LS.KrylovJL_GMRES()));
+BenchmarkTools.@btime NLS.solve(
+    prob_brusselator_2d, NLS.NewtonRaphson(linsolve = LS.KrylovJL_GMRES()));
 nothing # hide
 ```
@@ -255,7 +259,8 @@ import IncompleteLU
 incompletelu(W, p = nothing) = IncompleteLU.ilu(W, τ = 50.0), LinearAlgebra.I
 
 BenchmarkTools.@btime NLS.solve(prob_brusselator_2d_sparse,
-    NLS.NewtonRaphson(linsolve = LS.KrylovJL_GMRES(precs = incompletelu), concrete_jac = true)
+    NLS.NewtonRaphson(
+        linsolve = LS.KrylovJL_GMRES(precs = incompletelu), concrete_jac = true)
 );
 nothing # hide
 ```
@@ -280,7 +285,8 @@ which is more automatic. The setup is very similar to before:
 import AlgebraicMultigrid
 
 function algebraicmultigrid(W, p = nothing)
-    return AlgebraicMultigrid.aspreconditioner(AlgebraicMultigrid.ruge_stuben(convert(AbstractMatrix, W))),
+    return AlgebraicMultigrid.aspreconditioner(AlgebraicMultigrid.ruge_stuben(convert(
+        AbstractMatrix, W))),
     LinearAlgebra.I
 end
@@ -324,11 +330,13 @@ import DifferentiationInterface
 import SparseConnectivityTracer
 
 prob_brusselator_2d_exact_tracer = NLS.NonlinearProblem(
-    NLS.NonlinearFunction(brusselator_2d_loop; sparsity = SparseConnectivityTracer.TracerSparsityDetector()),
+    NLS.NonlinearFunction(
+        brusselator_2d_loop; sparsity = SparseConnectivityTracer.TracerSparsityDetector()),
     u0, p; abstol = 1e-10, reltol = 1e-10)
 prob_brusselator_2d_approx_di = NLS.NonlinearProblem(
     NLS.NonlinearFunction(brusselator_2d_loop;
-        sparsity = DifferentiationInterface.DenseSparsityDetector(ADTypes.AutoForwardDiff(); atol = 1e-4)),
+        sparsity = DifferentiationInterface.DenseSparsityDetector(
+            ADTypes.AutoForwardDiff(); atol = 1e-4)),
     u0, p; abstol = 1e-10, reltol = 1e-10)
 
 BenchmarkTools.@btime NLS.solve(prob_brusselator_2d_exact_tracer, NLS.NewtonRaphson());
diff --git a/docs/src/tutorials/nonlinear_solve_gpus.md b/docs/src/tutorials/nonlinear_solve_gpus.md
index 4bc9c1bd5..df2f01b82 100644
--- a/docs/src/tutorials/nonlinear_solve_gpus.md
+++ b/docs/src/tutorials/nonlinear_solve_gpus.md
@@ -95,7 +95,8 @@ import AMDGPU # For if you have an AMD GPU
 import Metal # For if you have a Mac M-series device and want to use the built-in GPU
 import OneAPI # For if you have an Intel GPU
 
-KernelAbstractions.@kernel function parallel_nonlinearsolve_kernel!(result, @Const(prob), @Const(alg))
+KernelAbstractions.@kernel function parallel_nonlinearsolve_kernel!(
+        result, @Const(prob), @Const(alg))
     i = @index(Global)
     prob_i = SciMLBase.remake(prob; p = prob.p[i])
     sol = NLS.solve(prob_i, alg)
diff --git a/docs/src/tutorials/optimizing_parameterized_ode.md b/docs/src/tutorials/optimizing_parameterized_ode.md
index 3c34a7df2..cf43c7c08 100644
--- a/docs/src/tutorials/optimizing_parameterized_ode.md
+++ b/docs/src/tutorials/optimizing_parameterized_ode.md
@@ -47,7 +47,8 @@ end
 
 p_init = zeros(4)
 
-nlls_prob = NLS.NonlinearLeastSquaresProblem(loss_function, p_init, vec(reduce(hcat, sol.u)))
+nlls_prob = NLS.NonlinearLeastSquaresProblem(
+    loss_function, p_init, vec(reduce(hcat, sol.u)))
 ```
 
 Now, we can use any NLLS solver to solve this problem.
diff --git a/ext/NonlinearSolveNLSolversExt.jl b/ext/NonlinearSolveNLSolversExt.jl
index b72ffbec3..fe1b0e433 100644
--- a/ext/NonlinearSolveNLSolversExt.jl
+++ b/ext/NonlinearSolveNLSolversExt.jl
@@ -34,7 +34,7 @@ function SciMLBase.__solve(
     )
 
     fj_scalar = @closure (Jx,
-            x) -> begin
+        x) -> begin
         return DifferentiationInterface.value_and_derivative(
             prob.f, prep, autodiff, x, Constant(prob.p)
         )
diff --git a/ext/NonlinearSolvePETScExt.jl b/ext/NonlinearSolvePETScExt.jl
index 0f8aa2bea..1210539e6 100644
--- a/ext/NonlinearSolvePETScExt.jl
+++ b/ext/NonlinearSolvePETScExt.jl
@@ -50,8 +50,8 @@ function SciMLBase.__solve(
     nf = Ref{Int}(0)
 
     f! = @closure (cfx,
-            cx,
-            user_ctx) -> begin
+        cx,
+        user_ctx) -> begin
         nf[] += 1
         fx = cfx isa Ptr{Nothing} ? PETSc.unsafe_localarray(T, cfx; read = false) : cfx
         x = cx isa Ptr{Nothing} ? PETSc.unsafe_localarray(T, cx; write = false) : cx
@@ -90,9 +90,9 @@ function SciMLBase.__solve(
     if J_init isa AbstractSparseMatrix
         PJ = PETSc.MatSeqAIJ(J_init)
         jac_fn! = @closure (cx,
-                J,
-                _,
-                user_ctx) -> begin
+            J,
+            _,
+            user_ctx) -> begin
             njac[] += 1
             x = cx isa Ptr{Nothing} ? PETSc.unsafe_localarray(T, cx; write = false) : cx
             if J isa PETSc.AbstractMat
@@ -110,9 +110,9 @@ function SciMLBase.__solve(
     else
         PJ = PETSc.MatSeqDense(J_init)
         jac_fn! = @closure (cx,
-                J,
-                _,
-                user_ctx) -> begin
+            J,
+            _,
+            user_ctx) -> begin
             njac[] += 1
             x = cx isa Ptr{Nothing} ? PETSc.unsafe_localarray(T, cx; write = false) : cx
             jac!(J, x)
diff --git a/lib/BracketingNonlinearSolve/ext/BracketingNonlinearSolveForwardDiffExt.jl b/lib/BracketingNonlinearSolve/ext/BracketingNonlinearSolveForwardDiffExt.jl
index 1d3dd326c..6c7b6b2b5 100644
--- a/lib/BracketingNonlinearSolve/ext/BracketingNonlinearSolveForwardDiffExt.jl
+++ b/lib/BracketingNonlinearSolve/ext/BracketingNonlinearSolveForwardDiffExt.jl
@@ -8,8 +8,8 @@ using SciMLBase: SciMLBase, IntervalNonlinearProblem
 using BracketingNonlinearSolve: Bisection, Brent, Alefeld, Falsi, ITP, Ridder
 
 const DualIntervalNonlinearProblem{T,
-    V,
-    P} = IntervalNonlinearProblem{
+V,
+P} = IntervalNonlinearProblem{
     uType, iip, <:Union{<:Dual{T, V, P}, <:AbstractArray{<:Dual{T, V, P}}}
 } where {uType, iip}
diff --git a/lib/BracketingNonlinearSolve/test/rootfind_tests.jl b/lib/BracketingNonlinearSolve/test/rootfind_tests.jl
index 8ccd3516e..e32c37df0 100644
--- a/lib/BracketingNonlinearSolve/test/rootfind_tests.jl
+++ b/lib/BracketingNonlinearSolve/test/rootfind_tests.jl
@@ -18,7 +18,7 @@ end
 
     @testset for p in 1.1:0.1:100.0
         @test g(p)≈sqrt(p) atol=1e-3 rtol=1e-3
-        @test ForwardDiff.derivative(g, p)≈1/(2*sqrt(p)) atol=1e-3 rtol=1e-3
+        @test ForwardDiff.derivative(g, p)≈1 / (2 * sqrt(p)) atol=1e-3 rtol=1e-3
     end
 
     t = (p) -> [sqrt(p[2] / p[1])]
@@ -30,7 +30,7 @@ end
         return [sol.u]
     end
 
-    @test g2(p)≈[sqrt(p[2]/p[1])] atol=1e-3 rtol=1e-3
+    @test g2(p)≈[sqrt(p[2] / p[1])] atol=1e-3 rtol=1e-3
     @test ForwardDiff.jacobian(g2, p)≈ForwardDiff.jacobian(t, p) atol=1e-3 rtol=1e-3
 
     probB = IntervalNonlinearProblem{false}(quadratic_f, (1.0, 2.0), 2.0)
@@ -50,8 +50,8 @@ end
 end
 
 @testitem "Tolerance Tests Interval Methods" setup=[RootfindingTestSnippet] tags=[:core] begin
-    prob=IntervalNonlinearProblem(quadratic_f, (1.0, 20.0), 2.0)
-    ϵ=eps(Float64) # least possible tol for all methods
+    prob = IntervalNonlinearProblem(quadratic_f, (1.0, 20.0), 2.0)
+    ϵ = eps(Float64) # least possible tol for all methods
 
     @testset for alg in (Bisection(), Falsi(), ITP(), Muller(), nothing)
         @testset for abstol in [0.1, 0.01, 0.001, 0.0001, 1e-5, 1e-6]
diff --git a/lib/NonlinearSolveBase/src/NonlinearSolveBase.jl b/lib/NonlinearSolveBase/src/NonlinearSolveBase.jl
index e3efc9ef5..90bd8f4d6 100644
--- a/lib/NonlinearSolveBase/src/NonlinearSolveBase.jl
+++ b/lib/NonlinearSolveBase/src/NonlinearSolveBase.jl
@@ -71,7 +71,7 @@ include("forward_diff.jl")
 
 @compat(public, (construct_jacobian_cache,))
 @compat(public, (assert_extension_supported_termination_condition,
-        construct_extension_function_wrapper, construct_extension_jac))
+    construct_extension_function_wrapper, construct_extension_jac))
 
 export TraceMinimal, TraceWithJacobianConditionNumber, TraceAll
diff --git a/lib/NonlinearSolveBase/src/autodiff.jl b/lib/NonlinearSolveBase/src/autodiff.jl
index b09992c1b..865aa18d9 100644
--- a/lib/NonlinearSolveBase/src/autodiff.jl
+++ b/lib/NonlinearSolveBase/src/autodiff.jl
@@ -119,7 +119,7 @@ function nlls_generate_vjp_function(prob::NonlinearLeastSquaresProblem, sol, uu)
     if SciMLBase.has_vjp(prob.f)
         if SciMLBase.isinplace(prob)
             return @closure (
-                    du, u, p) -> begin
+                du, u, p) -> begin
                 resid = Utils.safe_similar(du, length(sol.resid))
                 prob.f(resid, u, p)
                 prob.f.vjp(du, resid, u, p)
@@ -128,7 +128,7 @@ function nlls_generate_vjp_function(prob::NonlinearLeastSquaresProblem, sol, uu)
             end
         else
             return @closure (
-                    u, p) -> begin
+                u, p) -> begin
                 resid = prob.f(u, p)
                 return reshape(2 .* prob.f.vjp(resid, u, p), size(u))
             end
@@ -136,7 +136,7 @@ function nlls_generate_vjp_function(prob::NonlinearLeastSquaresProblem, sol, uu)
     elseif SciMLBase.has_jac(prob.f)
         if SciMLBase.isinplace(prob)
             return @closure (
-                    du, u, p) -> begin
+                du, u, p) -> begin
                 J = Utils.safe_similar(du, length(sol.resid), length(u))
                 prob.f.jac(J, u, p)
                 resid = Utils.safe_similar(du, length(sol.resid))
@@ -146,7 +146,7 @@ function nlls_generate_vjp_function(prob::NonlinearLeastSquaresProblem, sol, uu)
             end
         else
             return @closure (u,
-                    p) -> begin
+                p) -> begin
                 return reshape(2 .* vec(prob.f(u, p))' * prob.f.jac(u, p), size(u))
             end
         end
@@ -157,7 +157,7 @@ function nlls_generate_vjp_function(prob::NonlinearLeastSquaresProblem, sol, uu)
 
         if SciMLBase.isinplace(prob)
             return @closure (
-                    du, u, p) -> begin
+                du, u, p) -> begin
                 resid = Utils.safe_similar(du, length(sol.resid))
                 prob.f(resid, u, p)
                 # Using `Constant` lead to dual ordering issues
@@ -169,7 +169,7 @@ function nlls_generate_vjp_function(prob::NonlinearLeastSquaresProblem, sol, uu)
             end
         else
             return @closure (u,
-                    p) -> begin
+                p) -> begin
                 v = prob.f(u, p)
                 # Using `Constant` lead to dual ordering issues
                 res = only(DI.pullback(Base.Fix2(prob.f, p), autodiff, u, (v,)))
diff --git a/lib/NonlinearSolveBase/src/public.jl b/lib/NonlinearSolveBase/src/public.jl
index 506f37527..a9bae2a5e 100644
--- a/lib/NonlinearSolveBase/src/public.jl
+++ b/lib/NonlinearSolveBase/src/public.jl
@@ -87,7 +87,6 @@ for name in (:Norm, :RelNorm, :AbsNorm)
 end
 
 for norm_type in (:RelNorm, :AbsNorm), safety in (:Safe, :SafeBest)
-
     struct_name = Symbol(norm_type, safety, :TerminationMode)
     supertype_name = Symbol(:Abstract, safety, :NonlinearTerminationMode)
 
diff --git a/lib/NonlinearSolveFirstOrder/src/NonlinearSolveFirstOrder.jl b/lib/NonlinearSolveFirstOrder/src/NonlinearSolveFirstOrder.jl
index 57a1f0105..8eaef589c 100644
--- a/lib/NonlinearSolveFirstOrder/src/NonlinearSolveFirstOrder.jl
+++ b/lib/NonlinearSolveFirstOrder/src/NonlinearSolveFirstOrder.jl
@@ -56,7 +56,7 @@ include("forward_diff.jl")
         (NonlinearFunction{false, NoSpecialize}((u, p) -> (u .^ 2 .- p)[1:1]), [0.1, 0.0]),
         (
             NonlinearFunction{false, NoSpecialize}((
-                u, p) -> vcat(u .* u .- p, u .* u .- p)),
+            u, p) -> vcat(u .* u .- p, u .* u .- p)),
             [0.1, 0.1]
         ),
         (
@@ -84,12 +84,10 @@ include("forward_diff.jl")
     @compile_workload begin
         @sync begin
             for prob in nonlinear_problems, alg in nlp_algs
-
                 Threads.@spawn CommonSolve.solve(prob, alg; abstol = 1e-2, verbose = false)
             end
 
             for prob in nlls_problems, alg in nlls_algs
-
                 Threads.@spawn CommonSolve.solve(prob, alg; abstol = 1e-2, verbose = false)
             end
         end
diff --git a/lib/NonlinearSolveFirstOrder/src/solve.jl b/lib/NonlinearSolveFirstOrder/src/solve.jl
index a23e57ac2..9f0ea88a8 100644
--- a/lib/NonlinearSolveFirstOrder/src/solve.jl
+++ b/lib/NonlinearSolveFirstOrder/src/solve.jl
@@ -125,7 +125,8 @@ function SciMLBase.__init(
         stats = NLStats(0, 0, 0, 0, 0), alias_u0 = false, maxiters = 1000,
         abstol = nothing, reltol = nothing, maxtime = nothing,
         termination_condition = nothing, internalnorm::IN = L2_NORM,
-        linsolve_kwargs = (;), initializealg = NonlinearSolveBase.NonlinearSolveDefaultInit(), kwargs...
+        linsolve_kwargs = (;), initializealg = NonlinearSolveBase.NonlinearSolveDefaultInit(),
+        kwargs...
 ) where {IN}
     @set! alg.autodiff = NonlinearSolveBase.select_jacobian_autodiff(prob, alg.autodiff)
     provided_jvp_autodiff = alg.jvp_autodiff !== nothing
diff --git a/lib/NonlinearSolveFirstOrder/test/least_squares_tests.jl b/lib/NonlinearSolveFirstOrder/test/least_squares_tests.jl
index 7d41da88b..bf6ed48c8 100644
--- a/lib/NonlinearSolveFirstOrder/test/least_squares_tests.jl
+++ b/lib/NonlinearSolveFirstOrder/test/least_squares_tests.jl
@@ -20,7 +20,6 @@ for linsolve in [nothing, LUFactorization(), KrylovJL_GMRES(), KrylovJL_LSMR()]
     vjp_autodiffs = linsolve isa KrylovJL ? [nothing, AutoZygote(), AutoFiniteDiff()] : [nothing]
 
     for linesearch in linesearches, vjp_autodiff in vjp_autodiffs
-
         push!(solvers, GaussNewton(; linsolve, linesearch, vjp_autodiff))
     end
 end
@@ -47,11 +46,10 @@ end
 @testitem "General NLLS Solvers" setup=[CoreNLLSTesting] tags=[:core] begin
     using LinearAlgebra
 
-    nlls_problems=[prob_oop, prob_iip, prob_oop_vjp, prob_iip_vjp]
+    nlls_problems = [prob_oop, prob_iip, prob_oop_vjp, prob_iip_vjp]
 
     for prob in nlls_problems, solver in solvers
-
-        sol=solve(prob, solver; maxiters = 10000, abstol = 1e-6)
+        sol = solve(prob, solver; maxiters = 10000, abstol = 1e-6)
         @test SciMLBase.successful_retcode(sol)
         @test norm(sol.resid, 2) < 1e-6
     end
diff --git a/lib/NonlinearSolveFirstOrder/test/rootfind_tests.jl b/lib/NonlinearSolveFirstOrder/test/rootfind_tests.jl
index a1c1968fb..491987c55 100644
--- a/lib/NonlinearSolveFirstOrder/test/rootfind_tests.jl
+++ b/lib/NonlinearSolveFirstOrder/test/rootfind_tests.jl
@@ -16,10 +16,10 @@ end
         using Enzyme
     end
 
-    u0s=([1.0, 1.0], @SVector[1.0, 1.0], 1.0)
+    u0s = ([1.0, 1.0], @SVector[1.0, 1.0], 1.0)
 
     # Filter autodiff backends based on Julia version
-    autodiff_backends=[AutoForwardDiff(), AutoZygote(), AutoFiniteDiff()]
+    autodiff_backends = [AutoForwardDiff(), AutoZygote(), AutoFiniteDiff()]
     if isempty(VERSION.prerelease)
         push!(autodiff_backends, AutoEnzyme())
     end
@@ -57,8 +57,8 @@ end
                 KrylovJL_GMRES(;
                     precs = (A,
                         p = nothing) -> (
-                        Diagonal(randn!(similar(A, size(A, 1)))), LinearAlgebra.I
-                    )
+                    Diagonal(randn!(similar(A, size(A, 1)))), LinearAlgebra.I
+                )
                 )
             ),
             (Val(false), \)
@@ -83,7 +83,7 @@ end
 end
 
 @testitem "NewtonRaphson: Iterator Interface" setup=[CoreRootfindTesting] tags=[:core] begin
-    p=range(0.01, 2, length = 200)
+    p = range(0.01, 2, length = 200)
     @test nlprob_iterator_interface(quadratic_f, p, false, NewtonRaphson()) ≈ sqrt.(p)
     @test nlprob_iterator_interface(quadratic_f!, p, true, NewtonRaphson()) ≈ sqrt.(p)
 end
@@ -91,9 +91,7 @@ end
 @testitem "NewtonRaphson Termination Conditions" setup=[CoreRootfindTesting] tags=[:core] begin
     using StaticArrays: @SVector
 
-    @testset "TC: $(nameof(typeof(termination_condition)))" for termination_condition in
-                                                                TERMINATION_CONDITIONS
-
+    @testset "TC: $(nameof(typeof(termination_condition)))" for termination_condition in TERMINATION_CONDITIONS
         @testset "u0: $(typeof(u0))" for u0 in ([1.0, 1.0], 1.0, @SVector([1.0, 1.0]))
             probN = NonlinearProblem(quadratic_f, u0, 2.0)
             sol = solve(probN, NewtonRaphson(); termination_condition)
@@ -114,13 +112,13 @@ end
         using Enzyme
     end
 
-    preconditioners=[
-        (u0)->nothing,
-        u0->((args...)->(Diagonal(rand!(similar(u0))), nothing))
+    preconditioners = [
+        (u0) -> nothing,
+        u0 -> ((args...) -> (Diagonal(rand!(similar(u0))), nothing))
     ]
 
     # Filter autodiff backends based on Julia version
-    autodiff_backends=[AutoForwardDiff(), AutoZygote(), AutoFiniteDiff()]
+    autodiff_backends = [AutoForwardDiff(), AutoZygote(), AutoFiniteDiff()]
     if isempty(VERSION.prerelease)
         push!(autodiff_backends, AutoEnzyme())
     end
@@ -152,8 +150,8 @@ end
                 KrylovJL_GMRES(;
                     precs = (A,
                         p = nothing) -> (
-                        Diagonal(randn!(similar(A, size(A, 1)))), LinearAlgebra.I
-                    )
+                    Diagonal(randn!(similar(A, size(A, 1)))), LinearAlgebra.I
+                )
                 )
             ),
             (Val(false), \)
@@ -176,7 +174,7 @@ end
 end
 
 @testitem "PseudoTransient: Iterator Interface" setup=[CoreRootfindTesting] tags=[:core] begin
-    p=range(0.01, 2, length = 200)
+    p = range(0.01, 2, length = 200)
     @test nlprob_iterator_interface(
         quadratic_f, p, false, PseudoTransient(; alpha_initial = 10.0)
     ) ≈ sqrt.(p)
@@ -188,9 +186,7 @@ end
 @testitem "PseudoTransient Termination Conditions" setup=[CoreRootfindTesting] tags=[:core] begin
     using StaticArrays: @SVector
 
-    @testset "TC: $(nameof(typeof(termination_condition)))" for termination_condition in
-                                                                TERMINATION_CONDITIONS
-
+    @testset "TC: $(nameof(typeof(termination_condition)))" for termination_condition in TERMINATION_CONDITIONS
        @testset "u0: $(typeof(u0))" for u0 in ([1.0, 1.0], 1.0, @SVector([1.0, 1.0]))
            probN = NonlinearProblem(quadratic_f, u0, 2.0)
            sol = solve(probN, PseudoTransient(); termination_condition)
@@ -211,14 +207,14 @@ end
         using Enzyme
     end
 
-    radius_update_schemes=[
+    radius_update_schemes = [
         RadiusUpdateSchemes.Simple, RadiusUpdateSchemes.NocedalWright,
         RadiusUpdateSchemes.NLsolve, RadiusUpdateSchemes.Hei,
         RadiusUpdateSchemes.Yuan, RadiusUpdateSchemes.Fan, RadiusUpdateSchemes.Bastin
     ]
 
     # Filter autodiff backends based on Julia version
-    autodiff_backends=[AutoForwardDiff(), AutoZygote(), AutoFiniteDiff()]
+    autodiff_backends = [AutoForwardDiff(), AutoZygote(), AutoFiniteDiff()]
     if isempty(VERSION.prerelease)
         push!(autodiff_backends, AutoEnzyme())
     end
@@ -258,52 +254,52 @@ end
 end
 
 @testitem "TrustRegion: Iterator Interface" setup=[CoreRootfindTesting] tags=[:core] begin
-    p=range(0.01, 2, length = 200)
+    p = range(0.01, 2, length = 200)
     @test nlprob_iterator_interface(quadratic_f, p, false, TrustRegion()) ≈ sqrt.(p)
     @test nlprob_iterator_interface(quadratic_f!, p, true, TrustRegion()) ≈ sqrt.(p)
 end
 
 @testitem "TrustRegion NewtonRaphson Fails" setup=[CoreRootfindTesting] tags=[:core] begin
-    u0=[-10.0, -1.0, 1.0, 2.0, 3.0, 4.0, 10.0]
-    p=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
-    sol=solve_oop(newton_fails, u0, p; solver = TrustRegion())
+    u0 = [-10.0, -1.0, 1.0, 2.0, 3.0, 4.0, 10.0]
+    p = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+    sol = solve_oop(newton_fails, u0, p; solver = TrustRegion())
     @test SciMLBase.successful_retcode(sol)
     @test all(abs.(newton_fails(sol.u, p)) .< 1e-9)
 end
 
 @testitem "TrustRegion: Kwargs" setup=[CoreRootfindTesting] tags=[:core] begin
-    max_trust_radius=[10.0, 100.0, 1000.0]
-    initial_trust_radius=[10.0, 1.0, 0.1]
-    step_threshold=[0.0, 0.01, 0.25]
-    shrink_threshold=[0.25, 0.3, 0.5]
-    expand_threshold=[0.5, 0.8, 0.9]
-    shrink_factor=[0.1, 0.3, 0.5]
-    expand_factor=[1.5, 2.0, 3.0]
-    max_shrink_times=[10, 20, 30]
-
-    list_of_options=zip(
+    max_trust_radius = [10.0, 100.0, 1000.0]
+    initial_trust_radius = [10.0, 1.0, 0.1]
+    step_threshold = [0.0, 0.01, 0.25]
+    shrink_threshold = [0.25, 0.3, 0.5]
+    expand_threshold = [0.5, 0.8, 0.9]
+    shrink_factor = [0.1, 0.3, 0.5]
+    expand_factor = [1.5, 2.0, 3.0]
+    max_shrink_times = [10, 20, 30]
+
+    list_of_options = zip(
         max_trust_radius, initial_trust_radius, step_threshold, shrink_threshold,
         expand_threshold, shrink_factor, expand_factor, max_shrink_times
     )
     for options in list_of_options
-        alg=TrustRegion(;
+        alg = TrustRegion(;
             max_trust_radius = options[1], initial_trust_radius = options[2],
             step_threshold = options[3], shrink_threshold = options[4],
             expand_threshold = options[5], shrink_factor = options[6],
             expand_factor = options[7], max_shrink_times = options[8]
         )
 
-        sol=solve_oop(quadratic_f, [1.0, 1.0], 2.0; solver = alg)
+        sol = solve_oop(quadratic_f, [1.0, 1.0], 2.0; solver = alg)
         @test SciMLBase.successful_retcode(sol)
-        err=maximum(abs, quadratic_f(sol.u, 2.0))
+        err = maximum(abs, quadratic_f(sol.u, 2.0))
         @test err < 1e-9
     end
 end
 
 @testitem "TrustRegion OOP / IIP Consistency" setup=[CoreRootfindTesting] tags=[:core] begin
-    maxiterations=[2, 3, 4, 5]
-    u0=[1.0, 1.0]
+    maxiterations = [2, 3, 4, 5]
+    u0 = [1.0, 1.0]
     @testset for radius_update_scheme in [
         RadiusUpdateSchemes.Simple, RadiusUpdateSchemes.NocedalWright,
         RadiusUpdateSchemes.NLsolve, RadiusUpdateSchemes.Hei,
@@ -321,9 +317,7 @@ end
 @testitem "TrustRegion Termination Conditions" setup=[CoreRootfindTesting] tags=[:core] begin
     using StaticArrays: @SVector
 
-    @testset "TC: $(nameof(typeof(termination_condition)))" for termination_condition in
-                                                                TERMINATION_CONDITIONS
-
+    @testset "TC: $(nameof(typeof(termination_condition)))" for termination_condition in TERMINATION_CONDITIONS
         @testset "u0: $(typeof(u0))" for u0 in ([1.0, 1.0], 1.0, @SVector([1.0, 1.0]))
             probN = NonlinearProblem(quadratic_f, u0, 2.0)
             sol = solve(probN, TrustRegion(); termination_condition)
@@ -345,7 +339,7 @@ end
     end
 
     # Filter autodiff backends based on Julia version
-    autodiff_backends=[AutoForwardDiff(), AutoZygote(), AutoFiniteDiff()]
+    autodiff_backends = [AutoForwardDiff(), AutoZygote(), AutoFiniteDiff()]
     if isempty(VERSION.prerelease)
         push!(autodiff_backends, AutoEnzyme())
     end
@@ -390,15 +384,15 @@ end
 
 @testitem "LevenbergMarquardt NewtonRaphson Fails" setup=[CoreRootfindTesting] tags=[:core] begin
-    u0=[-10.0, -1.0, 1.0, 2.0, 3.0, 4.0, 10.0]
-    p=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
-    sol=solve_oop(newton_fails, u0, p; solver = LevenbergMarquardt())
+    u0 = [-10.0, -1.0, 1.0, 2.0, 3.0, 4.0, 10.0]
+    p = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+    sol = solve_oop(newton_fails, u0, p; solver = LevenbergMarquardt())
     @test SciMLBase.successful_retcode(sol)
     @test all(abs.(newton_fails(sol.u, p)) .< 1e-9)
 end
 
 @testitem "LevenbergMarquardt: Iterator Interface" setup=[CoreRootfindTesting] tags=[:core] begin
-    p=range(0.01, 2, length = 200)
+    p = range(0.01, 2, length = 200)
     @test nlprob_iterator_interface(quadratic_f, p, false, LevenbergMarquardt()) ≈ sqrt.(p)
     @test nlprob_iterator_interface(quadratic_f!, p, true, LevenbergMarquardt()) ≈ sqrt.(p)
 end
@@ -406,9 +400,7 @@ end
 @testitem "LevenbergMarquardt Termination Conditions" setup=[CoreRootfindTesting] tags=[:core] begin
     using StaticArrays: @SVector
 
-    @testset "TC: $(nameof(typeof(termination_condition)))" for termination_condition in
-                                                                TERMINATION_CONDITIONS
-
+    @testset "TC: $(nameof(typeof(termination_condition)))" for termination_condition in TERMINATION_CONDITIONS
         @testset "u0: $(typeof(u0))" for u0 in ([1.0, 1.0], 1.0, @SVector([1.0, 1.0]))
             probN = NonlinearProblem(quadratic_f, u0, 2.0)
             sol = solve(probN, LevenbergMarquardt(); termination_condition)
@@ -419,29 +411,29 @@ end
 end
 
 @testitem "LevenbergMarquardt: Kwargs" setup=[CoreRootfindTesting] tags=[:core] begin
-    damping_initial=[0.5, 2.0, 5.0]
-    damping_increase_factor=[1.5, 3.0, 10.0]
-    damping_decrease_factor=Float64[2, 5, 10.0]
-    finite_diff_step_geodesic=[0.02, 0.2, 0.3]
-    α_geodesic=[0.6, 0.8, 0.9]
-    b_uphill=Float64[0, 1, 2]
-    min_damping_D=[1e-12, 1e-9, 1e-4]
-
-    list_of_options=zip(
+    damping_initial = [0.5, 2.0, 5.0]
+    damping_increase_factor = [1.5, 3.0, 10.0]
+    damping_decrease_factor = Float64[2, 5, 10.0]
+    finite_diff_step_geodesic = [0.02, 0.2, 0.3]
+    α_geodesic = [0.6, 0.8, 0.9]
+    b_uphill = Float64[0, 1, 2]
+    min_damping_D = [1e-12, 1e-9, 1e-4]
+
+    list_of_options = zip(
         damping_initial, damping_increase_factor, damping_decrease_factor,
         finite_diff_step_geodesic, α_geodesic, b_uphill, min_damping_D
     )
     for options in list_of_options
-        alg=LevenbergMarquardt(;
+        alg = LevenbergMarquardt(;
             damping_initial = options[1], damping_increase_factor = options[2],
             damping_decrease_factor = options[3], finite_diff_step_geodesic = options[4],
             α_geodesic = options[5], b_uphill = options[6], min_damping_D = options[7]
         )
 
-        sol=solve_oop(quadratic_f, [1.0, 1.0], 2.0; solver = alg, maxiters = 10000)
+        sol = solve_oop(quadratic_f, [1.0, 1.0], 2.0; solver = alg, maxiters = 10000)
         @test SciMLBase.successful_retcode(sol)
-        err=maximum(abs, quadratic_f(sol.u, 2.0))
+        err = maximum(abs, quadratic_f(sol.u, 2.0))
         @test err < 1e-9
     end
 end
@@ -450,7 +442,7 @@ end
     using ADTypes, SparseConnectivityTracer, SparseMatrixColorings
 
     # Filter autodiff backends based on Julia version
-    autodiff_backends=[AutoForwardDiff(), AutoFiniteDiff(), AutoZygote()]
+    autodiff_backends = [AutoForwardDiff(), AutoFiniteDiff(), AutoZygote()]
     if isempty(VERSION.prerelease)
         push!(autodiff_backends, AutoEnzyme())
     end
@@ -480,51 +472,51 @@ end
     using LinearAlgebra, LinearSolve, ADTypes
 
     function F(u::Vector{Float64}, p::Vector{Float64})
-        Δ=Tridiagonal(-ones(99), 2*ones(100), -ones(99))
-        return u+0.1*u .* Δ*u-p
+        Δ = Tridiagonal(-ones(99), 2 * ones(100), -ones(99))
+        return u + 0.1 * u .* Δ * u - p
     end
 
     function F!(du::Vector{Float64}, u::Vector{Float64}, p::Vector{Float64})
-        Δ=Tridiagonal(-ones(99), 2*ones(100), -ones(99))
-        du.=u+0.1*u .* Δ*u-p
+        Δ = Tridiagonal(-ones(99), 2 * ones(100), -ones(99))
+        du .= u + 0.1 * u .* Δ * u - p
         return nothing
     end
 
     function JVP(v::Vector{Float64}, u::Vector{Float64}, p::Vector{Float64})
-        Δ=Tridiagonal(-ones(99), 2*ones(100), -ones(99))
-        return v+0.1*(u .* Δ*v+v .* Δ*u)
+        Δ = Tridiagonal(-ones(99), 2 * ones(100), -ones(99))
+        return v + 0.1 * (u .* Δ * v + v .* Δ * u)
     end
 
     function JVP!(
             du::Vector{Float64}, v::Vector{Float64}, u::Vector{Float64}, p::Vector{Float64})
-        Δ=Tridiagonal(-ones(99), 2*ones(100), -ones(99))
-        du.=v+0.1*(u .* Δ*v+v .* Δ*u)
+        Δ = Tridiagonal(-ones(99), 2 * ones(100), -ones(99))
+        du .= v + 0.1 * (u .* Δ * v + v .* Δ * u)
         return nothing
     end
 
-    u0=rand(100)
+    u0 = rand(100)
 
-    prob=NonlinearProblem(NonlinearFunction{false}(F; jvp = JVP), u0, u0)
-    sol=solve(prob, NewtonRaphson(; linsolve = KrylovJL_GMRES()); abstol = 1e-13)
-    err=maximum(abs, sol.resid)
+    prob = NonlinearProblem(NonlinearFunction{false}(F; jvp = JVP), u0, u0)
+    sol = solve(prob, NewtonRaphson(; linsolve = KrylovJL_GMRES()); abstol = 1e-13)
+    err = maximum(abs, sol.resid)
     @test err < 1e-6
 
-    sol=solve(
+    sol = solve(
         prob, TrustRegion(; linsolve = KrylovJL_GMRES(), vjp_autodiff = AutoFiniteDiff());
         abstol = 1e-13
     )
-    err=maximum(abs, sol.resid)
+    err = maximum(abs, sol.resid)
     @test err < 1e-6
 
-    prob=NonlinearProblem(NonlinearFunction{true}(F!; jvp = JVP!), u0, u0)
-    sol=solve(prob, NewtonRaphson(; linsolve = KrylovJL_GMRES()); abstol = 1e-13)
-    err=maximum(abs, sol.resid)
+    prob = NonlinearProblem(NonlinearFunction{true}(F!; jvp = JVP!), u0, u0)
+    sol = solve(prob, NewtonRaphson(; linsolve = KrylovJL_GMRES()); abstol = 1e-13)
+    err = maximum(abs, sol.resid)
     @test err < 1e-6
 
-    sol=solve(
+    sol = solve(
         prob, TrustRegion(; linsolve = KrylovJL_GMRES(), vjp_autodiff = AutoFiniteDiff());
         abstol = 1e-13
     )
-    err=maximum(abs, sol.resid)
+    err = maximum(abs, sol.resid)
     @test err < 1e-6
 end
diff --git a/lib/NonlinearSolveHomotopyContinuation/src/jacobian_handling.jl b/lib/NonlinearSolveHomotopyContinuation/src/jacobian_handling.jl
index 5165b62ba..8143f1ecf 100644
--- a/lib/NonlinearSolveHomotopyContinuation/src/jacobian_handling.jl
+++ b/lib/NonlinearSolveHomotopyContinuation/src/jacobian_handling.jl
@@ -111,7 +111,7 @@ and `p` the parameter object. The returned function must have the signature
 required by `HomotopySystemWrapper`.
 """
-function construct_jacobian(f::F, autodiff, variant, u0, p) where F
+function construct_jacobian(f::F, autodiff, variant, u0, p) where {F}
     if variant == Scalar
         tmp = reinterpret(Float64, Vector{ComplexF64}(undef, 1))
     else
@@ -182,7 +182,7 @@ end
 Construct an `EnzymeJacobian` function.
 """
-function construct_jacobian(f::F, autodiff::AutoEnzyme, variant, u0, p) where F
+function construct_jacobian(f::F, autodiff::AutoEnzyme, variant, u0, p) where {F}
     if variant == Scalar
         prep = DI.prepare_derivative(f, autodiff, u0, DI.Constant(p), strict = Val(false))
     else
diff --git a/lib/NonlinearSolveQuasiNewton/src/NonlinearSolveQuasiNewton.jl b/lib/NonlinearSolveQuasiNewton/src/NonlinearSolveQuasiNewton.jl
index 167f1fa85..02bbc865f 100644
--- a/lib/NonlinearSolveQuasiNewton/src/NonlinearSolveQuasiNewton.jl
+++ b/lib/NonlinearSolveQuasiNewton/src/NonlinearSolveQuasiNewton.jl
@@ -50,7 +50,6 @@ include("solve.jl")
     @compile_workload begin
         @sync for prob in nonlinear_problems, alg in algs
-
             Threads.@spawn CommonSolve.solve(prob, alg; abstol = 1e-2, verbose = false)
         end
     end
diff --git a/lib/NonlinearSolveQuasiNewton/test/core_tests.jl b/lib/NonlinearSolveQuasiNewton/test/core_tests.jl
index 367e23246..a0cee1a90 100644
--- a/lib/NonlinearSolveQuasiNewton/test/core_tests.jl
+++ b/lib/NonlinearSolveQuasiNewton/test/core_tests.jl
@@ -16,10 +16,10 @@ end
         using Enzyme
     end
 
-    u0s=([1.0, 1.0], @SVector[1.0, 1.0], 1.0)
+    u0s = ([1.0, 1.0], @SVector[1.0, 1.0], 1.0)
 
     # Filter autodiff backends based on Julia version
-    autodiff_backends=[AutoForwardDiff(), AutoZygote(), AutoFiniteDiff()]
+    autodiff_backends = [AutoForwardDiff(), AutoZygote(), AutoFiniteDiff()]
     if isempty(VERSION.prerelease)
         push!(autodiff_backends, AutoEnzyme())
     end
@@ -72,7 +72,7 @@ end
 end
 
 @testitem "Broyden: Iterator Interface" setup=[CoreRootfindTesting] tags=[:core] begin
-    p=range(0.01, 2, length = 200)
+    p = range(0.01, 2, length = 200)
     @test nlprob_iterator_interface(quadratic_f, p, false, Broyden()) ≈ sqrt.(p)
     @test nlprob_iterator_interface(quadratic_f!, p, true, Broyden()) ≈ sqrt.(p)
 end
@@ -80,9 +80,7 @@ end
 @testitem "Broyden Termination Conditions" setup=[CoreRootfindTesting] tags=[:core] begin
     using StaticArrays: @SVector
 
-    @testset "TC: $(nameof(typeof(termination_condition)))" for termination_condition in
-                                                                TERMINATION_CONDITIONS
-
+    @testset "TC: $(nameof(typeof(termination_condition)))" for termination_condition in TERMINATION_CONDITIONS
         @testset "u0: $(typeof(u0))" for u0 in ([1.0, 1.0], 1.0, @SVector([1.0, 1.0]))
             probN = NonlinearProblem(quadratic_f, u0, 2.0)
             sol = solve(probN, Broyden(); termination_condition)
@@ -105,7 +103,7 @@ end
     end
 
     # Filter autodiff backends based on Julia version
-    autodiff_backends=[AutoForwardDiff(), AutoZygote(), AutoFiniteDiff()]
+    autodiff_backends = [AutoForwardDiff(), AutoZygote(), AutoFiniteDiff()]
     if isempty(VERSION.prerelease)
         push!(autodiff_backends, AutoEnzyme())
     end
@@ -158,7 +156,7 @@ end
 end
 
 @testitem "Klement: Iterator Interface" setup=[CoreRootfindTesting] tags=[:core] begin
-    p=range(0.01, 2, length = 200)
+    p = range(0.01, 2, length = 200)
     @test nlprob_iterator_interface(quadratic_f, p, false, Klement()) ≈ sqrt.(p)
     @test nlprob_iterator_interface(quadratic_f!, p, true, Klement()) ≈ sqrt.(p)
 end
@@ -166,9 +164,7 @@ end
 @testitem "Klement Termination Conditions" setup=[CoreRootfindTesting] tags=[:core] begin
     using StaticArrays: @SVector
 
-    @testset "TC: $(nameof(typeof(termination_condition)))" for termination_condition in
-                                                                TERMINATION_CONDITIONS
-
+    @testset "TC: $(nameof(typeof(termination_condition)))" for termination_condition in TERMINATION_CONDITIONS
         @testset "u0: $(typeof(u0))" for u0 in ([1.0, 1.0], 1.0, @SVector([1.0, 1.0]))
             probN = NonlinearProblem(quadratic_f, u0, 2.0)
             sol = solve(probN, Klement(); termination_condition)
@@ -191,7 +187,7 @@ end
     end
 
     # Filter autodiff backends based on Julia version
-    autodiff_backends=[AutoForwardDiff(), AutoZygote(), AutoFiniteDiff()]
+    autodiff_backends = [AutoForwardDiff(), AutoZygote(), AutoFiniteDiff()]
     if isempty(VERSION.prerelease)
         push!(autodiff_backends, AutoEnzyme())
     end
@@ -244,7 +240,7 @@ end
 end
 
 @testitem "LimitedMemoryBroyden: Iterator Interface" setup=[CoreRootfindTesting] tags=[:core] begin
-    p=range(0.01, 2, length = 200)
+    p = range(0.01, 2, length = 200)
     @test nlprob_iterator_interface(quadratic_f, p, false, LimitedMemoryBroyden()) ≈
           sqrt.(p)
     @test nlprob_iterator_interface(quadratic_f!, p, true, LimitedMemoryBroyden()) ≈
           sqrt.(p)
@@ -254,9 +250,7 @@ end
 @testitem "LimitedMemoryBroyden Termination Conditions" setup=[CoreRootfindTesting] tags=[:core] begin
     using StaticArrays: @SVector
 
-    @testset "TC: $(nameof(typeof(termination_condition)))" for termination_condition in
-                                                                TERMINATION_CONDITIONS
-
+    @testset "TC: $(nameof(typeof(termination_condition)))" for termination_condition in TERMINATION_CONDITIONS
         @testset "u0: $(typeof(u0))" for u0 in ([1.0, 1.0], 1.0, @SVector([1.0, 1.0]))
             probN = NonlinearProblem(quadratic_f, u0, 2.0)
             sol = solve(probN, LimitedMemoryBroyden(); termination_condition)
diff --git a/lib/NonlinearSolveSciPy/src/NonlinearSolveSciPy.jl b/lib/NonlinearSolveSciPy/src/NonlinearSolveSciPy.jl
index 5d7165722..262c27e03 100644
--- a/lib/NonlinearSolveSciPy/src/NonlinearSolveSciPy.jl
+++ b/lib/NonlinearSolveSciPy/src/NonlinearSolveSciPy.jl
@@ -94,7 +94,7 @@ end
 """
 Internal: wrap a Julia residual function into a Python callable
 """
-function _make_py_residual(f::F, p) where F
+function _make_py_residual(f::F, p) where {F}
     return pyfunc(x_py -> begin
         x = Vector{Float64}(x_py)
         r = f(x, p)
@@ -105,7 +105,7 @@ end
 """
 Internal: wrap a Julia scalar function into a Python callable
 """
-function _make_py_scalar(f::F, p) where F
+function _make_py_scalar(f::F, p) where {F}
     return pyfunc(x_py -> begin
         x = Float64(x_py)
         return f(x, p)
@@ -182,13 +182,11 @@ function SciMLBase.__solve(prob::SciMLBase.NonlinearProblem, alg::SciPyRoot;
     nfev = try
         Int(res.nfev)
     catch
-        ;
         0
     end
     niter = try
         Int(res.nit)
     catch
-        ;
         0
     end
     stats = SciMLBase.NLStats(nfev, 0, 0, 0, niter)
@@ -219,13 +217,11 @@ function SciMLBase.__solve(prob::SciMLBase.IntervalNonlinearProblem, alg::SciPyR
     nfev = try
         Int(res.function_calls)
     catch
-        ;
         0
     end
     niter = try
         Int(res.iterations)
     catch
-        ;
         0
     end
     stats = SciMLBase.NLStats(nfev, 0, 0, 0, niter)
diff --git a/lib/NonlinearSolveSciPy/test/wrappers_tests.jl b/lib/NonlinearSolveSciPy/test/wrappers_tests.jl
index 8ae9e7e82..6540c0cb8 100644
--- a/lib/NonlinearSolveSciPy/test/wrappers_tests.jl
+++ b/lib/NonlinearSolveSciPy/test/wrappers_tests.jl
@@ -18,8 +18,9 @@
     prob = NonlinearLeastSquaresProblem(residuals, x0_ls)
     sol = solve(prob, SciPyLeastSquaresTRF())
     @test SciMLBase.successful_retcode(sol)
-    prob_bounded = NonlinearLeastSquaresProblem(residuals, x0_ls; lb = [0.0, -2.0], ub = [
-        5.0, 3.0])
+    prob_bounded = NonlinearLeastSquaresProblem(
+        residuals, x0_ls; lb = [0.0, -2.0], ub = [
+            5.0, 3.0])
     sol2 = solve(prob_bounded, SciPyLeastSquares(method = "trf"))
     @test SciMLBase.successful_retcode(sol2)
 else
diff --git a/lib/NonlinearSolveSpectralMethods/src/NonlinearSolveSpectralMethods.jl b/lib/NonlinearSolveSpectralMethods/src/NonlinearSolveSpectralMethods.jl
index 93a620761..49949e438 100644
--- a/lib/NonlinearSolveSpectralMethods/src/NonlinearSolveSpectralMethods.jl
+++ b/lib/NonlinearSolveSpectralMethods/src/NonlinearSolveSpectralMethods.jl
@@ -34,7 +34,6 @@ include("solve.jl")
     @compile_workload begin
         @sync for prob in nonlinear_problems, alg in algs
-
             Threads.@spawn CommonSolve.solve(prob, alg; abstol = 1e-2, verbose = false)
         end
     end
diff --git a/lib/NonlinearSolveSpectralMethods/src/dfsane.jl b/lib/NonlinearSolveSpectralMethods/src/dfsane.jl
index cfe8bb2f6..759df6475 100644
--- a/lib/NonlinearSolveSpectralMethods/src/dfsane.jl
+++ b/lib/NonlinearSolveSpectralMethods/src/dfsane.jl
@@ -22,7 +22,7 @@ function DFSane(;
         sigma_min = 1 // 10^10, sigma_max = 1e10, sigma_1 = 1, M::Int = 10,
         gamma = 1 // 10^4, tau_min = 1 // 10, tau_max = 1 // 2, n_exp::Int = 2,
         max_inner_iterations::Int = 100, eta_strategy::F = (
-            fn_1, n, x_n, f_n) -> fn_1 / n^2
+        fn_1, n, x_n, f_n) -> fn_1 / n^2
 ) where {F}
     linesearch = RobustNonMonotoneLineSearch(;
         gamma = gamma, sigma_1 = sigma_1, M, tau_min = tau_min, tau_max = tau_max,
diff --git a/lib/NonlinearSolveSpectralMethods/test/core_tests.jl b/lib/NonlinearSolveSpectralMethods/test/core_tests.jl
index 9144e4daf..e127d1e16 100644
--- a/lib/NonlinearSolveSpectralMethods/test/core_tests.jl
+++ b/lib/NonlinearSolveSpectralMethods/test/core_tests.jl
@@ -8,7 +8,7 @@ end
     using BenchmarkTools: @ballocated
     using StaticArrays: @SVector
 
-    u0s=([1.0, 1.0], @SVector[1.0, 1.0], 1.0)
+    u0s = ([1.0, 1.0], @SVector[1.0, 1.0], 1.0)
 
     @testset "[OOP] u0: $(typeof(u0))" for u0 in u0s
         sol = solve_oop(quadratic_f, u0; solver = DFSane())
@@ -32,44 +32,44 @@ end
 end
 
 @testitem "DFSane Iterator Interface" setup=[CoreRootfindTesting] tags=[:core] begin
-    p=range(0.01, 2, length = 200)
+    p = range(0.01, 2, length = 200)
     @test nlprob_iterator_interface(quadratic_f, p, false, DFSane()) ≈ sqrt.(p)
     @test nlprob_iterator_interface(quadratic_f!, p, true, DFSane()) ≈ sqrt.(p)
 end
 
 @testitem "DFSane NewtonRaphson Fails" setup=[CoreRootfindTesting] tags=[:core] begin
-    u0=[-10.0, -1.0, 1.0, 2.0, 3.0, 4.0, 10.0]
-    p=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
-    sol=solve_oop(newton_fails, u0, p; solver = DFSane())
+    u0 = [-10.0, -1.0, 1.0, 2.0, 3.0, 4.0, 10.0]
+    p = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+    sol = solve_oop(newton_fails, u0, p; solver = DFSane())
     @test SciMLBase.successful_retcode(sol)
     @test all(abs.(newton_fails(sol.u, p)) .< 1e-9)
 end
 
 @testitem "DFSane: Kwargs" setup=[CoreRootfindTesting] tags=[:core] begin
-    σ_min=[1e-10, 1e-5, 1e-4]
-    σ_max=[1e10, 1e5, 1e4]
-    σ_1=[1.0, 0.5, 2.0]
-    M=[10, 1, 100]
-    γ=[1e-4, 1e-3, 1e-5]
-    τ_min=[0.1, 0.2, 0.3]
-    τ_max=[0.5, 0.8, 0.9]
-    nexp=[2, 1, 2]
-    η_strategy=[
-        (f_1, k, x, F)->f_1/k^2, (f_1, k, x, F)->f_1/k^3,
-        (f_1, k, x, F)->f_1/k^4
+    σ_min = [1e-10, 1e-5, 1e-4]
+    σ_max = [1e10, 1e5, 1e4]
+    σ_1 = [1.0, 0.5, 2.0]
+    M = [10, 1, 100]
+    γ = [1e-4, 1e-3, 1e-5]
+    τ_min = [0.1, 0.2, 0.3]
+    τ_max = [0.5, 0.8, 0.9]
+    nexp = [2, 1, 2]
+    η_strategy = [
+        (f_1, k, x, F) -> f_1 / k^2, (f_1, k, x, F) -> f_1 / k^3,
+        (f_1, k, x, F) -> f_1 / k^4
     ]
 
-    list_of_options=zip(σ_min, σ_max, σ_1, M, γ, τ_min, τ_max, nexp, η_strategy)
+    list_of_options = zip(σ_min, σ_max, σ_1, M, γ, τ_min, τ_max, nexp, η_strategy)
     for options in list_of_options
         local probN, sol, alg
 
-        alg=DFSane(;
+        alg = DFSane(;
             sigma_min = options[1], sigma_max = options[2], sigma_1 = options[3],
             M = options[4], gamma = options[5], tau_min = options[6],
             tau_max = options[7], n_exp = options[8], eta_strategy = options[9]
         )
 
-        probN=NonlinearProblem{false}(quadratic_f, [1.0, 1.0], 2.0)
-        sol=solve(probN, alg, abstol = 1e-11)
+        probN = NonlinearProblem{false}(quadratic_f, [1.0, 1.0], 2.0)
+        sol = solve(probN, alg, abstol = 1e-11)
         @test all(abs.(quadratic_f(sol.u, 2.0)) .< 1e-6)
     end
 end
@@ -77,9 +77,7 @@ end
 @testitem "DFSane Termination Conditions" setup=[CoreRootfindTesting] tags=[:core] begin
     using StaticArrays: @SVector
 
-    @testset "TC: $(nameof(typeof(termination_condition)))" for termination_condition in
-                                                                TERMINATION_CONDITIONS
-
+    @testset "TC: $(nameof(typeof(termination_condition)))" for termination_condition in TERMINATION_CONDITIONS
         @testset "u0: $(typeof(u0))" for u0 in ([1.0, 1.0], 1.0, @SVector([1.0, 1.0]))
             probN = NonlinearProblem(quadratic_f, u0, 2.0)
             sol = solve(probN, DFSane(); termination_condition)
diff --git a/lib/SCCNonlinearSolve/test/core_tests.jl b/lib/SCCNonlinearSolve/test/core_tests.jl
index 06446833b..b0ea35cc7 100644
--- a/lib/SCCNonlinearSolve/test/core_tests.jl
+++ b/lib/SCCNonlinearSolve/test/core_tests.jl
@@ -7,71 +7,71 @@ end
 @testitem "Manual SCC" setup=[CoreRootfindTesting] tags=[:core] begin
     using NonlinearSolveFirstOrder
 
     function f(du, u, p)
-        du[1]=cos(u[2])-u[1]
-        du[2]=sin(u[1]+u[2])+u[2]
-        du[3]=2u[4]+u[3]+1.0
-        du[4]=u[5]^2+u[4]
-        du[5]=u[3]^2+u[5]
-        du[6]=u[1]+u[2]+u[3]+u[4]+u[5]+2.0u[6]+2.5u[7]+1.5u[8]
-        du[7]=u[1]+u[2]+u[3]+2.0u[4]+u[5]+4.0u[6]-1.5u[7]+1.5u[8]
-        du[8]=u[1]+2.0u[2]+3.0u[3]+5.0u[4]+6.0u[5]+u[6]-u[7]-u[8]
+        du[1] = cos(u[2]) - u[1]
+        du[2] = sin(u[1] + u[2]) + u[2]
+        du[3] = 2u[4] + u[3] + 1.0
+        du[4] = u[5]^2 + u[4]
+        du[5] = u[3]^2 + u[5]
+        du[6] = u[1] + u[2] + u[3] + u[4] + u[5] + 2.0u[6] + 2.5u[7] + 1.5u[8]
+        du[7] = u[1] + u[2] + u[3] + 2.0u[4] + u[5] + 4.0u[6] - 1.5u[7] + 1.5u[8]
+        du[8] = u[1] + 2.0u[2] + 3.0u[3] + 5.0u[4] + 6.0u[5] + u[6] - u[7] - u[8]
     end
 
-    prob=NonlinearProblem(f, zeros(8))
-    sol=solve(prob, NewtonRaphson())
+    prob = NonlinearProblem(f, zeros(8))
+    sol = solve(prob, NewtonRaphson())
 
-    u0=zeros(2)
-    p=zeros(3)
+    u0 = zeros(2)
+    p = zeros(3)
 
     function f1(du, u, p)
-        du[1]=cos(u[2])-u[1]
-        du[2]=sin(u[1]+u[2])+u[2]
+        du[1] = cos(u[2]) - u[1]
+        du[2] = sin(u[1] + u[2]) + u[2]
     end
 
-    explicitfun1(p, sols)=nothing
-    prob1=NonlinearProblem(
+    explicitfun1(p, sols) = nothing
+    prob1 = NonlinearProblem(
         NonlinearFunction{true, SciMLBase.NoSpecialize}(f1), zeros(2), p)
-    sol1=solve(prob1, NewtonRaphson())
+    sol1 = solve(prob1, NewtonRaphson())
 
     function f2(du, u, p)
-        du[1]=2u[2]+u[1]+1.0
-        du[2]=u[3]^2+u[2]
-        du[3]=u[1]^2+u[3]
+        du[1] = 2u[2] + u[1] + 1.0
+        du[2] = u[3]^2 + u[2]
+        du[3] = u[1]^2 + u[3]
     end
 
-    explicitfun2(p, sols)=nothing
-    prob2=NonlinearProblem(
+    explicitfun2(p, sols) = nothing
+    prob2 = NonlinearProblem(
         NonlinearFunction{true, SciMLBase.NoSpecialize}(f2), zeros(3), p)
-    sol2=solve(prob2, NewtonRaphson())
+    sol2 = solve(prob2, NewtonRaphson())
 
     # Convert f3 to a LinearProblem since it's linear in u
     # du = Au + b where A is the coefficient matrix and b is from parameters
-    A3=[2.0 2.5 1.5; 4.0 -1.5 1.5; 1.0 -1.0 -1.0]
-    b3=p # b will be updated by explicitfun3
-    prob3=LinearProblem(A3, b3, zeros(3))
+    A3 = [2.0 2.5 1.5; 4.0 -1.5 1.5; 1.0 -1.0 -1.0]
+    b3 = p # b will be updated by explicitfun3
+    prob3 = LinearProblem(A3, b3, zeros(3))
 
     function explicitfun3(p, sols)
-        p[1]=-(sols[1][1]+sols[1][2]+sols[2][1]+sols[2][2]+sols[2][3])
-        p[2]=-(sols[1][1]+sols[1][2]+sols[2][1]+2.0sols[2][2]+sols[2][3])
-        p[3]=-(sols[1][1]+2.0sols[1][2]+3.0sols[2][1]+5.0sols[2][2]+
-               6.0sols[2][3])
+        p[1] = -(sols[1][1] + sols[1][2] + sols[2][1] + sols[2][2] + sols[2][3])
+        p[2] = -(sols[1][1] + sols[1][2] + sols[2][1] + 2.0sols[2][2] + sols[2][3])
+        p[3] = -(sols[1][1] + 2.0sols[1][2] + 3.0sols[2][1] + 5.0sols[2][2] +
+                 6.0sols[2][3])
     end
 
     explicitfun3(p, [sol1, sol2])
-    sol3=solve(prob3) # LinearProblem uses default linear solver
-    manualscc=reduce(vcat, (sol1, sol2, sol3))
+    sol3 = solve(prob3) # LinearProblem uses default linear solver
+    manualscc = reduce(vcat, (sol1, sol2, sol3))
 
-    sccprob=SciMLBase.SCCNonlinearProblem((prob1, prob2, prob3),
+    sccprob = SciMLBase.SCCNonlinearProblem((prob1, prob2, prob3),
         SciMLBase.Void{Any}.([explicitfun1, explicitfun2, explicitfun3]))
 
     # Test with SCCAlg that handles both nonlinear and linear problems
     using SCCNonlinearSolve
-    scc_alg=SCCNonlinearSolve.SCCAlg(nlalg = NewtonRaphson(), linalg = nothing)
-    scc_sol=solve(sccprob, scc_alg)
+    scc_alg = SCCNonlinearSolve.SCCAlg(nlalg = NewtonRaphson(), linalg = nothing)
+    scc_sol = solve(sccprob, scc_alg)
     @test sol ≈ manualscc ≈ scc_sol
 
     # Backwards compat of alg choice
-    scc_sol=solve(sccprob, NewtonRaphson())
+    scc_sol = solve(sccprob, NewtonRaphson())
     @test sol ≈ manualscc ≈ scc_sol
 
     import NonlinearSolve # Required for Default
 
     # Test default interface
-    scc_sol_default=solve(sccprob)
+    scc_sol_default = solve(sccprob)
     @test sol ≈ manualscc ≈ scc_sol_default
 end
diff --git a/lib/SciMLJacobianOperators/src/SciMLJacobianOperators.jl b/lib/SciMLJacobianOperators/src/SciMLJacobianOperators.jl
index 93a4b4b63..224998242 100644
--- a/lib/SciMLJacobianOperators/src/SciMLJacobianOperators.jl
+++ b/lib/SciMLJacobianOperators/src/SciMLJacobianOperators.jl
@@ -291,7 +291,7 @@ function prepare_vjp(::Val{false}, prob::AbstractNonlinearProblem,
     if SciMLBase.isinplace(f)
         jac_cache = similar(u, eltype(fu), length(fu), length(u))
         return @closure (
-                vJ, v, u, p) -> begin
+            vJ, v, u, p) -> begin
             f.jac(jac_cache, u, p)
             LinearAlgebra.mul!(vec(vJ), jac_cache', vec(v))
             return
@@ -311,9 +311,9 @@ function prepare_vjp(::Val{false}, prob::AbstractNonlinearProblem,
         di_extras = DI.prepare_pullback(
             f, fu_cache, autodiff, u, (fu,), Constant(prob.p), strict = Val(false))
         return @closure (vJ,
-                v,
-                u,
-                p) -> begin
+            v,
+            u,
+            p) -> begin
             DI.pullback!(f, fu_cache, (reshape(vJ, size(u)),), di_extras, autodiff,
                 u, (reshape(v, size(fu_cache)),), Constant(p))
             return
@@ -322,8 +322,8 @@ function prepare_vjp(::Val{false}, prob::AbstractNonlinearProblem,
         di_extras = DI.prepare_pullback(
             f, autodiff, u, (fu,), Constant(prob.p), strict = Val(false))
         return @closure (v,
-                u,
-                p) -> begin
+            u,
+            p) -> begin
             return only(DI.pullback(
                 f, di_extras, autodiff, u, (reshape(v, size(fu)),), Constant(p)))
         end
@@ -345,7 +345,7 @@ function prepare_jvp(::Val{false}, prob::AbstractNonlinearProblem,
     if SciMLBase.isinplace(f)
         jac_cache = similar(u, eltype(fu), length(fu), length(u))
         return @closure (
-                Jv, v, u, p) -> begin
+            Jv, v, u, p) -> begin
             f.jac(jac_cache, u, p)
             LinearAlgebra.mul!(vec(Jv), jac_cache, vec(v))
             return
@@ -364,9 +364,9 @@ function prepare_jvp(::Val{false}, prob::AbstractNonlinearProblem,
         di_extras = DI.prepare_pushforward(
             f, fu_cache, autodiff, u, (u,), Constant(prob.p), strict = Val(false))
         return @closure (Jv,
-                v,
-                u,
-                p) -> begin
+            v,
+            u,
+            p) -> begin
             DI.pushforward!(f, fu_cache, (reshape(Jv, size(fu_cache)),), di_extras,
                 autodiff, u, (reshape(v, size(u)),), Constant(p))
             return
@@ -375,8 +375,8 @@ function prepare_jvp(::Val{false}, prob::AbstractNonlinearProblem,
         di_extras = DI.prepare_pushforward(
             f, autodiff, u, (u,), Constant(prob.p), strict = Val(false))
         return @closure (v,
-                u,
-                p) -> begin
+            u,
+            p) -> begin
             return only(DI.pushforward(
                 f, di_extras, autodiff, u, (reshape(v, size(u)),), Constant(p)))
         end
@@ -405,7 +405,7 @@ function get_dense_ad(ad::AutoSparse)
 end
 
 function Base.copy(J::JacobianOperator{iip, T}) where {iip, T}
-    return JacobianOperator{iip,T}(
+    return JacobianOperator{iip, T}(
         J.mode,
         J.jvp_op,
         J.vjp_op,
@@ -429,10 +429,8 @@ function Base.copy(J::StatefulJacobianNormalFormOperator{T}) where {T}
         J.jvp_operator === nothing ? nothing : copy(J.jvp_operator),
         J.cache === nothing ? nothing : copy(J.cache)
     )
-
 end
-
 
 export JacobianOperator, VecJacOperator, JacVecOperator
 export StatefulJacobianOperator
 export StatefulJacobianNormalFormOperator
diff --git a/lib/SciMLJacobianOperators/test/core_tests.jl b/lib/SciMLJacobianOperators/test/core_tests.jl
index 7160ae0ec..5e2b9719f 100644
--- a/lib/SciMLJacobianOperators/test/core_tests.jl
+++ b/lib/SciMLJacobianOperators/test/core_tests.jl
@@ -36,11 +36,9 @@
     @testset "AutoDiff" begin
         @testset for jvp_autodiff in forward_ADs, vjp_autodiff in reverse_ADs
-
             jac_op = JacobianOperator(prob, -1.0, 1.0; jvp_autodiff, vjp_autodiff)
 
             @testset for u in rand(4), v in rand(4)
-
                 sop = StatefulJacobianOperator(jac_op, u, prob.p)
                 @test (sop * v)≈analytic_jvp(v, u, prob.p) atol=1e-5
                 @test (sop' * v)≈analytic_vjp(v, u, prob.p) atol=1e-5
@@ -62,7 +60,6 @@
         jac_op = JacobianOperator(prob, -1.0, 1.0)
 
         @testset for u in rand(4), v in rand(4)
-
             sop = StatefulJacobianOperator(jac_op, u, prob.p)
             @test (sop * v)≈analytic_jvp(v, u, prob.p) atol=1e-5
             @test (sop' * v)≈analytic_vjp(v, u, prob.p) atol=1e-5
@@ -82,7 +79,6 @@
         jac_op = JacobianOperator(prob, -1.0, 1.0)
 
         @testset for u in rand(4), v in rand(4)
-
             sop = StatefulJacobianOperator(jac_op, u, prob.p)
             @test (sop * v)≈2 * u * v atol=1e-5
             @test (sop' * v)≈2 * u * v atol=1e-5
@@ -137,11 +133,9 @@ end
     @testset "AutoDiff" begin
         @testset for jvp_autodiff in forward_ADs, vjp_autodiff in reverse_ADs
-
             jac_op = JacobianOperator(prob, [2.0, 3.0], prob.u0; jvp_autodiff, vjp_autodiff)
 
             @testset for u in [rand(2) for _ in 1:4], v in [rand(2) for _ in 1:4]
-
                 sop = StatefulJacobianOperator(jac_op, u, prob.p)
                 @test (sop * v)≈analytic_jvp(v, u, prob.p) atol=1e-5
                 @test (sop' * v)≈analytic_vjp(v, u, prob.p) atol=1e-5
@@ -163,7 +157,6 @@ end
         jac_op = JacobianOperator(prob, [2.0, 3.0], prob.u0)
 
         @testset for u in [rand(2) for _ in 1:4], v in [rand(2) for _ in 1:4]
-
             sop = StatefulJacobianOperator(jac_op, u, prob.p)
             @test (sop * v)≈analytic_jvp(v, u, prob.p) atol=1e-5
             @test (sop' * v)≈analytic_vjp(v, u, prob.p) atol=1e-5
@@ -184,7 +177,6 @@ end
         jac_op = JacobianOperator(prob, [2.0, 3.0], prob.u0)
 
         @testset for u in [rand(2) for _ in 1:4], v in [rand(2) for _ in 1:4]
-
             sop = StatefulJacobianOperator(jac_op, u, prob.p)
             @test (sop * v)≈analytic_jvp(v, u, prob.p) atol=1e-5
             @test (sop' * v)≈analytic_vjp(v, u, prob.p) atol=1e-5
@@ -237,11 +229,9 @@ end
     @testset "AutoDiff" begin
         @testset for jvp_autodiff in forward_ADs, vjp_autodiff in reverse_ADs
-
             jac_op = JacobianOperator(prob, [2.0, 3.0], prob.u0; jvp_autodiff, vjp_autodiff)
 
             @testset for u in [rand(2) for _ in 1:4], v in [rand(2) for _ in 1:4]
-
                 sop = StatefulJacobianOperator(jac_op, u, prob.p)
                 @test (sop * v)≈analytic_jvp(v, u, prob.p) atol=1e-5
                 @test (sop' * v)≈analytic_vjp(v, u, prob.p) atol=1e-5
@@ -263,7 +253,6 @@ end
         jac_op = JacobianOperator(prob, [2.0, 3.0], prob.u0)
 
         @testset for u in [rand(2) for _ in 1:4], v in [rand(2) for _ in 1:4]
-
             sop = StatefulJacobianOperator(jac_op, u, prob.p)
             @test (sop * v)≈analytic_jvp(v, u, prob.p) atol=1e-5
             @test (sop' * v)≈analytic_vjp(v, u, prob.p) atol=1e-5
@@ -284,7 +273,6 @@ end
         jac_op = JacobianOperator(prob, [2.0, 3.0], prob.u0)
 
         @testset for u in [rand(2) for _ in 1:4], v in [rand(2) for _ in 1:4]
-
             sop = StatefulJacobianOperator(jac_op, u, prob.p)
             @test (sop * v)≈analytic_jvp(v, u, prob.p) atol=1e-5
             @test (sop' * v)≈analytic_vjp(v, u, prob.p) atol=1e-5
diff --git a/lib/SimpleNonlinearSolve/src/SimpleNonlinearSolve.jl b/lib/SimpleNonlinearSolve/src/SimpleNonlinearSolve.jl
index 1908a1131..495cadef2 100644
--- a/lib/SimpleNonlinearSolve/src/SimpleNonlinearSolve.jl
+++ b/lib/SimpleNonlinearSolve/src/SimpleNonlinearSolve.jl
@@ -153,7 +153,6 @@ function solve_adjoint_internal end
     @compile_workload begin
         @sync for prob in (prob_scalar, prob_iip, prob_oop), alg in algs
-
             Threads.@spawn CommonSolve.solve(prob, alg; abstol = 1e-2, verbose = false)
         end
     end
diff --git a/lib/SimpleNonlinearSolve/src/trust_region.jl b/lib/SimpleNonlinearSolve/src/trust_region.jl
index aa6041d61..b1a57fbd2 100644
--- a/lib/SimpleNonlinearSolve/src/trust_region.jl
+++ b/lib/SimpleNonlinearSolve/src/trust_region.jl
@@ -193,7 +193,7 @@ function SciMLBase.__solve(
     return SciMLBase.build_solution(prob, alg, x, fx; retcode = ReturnCode.MaxIters)
 end
 
-function dogleg_method!!(cache, J, f::F, g, Δ) where F
+function dogleg_method!!(cache, J, f::F, g, Δ) where {F}
     (; δsd, δN_δsd, δN) = cache
 
     # Compute the Newton step
diff --git a/lib/SimpleNonlinearSolve/src/utils.jl b/lib/SimpleNonlinearSolve/src/utils.jl
index 2f8b49715..c3da10f9c 100644
--- a/lib/SimpleNonlinearSolve/src/utils.jl
+++ b/lib/SimpleNonlinearSolve/src/utils.jl
@@ -167,7 +167,7 @@ end
 function compute_hvvp(prob, autodiff, fx, x, dir)
     jvp_fn = if SciMLBase.isinplace(prob)
         @closure (u,
-            p) -> begin
+        p) -> begin
             du = NLBUtils.safe_similar(fx, promote_type(eltype(fx), eltype(u)))
             return only(DI.pushforward(prob.f, du, autodiff, u, (dir,), Constant(p)))
         end
diff --git a/lib/SimpleNonlinearSolve/test/core/forward_diff_tests.jl b/lib/SimpleNonlinearSolve/test/core/forward_diff_tests.jl
index be0adbf8a..68a5d6ec0 100644
--- a/lib/SimpleNonlinearSolve/test/core/forward_diff_tests.jl
+++ b/lib/SimpleNonlinearSolve/test/core/forward_diff_tests.jl
@@ -33,7 +33,6 @@
     @testset "Scalar AD" begin
         for p in 1.0:0.1:100.0, u0 in us
-
             sol = solve(NonlinearProblem{false}(test_f, u0, p), alg)
             if SciMLBase.successful_retcode(sol)
                 gs = abs.(ForwardDiff.derivative(p) do pᵢ
diff --git a/lib/SimpleNonlinearSolve/test/core/rootfind_tests.jl b/lib/SimpleNonlinearSolve/test/core/rootfind_tests.jl
index def3b9ab1..85dc18faa 100644
--- a/lib/SimpleNonlinearSolve/test/core/rootfind_tests.jl
+++ b/lib/SimpleNonlinearSolve/test/core/rootfind_tests.jl
@@ -80,8 +80,7 @@ end
         @test maximum(abs, quadratic_f(sol.u, 2.0)) < 1e-9
     end
 
-    @testset "Termination Condition: $(nameof(typeof(termination_condition))) u0: $(nameof(typeof(u0)))" for termination_condition in
-                                                                                                             TERMINATION_CONDITIONS,
+    @testset "Termination Condition: $(nameof(typeof(termination_condition))) u0: $(nameof(typeof(u0)))" for termination_condition in TERMINATION_CONDITIONS,
         u0 in (1.0, [1.0, 1.0], @SVector[1.0, 1.0])
 
         probN = NonlinearProblem(quadratic_f, u0, 2.0)
@@ -114,8 +113,7 @@ end
         end
     end
 
-    @testset "Termination Condition: $(nameof(typeof(termination_condition))) u0: $(nameof(typeof(u0)))" for termination_condition in
-                                                                                                             TERMINATION_CONDITIONS,
+    @testset "Termination Condition: $(nameof(typeof(termination_condition))) u0: $(nameof(typeof(u0)))" for termination_condition in TERMINATION_CONDITIONS,
         u0 in (1.0, [1.0, 1.0], @SVector[1.0, 1.0])
 
         probN = NonlinearProblem(quadratic_f, u0, 2.0)
@@ -147,8 +145,7 @@ end
         @test maximum(abs, quadratic_f(sol.u, 2.0)) < 1e-9
     end
 
-    @testset "Termination Condition: $(nameof(typeof(termination_condition))) u0: $(nameof(typeof(u0)))" for termination_condition in
-                                                                                                             TERMINATION_CONDITIONS,
+    @testset "Termination Condition: $(nameof(typeof(termination_condition))) u0: $(nameof(typeof(u0)))" for termination_condition in TERMINATION_CONDITIONS,
         u0 in (1.0, [1.0, 1.0], @SVector[1.0, 1.0])
 
         probN = NonlinearProblem(quadratic_f, u0, 2.0)
@@ -158,8 +155,8 @@ end
 end
 
 @testitem "Newton Fails" setup=[RootfindTestSnippet] tags=[:core] begin
-    u0=[-10.0, -1.0, 1.0, 2.0, 3.0, 4.0, 10.0]
-    p=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+    u0 = [-10.0, -1.0, 1.0, 2.0, 3.0, 4.0, 10.0]
+    p = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 
     @testset "$(nameof(typeof(alg)))" for alg in (
         SimpleDFSane(),
@@ -174,7 +171,7 @@ end
 end
 
 @testitem "Kwargs Propagation" setup=[RootfindTestSnippet] tags=[:core] begin
-    prob=NonlinearProblem(quadratic_f, ones(4), 2.0; maxiters = 2)
-    sol=solve(prob, SimpleNewtonRaphson())
+    prob = NonlinearProblem(quadratic_f, ones(4), 2.0; maxiters = 2)
+    sol = solve(prob, SimpleNewtonRaphson())
     @test sol.retcode === ReturnCode.MaxIters
 end
diff --git a/src/NonlinearSolve.jl b/src/NonlinearSolve.jl
index 8eddf0712..8439489de 100644
--- a/src/NonlinearSolve.jl
+++ b/src/NonlinearSolve.jl
@@ -64,7 +64,7 @@ include("forward_diff.jl")
         (NonlinearFunction{false, NoSpecialize}((u, p) -> (u .^ 2 .- p)[1:1]), [0.1, 0.0]),
         (
             NonlinearFunction{false, NoSpecialize}((
-                u, p) -> vcat(u .* u .- p, u .* u .- p)),
+            u, p) -> vcat(u .* u .- p, u .* u .- p)),
             [0.1, 0.1]
         ),
         (
diff --git a/src/default.jl b/src/default.jl
index abadc5fc4..cc77e3624 100644
--- a/src/default.jl
+++ b/src/default.jl
@@ -37,13 +37,15 @@ function SciMLBase.__solve(prob::NonlinearProblem, ::Nothing, args...; kwargs...
     )
 end
 
-function SciMLBase.__init(prob::SciMLBase.AbstractSteadyStateProblem, ::Nothing, args...; kwargs...)
+function SciMLBase.__init(
+        prob::SciMLBase.AbstractSteadyStateProblem, ::Nothing, args...; kwargs...)
     # Convert SteadyStateProblem to NonlinearProblem and use its default
     nlprob = SciMLBase.NonlinearProblem(prob)
     return SciMLBase.__init(nlprob, nothing, args...; kwargs...)
 end
 
-function SciMLBase.__solve(prob::SciMLBase.AbstractSteadyStateProblem, ::Nothing, args...; kwargs...)
+function SciMLBase.__solve(
+        prob::SciMLBase.AbstractSteadyStateProblem, ::Nothing, args...; kwargs...)
     # Convert SteadyStateProblem to NonlinearProblem and use its default
     nlprob = SciMLBase.NonlinearProblem(prob)
     return SciMLBase.__solve(nlprob, nothing, args...; kwargs...)
diff --git a/test/23_test_problems_tests.jl b/test/23_test_problems_tests.jl
index c4215cebb..18e3df27b 100644
--- a/test/23_test_problems_tests.jl
+++ b/test/23_test_problems_tests.jl
@@ -10,8 +10,7 @@ function test_on_library(
         x = dict["start"]
         res = similar(x)
         nlprob = NonlinearProblem(problem, copy(x))
-        @testset "$idx: $(dict["title"]) | alg #$(alg_id)" for (alg_id, alg) in
-                                                               enumerate(alg_ops)
+        @testset "$idx: $(dict["title"]) | alg #$(alg_id)" for (alg_id, alg) in enumerate(alg_ops)
             try
                 sol = solve(nlprob, alg; maxiters = 10000)
                 problem(res, sol.u, nothing)
@@ -40,38 +39,38 @@ export test_on_library, problems, dicts
 end
 
 @testitem "23 Test Problems: PolyAlgorithms" setup=[RobustnessTesting] tags=[:nopre] begin
-    alg_ops=(RobustMultiNewton(), FastShortcutNonlinearPolyalg())
+    alg_ops = (RobustMultiNewton(), FastShortcutNonlinearPolyalg())
 
-    broken_tests=Dict(alg=>Int[] for alg in alg_ops)
-    broken_tests[alg_ops[1]]=[]
-    broken_tests[alg_ops[2]]=[]
+    broken_tests = Dict(alg => Int[] for alg in alg_ops)
+    broken_tests[alg_ops[1]] = []
+    broken_tests[alg_ops[2]] = []
 
     test_on_library(problems, dicts, alg_ops, broken_tests)
 end
 
 @testitem "23 Test Problems: NewtonRaphson" setup=[RobustnessTesting] tags=[:core] begin
-    alg_ops=(
+    alg_ops = (
         NewtonRaphson(),
         SimpleNewtonRaphson()
     )
 
-    broken_tests=Dict(alg=>Int[] for alg in alg_ops)
-    broken_tests[alg_ops[1]]=[1]
+    broken_tests = Dict(alg => Int[] for alg in alg_ops)
+    broken_tests[alg_ops[1]] = [1]
 
     test_on_library(problems, dicts, alg_ops, broken_tests)
 end
 
 @testitem "23 Test Problems: Halley" setup=[RobustnessTesting] tags=[:core] begin
-    alg_ops=(SimpleHalley(; autodiff = AutoForwardDiff()),)
+    alg_ops = (SimpleHalley(; autodiff = AutoForwardDiff()),)
 
-    broken_tests=Dict(alg=>Int[] for alg in alg_ops)
-    broken_tests[alg_ops[1]]=[1, 5, 15, 16, 18]
+    broken_tests = Dict(alg => Int[] for alg in alg_ops)
+    broken_tests[alg_ops[1]] = [1, 5, 15, 16, 18]
 
     test_on_library(problems, dicts, alg_ops, broken_tests)
 end
 
 @testitem "23 Test Problems: TrustRegion" setup=[RobustnessTesting] tags=[:core] begin
-    alg_ops=(
+    alg_ops = (
         TrustRegion(; radius_update_scheme = RadiusUpdateSchemes.Simple),
         TrustRegion(; radius_update_scheme = RadiusUpdateSchemes.Fan),
         TrustRegion(; radius_update_scheme = RadiusUpdateSchemes.Hei),
@@ -82,15 +81,15 @@ end
         SimpleTrustRegion(; nlsolve_update_rule = Val(true))
     )
 
-    broken_tests=Dict(alg=>Int[] for alg in alg_ops)
-    broken_tests[alg_ops[1]]=[11, 21]
-    broken_tests[alg_ops[2]]=[11, 21]
-    broken_tests[alg_ops[3]]=[11, 21]
-    broken_tests[alg_ops[4]]=[8, 11, 21]
-    broken_tests[alg_ops[5]]=[21]
-    broken_tests[alg_ops[6]]=[11, 21]
-    broken_tests[alg_ops[7]]=[3, 15, 16, 21]
-    broken_tests[alg_ops[8]]=[15, 16]
+    broken_tests = Dict(alg => Int[] for alg in alg_ops)
+    broken_tests[alg_ops[1]] = [11, 21]
+    broken_tests[alg_ops[2]] = [11, 21]
+    broken_tests[alg_ops[3]] = [11, 21]
+    broken_tests[alg_ops[4]] = [8, 11, 21]
+    broken_tests[alg_ops[5]] = [21]
+    broken_tests[alg_ops[6]] = [11, 21]
+    broken_tests[alg_ops[7]] = [3, 15, 16, 21]
+    broken_tests[alg_ops[8]] = [15, 16]
 
     test_on_library(problems, dicts, alg_ops, broken_tests)
 end
@@ -98,43 +97,43 @@ end
 @testitem "23 Test Problems: LevenbergMarquardt" setup=[RobustnessTesting] tags=[:core] begin
     using LinearSolve
 
-    alg_ops=(
+    alg_ops = (
         LevenbergMarquardt(),
         LevenbergMarquardt(; α_geodesic = 0.1),
         LevenbergMarquardt(; linsolve = CholeskyFactorization())
     )
 
-    broken_tests=Dict(alg=>Int[] for alg in alg_ops)
-    broken_tests[alg_ops[1]]=[11, 21]
-    broken_tests[alg_ops[2]]=[11, 21]
-    broken_tests[alg_ops[3]]=[11, 21]
+    broken_tests = Dict(alg => Int[] for alg in alg_ops)
+    broken_tests[alg_ops[1]] = [11, 21]
+    broken_tests[alg_ops[2]] = [11, 21]
+    broken_tests[alg_ops[3]] = [11, 21]
 
     test_on_library(problems, dicts, alg_ops, broken_tests)
 end
 
 @testitem "23 Test Problems: DFSane" setup=[RobustnessTesting] tags=[:core] begin
-    alg_ops=(
+    alg_ops = (
         DFSane(),
         SimpleDFSane()
     )
 
-    broken_tests=Dict(alg=>Int[] for alg in alg_ops)
-    broken_tests[alg_ops[1]]=[1, 2, 3, 5, 21]
+    broken_tests = Dict(alg => Int[] for alg in alg_ops)
+    broken_tests[alg_ops[1]] = [1, 2, 3, 5, 21]
     if Sys.isapple()
-        if VERSION≥v"1.11-"
-            broken_tests[alg_ops[2]]=[1, 2, 3, 5, 6, 11, 21]
+        if VERSION ≥ v"1.11-"
+            broken_tests[alg_ops[2]] = [1, 2, 3, 5, 6, 11, 21]
         else
-            broken_tests[alg_ops[2]]=[1, 2, 3, 5, 6, 21]
+            broken_tests[alg_ops[2]] = [1, 2, 3, 5, 6, 21]
         end
     else
-        broken_tests[alg_ops[2]]=[1, 2, 3, 5, 6, 11, 21]
+        broken_tests[alg_ops[2]] = [1, 2, 3, 5, 6, 11, 21]
     end
 
     test_on_library(problems, dicts, alg_ops, broken_tests)
 end
 
 @testitem "23 Test Problems: Broyden" setup=[RobustnessTesting] tags=[:core] retries=3 begin
-    alg_ops=(
+    alg_ops = (
         Broyden(),
         Broyden(; init_jacobian = Val(:true_jacobian)),
         Broyden(; update_rule = Val(:bad_broyden)),
@@ -142,37 +141,37 @@ end
         SimpleBroyden()
     )
 
-    broken_tests=Dict(alg=>Int[] for alg in alg_ops)
-    broken_tests[alg_ops[2]]=[1, 5, 8, 11, 18]
-    broken_tests[alg_ops[4]]=[5, 6, 8, 11]
+    broken_tests = Dict(alg => Int[] for alg in alg_ops)
+    broken_tests[alg_ops[2]] = [1, 5, 8, 11, 18]
+    broken_tests[alg_ops[4]] = [5, 6, 8, 11]
     if Sys.isapple()
-        broken_tests[alg_ops[1]]=[1, 5, 11]
-        broken_tests[alg_ops[3]]=[1, 5, 6, 9, 11]
-        if VERSION≥v"1.11-"
-            broken_tests[alg_ops[5]]=[1, 4, 5, 11]
+        broken_tests[alg_ops[1]] = [1, 5, 11]
+        broken_tests[alg_ops[3]] = [1, 5, 6, 9, 11]
+        if VERSION ≥ v"1.11-"
+            broken_tests[alg_ops[5]] = [1, 4, 5, 11]
         else
-            broken_tests[alg_ops[5]]=[1, 5, 11]
+            broken_tests[alg_ops[5]] = [1, 5, 11]
         end
     else
-        broken_tests[alg_ops[1]]=[1, 5, 11, 15]
-        broken_tests[alg_ops[3]]=[1, 5, 6, 9, 11, 16]
-        broken_tests[alg_ops[5]]=[1, 5, 11]
+        broken_tests[alg_ops[1]] = [1, 5, 11, 15]
+        broken_tests[alg_ops[3]] = [1, 5, 6, 9, 11, 16]
+        broken_tests[alg_ops[5]] = [1, 5, 11]
     end
 
     test_on_library(problems, dicts, alg_ops, broken_tests, Sys.isapple() ? 1e-3 : 1e-4)
 end
 
 @testitem "23 Test Problems: Klement" setup=[RobustnessTesting] tags=[:core] begin
-    alg_ops=(
+    alg_ops = (
         Klement(),
         Klement(; init_jacobian = Val(:true_jacobian_diagonal)),
         SimpleKlement()
     )
 
-    broken_tests=Dict(alg=>Int[] for alg in alg_ops)
-    broken_tests[alg_ops[1]]=[1, 2, 4, 5, 11, 18, 22]
-    broken_tests[alg_ops[2]]=[2, 4, 5, 7, 18, 22]
-    broken_tests[alg_ops[3]]=[1, 2, 4, 5, 11, 22]
+    broken_tests = Dict(alg => Int[] for alg in alg_ops)
+    broken_tests[alg_ops[1]] = [1, 2, 4, 5, 11, 18, 22]
+    broken_tests[alg_ops[2]] = [2, 4, 5, 7, 18, 22]
+    broken_tests[alg_ops[3]] = [1, 2, 4, 5, 11, 22]
 
     test_on_library(problems, dicts, alg_ops, broken_tests)
 end
@@ -180,10 +179,10 @@ end
 @testitem "23 Test Problems: PseudoTransient" setup=[RobustnessTesting] tags=[:core] begin
     # PT relies on the root being a stable equilibrium for convergence, so it won't work on
     # most problems
-    alg_ops=(PseudoTransient(),)
+    alg_ops = (PseudoTransient(),)
 
-    broken_tests=Dict(alg=>Int[] for alg in alg_ops)
-    broken_tests[alg_ops[1]]=[1, 2, 3, 11, 15, 16]
+    broken_tests = Dict(alg => Int[] for alg in alg_ops)
+    broken_tests[alg_ops[1]] = [1, 2, 3, 11, 15, 16]
 
     test_on_library(problems, dicts, alg_ops, broken_tests)
 end
diff --git a/test/cuda_tests.jl b/test/cuda_tests.jl
index af1a21590..f81ba952c 100644
--- a/test/cuda_tests.jl
+++ b/test/cuda_tests.jl
@@ -67,8 +67,8 @@ end
     end
 
     @testset "Mode: $(tcond)" for tcond in NORM_TERMINATION_CONDITIONS
-        for nfn in
-            (Base.Fix1(maximum, abs), Base.Fix2(norm, 2), Base.Fix2(norm, Inf))
+        for nfn in (
+            Base.Fix1(maximum, abs), Base.Fix2(norm, 2), Base.Fix2(norm, Inf))
             @test_nowarn NonlinearSolveBase.check_convergence(
                 tcond(nfn), du, u, uprev, 1e-3, 1e-3)
         end
diff --git a/test/default_alg_tests.jl b/test/default_alg_tests.jl
index 051d8fa8f..cf4d8f876 100644
--- a/test/default_alg_tests.jl
+++ b/test/default_alg_tests.jl
@@ -6,104 +6,104 @@
         du[1] = 2 - 2u[1]
         du[2] = u[1] - 4u[2]
     end
-    
+
     u0 = zeros(2)
     prob_iip = SteadyStateProblem(f_iip, u0)
-    
+
     @testset "In-place SteadyStateProblem" begin
         # Test with default algorithm (nothing)
         sol = solve(prob_iip)
         @test SciMLBase.successful_retcode(sol.retcode)
         @test maximum(abs, sol.resid) < 1e-6
-        
+
         # Test with explicit nothing
         sol = solve(prob_iip, nothing)
         @test SciMLBase.successful_retcode(sol.retcode)
         @test maximum(abs, sol.resid) < 1e-6
-        
+
         # Test init interface
         cache = init(prob_iip)
         sol = solve!(cache)
         @test SciMLBase.successful_retcode(sol.retcode)
         @test maximum(abs, sol.resid) < 1e-6
-        
+
         # Test init with nothing
         cache = init(prob_iip, nothing)
         sol = solve!(cache)
         @test SciMLBase.successful_retcode(sol.retcode)
         @test maximum(abs, sol.resid) < 1e-6
     end
-    
+
     # Test with out-of-place function
     f_oop(u, p, t) = [2 - 2u[1], u[1] - 4u[2]]
     u0 = zeros(2)
     prob_oop = SteadyStateProblem(f_oop, u0)
-    
+
     @testset "Out-of-place SteadyStateProblem" begin
         # Test with default algorithm (nothing)
         sol = solve(prob_oop)
         @test SciMLBase.successful_retcode(sol.retcode)
         @test maximum(abs, sol.resid) < 1e-6
-        
+
         # Test with explicit nothing
         sol = solve(prob_oop, nothing)
         @test SciMLBase.successful_retcode(sol.retcode)
         @test maximum(abs, sol.resid) < 1e-6
-        
+
         # Test init interface
         cache = init(prob_oop)
         sol = solve!(cache)
         @test SciMLBase.successful_retcode(sol.retcode)
         @test maximum(abs, sol.resid) < 1e-6
-        
+
         # Test init with nothing
         cache = init(prob_oop, nothing)
         sol = solve!(cache)
         @test SciMLBase.successful_retcode(sol.retcode)
         @test maximum(abs, sol.resid) < 1e-6
     end
-    
+
     # Test that SteadyStateProblem conversion works
     @testset "Problem conversion" begin
         # Create equivalent NonlinearProblem
         function f_nl(u, p)
             [2 - 2u[1], u[1] - 4u[2]]
         end
-        
+
        prob_nl = NonlinearProblem(f_nl, u0)
-        
+
        # Convert SteadyStateProblem to NonlinearProblem
        prob_converted = NonlinearProblem(prob_oop)
-        
+
        # Both should solve to the same solution
        sol_nl = solve(prob_nl)
        sol_converted = solve(prob_converted)
-        
-        @test sol_nl.u ≈ sol_converted.u atol=1e-10
+
+        @test sol_nl.u≈sol_converted.u atol=1e-10
     end
-    
+
     # Test with StaticArrays
     @testset "StaticArrays support" begin
         f_static(u, p, t) = @SVector [2 - 2u[1], u[1] - 4u[2]]
         u0_static = @SVector [0.0, 0.0]
         prob_static = SteadyStateProblem(f_static, u0_static)
-        
+
         sol = solve(prob_static)
         @test SciMLBase.successful_retcode(sol.retcode)
         @test maximum(abs, sol.resid) < 1e-6
     end
-    
+
     # Test that solve works with various problem types
     @testset "Mixed problem types" begin
         # Regular arrays
         prob1 = SteadyStateProblem(f_oop, [0.5, 0.5])
         sol1 = solve(prob1)
         @test SciMLBase.successful_retcode(sol1.retcode)
-        
+
         # With parameters
         f_param(u, p, t) = [p[1] - 2u[1], u[1] - 4u[2]]
         prob2 = SteadyStateProblem(f_param, [0.5, 0.5], [2.0])
         sol2 = solve(prob2)
         @test SciMLBase.successful_retcode(sol2.retcode)
     end
-end
\ No newline at end of file
+end
diff --git a/test/forward_ad_tests.jl b/test/forward_ad_tests.jl
index 9984c0a0b..47922afaa 100644
--- a/test/forward_ad_tests.jl
+++ b/test/forward_ad_tests.jl
@@ -104,6 +104,7 @@ end
     for u0 in us, p in ([2.0, 1.0], [2.0 1.0; 3.0 4.0]),
         mode in (:iip, :oop, :iip_cache, :oop_cache)
 
+        compatible(u0, p) || continue
         compatible(u0, alg) || continue
         compatible(u0, Val(mode)) || continue
diff --git a/test/runtests.jl b/test/runtests.jl
index dcfb869ff..420c8589a 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -22,12 +22,14 @@ const RETESTITEMS_NWORKERS = if GROUP == "wrappers"
     0 # Sequential execution for wrapper tests
 else
     tmp = get(ENV, "RETESTITEMS_NWORKERS", "")
-    isempty(tmp) && (tmp = string(min(ifelse(Sys.iswindows(), 0, Hwloc.num_physical_cores()), 4)))
+    isempty(tmp) &&
+        (tmp = string(min(ifelse(Sys.iswindows(), 0, Hwloc.num_physical_cores()), 4)))
     parse(Int, tmp)
 end
 
 const RETESTITEMS_NWORKER_THREADS = begin
     tmp = get(ENV, "RETESTITEMS_NWORKER_THREADS", "")
-    isempty(tmp) && (tmp = string(max(Hwloc.num_virtual_cores() ÷ max(RETESTITEMS_NWORKERS, 1), 1)))
+    isempty(tmp) &&
+        (tmp = string(max(Hwloc.num_virtual_cores() ÷ max(RETESTITEMS_NWORKERS, 1), 1)))
     parse(Int, tmp)
 end
diff --git a/test/wrappers/least_squares_tests.jl b/test/wrappers/least_squares_tests.jl
index 99adc847f..effef126e 100644
--- a/test/wrappers/least_squares_tests.jl
+++ b/test/wrappers/least_squares_tests.jl
@@ -7,9 +7,9 @@ end
 @testitem "LeastSquaresOptim.jl" setup=[WrapperNLLSSetup] tags=[:wrappers] begin
     import LeastSquaresOptim
 
-    nlls_problems=[prob_oop, prob_iip]
+    nlls_problems = [prob_oop, prob_iip]
 
-    solvers=[]
+    solvers = []
     for alg in (:lm, :dogleg),
         autodiff in (nothing, AutoForwardDiff(), AutoFiniteDiff(), :central, :forward)
 
@@ -17,8 +17,7 @@ end
     end
 
     for prob in nlls_problems, solver in solvers
-
-        sol=solve(prob, solver; maxiters = 10000, abstol = 1e-8)
+        sol = solve(prob, solver; maxiters = 10000, abstol = 1e-8)
         @test SciMLBase.successful_retcode(sol)
         @test maximum(abs, sol.resid) < 1e-6
     end
@@ -29,14 +28,14 @@ end
     using ForwardDiff
 
     function jac!(J, θ, p)
-        resid=zeros(length(p))
-        ForwardDiff.jacobian!(J, (resid, θ)->loss_function(resid, θ, p), resid, θ)
+        resid = zeros(length(p))
+        ForwardDiff.jacobian!(J, (resid, θ) -> loss_function(resid, θ, p), resid, θ)
         return J
     end
 
-    jac(θ, p)=ForwardDiff.jacobian(θ->loss_function(θ, p), θ)
+    jac(θ, p) = ForwardDiff.jacobian(θ -> loss_function(θ, p), θ)
 
-    probs=[
+    probs = [
         NonlinearLeastSquaresProblem(
             NonlinearFunction{true}(
                 loss_function; resid_prototype = zero(y_target), jac = jac!
@@ -54,12 +53,11 @@ end
         )
     ]
 
-    solvers=Any[FastLevenbergMarquardtJL(linsolve) for linsolve in (:cholesky, :qr)]
-    Sys.isapple()||push!(solvers, CMINPACK())
+    solvers = Any[FastLevenbergMarquardtJL(linsolve) for linsolve in (:cholesky, :qr)]
+    Sys.isapple() || push!(solvers, CMINPACK())
 
     for solver in solvers, prob in probs
-
-        sol=solve(prob, solver; maxiters = 10000, abstol = 1e-8)
+        sol = solve(prob, solver; maxiters = 10000, abstol = 1e-8)
         @test maximum(abs, sol.resid) < 1e-6
     end
 end
@@ -67,7 +65,7 @@ end
 @testitem "FastLevenbergMarquardt.jl + CMINPACK: Jacobian Not Provided" setup=[WrapperNLLSSetup] tags=[:wrappers] begin
     import FastLevenbergMarquardt, MINPACK
 
-    probs=[
+    probs = [
         NonlinearLeastSquaresProblem(
             NonlinearFunction{true}(loss_function; resid_prototype = zero(y_target)),
             θ_init, x
@@ -79,7 +77,7 @@ end
         NonlinearLeastSquaresProblem(NonlinearFunction{false}(loss_function), θ_init, x)
     ]
 
-    solvers=[]
+    solvers = []
     for linsolve in (:cholesky, :qr),
         autodiff in (nothing, AutoForwardDiff(), AutoFiniteDiff())
 
@@ -92,8 +90,7 @@ end
     end
 
     for solver in solvers, prob in probs
-
-        sol=solve(prob, solver; maxiters = 10000, abstol = 1e-8)
+        sol = solve(prob, solver; maxiters = 10000, abstol = 1e-8)
         @test maximum(abs, sol.resid) < 1e-6
     end
 end
@@ -101,18 +98,18 @@ end
 @testitem "FastLevenbergMarquardt.jl + StaticArrays" setup=[WrapperNLLSSetup] tags=[:wrappers] begin
     using StaticArrays, FastLevenbergMarquardt
 
-    x_sa=SA[-1.0, -0.5, 0.0, 0.5, 1.0]
+    x_sa = SA[-1.0, -0.5, 0.0, 0.5, 1.0]
 
-    const y_target_sa=true_function(x_sa, θ_true)
+    const y_target_sa = true_function(x_sa, θ_true)
 
     function loss_function_sa(θ, p)
-        ŷ=true_function(p, θ)
+        ŷ = true_function(p, θ)
         return ŷ .- y_target_sa
     end
 
-    θ_init_sa=SVector{4}(θ_init)
-    prob_sa=NonlinearLeastSquaresProblem{false}(loss_function_sa, θ_init_sa, x)
+    θ_init_sa = SVector{4}(θ_init)
+    prob_sa = NonlinearLeastSquaresProblem{false}(loss_function_sa, θ_init_sa, x)
 
-    sol=solve(prob_sa, FastLevenbergMarquardtJL())
+    sol = solve(prob_sa, FastLevenbergMarquardtJL())
 
     @test maximum(abs, sol.resid) < 1e-6
 end
diff --git a/test/wrappers/rootfind_tests.jl b/test/wrappers/rootfind_tests.jl
index 3046a1952..b62dd54ad 100644
--- a/test/wrappers/rootfind_tests.jl
+++ b/test/wrappers/rootfind_tests.jl
@@ -2,11 +2,11 @@
     import NLSolvers, NLsolve, SIAMFANLEquations, MINPACK, PETSc
 
     function f_iip(du, u, p, t)
-        du[1]=2-2u[1]
-        du[2]=u[1]-4u[2]
+        du[1] = 2 - 2u[1]
+        du[2] = u[1] - 4u[2]
     end
-    u0=zeros(2)
-    prob_iip=SteadyStateProblem(f_iip, u0)
+    u0 = zeros(2)
+    prob_iip = SteadyStateProblem(f_iip, u0)
 
     @testset "$(nameof(typeof(alg)))" for alg in [
         NLSolversJL(NLSolvers.LineSearch(NLSolvers.Newton(), NLSolvers.Backtracking())),
@@ -23,9 +23,9 @@
         @test maximum(abs, sol.resid) < 1e-6
     end
 
-    f_oop(u, p, t)=[2-2u[1], u[1]-4u[2]]
-    u0=zeros(2)
-    prob_oop=SteadyStateProblem(f_oop, u0)
+    f_oop(u, p, t) = [2 - 2u[1], u[1] - 4u[2]]
+    u0 = zeros(2)
+    prob_oop = SteadyStateProblem(f_oop, u0)
 
     @testset "$(nameof(typeof(alg)))" for alg in [
         NLSolversJL(NLSolvers.LineSearch(NLSolvers.Newton(), NLSolvers.Backtracking())),
@@ -48,11 +48,11 @@ end
     import NLSolvers, NLsolve, SIAMFANLEquations, MINPACK, PETSc
 
     function f_iip(du, u, p)
-        du[1]=2-2u[1]
-        du[2]=u[1]-4u[2]
+        du[1] = 2 - 2u[1]
+        du[2] = u[1] - 4u[2]
     end
-    u0=zeros(2)
-    prob_iip=NonlinearProblem{true}(f_iip, u0)
+    u0 = zeros(2)
+    prob_iip = NonlinearProblem{true}(f_iip, u0)
 
     @testset "$(nameof(typeof(alg)))" for alg in [
         NLSolversJL(NLSolvers.LineSearch(NLSolvers.Newton(), NLSolvers.Backtracking())),
@@ -70,9 +70,9 @@ end
         @test maximum(abs, sol.resid) < 1e-6
     end
 
-    f_oop(u, p)=[2-2u[1], u[1]-4u[2]]
-    u0=zeros(2)
-    prob_oop=NonlinearProblem{false}(f_oop, u0)
+    f_oop(u, p) = [2 - 2u[1], u[1] - 4u[2]]
+    u0 = zeros(2)
+    prob_oop = NonlinearProblem{false}(f_oop, u0)
 
     @testset "$(nameof(typeof(alg)))" for alg in [
         NLSolversJL(NLSolvers.LineSearch(NLSolvers.Newton(), NLSolvers.Backtracking())),
         NLsolveJL(),
@@ -89,8 +89,8 @@ end
         @test maximum(abs, sol.resid) < 1e-6
     end
 
-    f_tol(u, p)=u^2-2
-    prob_tol=NonlinearProblem(f_tol, 1.0)
+    f_tol(u, p) = u^2 - 2
+    prob_tol = NonlinearProblem(f_tol, 1.0)
 
     for tol in [1e-1, 1e-3, 1e-6, 1e-10, 1e-15],
         alg in [
             NLSolversJL(NLSolvers.LineSearch(NLSolvers.Newton(), NLSolvers.Backtracking())),
@@ -103,62 +103,62 @@ end
             SIAMFANLEquationsJL(; method = :secant)
         ]
 
-        alg isa CMINPACK&&Sys.isapple()&&continue
-        alg isa PETScSNES&&Sys.iswindows()&&continue
-        sol=solve(prob_tol, alg, abstol = tol)
+        alg isa CMINPACK && Sys.isapple() && continue
+        alg isa PETScSNES && Sys.iswindows() && continue
+        sol = solve(prob_tol, alg, abstol = tol)
         @test abs(sol.u[1] - sqrt(2)) < tol
     end
 
-    f_jfnk(u, p)=u^2-2
-    prob_jfnk=NonlinearProblem(f_jfnk, 1.0)
+    f_jfnk(u, p) = u^2 - 2
+    prob_jfnk = NonlinearProblem(f_jfnk, 1.0)
 
     for tol in [1e-1, 1e-3, 1e-6, 1e-10, 1e-11]
-        sol=solve(prob_jfnk, SIAMFANLEquationsJL(linsolve = :gmres), abstol = tol)
+        sol = solve(prob_jfnk, SIAMFANLEquationsJL(linsolve = :gmres), abstol = tol)
         @test abs(sol.u[1] - sqrt(2)) < tol
     end
 
     # Test the finite differencing technique
     function f!(fvec, x, p)
-        fvec[1]=(x[1]+3)*(x[2]^3-7)+18
-        fvec[2]=sin(x[2]*exp(x[1])-1)
+        fvec[1] = (x[1] + 3) * (x[2]^3 - 7) + 18
+        fvec[2] = sin(x[2] * exp(x[1]) - 1)
     end
 
-    prob=NonlinearProblem{true}(f!, [0.1; 1.2])
-    sol=solve(prob, NLsolveJL(autodiff = :central))
+    prob = NonlinearProblem{true}(f!, [0.1; 1.2])
+    sol = solve(prob, NLsolveJL(autodiff = :central))
     @test maximum(abs, sol.resid) < 1e-6
 
-    sol=solve(prob, SIAMFANLEquationsJL())
+    sol = solve(prob, SIAMFANLEquationsJL())
     @test maximum(abs, sol.resid) < 1e-6
 
     # Test the autodiff technique
-    sol=solve(prob, NLsolveJL(autodiff = :forward))
+    sol = solve(prob, NLsolveJL(autodiff = :forward))
     @test maximum(abs, sol.resid) < 1e-6
 
     # Custom Jacobian
-    f_custom_jac!(F, u, p)=(F[1:152]=u .^ 2 .- p)
-    j_custom_jac!(J, u, p)=(J[1:152, 1:152]=diagm(2 .* u))
+    f_custom_jac!(F, u, p) = (F[1:152] = u .^ 2 .- p)
+    j_custom_jac!(J, u, p) = (J[1:152, 1:152] = diagm(2 .* u))
 
-    init=ones(152)
-    A=ones(152)
-    A[6]=0.8
+    init = ones(152)
+    A = ones(152)
+    A[6] = 0.8
 
-    f=NonlinearFunction(f_custom_jac!; jac = j_custom_jac!)
-    p=A
+    f = NonlinearFunction(f_custom_jac!; jac = j_custom_jac!)
+    p = A
 
-    ProbN=NonlinearProblem(f, init, p)
+    ProbN = NonlinearProblem(f, init, p)
 
-    sol=solve(ProbN, NLsolveJL(); abstol = 1e-8)
+    sol = solve(ProbN, NLsolveJL(); abstol = 1e-8)
     @test maximum(abs, sol.resid) < 1e-6
 
-    sol=solve(
+    sol = solve(
         ProbN,
         NLSolversJL(NLSolvers.LineSearch(NLSolvers.Newton(), NLSolvers.Backtracking()));
         abstol = 1e-8
     )
     @test maximum(abs, sol.resid) < 1e-6
 
-    sol=solve(ProbN, SIAMFANLEquationsJL(; method = :newton); abstol = 1e-8)
+    sol = solve(ProbN, SIAMFANLEquationsJL(; method = :newton); abstol = 1e-8)
     @test maximum(abs, sol.resid) < 1e-6
 
-    sol=solve(ProbN, SIAMFANLEquationsJL(; method = :pseudotransient); abstol = 1e-8)
+    sol = solve(ProbN, SIAMFANLEquationsJL(; method = :pseudotransient); abstol = 1e-8)
     @test maximum(abs, sol.resid) < 1e-6
 
     if !Sys.iswindows()
-        sol=solve(ProbN, PETScSNES(); abstol = 1e-8)
+        sol = solve(ProbN, PETScSNES(); abstol = 1e-8)
         @test maximum(abs, sol.resid) < 1e-6
     end
 end
@@ -166,22 +166,22 @@ end
 @testitem "PETSc SNES Floating Points" tags=[:wrappers] retries=5 skip=:(Sys.iswindows()) begin
     import PETSc
 
-    f(u, p)=u .* u .- 2
+    f(u, p) = u .* u .- 2
 
-    u0=[1.0, 1.0]
-    probN=NonlinearProblem{false}(f, u0)
+    u0 = [1.0, 1.0]
+    probN = NonlinearProblem{false}(f, u0)
 
-    sol=solve(probN, PETScSNES(); abstol = 1e-8)
+    sol = solve(probN, PETScSNES(); abstol = 1e-8)
     @test maximum(abs, sol.resid) < 1e-6
 
-    u0=[1.0f0, 1.0f0]
-    probN=NonlinearProblem{false}(f, u0)
+    u0 = [1.0f0, 1.0f0]
+    probN = NonlinearProblem{false}(f, u0)
 
-    sol=solve(probN, PETScSNES(); abstol = 1e-5)
+    sol = solve(probN, PETScSNES(); abstol = 1e-5)
     @test maximum(abs, sol.resid) < 1e-4
 
-    u0=Float16[1.0, 1.0]
-    probN=NonlinearProblem{false}(f, u0)
+    u0 = Float16[1.0, 1.0]
+    probN = NonlinearProblem{false}(f, u0)
 
     @test_throws AssertionError solve(probN, PETScSNES(); abstol = 1e-8)
 end
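For context on the `src/default.jl` change above (the new `__init`/`__solve` fallbacks for `SciMLBase.AbstractSteadyStateProblem` with no algorithm), here is a minimal usage sketch of the behavior the new `test/default_alg_tests.jl` cases exercise. It is not part of the patch; the 2x2 steady-state system and the checks are taken from those tests, and the qualified `SciMLBase` names are used defensively in case they are not re-exported.

```julia
import NonlinearSolve as NLS
import SciMLBase

# Same steady-state system used in the new tests; the steady state is u = [1.0, 0.25].
function f_iip(du, u, p, t)
    du[1] = 2 - 2u[1]
    du[2] = u[1] - 4u[2]
end

prob = SciMLBase.SteadyStateProblem(f_iip, zeros(2))

# With no algorithm given (equivalently `solve(prob, nothing)`), the new fallback
# converts the problem via SciMLBase.NonlinearProblem(prob) and dispatches to the
# default nonlinear solver selection.
sol = NLS.solve(prob)
@assert SciMLBase.successful_retcode(sol.retcode)
@assert maximum(abs, sol.resid) < 1e-6
```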