Commit c64aa9f

Merge branch 'main' into nevergrad-OneplusOne
2 parents 70baba5 + b63836f commit c64aa9f

File tree: 7 files changed (+94 −23 lines)

.pre-commit-config.yaml (+2 −3)

@@ -22,10 +22,9 @@ repos:
     hooks:
       - id: update-algo-selection-code
         name: update algo selection code
-        entry: bash .tools/create_and_format_algo_selection_code.sh
+        entry: python .tools/update_algo_selection_hook.py
         language: python
-        files: (src/optimagic/optimizers/.|src/optimagic/algorithms.py|.tools/.)
-        always_run: false
+        files: ^(src/optimagic/optimizers/|src/optimagic/algorithms\.py|\.tools/)
         require_serial: true
         additional_dependencies:
          - hatchling
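The new `files` pattern is anchored at the start of the path and escapes the literal dots, so only changes under src/optimagic/optimizers/, src/optimagic/algorithms.py, or .tools/ trigger the hook; the old pattern treated the dots as wildcards and could match anywhere in a path. A small sketch of the match behaviour, assuming (as pre-commit documents) that `files` is interpreted as a Python regular expression; the example paths are illustrative only:

```python
import re

# Anchored pattern from the updated hook configuration.
FILES = re.compile(r"^(src/optimagic/optimizers/|src/optimagic/algorithms\.py|\.tools/)")

assert FILES.search("src/optimagic/optimizers/scipy_optimizers.py")
assert FILES.search(".tools/update_algo_selection_hook.py")
assert not FILES.search("docs/source/algorithms.md")
```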

.tools/create_and_format_algo_selection_code.sh (−17)

This file was deleted.

.tools/update_algo_selection_hook.py (+42, new file)

#!/usr/bin/env python
import subprocess
import sys
from pathlib import Path
from typing import Any

ROOT = Path(__file__).resolve().parents[1]

# sys.executable guarantees we stay inside the pre-commit venv
PYTHON = [sys.executable]
# "-m" lets us call std-lib modules (e.g. pip) the same way
PYTHON_MINUS_M = [*PYTHON, "-m"]


def run(cmd: list[str], **kwargs: Any) -> None:
    subprocess.check_call(cmd, cwd=ROOT, **kwargs)


def ensure_optimagic_is_locally_installed() -> None:
    try:
        run(PYTHON_MINUS_M + ["pip", "show", "optimagic"], stdout=subprocess.DEVNULL)
    except subprocess.CalledProcessError:
        run(PYTHON_MINUS_M + ["pip", "install", "-e", "."])


def main() -> int:
    ensure_optimagic_is_locally_installed()
    run(PYTHON + [".tools/create_algo_selection_code.py"])

    ruff_args = [
        "--silent",
        "--config",
        "pyproject.toml",
        "src/optimagic/algorithms.py",
    ]
    run(["ruff", "format", *ruff_args])
    run(["ruff", "check", "--fix", *ruff_args])
    return 0  # explicit success code


if __name__ == "__main__":
    sys.exit(main())
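A note on the error handling in this script: `subprocess.check_call` raises `CalledProcessError` on any non-zero exit status, so a failing pip, code-generation, or ruff step aborts the hook instead of passing silently; only the `pip show` probe catches that exception, to trigger the editable install. A minimal illustration of that behaviour (the inline command is made up for the example):

```python
import subprocess
import sys

try:
    # A deliberately failing command: check_call raises on non-zero exit codes.
    subprocess.check_call([sys.executable, "-c", "raise SystemExit(1)"])
except subprocess.CalledProcessError as err:
    print(f"command failed with exit code {err.returncode}")  # prints 1
```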

docs/source/algorithms.md (+23)

@@ -86,6 +86,7 @@ install optimagic.
   f in the stopping criterion.
 - **stopping.maxiter** (int): If the maximum number of iterations is reached,
   the optimization stops, but we do not count this as convergence.
+- **display** (bool): Set to True to print convergence messages. Default is False. SciPy name: **disp**.

 ```

@@ -122,6 +123,7 @@ install optimagic.
 - **convergence.ftol_abs** (float): Absolute difference in the criterion value between
   iterations that is tolerated to declare convergence. As no relative tolerances can be passed to Nelder-Mead,
   optimagic sets a nonzero default for this.
+- **display** (bool): Set to True to print convergence messages. Default is False. SciPy name: **disp**.
 - **adaptive** (bool): Adapt algorithm parameters to dimensionality of problem.
   Useful for high-dimensional minimization (:cite:`Gao2012`, p. 259-277). scipy's default is False.

@@ -165,6 +167,7 @@ install optimagic.
   the optimization stops, but we do not count this as convergence.
 - **stopping.maxiter** (int): If the maximum number of iterations is reached, the optimization stops,
   but we do not count this as convergence.
+- **display** (bool): Set to True to print convergence messages. Default is False. SciPy name: **disp**.

 ```

@@ -190,7 +193,22 @@ install optimagic.
 - **norm** (float): Order of the vector norm that is used to calculate the gradient's "score" that
   is compared to the gradient tolerance to determine convergence. Default is infinite which means that
   the largest entry of the gradient vector is compared to the gradient tolerance.
+- **display** (bool): Set to True to print convergence messages. Default is False. SciPy name: **disp**.
+- **convergence_xtol_rel** (float): Relative tolerance for `x`. Terminate successfully if step size is less than `xk * xrtol` where `xk` is the current parameter vector. Default is 1e-5. SciPy name: **xrtol**.
+- **armijo_condition** (float): Parameter for the Armijo condition rule. Default is 1e-4. Ensures

+  .. math::
+
+     f(x_k + \alpha p_k) \le f(x_k) + \mathrm{armijo\_condition} \cdot \alpha \, \nabla f(x_k)^\top p_k,
+
+  so each step yields at least a fraction **armijo_condition** of the predicted decrease. Smaller ⇒ more aggressive steps, larger ⇒ more conservative ones. SciPy name: **c1**.
+- **curvature_condition** (float): Parameter for the curvature condition rule. Default is 0.9. Ensures
+
+  .. math::
+
+     \nabla f(x_k + \alpha p_k)^\top p_k \ge \mathrm{curvature\_condition} \cdot \nabla f(x_k)^\top p_k,
+
+  so the new slope isn't too negative. Smaller ⇒ stricter curvature reduction (smaller steps), larger ⇒ looser (bigger steps). SciPy name: **c2**.
 ```

 ```{eval-rst}
@@ -225,6 +243,7 @@ install optimagic.
   "score" that is compared to the gradient tolerance to determine convergence.
   Default is infinite which means that the largest entry of the gradient vector
   is compared to the gradient tolerance.
+- **display** (bool): Set to True to print convergence messages. Default is False. SciPy name: **disp**.

 ```

@@ -271,6 +290,7 @@ install optimagic.
   relative change in the parameters for determining the convergence.
 - **stopping.maxiter** (int): If the maximum number of iterations is reached,
   the optimization stops, but we do not count this as convergence.
+- **display** (bool): Set to True to print convergence messages. Default is False. SciPy name: **disp**.

@@ -309,6 +329,7 @@ install optimagic.
   RHO_j from x_j. RHO_j only decreases, never increases. The initial RHO_j is
   the `trustregion.initial_radius`. In this way COBYLA's iterations behave
   like a trust region algorithm.
+- **display** (bool): Set to True to print convergence messages. Default is False. SciPy name: **disp**.

 ```

@@ -376,6 +397,7 @@ install optimagic.
   criterion rescaling. If 0, rescale at each iteration. If a large value,
   never rescale. If < 0, rescale is set to 1.3. optimagic defaults to scipy's
   default.
+- **display** (bool): Set to True to print convergence messages. Default is False. SciPy name: **disp**.

 ```

@@ -436,6 +458,7 @@ install optimagic.
   valid only close to the current point it should be a small one.
   The trust radius is automatically updated throughout the optimization
   process, with ``trustregion_initial_radius`` being its initial value.
+- **display** (bool): Set to True to print convergence messages. Default is False. SciPy name: **disp**.

 ```
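All of the options documented above are set through optimagic's `algo_options`. A minimal sketch of how the new BFGS options would be used, assuming the option names are accepted verbatim as `algo_options` keys; the quadratic objective is made up for illustration:

```python
import numpy as np
import optimagic as om


def sphere(params):
    # Simple quadratic objective used only for illustration.
    return params @ params


res = om.minimize(
    fun=sphere,
    params=np.arange(5, dtype=float),
    algorithm="scipy_bfgs",
    algo_options={
        "display": True,               # forwarded to SciPy as disp
        "convergence_xtol_rel": 1e-5,  # forwarded as xrtol
        "armijo_condition": 1e-4,      # forwarded as c1
        "curvature_condition": 0.9,    # forwarded as c2
    },
)
print(res.params)
```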

src/optimagic/logging/logger.py (+1 −1)

@@ -190,7 +190,7 @@ def _build_history_dataframe(self) -> pd.DataFrame:
         # For numpy arrays with ndim = 0, tolist() returns a scalar, which violates the
         # type hinting list[Any] from above. As history["time"] is always a list, this
         # case is safe to ignore.
-        history["time"] = times.tolist()  # type: ignore[assignment]
+        history["time"] = times.tolist()

         df = pd.DataFrame(history)
         df = df.merge(

src/optimagic/optimization/history.py (+2 −2)

@@ -409,7 +409,7 @@ def _get_flat_param_names(param: PyTree) -> list[str]:
     if fast_path:
         # Mypy raises an error here because .tolist() returns a str for zero-dimensional
         # arrays, but the fast path is only taken for 1d arrays, so it can be ignored.
-        return np.arange(param.size).astype(str).tolist()  # type: ignore[return-value]
+        return np.arange(param.size).astype(str).tolist()

     registry = get_registry(extended=True)
     return leaf_names(param, registry=registry)

@@ -530,7 +530,7 @@ def _get_batch_starts_and_stops(batch_ids: list[int]) -> tuple[list[int], list[int]]:
     """
     ids_arr = np.array(batch_ids, dtype=np.int64)
     indices = np.where(ids_arr[:-1] != ids_arr[1:])[0] + 1
-    list_indices: list[int] = indices.tolist()  # type: ignore[assignment]
+    list_indices: list[int] = indices.tolist()
     starts = [0, *list_indices]
     stops = [*starts[1:], len(batch_ids)]
     return starts, stops
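For context on these two changes: the surrounding comments explain that the `# type: ignore` markers existed because `.tolist()` on a zero-dimensional array returns a scalar, while these code paths only ever see 1d arrays, which always yield a real list. A small illustration of that runtime behaviour (not part of the diff):

```python
import numpy as np

print(np.arange(3).astype(str).tolist())  # ['0', '1', '2'] -> a real list for 1d arrays
print(np.array(5).tolist())               # 5               -> a scalar for 0-d arrays
```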

src/optimagic/optimizers/scipy_optimizers.py (+24)

@@ -145,13 +145,15 @@ def _solve_internal_problem(
 class ScipySLSQP(Algorithm):
     convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_SECOND_BEST_FTOL_ABS
     stopping_maxiter: PositiveInt = STOPPING_MAXITER
+    display: bool = False

     def _solve_internal_problem(
         self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
     ) -> InternalOptimizeResult:
         options = {
             "maxiter": self.stopping_maxiter,
             "ftol": self.convergence_ftol_abs,
+            "disp": self.display,
         }
         raw_res = scipy.optimize.minimize(
             fun=problem.fun_and_jac,

@@ -186,6 +188,7 @@ class ScipyNelderMead(Algorithm):
     convergence_ftol_abs: NonNegativeFloat = CONVERGENCE_SECOND_BEST_FTOL_ABS
     convergence_xtol_abs: NonNegativeFloat = CONVERGENCE_SECOND_BEST_XTOL_ABS
     adaptive: bool = False
+    display: bool = False

     def _solve_internal_problem(
         self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]

@@ -197,6 +200,7 @@ def _solve_internal_problem(
             "fatol": self.convergence_ftol_abs,
             # TODO: Benchmark if adaptive = True works better
             "adaptive": self.adaptive,
+            "disp": self.display,
         }
         raw_res = scipy.optimize.minimize(
             fun=problem.fun,

@@ -228,6 +232,7 @@ class ScipyPowell(Algorithm):
     convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL
     stopping_maxfun: PositiveInt = STOPPING_MAXFUN
     stopping_maxiter: PositiveInt = STOPPING_MAXITER
+    display: bool = False

     def _solve_internal_problem(
         self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]

@@ -237,6 +242,7 @@ def _solve_internal_problem(
             "ftol": self.convergence_ftol_rel,
             "maxfev": self.stopping_maxfun,
             "maxiter": self.stopping_maxiter,
+            "disp": self.display,
         }
         raw_res = scipy.optimize.minimize(
             fun=problem.fun,

@@ -267,6 +273,10 @@ class ScipyBFGS(Algorithm):
     convergence_gtol_abs: NonNegativeFloat = CONVERGENCE_GTOL_ABS
     stopping_maxiter: PositiveInt = STOPPING_MAXITER
     norm: NonNegativeFloat = np.inf
+    convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL
+    display: bool = False
+    armijo_condition: NonNegativeFloat = 1e-4
+    curvature_condition: NonNegativeFloat = 0.9

     def _solve_internal_problem(
         self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]

@@ -275,6 +285,10 @@ def _solve_internal_problem(
             "gtol": self.convergence_gtol_abs,
             "maxiter": self.stopping_maxiter,
             "norm": self.norm,
+            "xrtol": self.convergence_xtol_rel,
+            "disp": self.display,
+            "c1": self.armijo_condition,
+            "c2": self.curvature_condition,
         }
         raw_res = scipy.optimize.minimize(
             fun=problem.fun_and_jac, x0=x0, method="BFGS", jac=True, options=options

@@ -301,6 +315,7 @@ class ScipyConjugateGradient(Algorithm):
     convergence_gtol_abs: NonNegativeFloat = CONVERGENCE_GTOL_ABS
     stopping_maxiter: PositiveInt = STOPPING_MAXITER
     norm: NonNegativeFloat = np.inf
+    display: bool = False

     def _solve_internal_problem(
         self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]

@@ -309,6 +324,7 @@ def _solve_internal_problem(
             "gtol": self.convergence_gtol_abs,
             "maxiter": self.stopping_maxiter,
             "norm": self.norm,
+            "disp": self.display,
         }
         raw_res = scipy.optimize.minimize(
             fun=problem.fun_and_jac, x0=x0, method="CG", jac=True, options=options

@@ -334,13 +350,15 @@ def _solve_internal_problem(
 class ScipyNewtonCG(Algorithm):
     convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL
     stopping_maxiter: PositiveInt = STOPPING_MAXITER
+    display: bool = False

     def _solve_internal_problem(
         self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
     ) -> InternalOptimizeResult:
         options = {
             "xtol": self.convergence_xtol_rel,
             "maxiter": self.stopping_maxiter,
+            "disp": self.display,
         }
         raw_res = scipy.optimize.minimize(
             fun=problem.fun_and_jac,

@@ -371,6 +389,7 @@ class ScipyCOBYLA(Algorithm):
     convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL
     stopping_maxiter: PositiveInt = STOPPING_MAXITER
     trustregion_initial_radius: PositiveFloat | None = None
+    display: bool = False

     def _solve_internal_problem(
         self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]

@@ -384,6 +403,7 @@ def _solve_internal_problem(
         options = {
             "maxiter": self.stopping_maxiter,
             "rhobeg": radius,
+            "disp": self.display,
         }

         # cannot handle equality constraints

@@ -563,6 +583,7 @@ class ScipyTruncatedNewton(Algorithm):
     criterion_rescale_factor: float = -1
     # TODO: Check type hint for `func_min_estimate`
     func_min_estimate: float = 0
+    display: bool = False

     def _solve_internal_problem(
         self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]

@@ -578,6 +599,7 @@ def _solve_internal_problem(
             "eta": self.line_search_severity,
             "accuracy": self.finite_difference_precision,
             "rescale": self.criterion_rescale_factor,
+            "disp": self.display,
         }

         raw_res = scipy.optimize.minimize(

@@ -612,6 +634,7 @@ class ScipyTrustConstr(Algorithm):
     convergence_xtol_rel: NonNegativeFloat = CONVERGENCE_XTOL_REL
     stopping_maxiter: PositiveInt = STOPPING_MAXITER
     trustregion_initial_radius: PositiveFloat | None = None
+    display: bool = False

     def _solve_internal_problem(
         self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]

@@ -626,6 +649,7 @@ def _solve_internal_problem(
             "maxiter": self.stopping_maxiter,
             "xtol": self.convergence_xtol_rel,
             "initial_tr_radius": trustregion_initial_radius,
+            "disp": self.display,
         }

         # cannot handle equality constraints