Merged
4 changes: 2 additions & 2 deletions .pre-commit-config.yaml
@@ -58,7 +58,7 @@ repos:
#args: ["--write-changes"] # uncomment if you want to get automatic fixing

- repo: https://github.com/PyCQA/docformatter
rev: 06907d0267368b49b9180eed423fae5697c1e909 # todo: fix for docformatter after last 1.7.5
rev: v1.7.7
hooks:
- id: docformatter
additional_dependencies: [tomli]
@@ -70,7 +70,7 @@ repos:
- id: sphinx-lint

- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.11.4
rev: v0.12.2
hooks:
# try to fix what is possible
- id: ruff
2 changes: 1 addition & 1 deletion src/lightning/pytorch/core/optimizer.py
@@ -274,7 +274,7 @@ def _configure_schedulers_automatic_opt(schedulers: list, monitor: Optional[str]
scheduler["reduce_on_plateau"] = scheduler.get(
"reduce_on_plateau", isinstance(scheduler["scheduler"], optim.lr_scheduler.ReduceLROnPlateau)
)
if scheduler["reduce_on_plateau"] and scheduler.get("monitor", None) is None:
if scheduler["reduce_on_plateau"] and scheduler.get("monitor") is None:
raise MisconfigurationException(
"The lr scheduler dict must include a monitor when a `ReduceLROnPlateau` scheduler is used."
' For example: {"optimizer": optimizer, "lr_scheduler":'
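For context on what this validation enforces, here is a minimal configure_optimizers sketch (illustrative only, not part of the diff; the model and monitored metric name are assumptions) that satisfies the check by pairing ReduceLROnPlateau with a monitor key:

import torch
from lightning.pytorch.demos.boring_classes import BoringModel

class PlateauModel(BoringModel):  # hypothetical example model
    def configure_optimizers(self):
        optimizer = torch.optim.SGD(self.parameters(), lr=0.1)
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer)
        # `monitor` is required when a ReduceLROnPlateau scheduler is used;
        # omitting it triggers the MisconfigurationException shown above.
        return {
            "optimizer": optimizer,
            "lr_scheduler": {"scheduler": scheduler, "monitor": "val_loss"},
        }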
@@ -109,8 +109,8 @@ def configure_optimizers(self):
model.validation_step = None
callback = TestBackboneFinetuningWarningCallback(unfreeze_backbone_at_epoch=3, verbose=False)

trainer = Trainer(limit_train_batches=1, default_root_dir=tmp_path, callbacks=[callback, chk], max_epochs=2)
with pytest.warns(UserWarning, match="Did you init your optimizer in"):
trainer = Trainer(limit_train_batches=1, default_root_dir=tmp_path, callbacks=[callback, chk], max_epochs=2)
trainer.fit(model)

assert model.backbone.has_been_used
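This reordering (and the similar ones in the test files below) appears to narrow each pytest.warns block to the single call expected to emit the warning, in line with ruff's flake8-pytest-style checks. A generic sketch of the pattern, with hypothetical helper names:

import warnings

import pytest

def build_widget():  # hypothetical setup helper
    return object()

def run_widget(widget):  # hypothetical call that emits the warning
    warnings.warn("widget is deprecated", UserWarning)

def test_widget_warns():
    widget = build_widget()  # setup stays outside the warns block
    with pytest.warns(UserWarning, match="deprecated"):
        run_widget(widget)  # only the warning-emitting call lives inside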
11 changes: 6 additions & 5 deletions tests/tests_pytorch/profilers/test_profiler.py
@@ -73,9 +73,9 @@ def test_simple_profiler_durations(simple_profiler, action: str, expected: list)
np.testing.assert_allclose(simple_profiler.recorded_durations[action], expected, rtol=0.2)


def test_simple_profiler_overhead(simple_profiler, n_iter=5):
def test_simple_profiler_overhead(simple_profiler):
"""Ensure that the profiler doesn't introduce too much overhead during training."""
for _ in range(n_iter):
for _ in range(5):
with simple_profiler.profile("no-op"):
pass

@@ -284,8 +284,9 @@ def test_advanced_profiler_durations(advanced_profiler, action: str, expected: l


@pytest.mark.flaky(reruns=3)
def test_advanced_profiler_overhead(advanced_profiler, n_iter=5):
def test_advanced_profiler_overhead(advanced_profiler):
"""Ensure that the profiler doesn't introduce too much overhead during training."""
n_iter = 5
for _ in range(n_iter):
with advanced_profiler.profile("no-op"):
pass
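Dropping the n_iter=5 defaults looks like a fix for ruff's flake8-pytest-style rule against default values on test function parameters (PT028, to my knowledge). If the iteration count ever needs to stay configurable, parametrizing it would be the usual alternative; a sketch under that assumption, reusing the module's simple_profiler fixture:

import pytest

@pytest.mark.parametrize("n_iter", [5])
def test_profiler_overhead_parametrized(simple_profiler, n_iter):  # hypothetical variant
    for _ in range(n_iter):
        with simple_profiler.profile("no-op"):
            pass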
@@ -620,8 +621,8 @@ def test_pytorch_profiler_raises_warning_for_limited_steps(tmp_path, trainer_con
warning_cache.clear()
with pytest.warns(UserWarning, match="not enough steps to properly record traces"):
getattr(trainer, trainer_fn)(model)
assert trainer.profiler._schedule is None
warning_cache.clear()
assert trainer.profiler._schedule is None
warning_cache.clear()


def test_profile_callbacks(tmp_path):
@@ -234,9 +234,9 @@ def on_test_epoch_end(self):


@pytest.mark.parametrize("suffix", [False, True])
def test_multi_dataloaders_add_suffix_properly(tmp_path, suffix):
def test_multi_dataloaders_add_suffix_properly(suffix, tmp_path):
class TestModel(BoringModel):
def test_step(self, batch, batch_idx, dataloader_idx=0):
def test_step(self, batch, batch_idx, dataloader_idx=0): # noqa: PT028
out = super().test_step(batch, batch_idx)
self.log("test_loss", out["y"], on_step=True, on_epoch=True)
return out
@@ -441,7 +441,7 @@ def on_test_epoch_end(self, _, pl_module):
class TestModel(BoringModel):
seen_losses = {i: [] for i in range(num_dataloaders)}

def test_step(self, batch, batch_idx, dataloader_idx=0):
def test_step(self, batch, batch_idx, dataloader_idx=0): # noqa: PT028
loss = super().test_step(batch, batch_idx)["y"]
self.log("test_loss", loss)
self.seen_losses[dataloader_idx].append(loss)
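The # noqa: PT028 markers silence that same parameter-default rule. Here test_step is a LightningModule hook that merely starts with test_, not a pytest test, and the dataloader_idx=0 default mirrors the documented hook signature, so suppressing the rule rather than removing the default seems intentional. Roughly (illustrative only, not from the diff):

from lightning.pytorch.demos.boring_classes import BoringModel

class MultiLoaderModel(BoringModel):  # illustrative sketch
    def test_step(self, batch, batch_idx, dataloader_idx=0):  # noqa: PT028
        # dataloader_idx is only passed by the loop when several test
        # dataloaders are configured, hence the default of 0.
        return super().test_step(batch, batch_idx)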
22 changes: 10 additions & 12 deletions tests/tests_pytorch/trainer/test_config_validator.py
@@ -16,7 +16,6 @@
import pytest
import torch

from lightning.fabric.utilities.warnings import PossibleUserWarning
from lightning.pytorch import LightningDataModule, LightningModule, Trainer
from lightning.pytorch.demos.boring_classes import BoringModel, RandomDataset
from lightning.pytorch.trainer.configuration_validator import (
@@ -46,20 +45,19 @@ def test_wrong_configure_optimizers(tmp_path):
trainer.fit(model)


def test_fit_val_loop_config(tmp_path):
@pytest.mark.parametrize("model_attrib", ["validation_step", "val_dataloader"])
def test_fit_val_loop_config(model_attrib, tmp_path):
"""When either val loop or val data are missing raise warning."""
trainer = Trainer(default_root_dir=tmp_path, max_epochs=1)

# no val data has val loop
with pytest.warns(UserWarning, match=r"You passed in a `val_dataloader` but have no `validation_step`"):
model = BoringModel()
model.validation_step = None
trainer.fit(model)

# has val loop but no val data
with pytest.warns(PossibleUserWarning, match=r"You defined a `validation_step` but have no `val_dataloader`"):
model = BoringModel()
model.val_dataloader = None
model = BoringModel()
setattr(model, model_attrib, None)
match_msg = (
r"You passed in a `val_dataloader` but have no `validation_step`"
if model_attrib == "validation_step"
else "You defined a `validation_step` but have no `val_dataloader`"
)
with pytest.warns(UserWarning, match=match_msg):
trainer.fit(model)


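Removing the PossibleUserWarning import above works because, as far as I know, PossibleUserWarning subclasses UserWarning, so pytest.warns(UserWarning, ...) still catches it. Schematically (the local class is a stand-in, not Lightning's):

import warnings

import pytest

class PossibleUserWarning(UserWarning):  # stand-in for lightning.fabric.utilities.warnings.PossibleUserWarning
    pass

def test_subclass_warning_is_caught():
    with pytest.warns(UserWarning, match="no `val_dataloader`"):
        warnings.warn("You defined a `validation_step` but have no `val_dataloader`", PossibleUserWarning)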
63 changes: 35 additions & 28 deletions tests/tests_pytorch/trainer/test_dataloaders.py
@@ -545,13 +545,14 @@ def test_warning_with_few_workers(_, tmp_path, ckpt_path, stage):

trainer = Trainer(default_root_dir=tmp_path, max_epochs=1, limit_val_batches=0.1, limit_train_batches=0.2)

with pytest.warns(UserWarning, match=f"The '{stage}_dataloader' does not have many workers"):
if stage == "test":
if ckpt_path in ("specific", "best"):
trainer.fit(model, train_dataloaders=train_dl, val_dataloaders=val_dl)
ckpt_path = trainer.checkpoint_callback.best_model_path if ckpt_path == "specific" else ckpt_path
if stage == "test":
if ckpt_path in ("specific", "best"):
trainer.fit(model, train_dataloaders=train_dl, val_dataloaders=val_dl)
ckpt_path = trainer.checkpoint_callback.best_model_path if ckpt_path == "specific" else ckpt_path
with pytest.warns(UserWarning, match=f"The '{stage}_dataloader' does not have many workers"):
trainer.test(model, dataloaders=train_dl, ckpt_path=ckpt_path)
else:
else:
with pytest.warns(UserWarning, match=f"The '{stage}_dataloader' does not have many workers"):
trainer.fit(model, train_dataloaders=train_dl, val_dataloaders=val_dl)


@@ -579,16 +580,15 @@ def training_step(self, batch, batch_idx):

trainer = Trainer(default_root_dir=tmp_path, max_epochs=1, limit_val_batches=0.1, limit_train_batches=0.2)

with pytest.warns(
UserWarning,
match=f"The '{stage}_dataloader' does not have many workers",
):
if stage == "test":
if ckpt_path in ("specific", "best"):
trainer.fit(model, train_dataloaders=train_multi_dl, val_dataloaders=val_multi_dl)
ckpt_path = trainer.checkpoint_callback.best_model_path if ckpt_path == "specific" else ckpt_path
if stage == "test":
if ckpt_path in ("specific", "best"):
trainer.fit(model, train_dataloaders=train_multi_dl, val_dataloaders=val_multi_dl)
ckpt_path = trainer.checkpoint_callback.best_model_path if ckpt_path == "specific" else ckpt_path

with pytest.warns(UserWarning, match=f"The '{stage}_dataloader' does not have many workers"):
trainer.test(model, dataloaders=test_multi_dl, ckpt_path=ckpt_path)
else:
else:
with pytest.warns(UserWarning, match=f"The '{stage}_dataloader' does not have many workers"):
trainer.fit(model, train_dataloaders=train_multi_dl, val_dataloaders=val_multi_dl)


@@ -669,28 +669,35 @@ def test_auto_add_worker_init_fn_distributed(tmp_path, monkeypatch):
trainer.fit(model, train_dataloaders=dataloader)


def test_warning_with_small_dataloader_and_logging_interval(tmp_path):
@pytest.mark.parametrize("log_interval", [2, 11])
def test_warning_with_small_dataloader_and_logging_interval(log_interval, tmp_path):
"""Test that a warning message is shown if the dataloader length is too short for the chosen logging interval."""
model = BoringModel()
dataloader = DataLoader(RandomDataset(32, length=10))
model.train_dataloader = lambda: dataloader

with pytest.warns(UserWarning, match=r"The number of training batches \(10\) is smaller than the logging interval"):
trainer = Trainer(default_root_dir=tmp_path, max_epochs=1, log_every_n_steps=11, logger=CSVLogger(tmp_path))
trainer = Trainer(
default_root_dir=tmp_path,
max_epochs=1,
log_every_n_steps=log_interval,
limit_train_batches=1 if log_interval < 10 else None,
logger=CSVLogger(tmp_path),
)
with pytest.warns(
UserWarning,
match=rf"The number of training batches \({log_interval - 1}\) is smaller than the logging interval",
):
trainer.fit(model)

with pytest.warns(UserWarning, match=r"The number of training batches \(1\) is smaller than the logging interval"):
trainer = Trainer(
default_root_dir=tmp_path,
max_epochs=1,
log_every_n_steps=2,
limit_train_batches=1,
logger=CSVLogger(tmp_path),
)
trainer.fit(model)
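For anyone puzzling over the log_interval - 1 in the new match pattern: with the 10-sample RandomDataset (batch size 1), the two parametrized cases happen to line up so that the number of training batches is always one less than the logging interval. Spelled out:

# log_interval=2  -> limit_train_batches=1      -> 1 batch   == 2 - 1
# log_interval=11 -> no limit, len(dataset)=10  -> 10 batches == 11 - 1
for log_interval in (2, 11):
    n_batches = 1 if log_interval < 10 else 10
    assert n_batches == log_interval - 1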

def test_warning_with_small_dataloader_and_fast_dev_run(tmp_path):
"""Test that a warning message is shown if the dataloader length is too short for the chosen logging interval."""
model = BoringModel()
dataloader = DataLoader(RandomDataset(32, length=10))
model.train_dataloader = lambda: dataloader

trainer = Trainer(default_root_dir=tmp_path, fast_dev_run=True, log_every_n_steps=2)
with no_warning_call(UserWarning, match="The number of training batches"):
trainer = Trainer(default_root_dir=tmp_path, fast_dev_run=True, log_every_n_steps=2)
trainer.fit(model)


4 changes: 3 additions & 1 deletion tests/tests_pytorch/utilities/test_model_summary.py
@@ -18,6 +18,7 @@
import pytest
import torch
import torch.nn as nn
from lightning_utilities.test.warning import no_warning_call

from lightning.pytorch import LightningModule, Trainer
from lightning.pytorch.demos.boring_classes import BoringModel
@@ -348,7 +349,7 @@ def test_model_size_warning_on_unsupported_precision(tmp_path):

with pytest.warns(UserWarning, match="Precision .* is not supported by the model summary.*"):
summary = summarize(model)
assert model.pre_calculated_model_size == summary.model_size
assert model.pre_calculated_model_size == summary.model_size


def test_lazy_model_summary():
@@ -358,6 +359,7 @@ def test_lazy_model_summary():

with pytest.warns(UserWarning, match="The total number of parameters detected may be inaccurate."):
assert summary.total_parameters == 0
with no_warning_call():
assert summary.trainable_parameters == 0
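no_warning_call (the new import near the top of this file) is, to my understanding, the inverse of pytest.warns: the block fails if a matching warning is emitted, which appears to be why the second assertion is now wrapped in it rather than left inside pytest.warns. A minimal usage sketch with a hypothetical helper:

import warnings

from lightning_utilities.test.warning import no_warning_call

def add(a, b):  # hypothetical helper that should stay silent
    return a + b

def test_add_emits_no_warning():
    with no_warning_call():  # fails the test if any warning is raised inside
        assert add(1, 1) == 2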

