diff --git a/torchtnt/framework/callbacks/dcp_saver.py b/torchtnt/framework/callbacks/dcp_saver.py
index 006ac8e4d0..6b76a4b6f7 100644
--- a/torchtnt/framework/callbacks/dcp_saver.py
+++ b/torchtnt/framework/callbacks/dcp_saver.py
@@ -221,7 +221,7 @@ def _wait(self, log_warning: bool = True) -> None:
         computes and logs the time spent waiting on the previous checkpoint to finish, and a toggable
         warning for the user to modify checkpointing frequency.

-        If the previous checkpoing has already finished, this is a no-op.
+        If the previous checkpoint has already finished, this is a no-op.

         Args:
             log_warning: Toggle for logging a warning to the user to modify checkpointing frequency. Sometimes
@@ -323,7 +323,7 @@ def restore_with_id(
                 Note: If torch.distributed is available and a process group is initialized, dcp assumes the intention is to save/load checkpoints in distributed fashion.
             restore_options: Controls what to filter when restoring the state.
             knob_options: Additional keyword options for StorageWriter and StorageReader
-            planner: Instance of LoadPlanner. If this is not specificed, the default planner will be used. (Default: ``None``)
+            planner: Instance of LoadPlanner. If this is not specified, the default planner will be used. (Default: ``None``)
             storage_reader: Instance of StorageReader used to perform reads. If this is not specified, it will automatically infer
                 the reader based on the checkpoint_id. If checkpoint_id is also None, an exception will be raised. (Default: ``None``)
         """
@@ -419,7 +419,7 @@ def _maybe_add_dataloader_to_app_state(
             ignore_phases.add(phase)

         elif not isinstance(dl, Stateful):
-            logger.warn(
+            logger.warning(
                 f"dataloader for {phase} phase was passed to `restore` but it does not implement the Stateful protocol to load states"
             )
             ignore_phases.add(phase)
@@ -448,7 +448,7 @@ def _maybe_add_dataloader_to_app_state(
         for phase in candidate_dataloaders.keys()
         if _PHASE_DL_STATE_KEY_MAPPING[phase] not in app_state
     ]:
-        logger.warn(
+        logger.warning(
             f"dataloader ({','.join(str(k) for k in dl_missing_in_ckpt)}) was passed to `restore` "
             "but no dataloader exists in checkpoint metadata."
         )
diff --git a/torchtnt/utils/memory_snapshot_profiler.py b/torchtnt/utils/memory_snapshot_profiler.py
index a94890c390..f58ae84ebf 100644
--- a/torchtnt/utils/memory_snapshot_profiler.py
+++ b/torchtnt/utils/memory_snapshot_profiler.py
@@ -147,7 +147,7 @@ def start(self) -> None:
         if self.is_started:
             return
         if not torch.cuda.is_available():
-            logger.warn("CUDA unavailable. Not recording memory history.")
+            logger.warning("CUDA unavailable. Not recording memory history.")
             return

         logger.info("Starting to record memory history.")
@@ -158,7 +158,7 @@ def stop(self) -> None:
         if not self.is_started:
             return
         if not torch.cuda.is_available():
-            logger.warn("CUDA unavailable. Not recording memory history.")
+            logger.warning("CUDA unavailable. Not recording memory history.")
             return

         logger.info("Stopping recording memory history.")
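
Note on the logging change (not part of the patch above): `Logger.warn` is a deprecated alias of `Logger.warning` in the standard library's `logging` module, which is presumably why these callsites switch to `logger.warning`. Below is a minimal sketch of the difference, assuming a CPython version where the deprecated alias still exists; the `demo` logger name is only illustrative.

import logging
import warnings

logger = logging.getLogger("demo")

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    logger.warn("old spelling")      # deprecated alias; forwards to warning()
    logger.warning("new spelling")   # preferred spelling

# Only the deprecated alias should emit a DeprecationWarning.
print([type(w.message).__name__ for w in caught])  # expect: ['DeprecationWarning']

Both calls produce the same log record, so the patch changes no runtime behavior; it only avoids the deprecation noise.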